Merge branch 'main' into opentimestamps

pull/247/head
Patrick Robertson 2025-03-14 12:36:03 +00:00
commit b8da7607e8
167 changed files with 3227 additions and 2264 deletions

24
.github/workflows/ruff.yaml vendored 100644
View file

@ -0,0 +1,24 @@
name: Ruff Formatting & Linting
on:
push:
branches: [ main ]
pull_request:
branches: [ main ]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Python
uses: actions/setup-python@v5
with:
python-version: "3.11"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install ruff
- name: Run Ruff
run: ruff check --output-format=github . && ruff format --check
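For reference, the same checks can be reproduced locally before pushing; a minimal sketch that assumes Ruff is installed in your environment (e.g. via the project's dev dependencies):
```shell
# Mirror the CI step: lint with GitHub-style annotations, then verify formatting only
ruff check --output-format=github .
ruff format --check
```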

View file

@ -0,0 +1,10 @@
# Run Ruff formatter on commits.
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.9.10
hooks:
- id: ruff-format
# Runs Ruff linting - checks only, without fixing, but blocks the commit if errors are found.
# - id: ruff
# args: ["--output-format=concise"]

79
Makefile 100644
View file

@ -0,0 +1,79 @@
# Variables
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = docs/source
BUILDDIR = docs/_build
.PHONY: help
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
@echo "Additional Commands:"
@echo " make test - Run all tests in 'tests/' with pytest"
@echo " make ruff-check - Run Ruff linting and formatting checks (safe)"
@echo " make ruff-clean - Auto-fix Ruff linting and formatting issues"
@echo " make docs - Generate documentation (same as 'make html')"
@echo " make clean-docs - Remove generated docs"
@echo " make docker-build - Build the Auto Archiver Docker image"
@echo " make docker-compose - Run Auto Archiver with Docker Compose"
@echo " make docker-compose-rebuild - Rebuild and run Auto Archiver with Docker Compose"
@echo " make show-docs - Build and open the documentation in a browser"
.PHONY: test
test:
@echo "Running tests..."
@pytest tests --disable-warnings
.PHONY: ruff-check
ruff-check:
@echo "Checking code style with Ruff (safe)..."
@ruff check .
.PHONY: ruff-clean
ruff-clean:
@echo "Fixing lint and formatting issues with Ruff..."
@ruff check . --fix
@ruff format .
.PHONY: docs
docs:
@echo "Building documentation..."
@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)"
.PHONY: clean-docs
clean-docs:
@echo "Cleaning up generated documentation files..."
@$(SPHINXBUILD) -M clean "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
@rm -rf "$(SOURCEDIR)/autoapi/" "$(SOURCEDIR)/modules/autogen/"
@echo "Cleanup complete."
.PHONY: show-docs
show-docs:
@echo "Opening documentation in browser..."
@open "$(BUILDDIR)/html/index.html"
.PHONY: docker-build
docker-build:
@echo "Building local Auto Archiver Docker image..."
@docker compose build # Uses the same build context as docker-compose.yml
.PHONY: docker-compose
docker-compose:
@echo "Running Auto Archiver with Docker Compose..."
@docker compose up
.PHONY: docker-compose-rebuild
docker-compose-rebuild:
@echo "Rebuilding and running Auto Archiver with Docker Compose..."
@docker compose up --build
# Catch-all for Sphinx commands
.PHONY: Makefile
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

View file

@ -1,20 +0,0 @@
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = source
BUILDDIR = _build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

View file

@ -10,12 +10,12 @@ MODULES_FOLDER = Path(__file__).parent.parent.parent.parent / "src" / "auto_arch
SAVE_FOLDER = Path(__file__).parent.parent / "source" / "modules" / "autogen"
type_color = {
'feeder': "<span style='color: #FFA500'>[feeder](/core_modules.md#feeder-modules)</a></span>",
'extractor': "<span style='color: #00FF00'>[extractor](/core_modules.md#extractor-modules)</a></span>",
'enricher': "<span style='color: #0000FF'>[enricher](/core_modules.md#enricher-modules)</a></span>",
'database': "<span style='color: #FF00FF'>[database](/core_modules.md#database-modules)</a></span>",
'storage': "<span style='color: #FFFF00'>[storage](/core_modules.md#storage-modules)</a></span>",
'formatter': "<span style='color: #00FFFF'>[formatter](/core_modules.md#formatter-modules)</a></span>",
"feeder": "<span style='color: #FFA500'>[feeder](/core_modules.md#feeder-modules)</a></span>",
"extractor": "<span style='color: #00FF00'>[extractor](/core_modules.md#extractor-modules)</a></span>",
"enricher": "<span style='color: #0000FF'>[enricher](/core_modules.md#enricher-modules)</a></span>",
"database": "<span style='color: #FF00FF'>[database](/core_modules.md#database-modules)</a></span>",
"storage": "<span style='color: #FFFF00'>[storage](/core_modules.md#storage-modules)</a></span>",
"formatter": "<span style='color: #00FFFF'>[formatter](/core_modules.md#formatter-modules)</a></span>",
}
TABLE_HEADER = ("Option", "Description", "Default", "Type")
@ -34,6 +34,7 @@ steps:
"""
def generate_module_docs():
yaml = YAML()
SAVE_FOLDER.mkdir(exist_ok=True)
@ -48,49 +49,49 @@ def generate_module_docs():
# generate the markdown file from the __manifest__.py file.
manifest = module.manifest
for type in manifest['type']:
for type in manifest["type"]:
modules_by_type.setdefault(type, []).append(module)
description = "\n".join(l.lstrip() for l in manifest['description'].split("\n"))
types = ", ".join(type_color[t] for t in manifest['type'])
description = "\n".join(line.lstrip() for line in manifest["description"].split("\n"))
types = ", ".join(type_color[t] for t in manifest["type"])
readme_str = f"""
# {manifest['name']}
# {manifest["name"]}
```{{admonition}} Module type
{types}
```
{description}
"""
steps_str = "\n".join(f" {t}s:\n - {module.name}" for t in manifest['type'])
steps_str = "\n".join(f" {t}s:\n - {module.name}" for t in manifest["type"])
if not manifest['configs']:
if not manifest["configs"]:
config_string = f"# No configuration options for {module.name}.*\n"
else:
config_table = header_row
config_yaml = {}
global_yaml[module.name] = CommentedMap()
global_yaml.yaml_set_comment_before_after_key(module.name, f"\n\n{module.display_name} configuration options")
global_yaml.yaml_set_comment_before_after_key(
module.name, f"\n\n{module.display_name} configuration options"
)
for key, value in manifest['configs'].items():
type = value.get('type', 'string')
if type == 'json_loader':
value['type'] = 'json'
elif type == 'str':
for key, value in manifest["configs"].items():
type = value.get("type", "string")
if type == "json_loader":
value["type"] = "json"
elif type == "str":
type = "string"
default = value.get('default', '')
default = value.get("default", "")
config_yaml[key] = default
global_yaml[module.name][key] = default
if value.get('help', ''):
global_yaml[module.name].yaml_add_eol_comment(value.get('help', ''), key)
if value.get("help", ""):
global_yaml[module.name].yaml_add_eol_comment(value.get("help", ""), key)
help = "**Required**. " if value.get('required', False) else "Optional. "
help += value.get('help', '')
help = "**Required**. " if value.get("required", False) else "Optional. "
help += value.get("help", "")
config_table += f"| `{module.name}.{key}` | {help} | {value.get('default', '')} | {type} |\n"
global_table += f"| `{module.name}.{key}` | {help} | {default} | {type} |\n"
readme_str += "\n## Configuration Options\n"
@ -98,18 +99,18 @@ def generate_module_docs():
config_string = io.BytesIO()
yaml.dump({module.name: config_yaml}, config_string)
config_string = config_string.getvalue().decode('utf-8')
config_string = config_string.getvalue().decode("utf-8")
yaml_string = EXAMPLE_YAML.format(steps_str=steps_str, config_string=config_string)
readme_str += f"```{{code}} yaml\n{yaml_string}\n```\n"
if manifest['configs']:
if manifest["configs"]:
readme_str += "\n### Command Line:\n"
readme_str += config_table
# add a link to the autodoc refs
readme_str += f"\n[API Reference](../../../autoapi/{module.name}/index)\n"
# create the module.type folder, use the first type just for where to store the file
for type in manifest['type']:
for type in manifest["type"]:
type_folder = SAVE_FOLDER / type
type_folder.mkdir(exist_ok=True)
with open(type_folder / f"{module.name}.md", "w") as f:
@ -117,10 +118,10 @@ def generate_module_docs():
f.write(readme_str)
generate_index(modules_by_type)
del global_yaml['placeholder']
del global_yaml["placeholder"]
global_string = io.BytesIO()
global_yaml = yaml.dump(global_yaml, global_string)
global_string = global_string.getvalue().decode('utf-8')
global_string = global_string.getvalue().decode("utf-8")
global_yaml = f"```yaml\n{global_string}\n```"
with open(SAVE_FOLDER / "configs_cheatsheet.md", "w") as f:
f.write("### Configuration File\n" + global_yaml + "\n### Command Line\n" + global_table)

View file

@ -5,7 +5,7 @@ import os
from importlib.metadata import metadata
from datetime import datetime
sys.path.append(os.path.abspath('../scripts'))
sys.path.append(os.path.abspath("../scripts"))
from scripts import generate_module_docs
from auto_archiver.version import __version__
@ -20,7 +20,7 @@ project = package_metadata["name"]
copyright = str(datetime.now().year)
author = "Bellingcat"
release = package_metadata["version"]
language = 'en'
language = "en"
# -- General configuration ---------------------------------------------------
extensions = [
@ -34,17 +34,19 @@ extensions = [
# 'sphinx.ext.autosummary', # Summarize module/class/function docs
]
templates_path = ['_templates']
templates_path = ["_templates"]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ""]
# -- AutoAPI Configuration ---------------------------------------------------
autoapi_type = 'python'
autoapi_type = "python"
autoapi_dirs = ["../../src/auto_archiver/core/", "../../src/auto_archiver/utils/"]
# get all the modules and add them to the autoapi_dirs
autoapi_dirs.extend([f"../../src/auto_archiver/modules/{m}" for m in os.listdir("../../src/auto_archiver/modules")])
autodoc_typehints = "signature" # Include type hints in the signature
autoapi_ignore = ["*/version.py", ] # Ignore specific modules
autoapi_ignore = [
"*/version.py",
] # Ignore specific modules
autoapi_keep_files = True # Option to retain intermediate JSON files for debugging
autoapi_add_toctree_entry = True # Include API docs in the TOC
autoapi_python_use_implicit_namespaces = True
@ -76,7 +78,7 @@ source_suffix = {
}
# -- Options for HTML output -------------------------------------------------
html_theme = 'sphinx_book_theme'
html_theme = "sphinx_book_theme"
html_static_path = ["../_static"]
html_css_files = ["custom.css"]
html_title = f"Auto Archiver v{__version__}"
@ -87,7 +89,6 @@ html_theme_options = {
}
copybutton_prompt_text = r">>> |\.\.\."
copybutton_prompt_is_regexp = True
copybutton_only_copy_prompt_lines = False

View file

@ -32,4 +32,5 @@ testing
docs
release
settings_page
style_guide
```

View file

@ -0,0 +1,67 @@
# Style Guide
The project uses [Ruff](https://docs.astral.sh/ruff/) for linting and formatting.
Our style configurations are set in the `pyproject.toml` file. If needed, you can modify them there.
### **Formatting (Auto-Run Before Commit) 🛠️**
We provide a pre-commit hook that runs the formatter before each commit.
Set it up once locally and it will run automatically whenever you commit changes.
```shell
poetry run pre-commit install
```
Alternatively, Ruff can be [integrated with most editors](https://docs.astral.sh/ruff/editors/setup/) for real-time formatting.
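Once installed, the hooks can also be run on demand; a small sketch using standard pre-commit commands against all files rather than just staged changes:
```shell
# Run every configured hook (currently ruff-format) across the whole repository
poetry run pre-commit run --all-files
```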
### **Linting (Check Before Pushing) 🔍**
We recommend you also run the linter before pushing code.
We have [Makefile](../../../Makefile) commands to run common tasks.
Tip: on Windows you may need to install `make` first, or you can run the Ruff commands directly, as shown below.
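If `make` is unavailable, the underlying commands from the Makefile targets can be run directly; a sketch mirroring the `ruff-check` and `ruff-clean` targets:
```shell
# Equivalent of `make ruff-check`: report issues without changing files
ruff check .
# Equivalent of `make ruff-clean`: apply autofixes and reformat
ruff check . --fix
ruff format .
```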
**Lint Check:** This outputs a report of any issues found, without attempting to fix them:
```shell
make ruff-check
```
Tip: To see a more detailed linting report, you can remove the following line from the `pyproject.toml` file:
```toml
[tool.ruff]
# Remove this for a more detailed lint report
output-format = "concise"
```
**Lint Fix:** This command attempts to fix the issues found by the lint check.
Note that not all warnings can be fixed automatically.
⚠️ Warning: This can cause breaking changes. ⚠️
Most fixes are safe, but some non-standard practices such as dynamic loading are not picked up by linters. Make sure to review any modifications made by this command before committing them.
```shell
make ruff-clean
```
### **Changing Configurations ⚙️**
Our rules are quite lenient for general use, but you can run checks with additional rules to surface more nuanced issues for manual review.
Check out the [Ruff documentation](https://docs.astral.sh/ruff/configuration/) for the full list of rules.
For example, you can extend the selected lint rules in the `pyproject.toml` file:
```toml
[tool.ruff.lint]
# Extend the rules to check for by adding them to this option:
# See documentation for more details: https://docs.astral.sh/ruff/rules/
extend-select = ["B"]
```
Then re-run the `make ruff-check` command to see the new rules in action.
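The extra rules can also be tried from the command line without editing `pyproject.toml`; a sketch, assuming a Ruff version that supports `--extend-select`:
```shell
# One-off check with the bugbear ("B") rules added on top of the configured ones
ruff check --extend-select B .
```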

View file

@ -51,6 +51,7 @@ The invocations below will run the auto-archiver Docker image using a configurat
docker run --rm -v $PWD/secrets:/app/secrets -v $PWD/local_archive:/app/local_archive bellingcat/auto-archiver
# uses the same configuration, but with the `gsheet_feeder`, a header on row 2 and with some different column names
# Note this expects you to have followed the [Google Sheets setup](how_to/google_sheets.md) and added your service_account.json to the `secrets/` folder
# note that columns is a dictionary, so you need to pass it as JSON; only the values you provide will be overridden
docker run --rm -v $PWD/secrets:/app/secrets -v $PWD/local_archive:/app/local_archive bellingcat/auto-archiver --feeders=gsheet_feeder --gsheet_feeder.sheet="use it on another sheets doc" --gsheet_feeder.header=2 --gsheet_feeder.columns='{"url": "link"}'
# Runs auto-archiver for the first time, but in 'full' mode, enabling all modules to get a full settings file

189
poetry.lock generated
View file

@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand.
# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
[[package]]
name = "accessible-pygments"
@ -51,7 +51,7 @@ typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""}
[package.extras]
doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"]
test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21)"]
test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\" and python_version < \"3.14\""]
trio = ["trio (>=0.26.1)"]
[[package]]
@ -94,12 +94,12 @@ files = [
]
[package.extras]
benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"]
tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""]
[[package]]
name = "authlib"
@ -145,7 +145,7 @@ files = [
]
[package.extras]
dev = ["backports.zoneinfo", "freezegun (>=1.0,<2.0)", "jinja2 (>=3.0)", "pytest (>=6.0)", "pytest-cov", "pytz", "setuptools", "tzdata"]
dev = ["backports.zoneinfo ; python_version < \"3.9\"", "freezegun (>=1.0,<2.0)", "jinja2 (>=3.0)", "pytest (>=6.0)", "pytest-cov", "pytz", "setuptools", "tzdata ; sys_platform == \"win32\""]
[[package]]
name = "beautifulsoup4"
@ -481,6 +481,18 @@ files = [
[package.dependencies]
pycparser = "*"
[[package]]
name = "cfgv"
version = "3.4.0"
description = "Validate configuration and produce human readable error messages."
optional = false
python-versions = ">=3.8"
groups = ["dev"]
files = [
{file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"},
{file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"},
]
[[package]]
name = "charset-normalizer"
version = "3.4.1"
@ -696,6 +708,18 @@ calendars = ["convertdate (>=2.2.1)", "hijridate"]
fasttext = ["fasttext (>=0.9.1)", "numpy (>=1.19.3,<2)"]
langdetect = ["langdetect (>=1.0.0)"]
[[package]]
name = "distlib"
version = "0.3.9"
description = "Distribution utilities"
optional = false
python-versions = "*"
groups = ["dev"]
files = [
{file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"},
{file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"},
]
[[package]]
name = "docutils"
version = "0.21.2"
@ -742,6 +766,23 @@ future = "*"
[package.extras]
dev = ["Sphinx (==2.1.0)", "future (==0.17.1)", "numpy (==1.16.4)", "pytest (==4.6.1)", "pytest-mock (==1.10.4)", "tox (==3.12.1)"]
[[package]]
name = "filelock"
version = "3.17.0"
description = "A platform independent file lock."
optional = false
python-versions = ">=3.9"
groups = ["dev"]
files = [
{file = "filelock-3.17.0-py3-none-any.whl", hash = "sha256:533dc2f7ba78dc2f0f531fc6c4940addf7b70a481e269a5a3b93be94ffbe8338"},
{file = "filelock-3.17.0.tar.gz", hash = "sha256:ee4e77401ef576ebb38cd7f13b9b28893194acc20a8e68e18730ba9c0e54660e"},
]
[package.extras]
docs = ["furo (>=2024.8.6)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"]
testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-asyncio (>=0.25.2)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.28.1)"]
typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""]
[[package]]
name = "future"
version = "1.0.0"
@ -775,7 +816,7 @@ requests = ">=2.18.0,<3.0.0.dev0"
[package.extras]
async-rest = ["google-auth[aiohttp] (>=2.35.0,<3.0.dev0)"]
grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0)"]
grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev) ; python_version >= \"3.11\"", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0) ; python_version >= \"3.11\""]
grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"]
grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"]
@ -919,6 +960,21 @@ files = [
[package.dependencies]
pyparsing = {version = ">=2.4.2,<3.0.0 || >3.0.0,<3.0.1 || >3.0.1,<3.0.2 || >3.0.2,<3.0.3 || >3.0.3,<4", markers = "python_version > \"3.0\""}
[[package]]
name = "identify"
version = "2.6.9"
description = "File identification library for Python"
optional = false
python-versions = ">=3.9"
groups = ["dev"]
files = [
{file = "identify-2.6.9-py2.py3-none-any.whl", hash = "sha256:c98b4322da415a8e5a70ff6e51fbc2d2932c015532d77e9f8537b4ba7813b150"},
{file = "identify-2.6.9.tar.gz", hash = "sha256:d40dfe3142a1421d8518e3d3985ef5ac42890683e32306ad614a29490abeb6bf"},
]
[package.extras]
license = ["ukkonen"]
[[package]]
name = "idna"
version = "3.10"
@ -1059,7 +1115,7 @@ colorama = {version = ">=0.3.4", markers = "sys_platform == \"win32\""}
win32-setctime = {version = ">=1.0.0", markers = "sys_platform == \"win32\""}
[package.extras]
dev = ["Sphinx (==8.1.3)", "build (==1.2.2)", "colorama (==0.4.5)", "colorama (==0.4.6)", "exceptiongroup (==1.1.3)", "freezegun (==1.1.0)", "freezegun (==1.5.0)", "mypy (==v0.910)", "mypy (==v0.971)", "mypy (==v1.13.0)", "mypy (==v1.4.1)", "myst-parser (==4.0.0)", "pre-commit (==4.0.1)", "pytest (==6.1.2)", "pytest (==8.3.2)", "pytest-cov (==2.12.1)", "pytest-cov (==5.0.0)", "pytest-cov (==6.0.0)", "pytest-mypy-plugins (==1.9.3)", "pytest-mypy-plugins (==3.1.0)", "sphinx-rtd-theme (==3.0.2)", "tox (==3.27.1)", "tox (==4.23.2)", "twine (==6.0.1)"]
dev = ["Sphinx (==8.1.3) ; python_version >= \"3.11\"", "build (==1.2.2) ; python_version >= \"3.11\"", "colorama (==0.4.5) ; python_version < \"3.8\"", "colorama (==0.4.6) ; python_version >= \"3.8\"", "exceptiongroup (==1.1.3) ; python_version >= \"3.7\" and python_version < \"3.11\"", "freezegun (==1.1.0) ; python_version < \"3.8\"", "freezegun (==1.5.0) ; python_version >= \"3.8\"", "mypy (==v0.910) ; python_version < \"3.6\"", "mypy (==v0.971) ; python_version == \"3.6\"", "mypy (==v1.13.0) ; python_version >= \"3.8\"", "mypy (==v1.4.1) ; python_version == \"3.7\"", "myst-parser (==4.0.0) ; python_version >= \"3.11\"", "pre-commit (==4.0.1) ; python_version >= \"3.9\"", "pytest (==6.1.2) ; python_version < \"3.8\"", "pytest (==8.3.2) ; python_version >= \"3.8\"", "pytest-cov (==2.12.1) ; python_version < \"3.8\"", "pytest-cov (==5.0.0) ; python_version == \"3.8\"", "pytest-cov (==6.0.0) ; python_version >= \"3.9\"", "pytest-mypy-plugins (==1.9.3) ; python_version >= \"3.6\" and python_version < \"3.8\"", "pytest-mypy-plugins (==3.1.0) ; python_version >= \"3.8\"", "sphinx-rtd-theme (==3.0.2) ; python_version >= \"3.11\"", "tox (==3.27.1) ; python_version < \"3.8\"", "tox (==4.23.2) ; python_version >= \"3.8\"", "twine (==6.0.1) ; python_version >= \"3.11\""]
[[package]]
name = "markdown-it-py"
@ -1260,6 +1316,18 @@ rtd = ["ipython", "sphinx (>=7)", "sphinx-autodoc2 (>=0.5.0,<0.6.0)", "sphinx-bo
testing = ["beautifulsoup4", "coverage[toml]", "defusedxml", "pygments (<2.19)", "pytest (>=8,<9)", "pytest-cov", "pytest-param-files (>=0.6.0,<0.7.0)", "pytest-regressions", "sphinx-pytest"]
testing-docutils = ["pygments", "pytest (>=8,<9)", "pytest-param-files (>=0.6.0,<0.7.0)"]
[[package]]
name = "nodeenv"
version = "1.9.1"
description = "Node.js virtual environment builder"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
groups = ["dev"]
files = [
{file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"},
{file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"},
]
[[package]]
name = "numpy"
version = "2.1.3"
@ -1526,9 +1594,26 @@ docs = ["furo", "olefile", "sphinx (>=8.1)", "sphinx-copybutton", "sphinx-inline
fpx = ["olefile"]
mic = ["olefile"]
tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout", "trove-classifiers (>=2024.10.12)"]
typing = ["typing-extensions"]
typing = ["typing-extensions ; python_version < \"3.10\""]
xmp = ["defusedxml"]
[[package]]
name = "platformdirs"
version = "4.3.6"
description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`."
optional = false
python-versions = ">=3.8"
groups = ["dev"]
files = [
{file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"},
{file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"},
]
[package.extras]
docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"]
test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"]
type = ["mypy (>=1.11.2)"]
[[package]]
name = "pluggy"
version = "1.5.0"
@ -1545,6 +1630,25 @@ files = [
dev = ["pre-commit", "tox"]
testing = ["pytest", "pytest-benchmark"]
[[package]]
name = "pre-commit"
version = "4.1.0"
description = "A framework for managing and maintaining multi-language pre-commit hooks."
optional = false
python-versions = ">=3.9"
groups = ["dev"]
files = [
{file = "pre_commit-4.1.0-py2.py3-none-any.whl", hash = "sha256:d29e7cb346295bcc1cc75fc3e92e343495e3ea0196c9ec6ba53f49f10ab6ae7b"},
{file = "pre_commit-4.1.0.tar.gz", hash = "sha256:ae3f018575a588e30dfddfab9a05448bfbd6b73d78709617b5a2b853549716d4"},
]
[package.dependencies]
cfgv = ">=2.0.0"
identify = ">=1.0.0"
nodeenv = ">=0.11.1"
pyyaml = ">=5.1"
virtualenv = ">=20.10.0"
[[package]]
name = "proto-plus"
version = "1.26.0"
@ -1930,7 +2034,7 @@ version = "6.0.2"
description = "YAML parser and emitter for Python"
optional = false
python-versions = ">=3.8"
groups = ["docs"]
groups = ["dev", "docs"]
files = [
{file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
{file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
@ -2274,6 +2378,34 @@ files = [
{file = "ruamel.yaml.clib-0.2.12.tar.gz", hash = "sha256:6c8fbb13ec503f99a91901ab46e0b07ae7941cd527393187039aec586fdfd36f"},
]
[[package]]
name = "ruff"
version = "0.9.10"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
groups = ["dev"]
files = [
{file = "ruff-0.9.10-py3-none-linux_armv6l.whl", hash = "sha256:eb4d25532cfd9fe461acc83498361ec2e2252795b4f40b17e80692814329e42d"},
{file = "ruff-0.9.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:188a6638dab1aa9bb6228a7302387b2c9954e455fb25d6b4470cb0641d16759d"},
{file = "ruff-0.9.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:5284dcac6b9dbc2fcb71fdfc26a217b2ca4ede6ccd57476f52a587451ebe450d"},
{file = "ruff-0.9.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47678f39fa2a3da62724851107f438c8229a3470f533894b5568a39b40029c0c"},
{file = "ruff-0.9.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:99713a6e2766b7a17147b309e8c915b32b07a25c9efd12ada79f217c9c778b3e"},
{file = "ruff-0.9.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:524ee184d92f7c7304aa568e2db20f50c32d1d0caa235d8ddf10497566ea1a12"},
{file = "ruff-0.9.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:df92aeac30af821f9acf819fc01b4afc3dfb829d2782884f8739fb52a8119a16"},
{file = "ruff-0.9.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de42e4edc296f520bb84954eb992a07a0ec5a02fecb834498415908469854a52"},
{file = "ruff-0.9.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d257f95b65806104b6b1ffca0ea53f4ef98454036df65b1eda3693534813ecd1"},
{file = "ruff-0.9.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b60dec7201c0b10d6d11be00e8f2dbb6f40ef1828ee75ed739923799513db24c"},
{file = "ruff-0.9.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d838b60007da7a39c046fcdd317293d10b845001f38bcb55ba766c3875b01e43"},
{file = "ruff-0.9.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ccaf903108b899beb8e09a63ffae5869057ab649c1e9231c05ae354ebc62066c"},
{file = "ruff-0.9.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:f9567d135265d46e59d62dc60c0bfad10e9a6822e231f5b24032dba5a55be6b5"},
{file = "ruff-0.9.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5f202f0d93738c28a89f8ed9eaba01b7be339e5d8d642c994347eaa81c6d75b8"},
{file = "ruff-0.9.10-py3-none-win32.whl", hash = "sha256:bfb834e87c916521ce46b1788fbb8484966e5113c02df216680102e9eb960029"},
{file = "ruff-0.9.10-py3-none-win_amd64.whl", hash = "sha256:f2160eeef3031bf4b17df74e307d4c5fb689a6f3a26a2de3f7ef4044e3c484f1"},
{file = "ruff-0.9.10-py3-none-win_arm64.whl", hash = "sha256:5fd804c0327a5e5ea26615550e706942f348b197d5475ff34c19733aee4b2e69"},
{file = "ruff-0.9.10.tar.gz", hash = "sha256:9bacb735d7bada9cfb0f2c227d3658fc443d90a727b47f206fb33f52f3c0eac7"},
]
[[package]]
name = "s3transfer"
version = "0.11.4"
@ -2886,7 +3018,7 @@ files = [
pysocks = {version = ">=1.5.6,<1.5.7 || >1.5.7,<2.0", optional = true, markers = "extra == \"socks\""}
[package.extras]
brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""]
h2 = ["h2 (>=4,<5)"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
zstd = ["zstandard (>=0.18.0)"]
@ -2909,7 +3041,28 @@ h11 = ">=0.8"
typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""}
[package.extras]
standard = ["colorama (>=0.4)", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"]
standard = ["colorama (>=0.4) ; sys_platform == \"win32\"", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1) ; sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"", "watchfiles (>=0.13)", "websockets (>=10.4)"]
[[package]]
name = "virtualenv"
version = "20.29.3"
description = "Virtual Python Environment builder"
optional = false
python-versions = ">=3.8"
groups = ["dev"]
files = [
{file = "virtualenv-20.29.3-py3-none-any.whl", hash = "sha256:3e3d00f5807e83b234dfb6122bf37cfadf4be216c53a49ac059d02414f819170"},
{file = "virtualenv-20.29.3.tar.gz", hash = "sha256:95e39403fcf3940ac45bc717597dba16110b74506131845d9b687d5e73d947ac"},
]
[package.dependencies]
distlib = ">=0.3.7,<1"
filelock = ">=3.12.2,<4"
platformdirs = ">=3.9.1,<5"
[package.extras]
docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"]
test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8) ; platform_python_implementation == \"PyPy\" or platform_python_implementation == \"CPython\" and sys_platform == \"win32\" and python_version >= \"3.13\"", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10) ; platform_python_implementation == \"CPython\""]
[[package]]
name = "vk-api"
@ -3171,7 +3324,7 @@ files = [
]
[package.extras]
dev = ["black (>=19.3b0)", "pytest (>=4.6.2)"]
dev = ["black (>=19.3b0) ; python_version >= \"3.6\"", "pytest (>=4.6.2)"]
[[package]]
name = "wsproto"
@ -3202,8 +3355,8 @@ files = [
[package.extras]
build = ["build", "hatchling", "pip", "setuptools (>=71.0.2)", "wheel"]
curl-cffi = ["curl-cffi (==0.5.10)", "curl-cffi (>=0.5.10,!=0.6.*,<0.7.2)"]
default = ["brotli", "brotlicffi", "certifi", "mutagen", "pycryptodomex", "requests (>=2.32.2,<3)", "urllib3 (>=1.26.17,<3)", "websockets (>=13.0)"]
curl-cffi = ["curl-cffi (==0.5.10) ; os_name == \"nt\" and implementation_name == \"cpython\"", "curl-cffi (>=0.5.10,!=0.6.*,<0.7.2) ; os_name != \"nt\" and implementation_name == \"cpython\""]
default = ["brotli ; implementation_name == \"cpython\"", "brotlicffi ; implementation_name != \"cpython\"", "certifi", "mutagen", "pycryptodomex", "requests (>=2.32.2,<3)", "urllib3 (>=1.26.17,<3)", "websockets (>=13.0)"]
dev = ["autopep8 (>=2.0,<3.0)", "pre-commit", "pytest (>=8.1,<9.0)", "pytest-rerunfailures (>=14.0,<15.0)", "ruff (>=0.9.0,<0.10.0)"]
pyinstaller = ["pyinstaller (>=6.11.1)"]
secretstorage = ["cffi", "secretstorage"]
@ -3213,4 +3366,4 @@ test = ["pytest (>=8.1,<9.0)", "pytest-rerunfailures (>=14.0,<15.0)"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.13"
content-hash = "e42f3bc122fe5d98deb6aa224ddf531b6f45a50b7c61213721ff5c8258e424e3"
content-hash = "beb354960b8d8af491a13e09cb565c7e3099a2b150167c16147aa0438e970018"

View file

@ -65,6 +65,8 @@ pytest = "^8.3.4"
autopep8 = "^2.3.1"
pytest-loguru = "^0.4.0"
pytest-mock = "^3.14.0"
ruff = "^0.9.10"
pre-commit = "^4.1.0"
[tool.poetry.group.docs.dependencies]
sphinx = "^8.1.3"
@ -91,3 +93,28 @@ markers = [
"download: marks tests that download content from the network",
"incremental: marks a class to run tests incrementally. If a test fails in the class, the remaining tests will be skipped",
]
[tool.ruff]
#exclude = ["docs"]
line-length = 120
# Remove this for a more detailed lint report
output-format = "concise"
# TODO: temp ignore rule for timestamping_enricher to allow for open PR
exclude = ["src/auto_archiver/modules/timestamping_enricher/*"]
[tool.ruff.lint]
# Extend the rules to check for by adding them to this option:
# See documentation for more details: https://docs.astral.sh/ruff/rules/
#extend-select = ["B"]
[tool.ruff.lint.per-file-ignores]
# Ignore import violations in __init__.py files
"__init__.py" = ["F401", "F403"]
# Ignore 'useless expression' in manifest files.
"__manifest__.py" = ["B018"]
[tool.ruff.format]
docstring-code-format = false
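To understand why a rule such as `B018` is ignored for manifest files, Ruff can print a rule's documentation; a sketch, assuming the `ruff rule` subcommand available in recent Ruff versions:
```shell
# Show the documentation for the "useless expression" rule ignored in __manifest__.py files
ruff rule B018
```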

View file

@ -1,5 +1,6 @@
import os.path
import click, json
import click
import json
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
@ -70,11 +71,7 @@ def main(credentials, token):
print(emailAddress)
# Call the Drive v3 API and return some files
results = (
service.files()
.list(pageSize=10, fields="nextPageToken, files(id, name)")
.execute()
)
results = service.files().list(pageSize=10, fields="nextPageToken, files(id, name)").execute()
items = results.get("files", [])
if not items:

View file

@ -8,12 +8,14 @@ from auto_archiver.core.module import ModuleFactory
from auto_archiver.core.consts import MODULE_TYPES
from auto_archiver.core.config import EMPTY_CONFIG
class SchemaEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
return json.JSONEncoder.default(self, obj)
# Get available modules
module_factory = ModuleFactory()
available_modules = module_factory.available_modules()
@ -21,32 +23,40 @@ available_modules = module_factory.available_modules()
modules_by_type = {}
# Categorize modules by type
for module in available_modules:
for type in module.manifest.get('type', []):
for type in module.manifest.get("type", []):
modules_by_type.setdefault(type, []).append(module)
all_modules_ordered_by_type = sorted(available_modules, key=lambda x: (MODULE_TYPES.index(x.type[0]), not x.requires_setup))
all_modules_ordered_by_type = sorted(
available_modules, key=lambda x: (MODULE_TYPES.index(x.type[0]), not x.requires_setup)
)
yaml: YAML = YAML()
config_string = io.BytesIO()
yaml.dump(EMPTY_CONFIG, config_string)
config_string = config_string.getvalue().decode('utf-8')
config_string = config_string.getvalue().decode("utf-8")
output_schema = {
'modules': dict((module.name,
"modules": dict(
(
module.name,
{
'name': module.name,
'display_name': module.display_name,
'manifest': module.manifest,
'configs': module.configs or None
}
) for module in all_modules_ordered_by_type),
'steps': dict((f"{module_type}s", [module.name for module in modules_by_type[module_type]]) for module_type in MODULE_TYPES),
'configs': [m.name for m in all_modules_ordered_by_type if m.configs],
'module_types': MODULE_TYPES,
'empty_config': config_string
"name": module.name,
"display_name": module.display_name,
"manifest": module.manifest,
"configs": module.configs or None,
},
)
for module in all_modules_ordered_by_type
),
"steps": dict(
(f"{module_type}s", [module.name for module in modules_by_type[module_type]]) for module_type in MODULE_TYPES
),
"configs": [m.name for m in all_modules_ordered_by_type if m.configs],
"module_types": MODULE_TYPES,
"empty_config": config_string,
}
current_file_dir = os.path.dirname(os.path.abspath(__file__))
output_file = os.path.join(current_file_dir, 'settings/src/schema.json')
with open(output_file, 'w') as file:
output_file = os.path.join(current_file_dir, "settings/src/schema.json")
with open(output_file, "w") as file:
json.dump(output_schema, file, indent=4, cls=SchemaEncoder)

View file

@ -12,7 +12,6 @@ Then run this script to create a new session file.
You will need to provide your phone number and a 2FA code the first time you run this script.
"""
import os
from telethon.sync import TelegramClient
from loguru import logger
@ -26,4 +25,3 @@ SESSION_FILE = "secrets/anon-insta"
os.makedirs("secrets", exist_ok=True)
with TelegramClient(SESSION_FILE, API_ID, API_HASH) as client:
logger.success(f"New session file created: {SESSION_FILE}.session")

View file

@ -1,9 +1,13 @@
"""Entry point for the auto_archiver package."""
from auto_archiver.core.orchestrator import ArchivingOrchestrator
import sys
def main():
for _ in ArchivingOrchestrator()._command_line_run(sys.argv[1:]): pass
for _ in ArchivingOrchestrator()._command_line_run(sys.argv[1:]):
pass
if __name__ == "__main__":
main()

View file

@ -1,6 +1,5 @@
""" Core modules to handle things such as orchestration, metadata and configs..
"""Core modules to handle things such as orchestration, metadata and configs.."""
"""
from .metadata import Metadata
from .media import Media
from .base_module import BaseModule

View file

@ -1,9 +1,8 @@
from __future__ import annotations
from typing import Mapping, Any, Type, TYPE_CHECKING
from typing import Mapping, Any, TYPE_CHECKING
from abc import ABC
from copy import deepcopy, copy
from copy import deepcopy
from tempfile import TemporaryDirectory
from auto_archiver.utils import url as UrlUtil
from auto_archiver.core.consts import MODULE_TYPES as CONF_MODULE_TYPES
@ -13,8 +12,8 @@ from loguru import logger
if TYPE_CHECKING:
from .module import ModuleFactory
class BaseModule(ABC):
class BaseModule(ABC):
"""
Base module class. All modules should inherit from this class.
@ -46,14 +45,13 @@ class BaseModule(ABC):
@property
def storages(self) -> list:
return self.config.get('storages', [])
return self.config.get("storages", [])
def config_setup(self, config: dict):
# this is important. Each instance is given its own deepcopied config, so modules cannot
# change values to affect other modules
config = deepcopy(config)
authentication = deepcopy(config.pop('authentication', {}))
authentication = deepcopy(config.pop("authentication", {}))
self.authentication = authentication
self.config = config
@ -61,7 +59,8 @@ class BaseModule(ABC):
setattr(self, key, val)
def setup(self):
# For any additional setup required by modules, e.g. autehntication
# For any additional setup required by modules outside of the configs in the manifest,
# e.g. authentication
pass
def auth_for_site(self, site: str, extract_cookies=True) -> Mapping[str, Any]:
@ -90,11 +89,10 @@ class BaseModule(ABC):
# TODO: think about if/how we can deal with sites that have multiple domains (main one is x.com/twitter.com)
# for now the user must enter them both, like "x.com,twitter.com" in their config. Maybe we just hard-code?
site = UrlUtil.domain_for_url(site).lstrip("www.")
site = UrlUtil.domain_for_url(site).removeprefix("www.")
# add the 'www' version of the site to the list of sites to check
authdict = {}
for to_try in [site, f"www.{site}"]:
if to_try in self.authentication:
authdict.update(self.authentication[to_try])
@ -104,17 +102,20 @@ class BaseModule(ABC):
if not authdict:
for key in self.authentication.keys():
if key in site or site in key:
logger.debug(f"Could not find exact authentication information for site '{site}'. \
logger.debug(
f"Could not find exact authentication information for site '{site}'. \
did find information for '{key}' which is close, is this what you meant? \
If so, edit your authentication settings to make sure it exactly matches.")
If so, edit your authentication settings to make sure it exactly matches."
)
def get_ytdlp_cookiejar(args):
import yt_dlp
from yt_dlp import parse_options
logger.debug(f"Extracting cookies from settings: {args[1]}")
# parse_options returns a named tuple as follows, we only need the ydl_options part
# collections.namedtuple('ParsedOptions', ('parser', 'options', 'urls', 'ydl_opts'))
ytdlp_opts = getattr(parse_options(args), 'ydl_opts')
ytdlp_opts = getattr(parse_options(args), "ydl_opts")
return yt_dlp.YoutubeDL(ytdlp_opts).cookiejar
get_cookiejar_options = None
@ -125,20 +126,19 @@ If so, edit your authentication settings to make sure it exactly matches.")
# 3. cookies_from_browser setting in global config
# 4. cookies_file setting in global config
if 'cookies_from_browser' in authdict:
get_cookiejar_options = ['--cookies-from-browser', authdict['cookies_from_browser']]
elif 'cookies_file' in authdict:
get_cookiejar_options = ['--cookies', authdict['cookies_file']]
elif 'cookies_from_browser' in self.authentication:
authdict['cookies_from_browser'] = self.authentication['cookies_from_browser']
get_cookiejar_options = ['--cookies-from-browser', self.authentication['cookies_from_browser']]
elif 'cookies_file' in self.authentication:
authdict['cookies_file'] = self.authentication['cookies_file']
get_cookiejar_options = ['--cookies', self.authentication['cookies_file']]
if "cookies_from_browser" in authdict:
get_cookiejar_options = ["--cookies-from-browser", authdict["cookies_from_browser"]]
elif "cookies_file" in authdict:
get_cookiejar_options = ["--cookies", authdict["cookies_file"]]
elif "cookies_from_browser" in self.authentication:
authdict["cookies_from_browser"] = self.authentication["cookies_from_browser"]
get_cookiejar_options = ["--cookies-from-browser", self.authentication["cookies_from_browser"]]
elif "cookies_file" in self.authentication:
authdict["cookies_file"] = self.authentication["cookies_file"]
get_cookiejar_options = ["--cookies", self.authentication["cookies_file"]]
if get_cookiejar_options:
authdict['cookies_jar'] = get_ytdlp_cookiejar(get_cookiejar_options)
authdict["cookies_jar"] = get_ytdlp_cookiejar(get_cookiejar_options)
return authdict

View file

@ -6,7 +6,7 @@ flexible setup in various environments.
"""
import argparse
from ruamel.yaml import YAML, CommentedMap, add_representer
from ruamel.yaml import YAML, CommentedMap
import json
from loguru import logger
@ -19,12 +19,14 @@ _yaml: YAML = YAML()
DEFAULT_CONFIG_FILE = "secrets/orchestration.yaml"
EMPTY_CONFIG = _yaml.load("""
EMPTY_CONFIG = _yaml.load(
"""
# Auto Archiver Configuration
# Steps are the modules that will be run in the order they are defined
steps:""" + "".join([f"\n {module}s: []" for module in MODULE_TYPES]) + \
"""
steps:"""
+ "".join([f"\n {module}s: []" for module in MODULE_TYPES])
+ """
# Global configuration
@ -51,50 +53,54 @@ authentication: {}
logging:
level: INFO
""")
"""
)
# note: 'logging' is explicitly added above in order to better format the config file
# Arg Parse Actions/Classes
class AuthenticationJsonParseAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
try:
auth_dict = json.loads(values)
setattr(namespace, self.dest, auth_dict)
except json.JSONDecodeError as e:
raise argparse.ArgumentTypeError(f"Invalid JSON input for argument '{self.dest}': {e}")
raise argparse.ArgumentTypeError(f"Invalid JSON input for argument '{self.dest}': {e}") from e
def load_from_file(path):
try:
with open(path, 'r') as f:
with open(path, "r") as f:
try:
auth_dict = json.load(f)
except json.JSONDecodeError:
f.seek(0)
# maybe it's yaml, try that
auth_dict = _yaml.load(f)
if auth_dict.get('authentication'):
auth_dict = auth_dict['authentication']
auth_dict['load_from_file'] = path
if auth_dict.get("authentication"):
auth_dict = auth_dict["authentication"]
auth_dict["load_from_file"] = path
return auth_dict
except:
except Exception:
return None
if isinstance(auth_dict, dict) and auth_dict.get('from_file'):
auth_dict = load_from_file(auth_dict['from_file'])
if isinstance(auth_dict, dict) and auth_dict.get("from_file"):
auth_dict = load_from_file(auth_dict["from_file"])
elif isinstance(auth_dict, str):
# if it's a string
auth_dict = load_from_file(auth_dict)
if not isinstance(auth_dict, dict):
raise argparse.ArgumentTypeError("Authentication must be a dictionary of site names and their authentication methods")
global_options = ['cookies_from_browser', 'cookies_file', 'load_from_file']
raise argparse.ArgumentTypeError(
"Authentication must be a dictionary of site names and their authentication methods"
)
global_options = ["cookies_from_browser", "cookies_file", "load_from_file"]
for key, auth in auth_dict.items():
if key in global_options:
continue
if not isinstance(key, str) or not isinstance(auth, dict):
raise argparse.ArgumentTypeError(f"Authentication must be a dictionary of site names and their authentication methods. Valid global configs are {global_options}")
raise argparse.ArgumentTypeError(
f"Authentication must be a dictionary of site names and their authentication methods. Valid global configs are {global_options}"
)
setattr(namespace, self.dest, auth_dict)
@ -105,8 +111,8 @@ class UniqueAppendAction(argparse.Action):
if value not in getattr(namespace, self.dest):
getattr(namespace, self.dest).append(value)
class DefaultValidatingParser(argparse.ArgumentParser):
class DefaultValidatingParser(argparse.ArgumentParser):
def error(self, message):
"""
Override of error to format a nicer looking error message using logger
@ -135,8 +141,10 @@ class DefaultValidatingParser(argparse.ArgumentParser):
return super().parse_known_args(args, namespace)
# Config Utils
def to_dot_notation(yaml_conf: CommentedMap | dict) -> dict:
dotdict = {}
@ -150,6 +158,7 @@ def to_dot_notation(yaml_conf: CommentedMap | dict) -> dict:
process_subdict(yaml_conf)
return dotdict
def from_dot_notation(dotdict: dict) -> dict:
normal_dict = {}
@ -170,9 +179,11 @@ def from_dot_notation(dotdict: dict) -> dict:
def is_list_type(value):
return isinstance(value, list) or isinstance(value, tuple) or isinstance(value, set)
def is_dict_type(value):
return isinstance(value, dict) or isinstance(value, CommentedMap)
def merge_dicts(dotdict: dict, yaml_dict: CommentedMap) -> CommentedMap:
yaml_dict: CommentedMap = deepcopy(yaml_dict)
@ -183,7 +194,7 @@ def merge_dicts(dotdict: dict, yaml_dict: CommentedMap) -> CommentedMap:
yaml_subdict[key] = value
continue
if key == 'steps':
if key == "steps":
for module_type, modules in value.items():
# overwrite the 'steps' from the config file with the ones from the CLI
yaml_subdict[key][module_type] = modules
@ -198,6 +209,7 @@ def merge_dicts(dotdict: dict, yaml_dict: CommentedMap) -> CommentedMap:
update_dict(from_dot_notation(dotdict), yaml_dict)
return yaml_dict
def read_yaml(yaml_filename: str) -> CommentedMap:
config = None
try:
@ -211,6 +223,7 @@ def read_yaml(yaml_filename: str) -> CommentedMap:
return config
# TODO: make this tidier/find a way to notify of which keys should not be stored
@ -218,13 +231,14 @@ def store_yaml(config: CommentedMap, yaml_filename: str) -> None:
config_to_save = deepcopy(config)
auth_dict = config_to_save.get("authentication", {})
if auth_dict and auth_dict.get('load_from_file'):
if auth_dict and auth_dict.get("load_from_file"):
# remove all other values from the config, don't want to store it in the config file
auth_dict = {"load_from_file": auth_dict["load_from_file"]}
config_to_save.pop('urls', None)
config_to_save.pop("urls", None)
with open(yaml_filename, "w", encoding="utf-8") as outf:
_yaml.dump(config_to_save, outf)
def is_valid_config(config: CommentedMap) -> bool:
return config and config != EMPTY_CONFIG

View file

@ -1,25 +1,19 @@
class SetupError(ValueError):
pass
MODULE_TYPES = [
'feeder',
'extractor',
'enricher',
'database',
'storage',
'formatter'
]
MODULE_TYPES = ["feeder", "extractor", "enricher", "database", "storage", "formatter"]
MANIFEST_FILE = "__manifest__.py"
DEFAULT_MANIFEST = {
'name': '', # the display name of the module
'author': 'Bellingcat', # creator of the module, leave this as Bellingcat or set your own name!
'type': [], # the type of the module, can be one or more of MODULE_TYPES
'requires_setup': True, # whether or not this module requires additional setup such as setting API Keys or installing additional software
'description': '', # a description of the module
'dependencies': {}, # external dependencies, e.g. python packages or binaries, in dictionary format
'entry_point': '', # the entry point for the module, in the format 'module_name::ClassName'. This can be left blank to use the default entry point of module_name::ModuleName
'version': '1.0', # the version of the module
'configs': {} # any configuration options this module has, these will be exposed to the user in the config file or via the command line
"name": "", # the display name of the module
"author": "Bellingcat", # creator of the module, leave this as Bellingcat or set your own name!
"type": [], # the type of the module, can be one or more of MODULE_TYPES
"requires_setup": True, # whether or not this module requires additional setup such as setting API Keys or installing additional software
"description": "", # a description of the module
"dependencies": {}, # external dependencies, e.g. python packages or binaries, in dictionary format
"entry_point": "", # the entry point for the module, in the format 'module_name::ClassName'. This can be left blank to use the default entry point of module_name::ModuleName
"version": "1.0", # the version of the module
"configs": {}, # any configuration options this module has, these will be exposed to the user in the config file or via the command line
}

View file

@ -9,6 +9,7 @@ from typing import Union
from auto_archiver.core import Metadata, BaseModule
class Database(BaseModule):
"""
Base class for implementing database modules in the media archiving framework.

View file

@ -8,10 +8,12 @@ the archiving step and before storage or formatting.
Enrichers are optional but highly useful for making the archived data more powerful.
"""
from __future__ import annotations
from abc import abstractmethod
from auto_archiver.core import Metadata, BaseModule
class Enricher(BaseModule):
"""Base classes and utilities for enrichers in the Auto Archiver system.

View file

@ -5,13 +5,11 @@
"""
from __future__ import annotations
from pathlib import Path
from abc import abstractmethod
from dataclasses import dataclass
import mimetypes
import os
import mimetypes
import requests
from loguru import logger
from retrying import retry
@ -77,14 +75,14 @@ class Extractor(BaseModule):
downloads a URL to provided filename, or inferred from URL, returns local filename
"""
if not to_filename:
to_filename = url.split('/')[-1].split('?')[0]
to_filename = url.split("/")[-1].split("?")[0]
if len(to_filename) > 64:
to_filename = to_filename[-64:]
to_filename = os.path.join(self.tmp_dir, to_filename)
if verbose:
logger.debug(f"downloading {url[0:50]=} {to_filename=}")
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"
}
try:
d = requests.get(url, stream=True, headers=headers, timeout=30)
@ -92,12 +90,12 @@ class Extractor(BaseModule):
# get mimetype from the response headers
if not mimetypes.guess_type(to_filename)[0]:
content_type = d.headers.get('Content-Type') or self._guess_file_type(url)
content_type = d.headers.get("Content-Type") or self._guess_file_type(url)
extension = mimetypes.guess_extension(content_type)
if extension:
to_filename += extension
with open(to_filename, 'wb') as f:
with open(to_filename, "wb") as f:
for chunk in d.iter_content(chunk_size=8192):
f.write(chunk)
return to_filename

View file

@ -7,8 +7,8 @@ from abc import abstractmethod
from auto_archiver.core import Metadata
from auto_archiver.core import BaseModule
class Feeder(BaseModule):
class Feeder(BaseModule):
"""
Base class for implementing feeders in the media archiving framework.

View file

@ -26,6 +26,7 @@ class Media:
- properties: Additional metadata or transformations for the media.
- _mimetype: The media's mimetype (e.g., image/jpeg, video/mp4).
"""
filename: str
_key: str = None
urls: List[str] = field(default_factory=list)
@ -51,7 +52,8 @@ class Media:
This function returns a generator for all the inner media.
"""
if include_self: yield self
if include_self:
yield self
for prop in self.properties.values():
if isinstance(prop, Media):
for inner_media in prop.all_inner_media(include_self=True):
@ -113,15 +115,17 @@ class Media:
# checks for video streams with ffmpeg, or min file size for a video
# self.is_video() should be used together with this method
try:
streams = ffmpeg.probe(self.filename, select_streams='v')['streams']
streams = ffmpeg.probe(self.filename, select_streams="v")["streams"]
logger.warning(f"STREAMS FOR {self.filename} {streams}")
return any(s.get("duration_ts", 0) > 0 for s in streams)
except Error: return False # ffmpeg errors when reading bad files
except Error:
return False # ffmpeg errors when reading bad files
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
try:
fsize = os.path.getsize(self.filename)
return fsize > 20_000
except: pass
except Exception as e:
pass
return True

View file

@ -13,7 +13,7 @@ from __future__ import annotations
import hashlib
from typing import Any, List, Union, Dict
from dataclasses import dataclass, field
from dataclasses_json import dataclass_json, config
from dataclasses_json import dataclass_json
import datetime
from urllib.parse import urlparse
from dateutil.parser import parse as parse_dt
@ -21,6 +21,7 @@ from loguru import logger
from .media import Media
@dataclass_json # annotation order matters
@dataclass
class Metadata:
@ -40,19 +41,23 @@ class Metadata:
- If `True`, this instance's values are overwritten by `right`.
- If `False`, the inverse applies.
"""
if not right: return self
if not right:
return self
if overwrite_left:
if right.status and len(right.status):
self.status = right.status
self._context.update(right._context)
for k, v in right.metadata.items():
assert k not in self.metadata or type(v) == type(self.get(k))
if type(v) not in [dict, list, set] or k not in self.metadata:
assert k not in self.metadata or type(v) is type(self.get(k))
if not isinstance(v, (dict, list, set)) or k not in self.metadata:
self.set(k, v)
else: # key conflict
if type(v) in [dict, set]: self.set(k, self.get(k) | v)
elif type(v) == list: self.set(k, self.get(k) + v)
if isinstance(v, (dict, set)):
self.set(k, self.get(k) | v)
elif type(v) is list:
self.set(k, self.get(k) + v)
self.media.extend(right.media)
else: # invert and do same logic
return right.merge(self)
return self
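
A hedged illustration of the key-conflict rules above, assuming overwrite_left defaults to True and that set()/get() behave as in the rest of this file:

    from auto_archiver.core import Metadata

    left = Metadata().set("tags", ["a"]).set("extra", {"x": 1})
    right = Metadata().set("tags", ["b"]).set("extra", {"y": 2})

    merged = left.merge(right)
    assert merged.get("tags") == ["a", "b"]          # lists are concatenated
    assert merged.get("extra") == {"x": 1, "y": 2}   # dicts (and sets) are unioned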
@ -80,25 +85,27 @@ class Metadata:
return self.metadata.get(key, default)
def success(self, context: str = None) -> Metadata:
if context: self.status = f"{context}: success"
else: self.status = "success"
if context:
self.status = f"{context}: success"
else:
self.status = "success"
return self
def is_success(self) -> bool:
return "success" in self.status
def is_empty(self) -> bool:
meaningfull_ids = set(self.metadata.keys()) - set(["_processed_at", "url", "total_bytes", "total_size", "archive_duration_seconds"])
meaningfull_ids = set(self.metadata.keys()) - set(
["_processed_at", "url", "total_bytes", "total_size", "archive_duration_seconds"]
)
return not self.is_success() and len(self.media) == 0 and len(meaningfull_ids) == 0
@property # getter .netloc
def netloc(self) -> str:
return urlparse(self.get_url()).netloc
# custom getter/setters
def set_url(self, url: str) -> Metadata:
assert type(url) is str and len(url) > 0, "invalid URL"
return self.set("url", url)
@ -120,36 +127,43 @@ class Metadata:
return self.get("title")
def set_timestamp(self, timestamp: datetime.datetime) -> Metadata:
if type(timestamp) == str:
if isinstance(timestamp, str):
timestamp = parse_dt(timestamp)
assert type(timestamp) == datetime.datetime, "set_timestamp expects a datetime instance"
assert isinstance(timestamp, datetime.datetime), "set_timestamp expects a datetime instance"
return self.set("timestamp", timestamp)
def get_timestamp(self, utc=True, iso=True) -> datetime.datetime:
def get_timestamp(self, utc=True, iso=True) -> datetime.datetime | str | None:
ts = self.get("timestamp")
if not ts: return
if not ts:
return None
try:
if type(ts) == str: ts = datetime.datetime.fromisoformat(ts)
if type(ts) == float: ts = datetime.datetime.fromtimestamp(ts)
if utc: ts = ts.replace(tzinfo=datetime.timezone.utc)
if iso: return ts.isoformat()
return ts
if isinstance(ts, str):
ts = datetime.datetime.fromisoformat(ts)
elif isinstance(ts, float):
ts = datetime.datetime.fromtimestamp(ts)
if utc:
ts = ts.replace(tzinfo=datetime.timezone.utc)
return ts.isoformat() if iso else ts
except Exception as e:
logger.error(f"Unable to parse timestamp {ts}: {e}")
return
return None
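
For illustration, the normalisation above means (timestamp value made up):

    from auto_archiver.core import Metadata

    m = Metadata().set_timestamp("2024-01-02 03:04:05")
    m.get_timestamp()            # '2024-01-02T03:04:05+00:00' -- ISO string, pinned to UTC
    m.get_timestamp(iso=False)   # the equivalent timezone-aware datetime object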
def add_media(self, media: Media, id: str = None) -> Metadata:
# adds a new media, optionally including an id
if media is None: return
if media is None:
return
if id is not None:
assert not len([1 for m in self.media if m.get("id") == id]), f"cannot add 2 pieces of media with the same id {id}"
assert not len([1 for m in self.media if m.get("id") == id]), (
f"cannot add 2 pieces of media with the same id {id}"
)
media.set("id", id)
self.media.append(media)
return media
def get_media_by_id(self, id: str, default=None) -> Media:
for m in self.media:
if m.get("id") == id: return m
if m.get("id") == id:
return m
return default
def remove_duplicate_media_by_hash(self) -> None:
@ -159,7 +173,8 @@ class Metadata:
with open(filename, "rb") as f:
while True:
buf = f.read(chunksize)
if not buf: break
if not buf:
break
hash_algo.update(buf)
return hash_algo.hexdigest()
@ -167,15 +182,18 @@ class Metadata:
new_media = []
for m in self.media:
h = m.get("hash")
if not h: h = calculate_hash_in_chunks(hashlib.sha256(), int(1.6e7), m.filename)
if len(h) and h in media_hashes: continue
if not h:
h = calculate_hash_in_chunks(hashlib.sha256(), int(1.6e7), m.filename)
if len(h) and h in media_hashes:
continue
media_hashes.add(h)
new_media.append(m)
self.media = new_media
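
The same chunked-hashing pattern as a standalone helper (a sketch; the default chunk size mirrors the int(1.6e7) bytes used above):

    import hashlib

    def sha256_in_chunks(path: str, chunksize: int = 16_000_000) -> str:
        # Read the file piecewise so large media never has to fit in memory at once.
        h = hashlib.sha256()
        with open(path, "rb") as f:
            while buf := f.read(chunksize):
                h.update(buf)
        return h.hexdigest()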
def get_first_image(self, default=None) -> Media:
for m in self.media:
if "image" in m.mimetype: return m
if "image" in m.mimetype:
return m
return default
def set_final_media(self, final: Media) -> Metadata:
@ -193,17 +211,20 @@ class Metadata:
def __str__(self) -> str:
return self.__repr__()
@staticmethod
def choose_most_complete(results: List[Metadata]) -> Metadata:
# returns the most complete result from a list of results
# prioritizes results with more media, then more metadata
if len(results) == 0: return None
if len(results) == 1: return results[0]
if len(results) == 0:
return None
if len(results) == 1:
return results[0]
most_complete = results[0]
for r in results[1:]:
if len(r.media) > len(most_complete.media): most_complete = r
elif len(r.media) == len(most_complete.media) and len(r.metadata) > len(most_complete.metadata): most_complete = r
if len(r.media) > len(most_complete.media):
most_complete = r
elif len(r.media) == len(most_complete.media) and len(r.metadata) > len(most_complete.metadata):
most_complete = r
return most_complete
def set_context(self, key: str, val: Any) -> Metadata:


@ -3,6 +3,7 @@ Defines the Step abstract base class, which acts as a blueprint for steps in the
by handling user configuration, validating the steps properties, and implementing dynamic instantiation.
"""
from __future__ import annotations
from dataclasses import dataclass
@ -24,8 +25,8 @@ if TYPE_CHECKING:
HAS_SETUP_PATHS = False
class ModuleFactory:
def __init__(self):
self._lazy_modules = {}
@ -46,10 +47,12 @@ class ModuleFactory:
# see odoo/module/module.py -> initialize_sys_path
if path not in auto_archiver.modules.__path__:
if HAS_SETUP_PATHS == True:
logger.warning(f"You are attempting to re-initialise the module paths with: '{path}' for a 2nd time. \
if HAS_SETUP_PATHS:
logger.warning(
f"You are attempting to re-initialise the module paths with: '{path}' for a 2nd time. \
This could lead to unexpected behaviour. It is recommended to only use a single modules path. \
If you wish to load modules from different paths then load a 2nd python interpreter (e.g. using multiprocessing).")
If you wish to load modules from different paths then load a 2nd python interpreter (e.g. using multiprocessing)."
)
auto_archiver.modules.__path__.append(path)
# sort based on the length of the path, so that the longest path is last in the list
@ -81,13 +84,14 @@ class ModuleFactory:
available = self.available_modules(limit_to_modules=[module_name], suppress_warnings=suppress_warnings)
if not available:
message = f"Module '{module_name}' not found. Are you sure it's installed/exists?"
if 'archiver' in module_name:
if "archiver" in module_name:
message += f" Did you mean {module_name.replace('archiver', 'extractor')}?"
raise IndexError(message)
return available[0]
def available_modules(self, limit_to_modules: List[str]= [], suppress_warnings: bool = False) -> List[LazyBaseModule]:
def available_modules(
self, limit_to_modules: List[str] = [], suppress_warnings: bool = False
) -> List[LazyBaseModule]:
# search through all valid 'modules' paths. Default is 'modules' in the current directory
# see odoo/modules/module.py -> get_modules
@ -127,15 +131,16 @@ class ModuleFactory:
return all_modules
@dataclass
class LazyBaseModule:
"""
A lazy module class, which only loads the manifest and does not load the module itself.
This is useful for getting information about a module without actually loading it.
"""
name: str
description: str
path: str
@ -152,30 +157,30 @@ class LazyBaseModule:
@property
def type(self):
return self.manifest['type']
return self.manifest["type"]
@property
def entry_point(self):
if not self._entry_point and not self.manifest['entry_point']:
if not self._entry_point and not self.manifest["entry_point"]:
# try to create the entry point from the module name
self._entry_point = f"{self.name}::{self.name.replace('_', ' ').title().replace(' ', '')}"
return self._entry_point
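
The derived default is simply the module name camel-cased after a '::' separator, e.g. (illustrative):

    name = "cli_feeder"
    default_entry_point = f"{name}::{name.replace('_', ' ').title().replace(' ', '')}"
    # -> "cli_feeder::CliFeeder"

which is presumably why modules whose class name does not match that guess (the real CLI feeder class is CLIFeeder) declare entry_point explicitly in their manifest.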
@property
def dependencies(self) -> dict:
return self.manifest['dependencies']
return self.manifest["dependencies"]
@property
def configs(self) -> dict:
return self.manifest['configs']
return self.manifest["configs"]
@property
def requires_setup(self) -> bool:
return self.manifest['requires_setup']
return self.manifest["requires_setup"]
@property
def display_name(self) -> str:
return self.manifest['name']
return self.manifest["name"]
@property
def manifest(self) -> dict:
@ -189,17 +194,16 @@ class LazyBaseModule:
try:
manifest.update(ast.literal_eval(f.read()))
except (ValueError, TypeError, SyntaxError, MemoryError, RecursionError) as e:
raise ValueError(f"Error loading manifest from file {self.path}/{MANIFEST_FILE}: {e}")
raise ValueError(f"Error loading manifest from file {self.path}/{MANIFEST_FILE}: {e}") from e
self._manifest = manifest
self._entry_point = manifest['entry_point']
self.description = manifest['description']
self.version = manifest['version']
self._entry_point = manifest["entry_point"]
self.description = manifest["description"]
self.version = manifest["version"]
return manifest
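
For orientation, a manifest file evaluates to a plain dict along these lines (a sketch modelled on the cli_feeder manifest further down this diff; real modules may declare more fields, e.g. version):

    # Hypothetical __manifest__.py contents, parsed above with ast.literal_eval:
    {
        "name": "My Module",
        "type": ["extractor"],
        "entry_point": "my_module::MyModule",  # file_name::ClassName
        "requires_setup": False,
        "dependencies": {"python": ["loguru"], "bin": []},
        "configs": {"some_option": {"default": None, "help": "What this option does."}},
        "description": "One-paragraph description of the module.",
    }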
def load(self, config) -> BaseModule:
if self._instance:
return self._instance
@ -210,8 +214,10 @@ class LazyBaseModule:
# clear out any empty strings that a user may have erroneously added
continue
if not check(dep):
logger.error(f"Module '{self.name}' requires external dependency '{dep}' which is not available/setup. \
Have you installed the required dependencies for the '{self.name}' module? See the README for more information.")
logger.error(
f"Module '{self.name}' requires external dependency '{dep}' which is not available/setup. \
Have you installed the required dependencies for the '{self.name}' module? See the README for more information."
)
exit(1)
def check_python_dep(dep):
@ -222,7 +228,7 @@ class LazyBaseModule:
# we must now load this module and set it up with the config
m.load(config)
return True
except:
except Exception:
logger.error(f"Unable to setup module '{dep}' for use in module '{self.name}'")
return False
except IndexError:
@ -231,13 +237,12 @@ class LazyBaseModule:
return find_spec(dep)
check_deps(self.dependencies.get('python', []), check_python_dep)
check_deps(self.dependencies.get('bin', []), lambda dep: shutil.which(dep))
check_deps(self.dependencies.get("python", []), check_python_dep)
check_deps(self.dependencies.get("bin", []), lambda dep: shutil.which(dep))
logger.debug(f"Loading module '{self.display_name}'...")
for qualname in [self.name, f'auto_archiver.modules.{self.name}']:
for qualname in [self.name, f"auto_archiver.modules.{self.name}"]:
try:
# first import the whole module, to make sure it's working properly
__import__(qualname)
@ -246,10 +251,10 @@ class LazyBaseModule:
pass
# then import the file for the entry point
file_name, class_name = self.entry_point.split('::')
sub_qualname = f'{qualname}.{file_name}'
file_name, class_name = self.entry_point.split("::")
sub_qualname = f"{qualname}.{file_name}"
__import__(f'{qualname}.{file_name}', fromlist=[self.entry_point])
__import__(f"{qualname}.{file_name}", fromlist=[self.entry_point])
# finally, get the class instance
instance: BaseModule = getattr(sys.modules[sub_qualname], class_name)()
@ -259,7 +264,7 @@ class LazyBaseModule:
instance.module_factory = self.module_factory
# merge the default config with the user config
default_config = dict((k, v['default']) for k, v in self.configs.items() if 'default' in v)
default_config = dict((k, v["default"]) for k, v in self.configs.items() if "default" in v)
config[self.name] = default_config | config.get(self.name, {})
instance.config_setup(config)
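
That last line relies on dict-union semantics: manifest defaults are laid down first and the user's values win on conflict. With made-up keys:

    default_config = {"timeout": 30, "save_absolute_paths": False}
    user_config = {"timeout": 60}
    effective = default_config | user_config
    # {'timeout': 60, 'save_absolute_paths': False}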


@ -19,8 +19,17 @@ import requests
from .metadata import Metadata, Media
from auto_archiver.version import __version__
from .config import read_yaml, store_yaml, to_dot_notation, merge_dicts, is_valid_config, \
DefaultValidatingParser, UniqueAppendAction, AuthenticationJsonParseAction, DEFAULT_CONFIG_FILE
from .config import (
read_yaml,
store_yaml,
to_dot_notation,
merge_dicts,
is_valid_config,
DefaultValidatingParser,
UniqueAppendAction,
AuthenticationJsonParseAction,
DEFAULT_CONFIG_FILE,
)
from .module import ModuleFactory, LazyBaseModule
from . import validators, Feeder, Extractor, Database, Storage, Formatter, Enricher
from .consts import MODULE_TYPES, SetupError
@ -30,8 +39,8 @@ if TYPE_CHECKING:
from .base_module import BaseModule
from .module import LazyBaseModule
class ArchivingOrchestrator:
# instance variables
module_factory: ModuleFactory
setup_finished: bool
@ -61,30 +70,63 @@ class ArchivingOrchestrator:
epilog="Check the code at https://github.com/bellingcat/auto-archiver",
formatter_class=RichHelpFormatter,
)
parser.add_argument('--help', '-h', action='store_true', dest='help', help='show a full help message and exit')
parser.add_argument('--version', action='version', version=__version__)
parser.add_argument('--config', action='store', dest="config_file", help='the filename of the YAML configuration file (defaults to \'config.yaml\')', default=DEFAULT_CONFIG_FILE)
parser.add_argument('--mode', action='store', dest='mode', type=str, choices=['simple', 'full'], help='the mode to run the archiver in', default='simple')
parser.add_argument("--help", "-h", action="store_true", dest="help", help="show a full help message and exit")
parser.add_argument("--version", action="version", version=__version__)
parser.add_argument(
"--config",
action="store",
dest="config_file",
help="the filename of the YAML configuration file (defaults to 'config.yaml')",
default=DEFAULT_CONFIG_FILE,
)
parser.add_argument(
"--mode",
action="store",
dest="mode",
type=str,
choices=["simple", "full"],
help="the mode to run the archiver in",
default="simple",
)
# override the default 'help' so we can inject all the configs and show those
parser.add_argument('-s', '--store', dest='store', default=False, help='Store the created config in the config file', action=argparse.BooleanOptionalAction)
parser.add_argument('--module_paths', dest='module_paths', nargs='+', default=[], help='additional paths to search for modules', action=UniqueAppendAction)
parser.add_argument(
"-s",
"--store",
dest="store",
default=False,
help="Store the created config in the config file",
action=argparse.BooleanOptionalAction,
)
parser.add_argument(
"--module_paths",
dest="module_paths",
nargs="+",
default=[],
help="additional paths to search for modules",
action=UniqueAppendAction,
)
self.basic_parser = parser
return parser
def check_steps(self, config):
for module_type in MODULE_TYPES:
if not config['steps'].get(f"{module_type}s", []):
if module_type == 'feeder' or module_type == 'formatter' and config['steps'].get(f"{module_type}"):
raise SetupError(f"It appears you have '{module_type}' set under 'steps' in your configuration file, but as of version 0.13.0 of Auto Archiver, you must use '{module_type}s'. Change this in your configuration file and try again. \
Here's how that would look: \n\nsteps:\n {module_type}s:\n - [your_{module_type}_name_here]\n {'extractors:...' if module_type == 'feeder' else '...'}\n")
if module_type == 'extractor' and config['steps'].get('archivers'):
raise SetupError(f"As of version 0.13.0 of Auto Archiver, the 'archivers' step name has been changed to 'extractors'. Change this in your configuration file and try again. \
Here's how that would look: \n\nsteps:\n extractors:\n - [your_extractor_name_here]\n enrichers:...\n")
raise SetupError(f"No {module_type}s were configured. Make sure to set at least one {module_type} in your configuration file or on the command line (using --{module_type}s)")
if not config["steps"].get(f"{module_type}s", []):
if module_type == "feeder" or module_type == "formatter" and config["steps"].get(f"{module_type}"):
raise SetupError(
f"It appears you have '{module_type}' set under 'steps' in your configuration file, but as of version 0.13.0 of Auto Archiver, you must use '{module_type}s'. Change this in your configuration file and try again. \
Here's how that would look: \n\nsteps:\n {module_type}s:\n - [your_{module_type}_name_here]\n {'extractors:...' if module_type == 'feeder' else '...'}\n"
)
if module_type == "extractor" and config["steps"].get("archivers"):
raise SetupError(
"As of version 0.13.0 of Auto Archiver, the 'archivers' step name has been changed to 'extractors'. Change this in your configuration file and try again. \
Here's how that would look: \n\nsteps:\n extractors:\n - [your_extractor_name_here]\n enrichers:...\n"
)
raise SetupError(
f"No {module_type}s were configured. Make sure to set at least one {module_type} in your configuration file or on the command line (using --{module_type}s)"
)
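
Put together, the post-0.13.0 layout these error messages describe corresponds to a parsed config roughly like the following (module names are illustrative, a few borrowed from elsewhere in this diff; the exact set of step types follows MODULE_TYPES):

    config["steps"] = {
        "feeders": ["cli_feeder"],
        "extractors": ["generic_extractor"],
        "enrichers": ["hash_enricher"],
        "databases": ["csv_db"],
        "storages": ["gdrive_storage"],
        "formatters": ["html_formatter"],
    }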
def setup_complete_parser(self, basic_config: dict, yaml_config: dict, unused_args: list[str]) -> None:
# modules parser to get the overridden 'steps' values
modules_parser = argparse.ArgumentParser(
add_help=False,
@ -92,7 +134,9 @@ Here's how that would look: \n\nsteps:\n extractors:\n - [your_extractor_name_
self.add_modules_args(modules_parser)
cli_modules, unused_args = modules_parser.parse_known_args(unused_args)
for module_type in MODULE_TYPES:
yaml_config['steps'][f"{module_type}s"] = getattr(cli_modules, f"{module_type}s", []) or yaml_config['steps'].get(f"{module_type}s", [])
yaml_config["steps"][f"{module_type}s"] = getattr(cli_modules, f"{module_type}s", []) or yaml_config[
"steps"
].get(f"{module_type}s", [])
parser = DefaultValidatingParser(
add_help=False,
@ -115,27 +159,29 @@ Here's how that would look: \n\nsteps:\n extractors:\n - [your_extractor_name_
enabled_modules = []
# first loads the modules from the config file, then from the command line
for module_type in MODULE_TYPES:
enabled_modules.extend(yaml_config['steps'].get(f"{module_type}s", []))
enabled_modules.extend(yaml_config["steps"].get(f"{module_type}s", []))
# clear out duplicates, but keep the order
enabled_modules = list(dict.fromkeys(enabled_modules))
avail_modules = self.module_factory.available_modules(limit_to_modules=enabled_modules, suppress_warnings=True)
avail_modules = self.module_factory.available_modules(
limit_to_modules=enabled_modules, suppress_warnings=True
)
self.add_individual_module_args(avail_modules, parser)
elif basic_config.mode == 'simple':
elif basic_config.mode == "simple":
simple_modules = [module for module in self.module_factory.available_modules() if not module.requires_setup]
self.add_individual_module_args(simple_modules, parser)
# add them to the config
for module in simple_modules:
for module_type in module.type:
yaml_config['steps'].setdefault(f"{module_type}s", []).append(module.name)
yaml_config["steps"].setdefault(f"{module_type}s", []).append(module.name)
else:
# load all modules, they're not using the 'simple' mode
all_modules = self.module_factory.available_modules()
# add all the modules to the steps
for module in all_modules:
for module_type in module.type:
yaml_config['steps'].setdefault(f"{module_type}s", []).append(module.name)
yaml_config["steps"].setdefault(f"{module_type}s", []).append(module.name)
self.add_individual_module_args(all_modules, parser)
@ -171,34 +217,67 @@ Here's how that would look: \n\nsteps:\n extractors:\n - [your_extractor_name_
# Module loading from the command line
for module_type in MODULE_TYPES:
parser.add_argument(f'--{module_type}s', dest=f'{module_type}s', nargs='+', help=f'the {module_type}s to use', default=[], action=UniqueAppendAction)
parser.add_argument(
f"--{module_type}s",
dest=f"{module_type}s",
nargs="+",
help=f"the {module_type}s to use",
default=[],
action=UniqueAppendAction,
)
def add_additional_args(self, parser: argparse.ArgumentParser = None):
if not parser:
parser = self.parser
parser.add_argument('--authentication', dest='authentication', help='A dictionary of sites and their authentication methods \
parser.add_argument(
"--authentication",
dest="authentication",
help="A dictionary of sites and their authentication methods \
(token, username etc.) that extractors can use to log into \
a website. If passing this on the command line, use a JSON string. \
You may also pass a path to a valid JSON/YAML file which will be parsed.',
You may also pass a path to a valid JSON/YAML file which will be parsed.",
default={},
nargs="?",
action=AuthenticationJsonParseAction)
action=AuthenticationJsonParseAction,
)
# logging arguments
parser.add_argument('--logging.level', action='store', dest='logging.level', choices=['INFO', 'DEBUG', 'ERROR', 'WARNING'], help='the logging level to use', default='INFO', type=str.upper)
parser.add_argument('--logging.file', action='store', dest='logging.file', help='the logging file to write to', default=None)
parser.add_argument('--logging.rotation', action='store', dest='logging.rotation', help='the logging rotation to use', default=None)
def add_individual_module_args(self, modules: list[LazyBaseModule] = None, parser: argparse.ArgumentParser = None) -> None:
parser.add_argument(
"--logging.level",
action="store",
dest="logging.level",
choices=["INFO", "DEBUG", "ERROR", "WARNING"],
help="the logging level to use",
default="INFO",
type=str.upper,
)
parser.add_argument(
"--logging.file", action="store", dest="logging.file", help="the logging file to write to", default=None
)
parser.add_argument(
"--logging.rotation",
action="store",
dest="logging.rotation",
help="the logging rotation to use",
default=None,
)
def add_individual_module_args(
self, modules: list[LazyBaseModule] = None, parser: argparse.ArgumentParser = None
) -> None:
if not modules:
modules = self.module_factory.available_modules()
for module in modules:
if module.name == 'cli_feeder':
if module.name == "cli_feeder":
# special case. For the CLI feeder, allow passing URLs directly on the command line without setting --cli_feeder.urls=
parser.add_argument('urls', nargs='*', default=[], help='URL(s) to archive, either a single URL or a list of urls, should not come from config.yaml')
parser.add_argument(
"urls",
nargs="*",
default=[],
help="URL(s) to archive, either a single URL or a list of urls, should not come from config.yaml",
)
continue
if not module.configs:
@ -209,21 +288,21 @@ Here's how that would look: \n\nsteps:\n extractors:\n - [your_extractor_name_
group = parser.add_argument_group(module.display_name or module.name, f"{module.description[:100]}...")
for name, kwargs in module.configs.items():
if not kwargs.get('metavar', None):
if not kwargs.get("metavar", None):
# make a nicer metavar, metavar is what's used in the help, e.g. --cli_feeder.urls [METAVAR]
kwargs['metavar'] = name.upper()
kwargs["metavar"] = name.upper()
if kwargs.get('required', False):
if kwargs.get("required", False):
# required args shouldn't have a 'default' value, remove it
kwargs.pop('default', None)
kwargs.pop("default", None)
kwargs.pop('cli_set', None)
should_store = kwargs.pop('should_store', False)
kwargs['dest'] = f"{module.name}.{kwargs.pop('dest', name)}"
kwargs.pop("cli_set", None)
should_store = kwargs.pop("should_store", False)
kwargs["dest"] = f"{module.name}.{kwargs.pop('dest', name)}"
try:
kwargs['type'] = getattr(validators, kwargs.get('type', '__invalid__'))
kwargs["type"] = getattr(validators, kwargs.get("type", "__invalid__"))
except AttributeError:
kwargs['type'] = __builtins__.get(kwargs.get('type'), str)
kwargs["type"] = __builtins__.get(kwargs.get("type"), str)
arg = group.add_argument(f"--{module.name}.{name}", **kwargs)
arg.should_store = should_store
@ -238,12 +317,11 @@ Here's how that would look: \n\nsteps:\n extractors:\n - [your_extractor_name_
self.basic_parser.exit()
def setup_logging(self, config):
logging_config = config["logging"]
logging_config = config['logging']
if logging_config.get('enabled', True) is False:
if logging_config.get("enabled", True) is False:
# disabled logging settings, they're set on a higher level
logger.disable('auto_archiver')
logger.disable("auto_archiver")
return
# setup loguru logging
@ -254,9 +332,11 @@ Here's how that would look: \n\nsteps:\n extractors:\n - [your_extractor_name_
# add other logging info
if self.logger_id is None: # note - need direct comparison to None since need to consider falsy value 0
self.logger_id = logger.add(sys.stderr, level=logging_config['level'])
if log_file := logging_config['file']:
logger.add(log_file) if not logging_config['rotation'] else logger.add(log_file, rotation=logging_config['rotation'])
self.logger_id = logger.add(sys.stderr, level=logging_config["level"])
if log_file := logging_config["file"]:
logger.add(log_file) if not logging_config["rotation"] else logger.add(
log_file, rotation=logging_config["rotation"]
)
def install_modules(self, modules_by_type):
"""
@ -267,24 +347,29 @@ Here's how that would look: \n\nsteps:\n extractors:\n - [your_extractor_name_
invalid_modules = []
for module_type in MODULE_TYPES:
step_items = []
modules_to_load = modules_by_type[f"{module_type}s"]
if not modules_to_load:
raise SetupError(f"No {module_type}s were configured. Make sure to set at least one {module_type} in your configuration file or on the command line (using --{module_type}s)")
raise SetupError(
f"No {module_type}s were configured. Make sure to set at least one {module_type} in your configuration file or on the command line (using --{module_type}s)"
)
def check_steps_ok():
if not len(step_items):
if len(modules_to_load):
logger.error(f"Unable to load any {module_type}s. Tried the following, but none were available: {modules_to_load}")
raise SetupError(f"NO {module_type.upper()}S LOADED. Please check your configuration and try again.")
logger.error(
f"Unable to load any {module_type}s. Tried the following, but none were available: {modules_to_load}"
)
raise SetupError(
f"NO {module_type.upper()}S LOADED. Please check your configuration and try again."
)
if (module_type == 'feeder' or module_type == 'formatter') and len(step_items) > 1:
raise SetupError(f"Only one {module_type} is allowed, found {len(step_items)} {module_type}s. Please remove one of the following from your configuration file: {modules_to_load}")
if (module_type == "feeder" or module_type == "formatter") and len(step_items) > 1:
raise SetupError(
f"Only one {module_type} is allowed, found {len(step_items)} {module_type}s. Please remove one of the following from your configuration file: {modules_to_load}"
)
for module in modules_to_load:
if module in invalid_modules:
continue
@ -293,7 +378,7 @@ Here's how that would look: \n\nsteps:\n extractors:\n - [your_extractor_name_
loaded_module: BaseModule = self.module_factory.get_module(module, self.config)
except (KeyboardInterrupt, Exception) as e:
logger.error(f"Error during setup of modules: {e}\n{traceback.format_exc()}")
if loaded_module and module_type == 'extractor':
if loaded_module and module_type == "extractor":
loaded_module.cleanup()
raise e
@ -308,7 +393,9 @@ Here's how that would look: \n\nsteps:\n extractors:\n - [your_extractor_name_
def load_config(self, config_file: str) -> dict:
if not os.path.exists(config_file) and config_file != DEFAULT_CONFIG_FILE:
logger.error(f"The configuration file {config_file} was not found. Make sure the file exists and try again, or run without the --config file to use the default settings.")
logger.error(
f"The configuration file {config_file} was not found. Make sure the file exists and try again, or run without the --config file to use the default settings."
)
raise FileNotFoundError(f"Configuration file {config_file} not found")
return read_yaml(config_file)
@ -338,10 +425,10 @@ Here's how that would look: \n\nsteps:\n extractors:\n - [your_extractor_name_
def check_for_updates(self):
response = requests.get("https://pypi.org/pypi/auto-archiver/json").json()
latest_version = response['info']['version']
latest_version = response["info"]["version"]
# check version compared to current version
if latest_version != __version__:
if os.environ.get('RUNNING_IN_DOCKER'):
if os.environ.get("RUNNING_IN_DOCKER"):
update_cmd = "`docker pull bellingcat/auto-archiver:latest`"
else:
update_cmd = "`pip install --upgrade auto-archiver`"
@ -351,7 +438,6 @@ Here's how that would look: \n\nsteps:\n extractors:\n - [your_extractor_name_
logger.warning(f"Make sure to update to the latest version using: {update_cmd}")
logger.warning("")
def setup(self, args: list):
"""
Function to configure all setup of the orchestrator: setup configs and load modules.
@ -362,21 +448,25 @@ Here's how that would look: \n\nsteps:\n extractors:\n - [your_extractor_name_
self.check_for_updates()
if self.setup_finished:
logger.warning("The `setup_config()` function should only ever be run once. \
logger.warning(
"The `setup_config()` function should only ever be run once. \
If you need to re-run the setup, please re-instantiate a new instance of the orchestrator. \
For code implementations, you should call .setup_config() once, then you may call .feed() \
multiple times to archive multiple URLs.")
multiple times to archive multiple URLs."
)
return
self.setup_basic_parser()
self.config = self.setup_config(args)
logger.info(f"======== Welcome to the AUTO ARCHIVER ({__version__}) ==========")
self.install_modules(self.config['steps'])
self.install_modules(self.config["steps"])
# log out the modules that were loaded
for module_type in MODULE_TYPES:
logger.info(f"{module_type.upper()}S: " + ", ".join(m.display_name for m in getattr(self, f"{module_type}s")))
logger.info(
f"{module_type.upper()}S: " + ", ".join(m.display_name for m in getattr(self, f"{module_type}s"))
)
self.setup_finished = True
@ -405,7 +495,6 @@ Here's how that would look: \n\nsteps:\n extractors:\n - [your_extractor_name_
e.cleanup()
def feed(self) -> Generator[Metadata]:
url_count = 0
for feeder in self.feeders:
for item in feeder:
@ -436,9 +525,9 @@ Here's how that would look: \n\nsteps:\n extractors:\n - [your_extractor_name_
self.cleanup()
exit()
except Exception as e:
logger.error(f'Got unexpected error on item {item}: {e}\n{traceback.format_exc()}')
logger.error(f"Got unexpected error on item {item}: {e}\n{traceback.format_exc()}")
for d in self.databases:
if type(e) == AssertionError:
if isinstance(e, AssertionError):
d.failed(item, str(e))
else:
d.failed(item, reason="unexpected error")
@ -473,7 +562,8 @@ Here's how that would look: \n\nsteps:\n extractors:\n - [your_extractor_name_
url = a.sanitize_url(url)
result.set_url(url)
if original_url != url: result.set("original_url", original_url)
if original_url != url:
result.set("original_url", original_url)
# 2 - notify start to DBs, propagate already archived if feature enabled in DBs
cached_result = None
@ -484,7 +574,8 @@ Here's how that would look: \n\nsteps:\n extractors:\n - [your_extractor_name_
if cached_result:
logger.debug("Found previously archived entry")
for d in self.databases:
try: d.done(cached_result, cached=True)
try:
d.done(cached_result, cached=True)
except Exception as e:
logger.error(f"ERROR database {d.name}: {e}: {traceback.format_exc()}")
return cached_result
@ -494,13 +585,15 @@ Here's how that would look: \n\nsteps:\n extractors:\n - [your_extractor_name_
logger.info(f"Trying extractor {a.name} for {url}")
try:
result.merge(a.download(result))
if result.is_success(): break
if result.is_success():
break
except Exception as e:
logger.error(f"ERROR archiver {a.name}: {e}: {traceback.format_exc()}")
# 4 - call enrichers to work with archived content
for e in self.enrichers:
try: e.enrich(result)
try:
e.enrich(result)
except Exception as exc:
logger.error(f"ERROR enricher {e.name}: {exc}: {traceback.format_exc()}")
@ -518,13 +611,13 @@ Here's how that would look: \n\nsteps:\n extractors:\n - [your_extractor_name_
# signal completion to databases and archivers
for d in self.databases:
try: d.done(result)
try:
d.done(result)
except Exception as e:
logger.error(f"ERROR database {d.name}: {e}: {traceback.format_exc()}")
return result
def setup_authentication(self, config: dict) -> dict:
"""
Setup authentication for all modules that require it
@ -532,7 +625,7 @@ Here's how that would look: \n\nsteps:\n extractors:\n - [your_extractor_name_
Split up strings into multiple sites if they are comma separated
"""
authentication = config.get('authentication', {})
authentication = config.get("authentication", {})
# extract out concatenated sites
for key, val in copy(authentication).items():
@ -542,7 +635,7 @@ Here's how that would look: \n\nsteps:\n extractors:\n - [your_extractor_name_
authentication[site] = val
del authentication[key]
config['authentication'] = authentication
config["authentication"] = authentication
return config
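
Assuming the elided body of the loop only splits keys on commas, the effect is roughly this (orchestrator being an ArchivingOrchestrator instance, values made up):

    config = {"authentication": {"example.com,m.example.com": {"api_token": "TOKEN"}}}
    config = orchestrator.setup_authentication(config)
    # config["authentication"] becomes:
    # {"example.com": {"api_token": "TOKEN"}, "m.example.com": {"api_token": "TOKEN"}}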
# Helper Properties


@ -23,7 +23,6 @@ from __future__ import annotations
from abc import abstractmethod
from typing import IO
import os
import platform
from loguru import logger
from slugify import slugify
@ -33,8 +32,8 @@ from auto_archiver.utils.misc import random_str
from auto_archiver.core import Media, BaseModule, Metadata
from auto_archiver.modules.hash_enricher.hash_enricher import HashEnricher
class Storage(BaseModule):
"""
Base class for implementing storage modules in the media archiving framework.
@ -74,8 +73,8 @@ class Storage(BaseModule):
This method should not be called directly, but instead be called through the 'store' method,
which sets up the media for storage.
"""
logger.debug(f'[{self.__class__.__name__}] storing file {media.filename} with key {media.key}')
with open(media.filename, 'rb') as f:
logger.debug(f"[{self.__class__.__name__}] storing file {media.filename} with key {media.key}")
with open(media.filename, "rb") as f:
return self.uploadf(f, media, **kwargs)
def set_key(self, media: Media, url: str, metadata: Metadata) -> None:
@ -85,7 +84,7 @@ class Storage(BaseModule):
# media key is already set
return
folder = metadata.get_context('folder', '')
folder = metadata.get_context("folder", "")
filename, ext = os.path.splitext(media.filename)
# Handle path_generator logic
@ -105,12 +104,11 @@ class Storage(BaseModule):
filename = random_str(24)
elif filename_generator == "static":
# load the hash_enricher module
he = self.module_factory.get_module("hash_enricher", self.config)
he: HashEnricher = self.module_factory.get_module("hash_enricher", self.config)
hd = he.calculate_hash(media.filename)
filename = hd[:24]
else:
raise ValueError(f"Invalid filename_generator: {filename_generator}")
key = os.path.join(folder, path, f"{filename}{ext}")
media._key = key


@ -3,11 +3,13 @@ from pathlib import Path
import argparse
import json
def example_validator(value):
if "example" not in value:
raise argparse.ArgumentTypeError(f"{value} is not a valid value for this argument")
return value
def positive_number(value):
if value < 0:
raise argparse.ArgumentTypeError(f"{value} is not a positive number")
@ -19,5 +21,6 @@ def valid_file(value):
raise argparse.ArgumentTypeError(f"File '{value}' does not exist.")
return value
def json_loader(cli_val):
return json.loads(cli_val)
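
These validators plug straight into argparse's type= hook; for instance json_loader turns a JSON string given on the command line into a Python object (the --payload flag is hypothetical):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--payload", type=json_loader, default={})  # json_loader defined above
    args = parser.parse_args(["--payload", '{"example.com": {"api_token": "TOKEN"}}'])
    assert args.payload == {"example.com": {"api_token": "TOKEN"}}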


@ -11,8 +11,7 @@
"required": True,
"help": "API endpoint where calls are made to",
},
"api_token": {"default": None,
"help": "API Bearer token."},
"api_token": {"default": None, "help": "API Bearer token."},
"public": {
"default": False,
"type": "bool",


@ -15,7 +15,8 @@ class AAApiDb(Database):
"""query the database for the existence of this item.
Helps avoid re-archiving the same URL multiple times.
"""
if not self.use_api_cache: return
if not self.use_api_cache:
return
params = {"url": item.get_url(), "limit": 15}
headers = {"Authorization": f"Bearer {self.api_token}", "accept": "application/json"}
@ -32,22 +33,25 @@ class AAApiDb(Database):
def done(self, item: Metadata, cached: bool = False) -> None:
"""archival result ready - should be saved to DB"""
if not self.store_results: return
if not self.store_results:
return
if cached:
logger.debug(f"skipping saving archive of {item.get_url()} to the AA API because it was cached")
return
logger.debug(f"saving archive of {item.get_url()} to the AA API.")
payload = {
'author_id': self.author_id,
'url': item.get_url(),
'public': self.public,
'group_id': self.group_id,
'tags': list(self.tags),
'result': item.to_json(),
"author_id": self.author_id,
"url": item.get_url(),
"public": self.public,
"group_id": self.group_id,
"tags": list(self.tags),
"result": item.to_json(),
}
headers = {"Authorization": f"Bearer {self.api_token}"}
response = requests.post(os.path.join(self.api_endpoint, "interop/submit-archive"), json=payload, headers=headers)
response = requests.post(
os.path.join(self.api_endpoint, "interop/submit-archive"), json=payload, headers=headers
)
if response.status_code == 201:
logger.success(f"AA API: {response.json()}")


@ -15,7 +15,7 @@
"atlos_url": {
"default": "https://platform.atlos.org",
"help": "The URL of your Atlos instance (e.g., https://platform.atlos.org), without a trailing slash.",
"type": "str"
"type": "str",
},
},
"description": """
@ -42,5 +42,5 @@
- Requires an Atlos account with a project and a valid API token for authentication.
- Ensures only unprocessed, visible, and ready-to-archive URLs are returned.
- Fetches any media items within an Atlos project, regardless of separation into incidents.
"""
""",
}


@ -10,7 +10,6 @@ from auto_archiver.utils import calculate_file_hash
class AtlosFeederDbStorage(Feeder, Database, Storage):
def setup(self) -> requests.Session:
"""create and return a persistent session."""
self.session = requests.Session()
@ -18,9 +17,7 @@ class AtlosFeederDbStorage(Feeder, Database, Storage):
def _get(self, endpoint: str, params: Optional[dict] = None) -> dict:
"""Wrapper for GET requests to the Atlos API."""
url = f"{self.atlos_url}{endpoint}"
response = self.session.get(
url, headers={"Authorization": f"Bearer {self.api_token}"}, params=params
)
response = self.session.get(url, headers={"Authorization": f"Bearer {self.api_token}"}, params=params)
response.raise_for_status()
return response.json()
@ -85,10 +82,7 @@ class AtlosFeederDbStorage(Feeder, Database, Storage):
def _process_metadata(self, item: Metadata) -> dict:
"""Process metadata for storage on Atlos. Will convert any datetime
objects to ISO format."""
return {
k: v.isoformat() if hasattr(v, "isoformat") else v
for k, v in item.metadata.items()
}
return {k: v.isoformat() if hasattr(v, "isoformat") else v for k, v in item.metadata.items()}
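
In other words, anything datetime-like is serialised and everything else passes through untouched, e.g.:

    import datetime

    raw = {"url": "https://example.com", "timestamp": datetime.datetime(2024, 1, 2, tzinfo=datetime.timezone.utc)}
    processed = {k: v.isoformat() if hasattr(v, "isoformat") else v for k, v in raw.items()}
    # {'url': 'https://example.com', 'timestamp': '2024-01-02T00:00:00+00:00'}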
def done(self, item: Metadata, cached: bool = False) -> None:
"""Mark an item as successfully archived in Atlos."""
@ -129,10 +123,7 @@ class AtlosFeederDbStorage(Feeder, Database, Storage):
# Check whether the media has already been uploaded
source_material = self._get(f"/api/v2/source_material/{atlos_id}")["result"]
existing_media = [
artifact.get("file_hash_sha256")
for artifact in source_material.get("artifacts", [])
]
existing_media = [artifact.get("file_hash_sha256") for artifact in source_material.get("artifacts", [])]
if media_hash in existing_media:
logger.info(f"{media.filename} with SHA256 {media_hash} already uploaded to Atlos")
return True
@ -150,4 +141,3 @@ class AtlosFeederDbStorage(Feeder, Database, Storage):
def uploadf(self, file: IO[bytes], key: str, **kwargs: dict) -> bool:
"""Upload a file-like object; not implemented."""
pass


@ -1,16 +1,15 @@
{
'name': 'Command Line Feeder',
'type': ['feeder'],
'entry_point': 'cli_feeder::CLIFeeder',
'requires_setup': False,
'description': 'Feeds URLs to orchestrator from the command line',
'configs': {
'urls': {
'default': None,
'help': 'URL(s) to archive, either a single URL or a list of urls, should not come from config.yaml',
"name": "Command Line Feeder",
"type": ["feeder"],
"entry_point": "cli_feeder::CLIFeeder",
"requires_setup": False,
"configs": {
"urls": {
"default": None,
"help": "URL(s) to archive, either a single URL or a list of urls, should not come from config.yaml",
},
},
'description': """
"description": """
The Command Line Feeder is the default enabled feeder for the Auto Archiver. It allows you to pass URLs directly to the orchestrator from the command line
without the need to specify any additional configuration or command line arguments:


@ -3,15 +3,17 @@ from loguru import logger
from auto_archiver.core.feeder import Feeder
from auto_archiver.core.metadata import Metadata
class CLIFeeder(Feeder):
def setup(self) -> None:
self.urls = self.config['urls']
self.urls = self.config["urls"]
if not self.urls:
raise ValueError("No URLs provided. Please provide at least one URL via the command line, or set up an alternative feeder. Use --help for more information.")
raise ValueError(
"No URLs provided. Please provide at least one URL via the command line, or set up an alternative feeder. Use --help for more information."
)
def __iter__(self) -> Metadata:
urls = self.config['urls']
urls = self.config["urls"]
for url in urls:
logger.debug(f"Processing {url}")
m = Metadata().set_url(url)


@ -2,9 +2,8 @@
"name": "CSV Database",
"type": ["database"],
"requires_setup": False,
"dependencies": {"python": ["loguru"]
},
'entry_point': 'csv_db::CSVDb',
"dependencies": {"python": ["loguru"]},
"entry_point": "csv_db::CSVDb",
"configs": {
"csv_file": {"default": "db.csv", "help": "CSV file name to save metadata to"},
},


@ -18,5 +18,6 @@ class CSVDb(Database):
is_empty = not os.path.isfile(self.csv_file) or os.path.getsize(self.csv_file) == 0
with open(self.csv_file, "a", encoding="utf-8") as outf:
writer = DictWriter(outf, fieldnames=asdict(Metadata()))
if is_empty: writer.writeheader()
if is_empty:
writer.writeheader()
writer.writerow(asdict(item))


@ -1,13 +1,9 @@
{
"name": "CSV Feeder",
"type": ["feeder"],
"requires_setup": False,
"dependencies": {
"python": ["loguru"],
"bin": [""]
},
'requires_setup': True,
'entry_point': "csv_feeder::CSVFeeder",
"dependencies": {"python": ["loguru"], "bin": [""]},
"requires_setup": True,
"entry_point": "csv_feeder::CSVFeeder",
"configs": {
"files": {
"default": None,
@ -20,7 +16,7 @@
"column": {
"default": None,
"help": "Column number or name to read the URLs from, 0-indexed",
}
},
},
"description": """
Reads URLs from CSV files and feeds them into the archiving process.
@ -33,5 +29,5 @@
### Setup
- Input files should be formatted with one URL per line, with or without a header row.
- If you have a header row, you can specify the column number or name to read URLs from using the 'column' config option.
"""
""",
}


@ -5,11 +5,10 @@ from auto_archiver.core import Feeder
from auto_archiver.core import Metadata
from auto_archiver.utils import url_or_none
class CSVFeeder(Feeder):
column = None
def __iter__(self) -> Metadata:
for file in self.files:
with open(file, "r") as f:
@ -20,7 +19,9 @@ class CSVFeeder(Feeder):
try:
url_column = first_row.index(url_column)
except ValueError:
logger.error(f"Column {url_column} not found in header row: {first_row}. Did you set the 'column' config correctly?")
logger.error(
f"Column {url_column} not found in header row: {first_row}. Did you set the 'column' config correctly?"
)
return
elif not (url_or_none(first_row[url_column])):
# it's a header row, but we've been given a column number already


@ -22,11 +22,18 @@
"help": "how to name stored files: 'random' creates a random string; 'static' uses a hash, with the settings of the 'hash_enricher' module (defaults to SHA256 if not enabled).",
"choices": ["random", "static"],
},
"root_folder_id": {"required": True,
"help": "root google drive folder ID to use as storage, found in URL: 'https://drive.google.com/drive/folders/FOLDER_ID'"},
"oauth_token": {"default": None,
"help": "JSON filename with Google Drive OAuth token: check auto-archiver repository scripts folder for create_update_gdrive_oauth_token.py. NOTE: storage used will count towards owner of GDrive folder, therefore it is best to use oauth_token_filename over service_account."},
"service_account": {"default": "secrets/service_account.json", "help": "service account JSON file path, same as used for Google Sheets. NOTE: storage used will count towards the developer account."},
"root_folder_id": {
"required": True,
"help": "root google drive folder ID to use as storage, found in URL: 'https://drive.google.com/drive/folders/FOLDER_ID'",
},
"oauth_token": {
"default": None,
"help": "JSON filename with Google Drive OAuth token: check auto-archiver repository scripts folder for create_update_gdrive_oauth_token.py. NOTE: storage used will count towards owner of GDrive folder, therefore it is best to use oauth_token_filename over service_account.",
},
"service_account": {
"default": "secrets/service_account.json",
"help": "service account JSON file path, same as used for Google Sheets. NOTE: storage used will count towards the developer account.",
},
},
"description": """
@ -94,5 +101,5 @@ This module integrates Google Drive as a storage backend, enabling automatic fol
https://davemateer.com/2022/04/28/google-drive-with-python#tokens
"""
""",
}


@ -1,4 +1,3 @@
import json
import os
import time
@ -15,12 +14,9 @@ from auto_archiver.core import Media
from auto_archiver.core import Storage
class GDriveStorage(Storage):
def setup(self) -> None:
self.scopes = ['https://www.googleapis.com/auth/drive']
self.scopes = ["https://www.googleapis.com/auth/drive"]
# Initialize Google Drive service
self._setup_google_drive_service()
@ -37,25 +33,25 @@ class GDriveStorage(Storage):
def _initialize_with_oauth_token(self):
"""Initialize Google Drive service with OAuth token."""
with open(self.oauth_token, 'r') as stream:
with open(self.oauth_token, "r") as stream:
creds_json = json.load(stream)
creds_json['refresh_token'] = creds_json.get("refresh_token", "")
creds_json["refresh_token"] = creds_json.get("refresh_token", "")
creds = Credentials.from_authorized_user_info(creds_json, self.scopes)
if not creds.valid and creds.expired and creds.refresh_token:
creds.refresh(Request())
with open(self.oauth_token, 'w') as token_file:
with open(self.oauth_token, "w") as token_file:
logger.debug("Saving refreshed OAuth token.")
token_file.write(creds.to_json())
elif not creds.valid:
raise ValueError("Invalid OAuth token. Please regenerate the token.")
return build('drive', 'v3', credentials=creds)
return build("drive", "v3", credentials=creds)
def _initialize_with_service_account(self):
"""Initialize Google Drive service with service account."""
creds = service_account.Credentials.from_service_account_file(self.service_account, scopes=self.scopes)
return build('drive', 'v3', credentials=creds)
return build("drive", "v3", credentials=creds)
def get_cdn_url(self, media: Media) -> str:
"""
@ -79,7 +75,7 @@ class GDriveStorage(Storage):
return f"https://drive.google.com/file/d/{file_id}/view?usp=sharing"
def upload(self, media: Media, **kwargs) -> bool:
logger.debug(f'[{self.__class__.__name__}] storing file {media.filename} with key {media.key}')
logger.debug(f"[{self.__class__.__name__}] storing file {media.filename} with key {media.key}")
"""
1. for each sub-folder in the path check if exists or create
2. upload file to root_id/other_paths.../filename
@ -95,25 +91,30 @@ class GDriveStorage(Storage):
parent_id = upload_to
# upload file to gd
logger.debug(f'uploading {filename=} to folder id {upload_to}')
file_metadata = {
'name': [filename],
'parents': [upload_to]
}
logger.debug(f"uploading {filename=} to folder id {upload_to}")
file_metadata = {"name": [filename], "parents": [upload_to]}
media = MediaFileUpload(media.filename, resumable=True)
gd_file = self.service.files().create(supportsAllDrives=True, body=file_metadata, media_body=media, fields='id').execute()
logger.debug(f'uploadf: uploaded file {gd_file["id"]} successfully in folder={upload_to}')
gd_file = (
self.service.files()
.create(supportsAllDrives=True, body=file_metadata, media_body=media, fields="id")
.execute()
)
logger.debug(f"uploadf: uploaded file {gd_file['id']} successfully in folder={upload_to}")
# must be implemented even if unused
def uploadf(self, file: IO[bytes], key: str, **kwargs: dict) -> bool: pass
def uploadf(self, file: IO[bytes], key: str, **kwargs: dict) -> bool:
pass
def _get_id_from_parent_and_name(self, parent_id: str,
def _get_id_from_parent_and_name(
self,
parent_id: str,
name: str,
retries: int = 1,
sleep_seconds: int = 10,
use_mime_type: bool = False,
raise_on_missing: bool = True,
use_cache=False):
use_cache=False,
):
"""
Retrieves the id of a folder or file from its @name and the @parent_id folder
Optionally does multiple @retries and sleeps @sleep_seconds between them
@ -134,32 +135,39 @@ class GDriveStorage(Storage):
debug_header: str = f"[searching {name=} in {parent_id=}]"
query_string = f"'{parent_id}' in parents and name = '{name}' and trashed = false "
if use_mime_type:
query_string += f" and mimeType='application/vnd.google-apps.folder' "
query_string += " and mimeType='application/vnd.google-apps.folder' "
for attempt in range(retries):
results = self.service.files().list(
results = (
self.service.files()
.list(
# both below for Google Shared Drives
supportsAllDrives=True,
includeItemsFromAllDrives=True,
q=query_string,
spaces='drive', # ie not appDataFolder or photos
fields='files(id, name)'
).execute()
items = results.get('files', [])
spaces="drive", # ie not appDataFolder or photos
fields="files(id, name)",
)
.execute()
)
items = results.get("files", [])
if len(items) > 0:
logger.debug(f"{debug_header} found {len(items)} matches, returning last of {','.join([i['id'] for i in items])}")
_id = items[-1]['id']
if use_cache: self.api_cache[cache_key] = _id
logger.debug(
f"{debug_header} found {len(items)} matches, returning last of {','.join([i['id'] for i in items])}"
)
_id = items[-1]["id"]
if use_cache:
self.api_cache[cache_key] = _id
return _id
else:
logger.debug(f'{debug_header} not found, attempt {attempt+1}/{retries}.')
logger.debug(f"{debug_header} not found, attempt {attempt + 1}/{retries}.")
if attempt < retries - 1:
logger.debug(f'sleeping for {sleep_seconds} second(s)')
logger.debug(f"sleeping for {sleep_seconds} second(s)")
time.sleep(sleep_seconds)
if raise_on_missing:
raise ValueError(f'{debug_header} not found after {retries} attempt(s)')
raise ValueError(f"{debug_header} not found after {retries} attempt(s)")
return None
def _mkdir(self, name: str, parent_id: str):
@ -167,12 +175,7 @@ class GDriveStorage(Storage):
Creates a new GDrive folder @name inside folder @parent_id
Returns id of the created folder
"""
logger.debug(f'Creating new folder with {name=} inside {parent_id=}')
file_metadata = {
'name': [name],
'mimeType': 'application/vnd.google-apps.folder',
'parents': [parent_id]
}
gd_folder = self.service.files().create(supportsAllDrives=True, body=file_metadata, fields='id').execute()
return gd_folder.get('id')
logger.debug(f"Creating new folder with {name=} inside {parent_id=}")
file_metadata = {"name": [name], "mimeType": "application/vnd.google-apps.folder", "parents": [parent_id]}
gd_folder = self.service.files().create(supportsAllDrives=True, body=file_metadata, fields="id").execute()
return gd_folder.get("id")


@ -4,15 +4,16 @@ from auto_archiver.core.extractor import Extractor
from auto_archiver.core.metadata import Metadata, Media
from .dropin import GenericDropin, InfoExtractor
class Bluesky(GenericDropin):
def create_metadata(self, post: dict, ie_instance: InfoExtractor, archiver: Extractor, url: str) -> Metadata:
result = Metadata()
result.set_url(url)
result.set_title(post["record"]["text"])
result.set_timestamp(post["record"]["createdAt"])
for k, v in self._get_post_data(post).items():
if v: result.set(k, v)
if v:
result.set(k, v)
# download if embeds present (1 video XOR >=1 images)
for media in self._download_bsky_embeds(post, archiver):
@ -23,7 +24,7 @@ class Bluesky(GenericDropin):
def extract_post(self, url: str, ie_instance: InfoExtractor) -> dict:
# TODO: If/when this PR (https://github.com/yt-dlp/yt-dlp/pull/12098) is merged on ytdlp, remove the comments and delete the code below
handle, video_id = ie_instance._match_valid_url(url).group('handle', 'id')
handle, video_id = ie_instance._match_valid_url(url).group("handle", "id")
return ie_instance._extract_post(handle=handle, post_id=video_id)
def _download_bsky_embeds(self, post: dict, archiver: Extractor) -> list[Media]:
@ -37,16 +38,15 @@ class Bluesky(GenericDropin):
media_url = "https://bsky.social/xrpc/com.atproto.sync.getBlob?cid={}&did={}"
for image_media in image_medias:
url = media_url.format(image_media['image']['ref']['$link'], post['author']['did'])
url = media_url.format(image_media["image"]["ref"]["$link"], post["author"]["did"])
image_media = archiver.download_from_url(url)
media.append(Media(image_media))
for video_media in video_medias:
url = media_url.format(video_media['ref']['$link'], post['author']['did'])
url = media_url.format(video_media["ref"]["$link"], post["author"]["did"])
video_media = archiver.download_from_url(url)
media.append(Media(video_media))
return media
def _get_post_data(self, post: dict) -> dict:
"""
Extracts relevant information returned by the .getPostThread api call (excluding text/created_at): author, mentions, tags, links.


@ -2,6 +2,7 @@ from yt_dlp.extractor.common import InfoExtractor
from auto_archiver.core.metadata import Metadata
from auto_archiver.core.extractor import Extractor
class GenericDropin:
"""Base class for dropins for the generic extractor.
@ -29,14 +30,12 @@ class GenericDropin:
"""
raise NotImplementedError("This method should be implemented in the subclass")
def create_metadata(self, post: dict, ie_instance: InfoExtractor, archiver: Extractor, url: str) -> Metadata:
"""
This method should create a Metadata object from the post data.
"""
raise NotImplementedError("This method should be implemented in the subclass")
def skip_ytdlp_download(self, url: str, ie_instance: InfoExtractor):
"""
This method should return True if you want to skip the ytdlp download method.


@ -3,10 +3,9 @@ from .dropin import GenericDropin
class Facebook(GenericDropin):
def extract_post(self, url: str, ie_instance):
video_id = ie_instance._match_valid_url(url).group('id')
ie_instance._download_webpage(
url.replace('://m.facebook.com/', '://www.facebook.com/'), video_id)
webpage = ie_instance._download_webpage(url, ie_instance._match_valid_url(url).group('id'))
video_id = ie_instance._match_valid_url(url).group("id")
ie_instance._download_webpage(url.replace("://m.facebook.com/", "://www.facebook.com/"), video_id)
webpage = ie_instance._download_webpage(url, ie_instance._match_valid_url(url).group("id"))
# TODO: fix once https://github.com/yt-dlp/yt-dlp/pull/12275 is merged
post_data = ie_instance._extract_metadata(webpage)
@ -14,5 +13,5 @@ class Facebook(GenericDropin):
def create_metadata(self, post: dict, ie_instance, archiver, url):
metadata = archiver.create_metadata(url)
metadata.set_title(post.get('title')).set_content(post.get('description')).set_post_data(post)
metadata.set_title(post.get("title")).set_content(post.get("description")).set_post_data(post)
return metadata


@ -1,4 +1,5 @@
import datetime, os
import datetime
import os
import importlib
import subprocess
from typing import Generator, Type
@ -12,6 +13,11 @@ from loguru import logger
from auto_archiver.core.extractor import Extractor
from auto_archiver.core import Metadata, Media
class SkipYtdlp(Exception):
pass
class GenericExtractor(Extractor):
_dropins = {}
@ -20,8 +26,8 @@ class GenericExtractor(Extractor):
if self.ytdlp_update_interval < 0:
return
use_secrets = os.path.exists('secrets')
path = os.path.join('secrets' if use_secrets else '', '.ytdlp-update')
use_secrets = os.path.exists("secrets")
path = os.path.join("secrets" if use_secrets else "", ".ytdlp-update")
next_update_check = None
if os.path.exists(path):
with open(path, "r") as f:
@ -36,8 +42,11 @@ class GenericExtractor(Extractor):
def update_ytdlp(self):
logger.info("Checking and updating yt-dlp...")
logger.info(f"Tip: change the 'ytdlp_update_interval' setting to control how often yt-dlp is updated. Set to -1 to disable or 0 to enable on every run. Current setting: {self.ytdlp_update_interval}")
logger.info(
f"Tip: change the 'ytdlp_update_interval' setting to control how often yt-dlp is updated. Set to -1 to disable or 0 to enable on every run. Current setting: {self.ytdlp_update_interval}"
)
from importlib.metadata import version as get_version
old_version = get_version("yt-dlp")
try:
# try and update with pip (this works inside poetry environment and in a normal virtualenv)
@ -67,7 +76,9 @@ class GenericExtractor(Extractor):
"""
return any(self.suitable_extractors(url))
def download_additional_media(self, video_data: dict, info_extractor: InfoExtractor, metadata: Metadata) -> Metadata:
def download_additional_media(
self, video_data: dict, info_extractor: InfoExtractor, metadata: Metadata
) -> Metadata:
"""
Downloads additional media like images, comments, subtitles, etc.
@ -76,7 +87,7 @@ class GenericExtractor(Extractor):
# Just get the main thumbnail. More thumbnails are available in
# video_data['thumbnails'] should they be required
thumbnail_url = video_data.get('thumbnail')
thumbnail_url = video_data.get("thumbnail")
if thumbnail_url:
try:
cover_image_path = self.download_from_url(thumbnail_url)
@ -99,14 +110,64 @@ class GenericExtractor(Extractor):
Clean up the ytdlp generic video data to make it more readable and remove unnecessary keys that ytdlp adds
"""
base_keys = ['formats', 'thumbnail', 'display_id', 'epoch', 'requested_downloads',
'duration_string', 'thumbnails', 'http_headers', 'webpage_url_basename', 'webpage_url_domain',
'extractor', 'extractor_key', 'playlist', 'playlist_index', 'duration_string', 'protocol', 'requested_subtitles',
'format_id', 'acodec', 'vcodec', 'ext', 'epoch', '_has_drm', 'filesize', 'audio_ext', 'video_ext', 'vbr', 'abr',
'resolution', 'dynamic_range', 'aspect_ratio', 'cookies', 'format', 'quality', 'preference', 'artists',
'channel_id', 'subtitles', 'tbr', 'url', 'original_url', 'automatic_captions', 'playable_in_embed', 'live_status',
'_format_sort_fields', 'chapters', 'requested_formats', 'format_note',
'audio_channels', 'asr', 'fps', 'was_live', 'is_live', 'heatmap', 'age_limit', 'stretched_ratio']
base_keys = [
"formats",
"thumbnail",
"display_id",
"epoch",
"requested_downloads",
"duration_string",
"thumbnails",
"http_headers",
"webpage_url_basename",
"webpage_url_domain",
"extractor",
"extractor_key",
"playlist",
"playlist_index",
"duration_string",
"protocol",
"requested_subtitles",
"format_id",
"acodec",
"vcodec",
"ext",
"epoch",
"_has_drm",
"filesize",
"audio_ext",
"video_ext",
"vbr",
"abr",
"resolution",
"dynamic_range",
"aspect_ratio",
"cookies",
"format",
"quality",
"preference",
"artists",
"channel_id",
"subtitles",
"tbr",
"url",
"original_url",
"automatic_captions",
"playable_in_embed",
"live_status",
"_format_sort_fields",
"chapters",
"requested_formats",
"format_note",
"audio_channels",
"asr",
"fps",
"was_live",
"is_live",
"heatmap",
"age_limit",
"stretched_ratio",
]
dropin = self.dropin_for_name(info_extractor.ie_key())
if dropin:
@ -126,23 +187,30 @@ class GenericExtractor(Extractor):
result = self.download_additional_media(video_data, info_extractor, result)
# keep both 'title' and 'fulltitle', but prefer 'title', falling back to 'fulltitle' if it doesn't exist
result.set_title(video_data.pop('title', video_data.pop('fulltitle', "")))
result.set_title(video_data.pop("title", video_data.pop("fulltitle", "")))
result.set_url(url)
if "description" in video_data: result.set_content(video_data["description"])
if "description" in video_data:
result.set_content(video_data["description"])
# extract comments if enabled
if self.comments:
result.set("comments", [{
result.set(
"comments",
[
{
"text": c["text"],
"author": c["author"],
"timestamp": datetime.datetime.fromtimestamp(c.get("timestamp"), tz = datetime.timezone.utc)
} for c in video_data.get("comments", [])])
"timestamp": datetime.datetime.fromtimestamp(c.get("timestamp"), tz=datetime.timezone.utc),
}
for c in video_data.get("comments", [])
],
)
# then add the common metadata
if timestamp := video_data.pop("timestamp", None):
timestamp = datetime.datetime.fromtimestamp(timestamp, tz=datetime.timezone.utc).isoformat()
result.set_timestamp(timestamp)
if upload_date := video_data.pop("upload_date", None):
upload_date = datetime.datetime.strptime(upload_date, '%Y%m%d').replace(tzinfo=datetime.timezone.utc)
upload_date = datetime.datetime.strptime(upload_date, "%Y%m%d").replace(tzinfo=datetime.timezone.utc)
result.set("upload_date", upload_date)
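# Illustrative aside (not part of the changeset): with this format string an upload_date
# value such as "20221229" becomes a timezone-aware UTC datetime, e.g.
#   datetime.datetime.strptime("20221229", "%Y%m%d").replace(tzinfo=datetime.timezone.utc)
#   -> datetime.datetime(2022, 12, 29, 0, 0, tzinfo=datetime.timezone.utc)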
# then clean away any keys we don't want
@ -173,18 +241,20 @@ class GenericExtractor(Extractor):
post_data = dropin.extract_post(url, ie_instance)
return dropin.create_metadata(post_data, ie_instance, self, url)
def get_metadata_for_video(self, data: dict, info_extractor: Type[InfoExtractor], url: str, ydl: yt_dlp.YoutubeDL) -> Metadata:
def get_metadata_for_video(
self, data: dict, info_extractor: Type[InfoExtractor], url: str, ydl: yt_dlp.YoutubeDL
) -> Metadata:
# this time download
ydl.params['getcomments'] = self.comments
ydl.params["getcomments"] = self.comments
# TODO: for playlist or long lists of videos, how to download one at a time so they can be stored before the next one is downloaded?
data = ydl.extract_info(url, ie_key=info_extractor.ie_key(), download=True)
if "entries" in data:
entries = data.get("entries", [])
if not len(entries):
logger.warning('YoutubeDLArchiver could not find any video')
logger.warning("YoutubeDLArchiver could not find any video")
return False
else: entries = [data]
else:
entries = [data]
result = Metadata()
@ -192,17 +262,18 @@ class GenericExtractor(Extractor):
try:
filename = ydl.prepare_filename(entry)
if not os.path.exists(filename):
filename = filename.split('.')[0] + '.mkv'
filename = filename.split(".")[0] + ".mkv"
new_media = Media(filename)
for x in ["duration", "original_url", "fulltitle", "description", "upload_date"]:
if x in entry: new_media.set(x, entry[x])
if x in entry:
new_media.set(x, entry[x])
# read text from subtitles if enabled
if self.subtitles:
for lang, val in (data.get('requested_subtitles') or {}).items():
for lang, val in (data.get("requested_subtitles") or {}).items():
try:
subs = pysubs2.load(val.get('filepath'), encoding="utf-8")
subs = pysubs2.load(val.get("filepath"), encoding="utf-8")
text = " ".join([line.text for line in subs])
new_media.set(f"subtitles_{lang}", text)
except Exception as e:
@ -221,6 +292,7 @@ class GenericExtractor(Extractor):
return None
dropin_class_name = dropin_name.title()
def _load_dropin(dropin):
dropin_class = getattr(dropin, dropin_class_name)()
return self._dropins.setdefault(dropin_name, dropin_class)
@ -261,18 +333,19 @@ class GenericExtractor(Extractor):
use the extractor's _extract_post method to get the post metadata if possible.
"""
# when getting info without download, we also don't need the comments
ydl.params['getcomments'] = False
ydl.params["getcomments"] = False
result = False
dropin_submodule = self.dropin_for_name(info_extractor.ie_key())
try:
if dropin_submodule and dropin_submodule.skip_ytdlp_download(info_extractor, url):
raise Exception(f"Skipping using ytdlp to download files for {info_extractor.ie_key()}")
logger.debug(f"Skipping using ytdlp to download files for {info_extractor.ie_key()}")
raise SkipYtdlp()
# don't download since it can be a live stream
data = ydl.extract_info(url, ie_key=info_extractor.ie_key(), download=False)
if data.get('is_live', False) and not self.livestreams:
if data.get("is_live", False) and not self.livestreams:
logger.warning("Livestream detected, skipping due to 'livestreams' configuration setting")
return False
# it's a valid video that yt-dlp can download out of the box
@ -283,14 +356,23 @@ class GenericExtractor(Extractor):
# don't clutter the logs with issues about the 'generic' extractor not having a dropin
return False
logger.debug(f'Issue using "{info_extractor.IE_NAME}" extractor to download video (error: {repr(e)}), attempting to use extractor to get post data instead')
if not isinstance(e, SkipYtdlp):
logger.debug(
f'Issue using "{info_extractor.IE_NAME}" extractor to download video (error: {repr(e)}), attempting to use extractor to get post data instead'
)
try:
result = self.get_metadata_for_post(info_extractor, url, ydl)
except (yt_dlp.utils.DownloadError, yt_dlp.utils.ExtractorError) as post_e:
logger.error(f'Error downloading metadata for post: {post_e}')
logger.error("Error downloading metadata for post: {error}", error=str(post_e))
return False
except Exception as generic_e:
logger.debug(f'Attempt to extract using ytdlp extractor "{info_extractor.IE_NAME}" failed: \n {repr(generic_e)}', exc_info=True)
logger.debug(
'Attempt to extract using ytdlp extractor "{name}" failed: \n {error}',
name=info_extractor.IE_NAME,
error=str(generic_e),
exc_info=True,
)
return False
if result:
@ -313,38 +395,44 @@ class GenericExtractor(Extractor):
url = url.replace("https://ya.ru", "https://yandex.ru")
item.set("replaced_url", url)
ydl_options = {'outtmpl': os.path.join(self.tmp_dir, f'%(id)s.%(ext)s'),
'quiet': False, 'noplaylist': not self.allow_playlist ,
'writesubtitles': self.subtitles,'writeautomaticsub': self.subtitles,
"live_from_start": self.live_from_start, "proxy": self.proxy,
"max_downloads": self.max_downloads, "playlistend": self.max_downloads}
ydl_options = {
"outtmpl": os.path.join(self.tmp_dir, "%(id)s.%(ext)s"),
"quiet": False,
"noplaylist": not self.allow_playlist,
"writesubtitles": self.subtitles,
"writeautomaticsub": self.subtitles,
"live_from_start": self.live_from_start,
"proxy": self.proxy,
"max_downloads": self.max_downloads,
"playlistend": self.max_downloads,
}
# set up auth
auth = self.auth_for_site(url, extract_cookies=False)
# order of importance: username/password -> api_key -> cookie -> cookies_from_browser -> cookies_file
if auth:
if 'username' in auth and 'password' in auth:
logger.debug(f'Using provided auth username and password for {url}')
ydl_options['username'] = auth['username']
ydl_options['password'] = auth['password']
elif 'cookie' in auth:
logger.debug(f'Using provided auth cookie for {url}')
yt_dlp.utils.std_headers['cookie'] = auth['cookie']
elif 'cookies_from_browser' in auth:
logger.debug(f'Using extracted cookies from browser {auth["cookies_from_browser"]} for {url}')
ydl_options['cookiesfrombrowser'] = auth['cookies_from_browser']
elif 'cookies_file' in auth:
logger.debug(f'Using cookies from file {auth["cookies_file"]} for {url}')
ydl_options['cookiefile'] = auth['cookies_file']
if "username" in auth and "password" in auth:
logger.debug(f"Using provided auth username and password for {url}")
ydl_options["username"] = auth["username"]
ydl_options["password"] = auth["password"]
elif "cookie" in auth:
logger.debug(f"Using provided auth cookie for {url}")
yt_dlp.utils.std_headers["cookie"] = auth["cookie"]
elif "cookies_from_browser" in auth:
logger.debug(f"Using extracted cookies from browser {auth['cookies_from_browser']} for {url}")
ydl_options["cookiesfrombrowser"] = auth["cookies_from_browser"]
elif "cookies_file" in auth:
logger.debug(f"Using cookies from file {auth['cookies_file']} for {url}")
ydl_options["cookiefile"] = auth["cookies_file"]
ydl = yt_dlp.YoutubeDL(ydl_options) # allsubtitles and subtitleslangs not working as expected, so default lang is always "en"
ydl = yt_dlp.YoutubeDL(
ydl_options
) # allsubtitles and subtitleslangs not working as expected, so default lang is always "en"
for info_extractor in self.suitable_extractors(url):
result = self.download_for_extractor(info_extractor, url, ydl)
if result:
return result
return False
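A minimal illustration (not part of this changeset) of the auth entry shape the precedence checks above expect; the key names come from the branches, the values are invented:
# hypothetical return value of auth_for_site(url, extract_cookies=False)
auth = {
    "username": "archive-bot",                  # used together with "password" (highest precedence)
    "password": "example-password",
    # "cookie": "sessionid=abc123;",            # used only when username/password are absent
    # "cookies_from_browser": "firefox",        # next fallback
    # "cookies_file": "secrets/cookies.txt",    # last fallback
}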


@ -0,0 +1,72 @@
import requests
from loguru import logger
from auto_archiver.core import Metadata, Media
from datetime import datetime, timezone
from .dropin import GenericDropin
class Tiktok(GenericDropin):
"""
TikTok dropin for the Generic Extractor that uses an unofficial API if/when ytdlp fails.
It's useful for capturing content that requires a login, like sensitive content.
"""
TIKWM_ENDPOINT = "https://www.tikwm.com/api/?url={url}"
def extract_post(self, url: str, ie_instance):
logger.debug(f"Using Tikwm API to attempt to download tiktok video from {url=}")
endpoint = self.TIKWM_ENDPOINT.format(url=url)
r = requests.get(endpoint)
if r.status_code != 200:
raise ValueError(f"unexpected status code '{r.status_code}' from tikwm.com for {url=}:")
try:
json_response = r.json()
except ValueError:
raise ValueError(f"failed to parse JSON response from tikwm.com for {url=}")
if not json_response.get("msg") == "success" or not (api_data := json_response.get("data", {})):
raise ValueError(f"failed to get a valid response from tikwm.com for {url=}: {repr(json_response)}")
# tries to get the non-watermarked version first
video_url = api_data.pop("play", api_data.pop("wmplay", None))
if not video_url:
raise ValueError(f"no valid video URL found in response from tikwm.com for {url=}")
api_data["video_url"] = video_url
return api_data
def create_metadata(self, post: dict, ie_instance, archiver, url):
# prepare result, start by downloading video
result = Metadata()
video_url = post.pop("video_url")
# get the cover if possible
cover_url = post.pop("origin_cover", post.pop("cover", post.pop("ai_dynamic_cover", None)))
if cover_url and (cover_downloaded := archiver.download_from_url(cover_url)):
result.add_media(Media(cover_downloaded))
# get the video or fail
video_downloaded = archiver.download_from_url(video_url, f"vid_{post.get('id', '')}")
if not video_downloaded:
logger.error(f"failed to download video from {video_url}")
return False
video_media = Media(video_downloaded)
if duration := post.pop("duration", None):
video_media.set("duration", duration)
result.add_media(video_media)
# add remaining metadata
result.set_title(post.pop("title", ""))
if created_at := post.pop("create_time", None):
result.set_timestamp(datetime.fromtimestamp(created_at, tz=timezone.utc))
if author := post.pop("author", None):
result.set("author", author)
result.set("api_data", post)
return result
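Purely as an illustration (values invented; the key names are the ones extract_post and create_metadata read above), a tikwm.com response of roughly this shape would be accepted:
example_response = {
    "msg": "success",
    "data": {
        "id": "7300000000000000000",                      # used in the vid_<id> filename
        "play": "https://example.com/no-watermark.mp4",   # preferred, non-watermarked video
        "wmplay": "https://example.com/watermarked.mp4",  # fallback when "play" is missing
        "origin_cover": "https://example.com/cover.jpg",  # cover image, archived if it downloads
        "duration": 15,                                    # seconds, stored on the video Media
        "title": "example caption",
        "create_time": 1700000000,                         # unix timestamp, converted to UTC
        "author": {"nickname": "example user"},            # stored as-is; structure assumed
    },
}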


@ -9,11 +9,11 @@ from dateutil.parser import parse as parse_dt
from .dropin import GenericDropin
class Truth(GenericDropin):
class Truth(GenericDropin):
def extract_post(self, url, ie_instance: InfoExtractor) -> dict:
video_id = ie_instance._match_id(url)
truthsocial_url = f'https://truthsocial.com/api/v1/statuses/{video_id}'
truthsocial_url = f"https://truthsocial.com/api/v1/statuses/{video_id}"
return ie_instance._download_json(truthsocial_url, video_id)
def skip_ytdlp_download(self, url, ie_instance: Type[InfoExtractor]) -> bool:
@ -32,12 +32,23 @@ class Truth(GenericDropin):
result = Metadata()
result.set_url(url)
timestamp = post['created_at'] # format is 2022-12-29T19:51:18.161Z
timestamp = post["created_at"] # format is 2022-12-29T19:51:18.161Z
result.set_timestamp(parse_dt(timestamp))
result.set('description', post['content'])
result.set('author', post['account']['username'])
result.set("description", post["content"])
result.set("author", post["account"]["username"])
for key in ['replies_count', 'reblogs_count', 'favourites_count', ('account', 'followers_count'), ('account', 'following_count'), ('account', 'statuses_count'), ('account', 'display_name'), 'language', 'in_reply_to_account', 'replies_count']:
for key in [
"replies_count",
"reblogs_count",
"favourites_count",
("account", "followers_count"),
("account", "following_count"),
("account", "statuses_count"),
("account", "display_name"),
"language",
"in_reply_to_account",
"replies_count",
]:
if isinstance(key, tuple):
store_key = " ".join(key)
else:
@ -45,8 +56,8 @@ class Truth(GenericDropin):
result.set(store_key, traverse_obj(post, key))
# add the media
for media in post.get('media_attachments', []):
filename = archiver.download_from_url(media['url'])
result.add_media(Media(filename), id=media.get('id'))
for media in post.get("media_attachments", []):
filename = archiver.download_from_url(media["url"])
result.add_media(Media(filename), id=media.get("id"))
return result


@ -1,4 +1,6 @@
import re, mimetypes, json
import re
import mimetypes
import json
from datetime import datetime
from loguru import logger
@ -10,9 +12,8 @@ from auto_archiver.core.extractor import Extractor
from .dropin import GenericDropin, InfoExtractor
class Twitter(GenericDropin):
def choose_variant(self, variants):
# choosing the highest quality possible
variant, width, height = None, 0, 0
@ -29,42 +30,41 @@ class Twitter(GenericDropin):
return variant
def extract_post(self, url: str, ie_instance: InfoExtractor):
twid = ie_instance._match_valid_url(url).group('id')
twid = ie_instance._match_valid_url(url).group("id")
return ie_instance._extract_status(twid=twid)
def create_metadata(self, tweet: dict, ie_instance: InfoExtractor, archiver: Extractor, url: str) -> Metadata:
result = Metadata()
try:
if not tweet.get("user") or not tweet.get("created_at"):
raise ValueError(f"Error retrieving post. Are you sure it exists?")
raise ValueError("Error retrieving post. Are you sure it exists?")
timestamp = datetime.strptime(tweet["created_at"], "%a %b %d %H:%M:%S %z %Y")
except (ValueError, KeyError) as ex:
logger.warning(f"Unable to parse tweet: {str(ex)}\nRetrieved tweet data: {tweet}")
return False
result\
.set_title(tweet.get('full_text', ''))\
.set_content(json.dumps(tweet, ensure_ascii=False))\
.set_timestamp(timestamp)
result.set_title(tweet.get("full_text", "")).set_content(json.dumps(tweet, ensure_ascii=False)).set_timestamp(
timestamp
)
if not tweet.get("entities", {}).get("media"):
logger.debug('No media found, archiving tweet text only')
logger.debug("No media found, archiving tweet text only")
result.status = "twitter-ytdl"
return result
for i, tw_media in enumerate(tweet["entities"]["media"]):
media = Media(filename="")
mimetype = ""
if tw_media["type"] == "photo":
media.set("src", UrlUtil.twitter_best_quality_url(tw_media['media_url_https']))
media.set("src", UrlUtil.twitter_best_quality_url(tw_media["media_url_https"]))
mimetype = "image/jpeg"
elif tw_media["type"] == "video":
variant = self.choose_variant(tw_media['video_info']['variants'])
media.set("src", variant['url'])
mimetype = variant['content_type']
variant = self.choose_variant(tw_media["video_info"]["variants"])
media.set("src", variant["url"])
mimetype = variant["content_type"]
elif tw_media["type"] == "animated_gif":
variant = tw_media['video_info']['variants'][0]
media.set("src", variant['url'])
mimetype = variant['content_type']
variant = tw_media["video_info"]["variants"][0]
media.set("src", variant["url"])
mimetype = variant["content_type"]
ext = mimetypes.guess_extension(mimetype)
media.filename = archiver.download_from_url(media.get("src"), f'{slugify(url)}_{i}{ext}')
media.filename = archiver.download_from_url(media.get("src"), f"{slugify(url)}_{i}{ext}")
result.add_media(media)
return result


@ -12,9 +12,7 @@
"default": None,
"help": "the id of the sheet to archive (alternative to 'sheet' config)",
},
"header": {"default": 1,
"type": "int",
"help": "index of the header row (starts at 1)", "type": "int"},
"header": {"default": 1, "help": "index of the header row (starts at 1)", "type": "int"},
"service_account": {
"default": "secrets/service_account.json",
"help": "service account JSON file path. Learn how to create one: https://gspread.readthedocs.io/en/latest/oauth2.html",
@ -53,19 +51,6 @@
"help": "if True the stored files path will include 'workbook_name/worksheet_name/...'",
"type": "bool",
},
"allow_worksheets": {
"default": set(),
"help": "(CSV) only worksheets whose name is included in allow are included (overrides worksheet_block), leave empty so all are allowed",
},
"block_worksheets": {
"default": set(),
"help": "(CSV) explicitly block some worksheets from being processed",
},
"use_sheet_names_in_stored_paths": {
"default": True,
"type": "bool",
"help": "if True the stored files path will include 'workbook_name/worksheet_name/...'",
}
},
"description": """
GsheetsFeederDatabase


@ -8,6 +8,7 @@ The filtered rows are processed into `Metadata` objects.
- validates the sheet's structure and filters rows based on input configurations.
- Ensures only rows with valid URLs and unprocessed statuses are included.
"""
import os
from typing import Tuple, Union
from urllib.parse import quote
@ -19,11 +20,10 @@ from slugify import slugify
from auto_archiver.core import Feeder, Database, Media
from auto_archiver.core import Metadata
from auto_archiver.modules.gsheet_feeder_db import GWorksheet
from auto_archiver.utils.misc import calculate_file_hash, get_current_timestamp
from auto_archiver.utils.misc import get_current_timestamp
class GsheetsFeederDB(Feeder, Database):
def setup(self) -> None:
self.gsheets_client = gspread.service_account(filename=self.service_account)
# TODO mv to validators
@ -42,24 +42,28 @@ class GsheetsFeederDB(Feeder, Database):
if not self.should_process_sheet(worksheet.title):
logger.debug(f"SKIPPED worksheet '{worksheet.title}' due to allow/block rules")
continue
logger.info(f'Opening worksheet {ii=}: {worksheet.title=} header={self.header}')
logger.info(f"Opening worksheet {ii=}: {worksheet.title=} header={self.header}")
gw = GWorksheet(worksheet, header_row=self.header, columns=self.columns)
if len(missing_cols := self.missing_required_columns(gw)):
logger.warning(f"SKIPPED worksheet '{worksheet.title}' due to missing required column(s) for {missing_cols}")
logger.warning(
f"SKIPPED worksheet '{worksheet.title}' due to missing required column(s) for {missing_cols}"
)
continue
# process and yield metadata here:
yield from self._process_rows(gw)
logger.success(f'Finished worksheet {worksheet.title}')
logger.success(f"Finished worksheet {worksheet.title}")
def _process_rows(self, gw: GWorksheet):
for row in range(1 + self.header, gw.count_rows() + 1):
url = gw.get_cell(row, 'url').strip()
if not len(url): continue
original_status = gw.get_cell(row, 'status')
status = gw.get_cell(row, 'status', fresh=original_status in ['', None])
url = gw.get_cell(row, "url").strip()
if not len(url):
continue
original_status = gw.get_cell(row, "status")
status = gw.get_cell(row, "status", fresh=original_status in ["", None])
# TODO: custom status parser(?) aka should_retry_from_status
if status not in ['', None]: continue
if status not in ["", None]:
continue
# All checks done - archival process starts here
m = Metadata().set_url(url)
@ -70,10 +74,10 @@ class GsheetsFeederDB(Feeder, Database):
# TODO: Check folder value not being recognised
m.set_context("gsheet", {"row": row, "worksheet": gw})
if gw.get_cell_or_default(row, 'folder', "") is None:
folder = ''
if gw.get_cell_or_default(row, "folder", "") is None:
folder = ""
else:
folder = slugify(gw.get_cell_or_default(row, 'folder', "").strip())
folder = slugify(gw.get_cell_or_default(row, "folder", "").strip())
if len(folder):
if self.use_sheet_names_in_stored_paths:
m.set_context("folder", os.path.join(folder, slugify(self.sheet), slugify(gw.wks.title)))
@ -91,12 +95,11 @@ class GsheetsFeederDB(Feeder, Database):
def missing_required_columns(self, gw: GWorksheet) -> list:
missing = []
for required_col in ['url', 'status']:
for required_col in ["url", "status"]:
if not gw.col_exists(required_col):
missing.append(required_col)
return missing
def started(self, item: Metadata) -> None:
logger.warning(f"STARTED {item}")
gw, row = self._retrieve_gsheet(item)
@ -155,9 +158,7 @@ class GsheetsFeederDB(Feeder, Database):
if len(pdq_hashes):
batch_if_valid("pdq_hash", ",".join(pdq_hashes))
if (screenshot := item.get_media_by_id("screenshot")) and hasattr(
screenshot, "urls"
):
if (screenshot := item.get_media_by_id("screenshot")) and hasattr(screenshot, "urls"):
batch_if_valid("screenshot", "\n".join(screenshot.urls))
if thumbnail := item.get_first_image("thumbnail"):
@ -186,11 +187,12 @@ class GsheetsFeederDB(Feeder, Database):
logger.debug(f"Unable to update sheet: {e}")
def _retrieve_gsheet(self, item: Metadata) -> Tuple[GWorksheet, int]:
if gsheet := item.get_context("gsheet"):
gw: GWorksheet = gsheet.get("worksheet")
row: int = gsheet.get("row")
elif self.sheet_id:
logger.error(f"Unable to retrieve Gsheet for {item.get_url()}, GsheetDB must be used alongside GsheetFeeder.")
logger.error(
f"Unable to retrieve Gsheet for {item.get_url()}, GsheetDB must be used alongside GsheetFeeder."
)
return gw, row


@ -8,21 +8,22 @@ class GWorksheet:
should always include the offset of the header.
eg: if header=4, row 5 will be the first with data.
"""
COLUMN_NAMES = {
'url': 'link',
'status': 'archive status',
'folder': 'destination folder',
'archive': 'archive location',
'date': 'archive date',
'thumbnail': 'thumbnail',
'timestamp': 'upload timestamp',
'title': 'upload title',
'text': 'text content',
'screenshot': 'screenshot',
'hash': 'hash',
'pdq_hash': 'perceptual hashes',
'wacz': 'wacz',
'replaywebpage': 'replaywebpage',
"url": "link",
"status": "archive status",
"folder": "destination folder",
"archive": "archive location",
"date": "archive date",
"thumbnail": "thumbnail",
"timestamp": "upload timestamp",
"title": "upload title",
"text": "text content",
"screenshot": "screenshot",
"hash": "hash",
"pdq_hash": "perceptual hashes",
"wacz": "wacz",
"replaywebpage": "replaywebpage",
}
def __init__(self, worksheet, columns=COLUMN_NAMES, header_row=1):
@ -36,7 +37,7 @@ class GWorksheet:
def _check_col_exists(self, col: str):
if col not in self.columns:
raise Exception(f'Column {col} is not in the configured column names: {self.columns.keys()}')
raise Exception(f"Column {col} is not in the configured column names: {self.columns.keys()}")
def _col_index(self, col: str):
self._check_col_exists(col)
@ -67,11 +68,11 @@ class GWorksheet:
if fresh:
return self.wks.cell(row, col_index + 1).value
if type(row) == int:
if isinstance(row, int):
row = self.get_row(row)
if col_index >= len(row):
return ''
return ""
return row[col_index]
def get_cell_or_default(self, row, col: str, default: str = None, fresh=False, when_empty_use_default=True):
@ -83,7 +84,7 @@ class GWorksheet:
if when_empty_use_default and val.strip() == "":
return default
return val
except:
except Exception:
return default
def set_cell(self, row: int, col: str, val):
@ -96,13 +97,9 @@ class GWorksheet:
receives a list of [(row: int, col: str, val)] and batch updates it; the parameters are the same as in the self.set_cell() method
"""
cell_updates = [
{
'range': self.to_a1(row, col),
'values': [[str(val)[0:49999]]]
}
for row, col, val in cell_updates
{"range": self.to_a1(row, col), "values": [[str(val)[0:49999]]]} for row, col, val in cell_updates
]
self.wks.batch_update(cell_updates, value_input_option='USER_ENTERED')
self.wks.batch_update(cell_updates, value_input_option="USER_ENTERED")
def to_a1(self, row: int, col: str):
# row is 1-based
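A minimal usage sketch (illustrative only; assumes a gspread worksheet whose header row holds the configured column names such as "link" and "archive status"):
gw = GWorksheet(worksheet, header_row=1)   # with header_row=1, row 2 is the first data row
url = gw.get_cell(2, "url")                # reads the cell under the "link" header
if gw.get_cell(2, "status") in ["", None]:
    gw.set_cell(2, "status", "archive in progress")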


@ -8,9 +8,10 @@
"configs": {
"algorithm": {"default": "SHA-256", "help": "hash algorithm to use", "choices": ["SHA-256", "SHA3-512"]},
# TODO add non-negative requirement to match previous implementation?
"chunksize": {"default": 16000000,
"chunksize": {
"default": 16000000,
"help": "number of bytes to use when reading files in chunks (if this value is too large you will run out of RAM), default is 16MB",
'type': 'int',
"type": "int",
},
},
"description": """


@ -7,6 +7,7 @@ exact duplicates. The hash is computed by reading the file's bytes in chunks,
making it suitable for handling large files efficiently.
"""
import hashlib
from loguru import logger
@ -20,7 +21,6 @@ class HashEnricher(Enricher):
Calculates hashes for Media instances
"""
def enrich(self, to_enrich: Metadata) -> None:
url = to_enrich.get_url()
logger.debug(f"calculating media hashes for {url=} (using {self.algorithm})")
@ -35,5 +35,6 @@ class HashEnricher(Enricher):
hash_algo = hashlib.sha256
elif self.algorithm == "SHA3-512":
hash_algo = hashlib.sha3_512
else: return ""
else:
return ""
return calculate_file_hash(filename, hash_algo, self.chunksize)
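A minimal sketch of the chunked hashing described by the chunksize option (the real helper is calculate_file_hash in auto_archiver.utils.misc; this standalone version is only illustrative):
import hashlib

def chunked_hash(path: str, hash_algo=hashlib.sha256, chunksize: int = 16000000) -> str:
    # read the file in fixed-size chunks so large files never need to fit in RAM
    h = hash_algo()
    with open(path, "rb") as f:
        while chunk := f.read(chunksize):
            h.update(chunk)
    return h.hexdigest()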


@ -2,14 +2,13 @@
"name": "HTML Formatter",
"type": ["formatter"],
"requires_setup": False,
"dependencies": {
"python": ["hash_enricher", "loguru", "jinja2"],
"bin": [""]
},
"dependencies": {"python": ["hash_enricher", "loguru", "jinja2"], "bin": [""]},
"configs": {
"detect_thumbnails": {"default": True,
"detect_thumbnails": {
"default": True,
"help": "if true will group by thumbnails generated by thumbnail enricher by id 'thumbnail_00'",
"type": "bool"},
"type": "bool",
},
},
"description": """ """,
}


@ -1,5 +1,7 @@
from __future__ import annotations
import mimetypes, os, pathlib
import mimetypes
import os
import pathlib
from jinja2 import Environment, FileSystemLoader
from urllib.parse import quote
from loguru import logger
@ -11,6 +13,7 @@ from auto_archiver.core import Metadata, Media
from auto_archiver.core import Formatter
from auto_archiver.utils.misc import random_str
class HtmlFormatter(Formatter):
environment: Environment = None
template: any = None
@ -21,9 +24,9 @@ class HtmlFormatter(Formatter):
self.environment = Environment(loader=FileSystemLoader(template_dir), autoescape=True)
# JinjaHelper class static methods are added as filters
self.environment.filters.update({
k: v.__func__ for k, v in JinjaHelpers.__dict__.items() if isinstance(v, staticmethod)
})
self.environment.filters.update(
{k: v.__func__ for k, v in JinjaHelpers.__dict__.items() if isinstance(v, staticmethod)}
)
# Load a specific template or default to "html_template.html"
template_name = self.config.get("template_name", "html_template.html")
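Conceptually, registering a helper class's static methods as Jinja filters works as below (a standalone sketch with an invented Helpers class, not the formatter's actual JinjaHelpers):
from jinja2 import Environment

class Helpers:
    @staticmethod
    def shout(value: str) -> str:
        return value.upper()

env = Environment()
env.filters.update({k: v.__func__ for k, v in Helpers.__dict__.items() if isinstance(v, staticmethod)})
env.from_string("{{ 'hello' | shout }}").render()   # -> "HELLO"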
@ -36,11 +39,7 @@ class HtmlFormatter(Formatter):
return
content = self.template.render(
url=url,
title=item.get_title(),
media=item.media,
metadata=item.metadata,
version=__version__
url=url, title=item.get_title(), media=item.media, metadata=item.metadata, version=__version__
)
html_path = os.path.join(self.tmp_dir, f"formatted{random_str(24)}.html")
@ -49,7 +48,7 @@ class HtmlFormatter(Formatter):
final_media = Media(filename=html_path, _mimetype="text/html")
# get the already instantiated hash_enricher module
he = self.module_factory.get_module('hash_enricher', self.config)
he = self.module_factory.get_module("hash_enricher", self.config)
if len(hd := he.calculate_hash(final_media.filename)):
final_media.set("hash", f"{he.algorithm}:{hd}")


@ -2,18 +2,18 @@
"name": "Instagram API Extractor",
"type": ["extractor"],
"entry_point": "instagram_api_extractor::InstagramAPIExtractor",
"dependencies":
{"python": ["requests",
"dependencies": {
"python": [
"requests",
"loguru",
"retrying",
"tqdm",],
"tqdm",
],
},
"requires_setup": True,
"configs": {
"access_token": {"default": None,
"help": "a valid instagrapi-api token"},
"api_endpoint": {"required": True,
"help": "API endpoint to use"},
"access_token": {"default": None, "help": "a valid instagrapi-api token"},
"api_endpoint": {"required": True, "help": "API endpoint to use"},
"full_profile": {
"default": False,
"type": "bool",


@ -36,21 +36,16 @@ class InstagramAPIExtractor(Extractor):
if self.api_endpoint[-1] == "/":
self.api_endpoint = self.api_endpoint[:-1]
def download(self, item: Metadata) -> Metadata:
url = item.get_url()
url.replace("instagr.com", "instagram.com").replace(
"instagr.am", "instagram.com"
)
url.replace("instagr.com", "instagram.com").replace("instagr.am", "instagram.com")
insta_matches = self.valid_url.findall(url)
logger.info(f"{insta_matches=}")
if not len(insta_matches) or len(insta_matches[0]) != 3:
return
if len(insta_matches) > 1:
logger.warning(
f"Multiple instagram matches found in {url=}, using the first one"
)
logger.warning(f"Multiple instagram matches found in {url=}, using the first one")
return
g1, g2, g3 = insta_matches[0][0], insta_matches[0][1], insta_matches[0][2]
if g1 == "":
@ -73,23 +68,20 @@ class InstagramAPIExtractor(Extractor):
def call_api(self, path: str, params: dict) -> dict:
headers = {"accept": "application/json", "x-access-key": self.access_token}
logger.debug(f"calling {self.api_endpoint}/{path} with {params=}")
return requests.get(
f"{self.api_endpoint}/{path}", headers=headers, params=params
).json()
return requests.get(f"{self.api_endpoint}/{path}", headers=headers, params=params).json()
def cleanup_dict(self, d: dict | list) -> dict:
# repeats 3 times to remove nested empty values
if not self.minimize_json_output:
return d
if type(d) == list:
if isinstance(d, list):
return [self.cleanup_dict(v) for v in d]
if type(d) != dict:
if not isinstance(d, dict):
return d
return {
k: clean_v
for k, v in d.items()
if (clean_v := self.cleanup_dict(v))
not in [0.0, 0, [], {}, "", None, "null"]
if (clean_v := self.cleanup_dict(v)) not in [0.0, 0, [], {}, "", None, "null"]
and k not in ["x", "y", "width", "height"]
}
@ -103,7 +95,7 @@ class InstagramAPIExtractor(Extractor):
result.set_title(user.get("full_name", username)).set("data", user)
if pic_url := user.get("profile_pic_url_hd", user.get("profile_pic_url")):
filename = self.download_from_url(pic_url)
result.add_media(Media(filename=filename), id=f"profile_picture")
result.add_media(Media(filename=filename), id="profile_picture")
if self.full_profile:
user_id = user.get("pk")
@ -126,9 +118,7 @@ class InstagramAPIExtractor(Extractor):
try:
self.download_all_tagged(result, user_id)
except Exception as e:
result.append(
"errors", f"Error downloading tagged posts for {username}"
)
result.append("errors", f"Error downloading tagged posts for {username}")
logger.error(f"Error downloading tagged posts for {username}: {e}")
# download all highlights
@ -143,7 +133,7 @@ class InstagramAPIExtractor(Extractor):
def download_all_highlights(self, result, username, user_id):
count_highlights = 0
highlights = self.call_api(f"v1/user/highlights", {"user_id": user_id})
highlights = self.call_api("v1/user/highlights", {"user_id": user_id})
for h in highlights:
try:
h_info = self._download_highlights_reusable(result, h.get("pk"))
@ -153,26 +143,17 @@ class InstagramAPIExtractor(Extractor):
"errors",
f"Error downloading highlight id{h.get('pk')} for {username}",
)
logger.error(
f"Error downloading highlight id{h.get('pk')} for {username}: {e}"
)
if (
self.full_profile_max_posts
and count_highlights >= self.full_profile_max_posts
):
logger.info(
f"HIGHLIGHTS reached full_profile_max_posts={self.full_profile_max_posts}"
)
logger.error(f"Error downloading highlight id{h.get('pk')} for {username}: {e}")
if self.full_profile_max_posts and count_highlights >= self.full_profile_max_posts:
logger.info(f"HIGHLIGHTS reached full_profile_max_posts={self.full_profile_max_posts}")
break
result.set("#highlights", count_highlights)
def download_post(
self, result: Metadata, code: str = None, id: str = None, context: str = None
) -> Metadata:
def download_post(self, result: Metadata, code: str = None, id: str = None, context: str = None) -> Metadata:
if id:
post = self.call_api(f"v1/media/by/id", {"id": id})
post = self.call_api("v1/media/by/id", {"id": id})
else:
post = self.call_api(f"v1/media/by/code", {"code": code})
post = self.call_api("v1/media/by/code", {"code": code})
assert post, f"Post {id or code} not found"
if caption_text := post.get("caption_text"):
@ -192,15 +173,11 @@ class InstagramAPIExtractor(Extractor):
return result.success("insta highlights")
def _download_highlights_reusable(self, result: Metadata, id: str) -> dict:
full_h = self.call_api(f"v2/highlight/by/id", {"id": id})
full_h = self.call_api("v2/highlight/by/id", {"id": id})
h_info = full_h.get("response", {}).get("reels", {}).get(f"highlight:{id}")
assert h_info, f"Highlight {id} not found: {full_h=}"
if (
cover_media := h_info.get("cover_media", {})
.get("cropped_image_version", {})
.get("url")
):
if cover_media := h_info.get("cover_media", {}).get("cropped_image_version", {}).get("url"):
filename = self.download_from_url(cover_media)
result.add_media(Media(filename=filename), id=f"cover_media highlight {id}")
@ -210,9 +187,7 @@ class InstagramAPIExtractor(Extractor):
self.scrape_item(result, h, "highlight")
except Exception as e:
result.append("errors", f"Error downloading highlight {h.get('id')}")
logger.error(
f"Error downloading highlight, skipping {h.get('id')}: {e}"
)
logger.error(f"Error downloading highlight, skipping {h.get('id')}: {e}")
return h_info
@ -225,7 +200,7 @@ class InstagramAPIExtractor(Extractor):
return result.success(f"insta stories {now}")
def _download_stories_reusable(self, result: Metadata, username: str) -> list[dict]:
stories = self.call_api(f"v1/user/stories/by/username", {"username": username})
stories = self.call_api("v1/user/stories/by/username", {"username": username})
if not stories or not len(stories):
return []
stories = stories[::-1] # newest to oldest
@ -244,10 +219,8 @@ class InstagramAPIExtractor(Extractor):
post_count = 0
while end_cursor != "":
posts = self.call_api(
f"v1/user/medias/chunk", {"user_id": user_id, "end_cursor": end_cursor}
)
if not len(posts) or not type(posts) == list or len(posts) != 2:
posts = self.call_api("v1/user/medias/chunk", {"user_id": user_id, "end_cursor": end_cursor})
if not posts or not isinstance(posts, list) or len(posts) != 2:
break
posts, end_cursor = posts[0], posts[1]
logger.info(f"parsing {len(posts)} posts, next {end_cursor=}")
@ -260,13 +233,8 @@ class InstagramAPIExtractor(Extractor):
logger.error(f"Error downloading post, skipping {p.get('id')}: {e}")
pbar.update(1)
post_count += 1
if (
self.full_profile_max_posts
and post_count >= self.full_profile_max_posts
):
logger.info(
f"POSTS reached full_profile_max_posts={self.full_profile_max_posts}"
)
if self.full_profile_max_posts and post_count >= self.full_profile_max_posts:
logger.info(f"POSTS reached full_profile_max_posts={self.full_profile_max_posts}")
break
result.set("#posts", post_count)
@ -275,10 +243,8 @@ class InstagramAPIExtractor(Extractor):
pbar = tqdm(desc="downloading tagged posts")
tagged_count = 0
while next_page_id != None:
resp = self.call_api(
f"v2/user/tag/medias", {"user_id": user_id, "page_id": next_page_id}
)
while next_page_id is not None:
resp = self.call_api("v2/user/tag/medias", {"user_id": user_id, "page_id": next_page_id})
posts = resp.get("response", {}).get("items", [])
if not len(posts):
break
@ -290,21 +256,12 @@ class InstagramAPIExtractor(Extractor):
try:
self.scrape_item(result, p, "tagged")
except Exception as e:
result.append(
"errors", f"Error downloading tagged post {p.get('id')}"
)
logger.error(
f"Error downloading tagged post, skipping {p.get('id')}: {e}"
)
result.append("errors", f"Error downloading tagged post {p.get('id')}")
logger.error(f"Error downloading tagged post, skipping {p.get('id')}: {e}")
pbar.update(1)
tagged_count += 1
if (
self.full_profile_max_posts
and tagged_count >= self.full_profile_max_posts
):
logger.info(
f"TAGS reached full_profile_max_posts={self.full_profile_max_posts}"
)
if self.full_profile_max_posts and tagged_count >= self.full_profile_max_posts:
logger.info(f"TAGS reached full_profile_max_posts={self.full_profile_max_posts}")
break
result.set("#tagged", tagged_count)
@ -318,9 +275,7 @@ class InstagramAPIExtractor(Extractor):
context can be used to give specific id prefixes to media
"""
if "clips_metadata" in item:
if reusable_text := item.get("clips_metadata", {}).get(
"reusable_text_attribute_string"
):
if reusable_text := item.get("clips_metadata", {}).get("reusable_text_attribute_string"):
item["clips_metadata_text"] = reusable_text
if self.minimize_json_output:
del item["clips_metadata"]


@ -9,8 +9,7 @@
},
"requires_setup": True,
"configs": {
"username": {"required": True,
"help": "A valid Instagram username."},
"username": {"required": True, "help": "A valid Instagram username."},
"password": {
"required": True,
"help": "The corresponding Instagram account password.",


@ -3,7 +3,10 @@
highlights, and tagged posts. Authentication is required via username/password or a session file.
"""
import re, os, shutil
import re
import os
import shutil
import instaloader
from loguru import logger
@ -11,6 +14,7 @@ from auto_archiver.core import Extractor
from auto_archiver.core import Metadata
from auto_archiver.core import Media
class InstagramExtractor(Extractor):
"""
Uses Instaloader to download either a post (inc images, videos, text) or as much as possible from a profile (posts, stories, highlights, ...)
@ -25,26 +29,24 @@ class InstagramExtractor(Extractor):
# TODO: links to stories
def setup(self) -> None:
self.insta = instaloader.Instaloader(
download_geotags=True,
download_comments=True,
compress_json=False,
dirname_pattern=self.download_folder,
filename_pattern="{date_utc}_UTC_{target}__{typename}"
filename_pattern="{date_utc}_UTC_{target}__{typename}",
)
try:
self.insta.load_session_from_file(self.username, self.session_file)
except Exception as e:
except Exception:
try:
logger.debug(f"Session file failed", exc_info=True)
logger.debug("Session file failed", exc_info=True)
logger.info("No valid session file found - Attempting login with username and password.")
self.insta.login(self.username, self.password)
self.insta.save_session_to_file(self.session_file)
except Exception as e:
logger.error(f"Failed to setup Instagram Extractor with Instagrapi. {e}")
def download(self, item: Metadata) -> Metadata:
url = item.get_url()
@ -53,7 +55,8 @@ class InstagramExtractor(Extractor):
profile_matches = self.profile_pattern.findall(url)
# return if not a valid instagram link
if not len(post_matches) and not len(profile_matches): return
if not len(post_matches) and not len(profile_matches):
return
result = None
try:
@ -65,7 +68,9 @@ class InstagramExtractor(Extractor):
elif len(profile_matches):
result = self.download_profile(url, profile_matches[0])
except Exception as e:
logger.error(f"Failed to download with instagram extractor due to: {e}, make sure your account credentials are valid.")
logger.error(
f"Failed to download with instagram extractor due to: {e}, make sure your account credentials are valid."
)
finally:
shutil.rmtree(self.download_folder, ignore_errors=True)
return result
@ -84,35 +89,50 @@ class InstagramExtractor(Extractor):
profile = instaloader.Profile.from_username(self.insta.context, username)
try:
for post in profile.get_posts():
try: self.insta.download_post(post, target=f"profile_post_{post.owner_username}")
except Exception as e: logger.error(f"Failed to download post: {post.shortcode}: {e}")
except Exception as e: logger.error(f"Failed profile.get_posts: {e}")
try:
self.insta.download_post(post, target=f"profile_post_{post.owner_username}")
except Exception as e:
logger.error(f"Failed to download post: {post.shortcode}: {e}")
except Exception as e:
logger.error(f"Failed profile.get_posts: {e}")
try:
for post in profile.get_tagged_posts():
try: self.insta.download_post(post, target=f"tagged_post_{post.owner_username}")
except Exception as e: logger.error(f"Failed to download tagged post: {post.shortcode}: {e}")
except Exception as e: logger.error(f"Failed profile.get_tagged_posts: {e}")
try:
self.insta.download_post(post, target=f"tagged_post_{post.owner_username}")
except Exception as e:
logger.error(f"Failed to download tagged post: {post.shortcode}: {e}")
except Exception as e:
logger.error(f"Failed profile.get_tagged_posts: {e}")
try:
for post in profile.get_igtv_posts():
try: self.insta.download_post(post, target=f"igtv_post_{post.owner_username}")
except Exception as e: logger.error(f"Failed to download igtv post: {post.shortcode}: {e}")
except Exception as e: logger.error(f"Failed profile.get_igtv_posts: {e}")
try:
self.insta.download_post(post, target=f"igtv_post_{post.owner_username}")
except Exception as e:
logger.error(f"Failed to download igtv post: {post.shortcode}: {e}")
except Exception as e:
logger.error(f"Failed profile.get_igtv_posts: {e}")
try:
for story in self.insta.get_stories([profile.userid]):
for item in story.get_items():
try: self.insta.download_storyitem(item, target=f"story_item_{story.owner_username}")
except Exception as e: logger.error(f"Failed to download story item: {item}: {e}")
except Exception as e: logger.error(f"Failed get_stories: {e}")
try:
self.insta.download_storyitem(item, target=f"story_item_{story.owner_username}")
except Exception as e:
logger.error(f"Failed to download story item: {item}: {e}")
except Exception as e:
logger.error(f"Failed get_stories: {e}")
try:
for highlight in self.insta.get_highlights(profile.userid):
for item in highlight.get_items():
try: self.insta.download_storyitem(item, target=f"highlight_item_{highlight.owner_username}")
except Exception as e: logger.error(f"Failed to download highlight item: {item}: {e}")
except Exception as e: logger.error(f"Failed get_highlights: {e}")
try:
self.insta.download_storyitem(item, target=f"highlight_item_{highlight.owner_username}")
except Exception as e:
logger.error(f"Failed to download highlight item: {item}: {e}")
except Exception as e:
logger.error(f"Failed get_highlights: {e}")
return self.process_downloads(url, f"@{username}", profile._asdict(), None)
@ -124,7 +144,8 @@ class InstagramExtractor(Extractor):
all_media = []
for f in os.listdir(self.download_folder):
if os.path.isfile((filename := os.path.join(self.download_folder, f))):
if filename[-4:] == ".txt": continue
if filename[-4:] == ".txt":
continue
all_media.append(Media(filename))
assert len(all_media) > 1, "No uploaded media found"


@ -1,16 +1,21 @@
{
"name": "Instagram Telegram Bot Extractor",
"type": ["extractor"],
"dependencies": {"python": ["loguru", "telethon",],
"dependencies": {
"python": [
"loguru",
"telethon",
],
},
"requires_setup": True,
"configs": {
"api_id": {"default": None, "help": "telegram API_ID value, go to https://my.telegram.org/apps"},
"api_hash": {"default": None, "help": "telegram API_HASH value, go to https://my.telegram.org/apps"},
"session_file": {"default": "secrets/anon-insta", "help": "optional, records the telegram login session for future usage, '.session' will be appended to the provided value."},
"timeout": {"default": 45,
"type": "int",
"help": "timeout to fetch the instagram content in seconds."},
"session_file": {
"default": "secrets/anon-insta",
"help": "optional, records the telegram login session for future usage, '.session' will be appended to the provided value.",
},
"timeout": {"default": 45, "type": "int", "help": "timeout to fetch the instagram content in seconds."},
},
"description": """
The `InstagramTbotExtractor` module uses a Telegram bot (`instagram_load_bot`) to fetch and archive Instagram content,


@ -51,7 +51,7 @@ class InstagramTbotExtractor(Extractor):
"""Initializes the Telegram client."""
try:
self.client = TelegramClient(self.session_file, self.api_id, self.api_hash)
except OperationalError as e:
except OperationalError:
logger.error(
f"Unable to access the {self.session_file} session. "
"Ensure that you don't use the same session file here and in telethon_extractor. "
@ -68,12 +68,12 @@ class InstagramTbotExtractor(Extractor):
def download(self, item: Metadata) -> Metadata:
url = item.get_url()
if not "instagram.com" in url: return False
if "instagram.com" not in url:
return False
result = Metadata()
tmp_dir = self.tmp_dir
with self.client.start():
chat, since_id = self._send_url_to_bot(url)
message = self._process_messages(chat, since_id, tmp_dir, result)
@ -110,13 +110,14 @@ class InstagramTbotExtractor(Extractor):
for post in self.client.iter_messages(chat, min_id=since_id):
since_id = max(since_id, post.id)
# Skip known filler message:
if post.message == 'The bot receives information through https://hikerapi.com/p/hJqpppqi':
if post.message == "The bot receives information through https://hikerapi.com/p/hJqpppqi":
continue
if post.media and post.id not in seen_media:
filename_dest = os.path.join(tmp_dir, f'{chat.id}_{post.id}')
filename_dest = os.path.join(tmp_dir, f"{chat.id}_{post.id}")
media = self.client.download_media(post.media, filename_dest)
if media:
result.add_media(Media(media))
seen_media.append(post.id)
if post.message: message += post.message
if post.message:
message += post.message
return message.strip()


@ -17,9 +17,11 @@
"choices": ["random", "static"],
},
"save_to": {"default": "./local_archive", "help": "folder where to save archived content"},
"save_absolute": {"default": False,
"save_absolute": {
"default": False,
"type": "bool",
"help": "whether the path to the stored file is absolute or relative in the output result inc. formatters (WARN: leaks the file structure)"},
"help": "whether the path to the stored file is absolute or relative in the output result inc. formatters (WARN: leaks the file structure)",
},
},
"description": """
LocalStorage: A storage module for saving archived content locally on the filesystem.
@ -33,5 +35,5 @@
### Notes
- Default storage folder is `./local_archive`, but this can be changed via the `save_to` configuration.
- The `save_absolute` option can reveal the file structure in output formats; use with caution.
"""
""",
}


@ -1,4 +1,3 @@
import shutil
from typing import IO
import os
@ -8,12 +7,13 @@ from auto_archiver.core import Media
from auto_archiver.core import Storage
from auto_archiver.core.consts import SetupError
class LocalStorage(Storage):
def setup(self) -> None:
if len(self.save_to) > 200:
raise SetupError(f"Your save_to path is too long, this will cause issues saving files on your computer. Please use a shorter path.")
raise SetupError(
"Your save_to path is too long, this will cause issues saving files on your computer. Please use a shorter path."
)
def get_cdn_url(self, media: Media) -> str:
dest = media.key
@ -25,18 +25,18 @@ class LocalStorage(Storage):
def set_key(self, media, url, metadata):
# clarify we want to save the file to the save_to folder
old_folder = metadata.get('folder', '')
metadata.set_context('folder', os.path.join(self.save_to, metadata.get('folder', '')))
old_folder = metadata.get("folder", "")
metadata.set_context("folder", os.path.join(self.save_to, metadata.get("folder", "")))
super().set_key(media, url, metadata)
# don't impact other storages that might want a different 'folder' set
metadata.set_context('folder', old_folder)
metadata.set_context("folder", old_folder)
def upload(self, media: Media, **kwargs) -> bool:
# override parent so that we can use shutil.copy2 and keep metadata
dest = media.key
os.makedirs(os.path.dirname(dest), exist_ok=True)
logger.debug(f'[{self.__class__.__name__}] storing file {media.filename} with key {media.key} to {dest}')
logger.debug(f"[{self.__class__.__name__}] storing file {media.filename} with key {media.key} to {dest}")
res = shutil.copy2(media.filename, dest)
logger.info(res)


@ -23,7 +23,9 @@ class MetaEnricher(Enricher):
self.enrich_archive_duration(to_enrich)
def enrich_file_sizes(self, to_enrich: Metadata):
logger.debug(f"calculating archive file sizes for url={to_enrich.get_url()} ({len(to_enrich.media)} media files)")
logger.debug(
f"calculating archive file sizes for url={to_enrich.get_url()} ({len(to_enrich.media)} media files)"
)
total_size = 0
for media in to_enrich.get_all_media():
file_stats = os.stat(media.filename)
@ -34,7 +36,6 @@ class MetaEnricher(Enricher):
to_enrich.set("total_bytes", total_size)
to_enrich.set("total_size", self.human_readable_bytes(total_size))
def human_readable_bytes(self, size: int) -> str:
# receives a number of bytes and returns a human-readable size
for unit in ["bytes", "KB", "MB", "GB", "TB"]:


@ -2,10 +2,7 @@
"name": "Media Metadata Enricher",
"type": ["enricher"],
"requires_setup": True,
"dependencies": {
"python": ["loguru"],
"bin": ["exiftool"]
},
"dependencies": {"python": ["loguru"], "bin": ["exiftool"]},
"description": """
Extracts metadata information from files using ExifTool.
@ -17,5 +14,5 @@
### Notes
- Requires ExifTool to be installed and accessible via the system's PATH.
- Skips enrichment for files where metadata extraction fails.
"""
""",
}


@ -11,7 +11,6 @@ class MetadataEnricher(Enricher):
Extracts metadata information from files using exiftool.
"""
def enrich(self, to_enrich: Metadata) -> None:
url = to_enrich.get_url()
logger.debug(f"extracting EXIF metadata for {url=}")
@ -23,13 +22,13 @@ class MetadataEnricher(Enricher):
def get_metadata(self, filename: str) -> dict:
try:
# Run ExifTool command to extract metadata from the file
cmd = ['exiftool', filename]
cmd = ["exiftool", filename]
result = subprocess.run(cmd, capture_output=True, text=True)
# Process the output to extract individual metadata fields
metadata = {}
for line in result.stdout.splitlines():
field, value = line.strip().split(':', 1)
field, value = line.strip().split(":", 1)
metadata[field.strip()] = value.strip()
return metadata
except FileNotFoundError:
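As a worked example (output lines invented), exiftool prints "Field : value" pairs which the loop above folds into a dict:
stdout = "File Type                       : JPEG\nImage Width                     : 4032\n"
metadata = {}
for line in stdout.splitlines():
    field, value = line.strip().split(":", 1)
    metadata[field.strip()] = value.strip()
# metadata == {"File Type": "JPEG", "Image Width": "4032"}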


@ -2,8 +2,7 @@
"name": "Mute Formatter",
"type": ["formatter"],
"requires_setup": True,
"dependencies": {
},
"dependencies": {},
"description": """ Default formatter.
""",
}


@ -5,5 +5,5 @@ from auto_archiver.core import Formatter
class MuteFormatter(Formatter):
def format(self, item: Metadata) -> Media: return None
def format(self, item: Metadata) -> Media:
return None


@ -17,5 +17,5 @@
### Notes
- Best used after enrichers like `thumbnail_enricher` or `screenshot_enricher` to ensure images are available.
- Uses the `pdqhash` library to compute 256-bit perceptual hashes, which are stored as hexadecimal strings.
"""
""",
}


@ -10,6 +10,7 @@ This enricher is typically used after thumbnail or screenshot enrichers
to ensure images are available for hashing.
"""
import traceback
import pdqhash
import numpy as np
@ -34,7 +35,12 @@ class PdqHashEnricher(Enricher):
for m in to_enrich.media:
for media in m.all_inner_media(True):
media_id = media.get("id", "")
if media.is_image() and "screenshot" not in media_id and "warc-file-" not in media_id and len(hd := self.calculate_pdq_hash(media.filename)):
if (
media.is_image()
and "screenshot" not in media_id
and "warc-file-" not in media_id
and len(hd := self.calculate_pdq_hash(media.filename))
):
media.set("pdq_hash", hd)
media_with_hashes.append(media.filename)
@ -51,5 +57,7 @@ class PdqHashEnricher(Enricher):
hash = "".join(str(b) for b in hash_array)
return hex(int(hash, 2))[2:]
except UnidentifiedImageError as e:
logger.error(f"Image {filename=} is likely corrupted or in unsupported format {e}: {traceback.format_exc()}")
logger.error(
f"Image {filename=} is likely corrupted or in unsupported format {e}: {traceback.format_exc()}"
)
return ""
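A quick worked example of the bit-array-to-hex conversion above (a real PDQ hash has 256 bits; 8 are shown for brevity):
hash_array = [1, 0, 1, 1, 0, 0, 0, 1]
bits = "".join(str(b) for b in hash_array)   # "10110001"
hex_digest = hex(int(bits, 2))[2:]           # "b1"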


@ -20,20 +20,20 @@
"region": {"default": None, "help": "S3 region name"},
"key": {"default": None, "help": "S3 API key"},
"secret": {"default": None, "help": "S3 API secret"},
"random_no_duplicate": {"default": False,
"random_no_duplicate": {
"default": False,
"type": "bool",
"help": "if set, it will override `path_generator`, `filename_generator` and `folder`. It will check if the file already exists and if so it will not upload it again. Creates a new root folder path `no-dups/`"},
"help": "if set, it will override `path_generator`, `filename_generator` and `folder`. It will check if the file already exists and if so it will not upload it again. Creates a new root folder path `no-dups/`",
},
"endpoint_url": {
"default": 'https://{region}.digitaloceanspaces.com',
"help": "S3 bucket endpoint, {region} are inserted at runtime"
"default": "https://{region}.digitaloceanspaces.com",
"help": "S3 bucket endpoint, {region} are inserted at runtime",
},
"cdn_url": {
"default": 'https://{bucket}.{region}.cdn.digitaloceanspaces.com/{key}',
"help": "S3 CDN url, {bucket}, {region} and {key} are inserted at runtime"
"default": "https://{bucket}.{region}.cdn.digitaloceanspaces.com/{key}",
"help": "S3 CDN url, {bucket}, {region} and {key} are inserted at runtime",
},
"private": {"default": False,
"type": "bool",
"help": "if true S3 files will not be readable online"},
"private": {"default": False, "type": "bool", "help": "if true S3 files will not be readable online"},
},
"description": """
S3Storage: A storage module for saving media files to an S3-compatible object storage.
@ -50,5 +50,5 @@
- The `random_no_duplicate` option ensures no duplicate uploads by leveraging hash-based folder structures.
- Uses `boto3` for interaction with the S3 API.
- Depends on the `HashEnricher` module for hash calculation.
"""
""",
}


@ -1,4 +1,3 @@
from typing import IO
import boto3
@ -11,33 +10,36 @@ from auto_archiver.utils.misc import calculate_file_hash, random_str
NO_DUPLICATES_FOLDER = "no-dups/"
class S3Storage(Storage):
class S3Storage(Storage):
def setup(self) -> None:
self.s3 = boto3.client(
's3',
"s3",
region_name=self.region,
endpoint_url=self.endpoint_url.format(region=self.region),
aws_access_key_id=self.key,
aws_secret_access_key=self.secret
aws_secret_access_key=self.secret,
)
if self.random_no_duplicate:
logger.warning("random_no_duplicate is set to True, this will override `path_generator`, `filename_generator` and `folder`.")
logger.warning(
"random_no_duplicate is set to True, this will override `path_generator`, `filename_generator` and `folder`."
)
def get_cdn_url(self, media: Media) -> str:
return self.cdn_url.format(bucket=self.bucket, region=self.region, key=media.key)
def uploadf(self, file: IO[bytes], media: Media, **kwargs: dict) -> None:
if not self.is_upload_needed(media): return True
if not self.is_upload_needed(media):
return True
extra_args = kwargs.get("extra_args", {})
if not self.private and 'ACL' not in extra_args:
extra_args['ACL'] = 'public-read'
if not self.private and "ACL" not in extra_args:
extra_args["ACL"] = "public-read"
if 'ContentType' not in extra_args:
if "ContentType" not in extra_args:
try:
if media.mimetype:
extra_args['ContentType'] = media.mimetype
extra_args["ContentType"] = media.mimetype
except Exception as e:
logger.warning(f"Unable to get mimetype for {media.key=}, error: {e}")
self.s3.upload_fileobj(file, Bucket=self.bucket, Key=media.key, ExtraArgs=extra_args)
@ -61,9 +63,9 @@ class S3Storage(Storage):
def file_in_folder(self, path: str) -> str:
# checks if path exists and is not an empty folder
if not path.endswith('/'):
path = path + '/'
resp = self.s3.list_objects(Bucket=self.bucket, Prefix=path, Delimiter='/', MaxKeys=1)
if 'Contents' in resp:
return resp['Contents'][0]['Key']
if not path.endswith("/"):
path = path + "/"
resp = self.s3.list_objects(Bucket=self.bucket, Prefix=path, Delimiter="/", MaxKeys=1)
if "Contents" in resp:
return resp["Contents"][0]["Key"]
return False


@ -6,25 +6,28 @@
"python": ["loguru", "selenium"],
},
"configs": {
"width": {"default": 1280,
"width": {"default": 1280, "type": "int", "help": "width of the screenshots"},
"height": {"default": 1024, "type": "int", "help": "height of the screenshots"},
"timeout": {"default": 60, "type": "int", "help": "timeout for taking the screenshot"},
"sleep_before_screenshot": {
"default": 4,
"type": "int",
"help": "width of the screenshots"},
"height": {"default": 1024,
"type": "int",
"help": "height of the screenshots"},
"timeout": {"default": 60,
"type": "int",
"help": "timeout for taking the screenshot"},
"sleep_before_screenshot": {"default": 4,
"type": "int",
"help": "seconds to wait for the pages to load before taking screenshot"},
"http_proxy": {"default": "", "help": "http proxy to use for the webdriver, eg http://proxy-user:password@proxy-ip:port"},
"save_to_pdf": {"default": False,
"help": "seconds to wait for the pages to load before taking screenshot",
},
"http_proxy": {
"default": "",
"help": "http proxy to use for the webdriver, eg http://proxy-user:password@proxy-ip:port",
},
"save_to_pdf": {
"default": False,
"type": "bool",
"help": "save the page as pdf along with the screenshot. PDF saving options can be adjusted with the 'print_options' parameter"},
"print_options": {"default": {},
"help": "save the page as pdf along with the screenshot. PDF saving options can be adjusted with the 'print_options' parameter",
},
"print_options": {
"default": {},
"help": "options to pass to the pdf printer, in JSON format. See https://www.selenium.dev/documentation/webdriver/interactions/print_page/ for more information",
"type": "json_loader"},
"type": "json_loader",
},
},
"description": """
Captures screenshots and optionally saves web pages as PDFs using a WebDriver.
@ -37,5 +40,5 @@
### Notes
- Requires a WebDriver (e.g., ChromeDriver) installed and accessible via the system's PATH.
"""
""",
}
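
Since `print_options` is declared with `"type": "json_loader"`, it arrives as a dict parsed from a JSON string before being handed to the webdriver. A rough sketch of how such a dict can map onto Selenium 4's `PrintOptions` for PDF export; the option names and values here are illustrative, not the module's defaults:

    import base64
    import json

    from selenium import webdriver
    from selenium.webdriver.common.print_page_options import PrintOptions

    raw = '{"orientation": "landscape", "scale": 0.8, "background": true}'  # example JSON value
    opts = json.loads(raw)

    print_options = PrintOptions()
    print_options.orientation = opts.get("orientation", "portrait")
    print_options.scale = opts.get("scale", 1.0)
    print_options.background = opts.get("background", False)

    driver = webdriver.Firefox()
    try:
        driver.get("https://example.com")
        pdf_b64 = driver.print_page(print_options)  # base64-encoded PDF data
        with open("page.pdf", "wb") as f:
            f.write(base64.b64decode(pdf_b64))
    finally:
        driver.quit()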


@ -1,5 +1,6 @@
from loguru import logger
import time, os
import time
import os
import base64
from selenium.common.exceptions import TimeoutException
@ -9,8 +10,8 @@ from auto_archiver.core import Enricher
from auto_archiver.utils import Webdriver, url as UrlUtil, random_str
from auto_archiver.core import Media, Metadata
class ScreenshotEnricher(Enricher):
class ScreenshotEnricher(Enricher):
def __init__(self, webdriver_factory=None):
super().__init__()
self.webdriver_factory = webdriver_factory or Webdriver
@ -25,8 +26,14 @@ class ScreenshotEnricher(Enricher):
logger.debug(f"Enriching screenshot for {url=}")
auth = self.auth_for_site(url)
with self.webdriver_factory(
self.width, self.height, self.timeout, facebook_accept_cookies='facebook.com' in url,
http_proxy=self.http_proxy, print_options=self.print_options, auth=auth) as driver:
self.width,
self.height,
self.timeout,
facebook_accept_cookies="facebook.com" in url,
http_proxy=self.http_proxy,
print_options=self.print_options,
auth=auth,
) as driver:
try:
driver.get(url)
time.sleep(int(self.sleep_before_screenshot))
@ -43,4 +50,3 @@ class ScreenshotEnricher(Enricher):
logger.info("TimeoutException loading page for screenshot")
except Exception as e:
logger.error(f"Got error while loading webdriver for screenshot enricher: {e}")


@ -5,11 +5,13 @@
"dependencies": {
"python": ["loguru", "slugify"],
},
'entry_point': 'ssl_enricher::SSLEnricher',
"entry_point": "ssl_enricher::SSLEnricher",
"configs": {
"skip_when_nothing_archived": {"default": True,
"type": 'bool',
"help": "if true, will skip enriching when no media is archived"},
"skip_when_nothing_archived": {
"default": True,
"type": "bool",
"help": "if true, will skip enriching when no media is archived",
},
},
"description": """
Retrieves SSL certificate information for a domain and stores it as a file.
@ -21,5 +23,5 @@
### Notes
- Requires the target URL to use the HTTPS scheme; other schemes are not supported.
"""
""",
}
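
At its core the enricher below relies on a single standard-library call; a minimal standalone sketch (the domain is illustrative):

    import ssl

    domain = "example.com"  # illustrative
    pem_cert = ssl.get_server_certificate((domain, 443))
    with open(f"{domain}.pem", "w") as f:
        f.write(pem_cert)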


@ -1,4 +1,5 @@
import ssl, os
import ssl
import os
from slugify import slugify
from urllib.parse import urlparse
from loguru import logger
@ -13,7 +14,8 @@ class SSLEnricher(Enricher):
"""
def enrich(self, to_enrich: Metadata) -> None:
if not to_enrich.media and self.skip_when_nothing_archived: return
if not to_enrich.media and self.skip_when_nothing_archived:
return
url = to_enrich.get_url()
parsed = urlparse(url)
@ -24,5 +26,6 @@ class SSLEnricher(Enricher):
cert = ssl.get_server_certificate((domain, 443))
cert_fn = os.path.join(self.tmp_dir, f"{slugify(domain)}.pem")
with open(cert_fn, "w") as f: f.write(cert)
with open(cert_fn, "w") as f:
f.write(cert)
to_enrich.add_media(Media(filename=cert_fn), id="ssl_certificate")


@ -1,4 +1,6 @@
import requests, re, html
import requests
import re
import html
from bs4 import BeautifulSoup
from loguru import logger
@ -15,11 +17,11 @@ class TelegramExtractor(Extractor):
def download(self, item: Metadata) -> Metadata:
url = item.get_url()
# detect URLs that we definitely cannot handle
if 't.me' != item.netloc:
if "t.me" != item.netloc:
return False
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"
}
# TODO: check if we can do this more resilient to variable URLs
@ -27,11 +29,11 @@ class TelegramExtractor(Extractor):
url += "?embed=1"
t = requests.get(url, headers=headers)
s = BeautifulSoup(t.content, 'html.parser')
s = BeautifulSoup(t.content, "html.parser")
result = Metadata()
result.set_content(html.escape(str(t.content)))
if (timestamp := (s.find_all('time') or [{}])[0].get('datetime')):
if timestamp := (s.find_all("time") or [{}])[0].get("datetime"):
result.set_timestamp(timestamp)
video = s.find("video")
@ -41,25 +43,26 @@ class TelegramExtractor(Extractor):
image_urls = []
for im in image_tags:
urls = [u.replace("'", "") for u in re.findall(r'url\((.*?)\)', im['style'])]
urls = [u.replace("'", "") for u in re.findall(r"url\((.*?)\)", im["style"])]
image_urls += urls
if not len(image_urls): return False
if not len(image_urls):
return False
for img_url in image_urls:
result.add_media(Media(self.download_from_url(img_url)))
else:
video_url = video.get('src')
video_url = video.get("src")
m_video = Media(self.download_from_url(video_url))
# extract duration from HTML
try:
duration = s.find_all('time')[0].contents[0]
if ':' in duration:
duration = float(duration.split(
':')[0]) * 60 + float(duration.split(':')[1])
duration = s.find_all("time")[0].contents[0]
if ":" in duration:
duration = float(duration.split(":")[0]) * 60 + float(duration.split(":")[1])
else:
duration = float(duration)
m_video.set("duration", duration)
except: pass
except Exception:
pass
result.add_media(m_video)
return result.success("telegram")
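
The duration handling above turns Telegram's `MM:SS` display string into seconds; a worked example of that conversion:

    def parse_duration(text: str) -> float:
        """Convert an 'MM:SS' string (or a plain number of seconds) into seconds."""
        if ":" in text:
            minutes, seconds = text.split(":")
            return float(minutes) * 60 + float(seconds)
        return float(text)

    assert parse_duration("1:30") == 90.0
    assert parse_duration("42") == 42.0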


@ -3,25 +3,34 @@
"type": ["extractor"],
"requires_setup": True,
"dependencies": {
"python": ["telethon",
"python": [
"telethon",
"loguru",
"tqdm",
],
"bin": [""]
"bin": [""],
},
"configs": {
"api_id": {"default": None, "help": "telegram API_ID value, go to https://my.telegram.org/apps"},
"api_hash": {"default": None, "help": "telegram API_HASH value, go to https://my.telegram.org/apps"},
"bot_token": {"default": None, "help": "optional, but allows access to more content such as large videos, talk to @botfather"},
"session_file": {"default": "secrets/anon", "help": "optional, records the telegram login session for future usage, '.session' will be appended to the provided value."},
"join_channels": {"default": True,
"bot_token": {
"default": None,
"help": "optional, but allows access to more content such as large videos, talk to @botfather",
},
"session_file": {
"default": "secrets/anon",
"help": "optional, records the telegram login session for future usage, '.session' will be appended to the provided value.",
},
"join_channels": {
"default": True,
"type": "bool",
"help": "disables the initial setup with channel_invites config, useful if you have a lot and get stuck"},
"help": "disables the initial setup with channel_invites config, useful if you have a lot and get stuck",
},
"channel_invites": {
"default": {},
"help": "(JSON string) private channel invite links (format: t.me/joinchat/HASH OR t.me/+HASH) and (optional but important to avoid hanging for minutes on startup) channel id (format: CHANNEL_ID taken from a post url like https://t.me/c/CHANNEL_ID/1), the telegram account will join any new channels on setup",
"type": "json_loader",
}
},
},
"description": """
The `TelethonExtractor` uses the Telethon library to archive posts and media from Telegram channels and groups.
@ -46,5 +55,5 @@ To use the `TelethonExtractor`, you must configure the following:
The first time you run, you will be prompted to do a authentication with the phone number associated, alternatively you can put your `anon.session` in the root.
"""
""",
}
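
Based on how the extractor reads `channel_invites` further below (each entry's `invite` link plus an optional numeric `id`), a hypothetical value for this config could look like the following; the invite hashes and channel ids are placeholders:

    import json

    # Hypothetical channel_invites value; hashes and channel ids are placeholders.
    # The id comes from a post URL such as https://t.me/c/CHANNEL_ID/1 and avoids a
    # slow entity lookup on startup.
    channel_invites = json.dumps(
        [
            {"invite": "t.me/joinchat/PLACEHOLDER_HASH_1", "id": "1111111111"},
            {"invite": "t.me/+PLACEHOLDER_HASH_2", "id": "2222222222"},
        ]
    )
    print(channel_invites)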


@ -1,12 +1,18 @@
import shutil
from telethon.sync import TelegramClient
from telethon.errors import ChannelInvalidError
from telethon.tl.functions.messages import ImportChatInviteRequest
from telethon.errors.rpcerrorlist import UserAlreadyParticipantError, FloodWaitError, InviteRequestSentError, InviteHashExpiredError
from telethon.errors.rpcerrorlist import (
UserAlreadyParticipantError,
FloodWaitError,
InviteRequestSentError,
InviteHashExpiredError,
)
from loguru import logger
from tqdm import tqdm
import re, time, os
import re
import time
import os
from auto_archiver.core import Extractor
from auto_archiver.core import Metadata, Media
@ -17,9 +23,7 @@ class TelethonExtractor(Extractor):
valid_url = re.compile(r"https:\/\/t\.me(\/c){0,1}\/(.+)\/(\d+)")
invite_pattern = re.compile(r"t.me(\/joinchat){0,1}\/\+?(.+)")
def setup(self) -> None:
"""
1. makes a copy of session_file that is removed in cleanup
2. trigger login process for telegram or proceed if already saved in a session file
@ -52,18 +56,20 @@ class TelethonExtractor(Extractor):
channel_invite = self.channel_invites[i]
channel_id = channel_invite.get("id", False)
invite = channel_invite["invite"]
if (match := self.invite_pattern.search(invite)):
if match := self.invite_pattern.search(invite):
try:
if channel_id:
ent = self.client.get_entity(int(channel_id)) # fails if not a member
else:
ent = self.client.get_entity(invite) # fails if not a member
logger.warning(f"please add the property id='{ent.id}' to the 'channel_invites' configuration where {invite=}, not doing so can lead to a minutes-long setup time due to telegram's rate limiting.")
except ValueError as e:
logger.warning(
f"please add the property id='{ent.id}' to the 'channel_invites' configuration where {invite=}, not doing so can lead to a minutes-long setup time due to telegram's rate limiting."
)
except ValueError:
logger.info(f"joining new channel {invite=}")
try:
self.client(ImportChatInviteRequest(match.group(2)))
except UserAlreadyParticipantError as e:
except UserAlreadyParticipantError:
logger.info(f"already joined {invite=}")
except InviteRequestSentError:
logger.warning(f"already sent a join request with {invite} still no answer")
@ -95,7 +101,8 @@ class TelethonExtractor(Extractor):
# detect URLs that we definitely cannot handle
match = self.valid_url.search(url)
logger.debug(f"TELETHON: {match=}")
if not match: return False
if not match:
return False
is_private = match.group(1) == "/c"
chat = int(match.group(2)) if is_private else match.group(2)
@ -112,32 +119,40 @@ class TelethonExtractor(Extractor):
logger.error(f"Could not fetch telegram {url} possibly it's private: {e}")
return False
except ChannelInvalidError as e:
logger.error(f"Could not fetch telegram {url}. This error may be fixed if you setup a bot_token in addition to api_id and api_hash (but then private channels will not be archived, we need to update this logic to handle both): {e}")
logger.error(
f"Could not fetch telegram {url}. This error may be fixed if you setup a bot_token in addition to api_id and api_hash (but then private channels will not be archived, we need to update this logic to handle both): {e}"
)
return False
logger.debug(f"TELETHON GOT POST {post=}")
if post is None: return False
if post is None:
return False
media_posts = self._get_media_posts_in_group(chat, post)
logger.debug(f'got {len(media_posts)=} for {url=}')
logger.debug(f"got {len(media_posts)=} for {url=}")
tmp_dir = self.tmp_dir
group_id = post.grouped_id if post.grouped_id is not None else post.id
title = post.message
for mp in media_posts:
if len(mp.message) > len(title): title = mp.message # save the longest text found (usually only 1)
if len(mp.message) > len(title):
title = mp.message # save the longest text found (usually only 1)
# media can also be in entities
if mp.entities:
other_media_urls = [e.url for e in mp.entities if hasattr(e, "url") and e.url and self._guess_file_type(e.url) in ["video", "image", "audio"]]
other_media_urls = [
e.url
for e in mp.entities
if hasattr(e, "url") and e.url and self._guess_file_type(e.url) in ["video", "image", "audio"]
]
if len(other_media_urls):
logger.debug(f"Got {len(other_media_urls)} other media urls from {mp.id=}: {other_media_urls}")
for i, om_url in enumerate(other_media_urls):
filename = self.download_from_url(om_url, f'{chat}_{group_id}_{i}')
filename = self.download_from_url(om_url, f"{chat}_{group_id}_{i}")
result.add_media(Media(filename=filename), id=f"{group_id}_{i}")
filename_dest = os.path.join(tmp_dir, f'{chat}_{group_id}', str(mp.id))
filename_dest = os.path.join(tmp_dir, f"{chat}_{group_id}", str(mp.id))
filename = self.client.download_media(mp.media, filename_dest)
if not filename:
logger.debug(f"Empty media found, skipping {str(mp)=}")


@ -2,17 +2,18 @@
"name": "Thumbnail Enricher",
"type": ["enricher"],
"requires_setup": False,
"dependencies": {
"python": ["loguru", "ffmpeg"],
"bin": ["ffmpeg"]
},
"dependencies": {"python": ["loguru", "ffmpeg"], "bin": ["ffmpeg"]},
"configs": {
"thumbnails_per_minute": {"default": 60,
"thumbnails_per_minute": {
"default": 60,
"type": "int",
"help": "how many thumbnails to generate per minute of video, can be limited by max_thumbnails"},
"max_thumbnails": {"default": 16,
"help": "how many thumbnails to generate per minute of video, can be limited by max_thumbnails",
},
"max_thumbnails": {
"default": 16,
"type": "int",
"help": "limit the number of thumbnails to generate per video, 0 means no limit"},
"help": "limit the number of thumbnails to generate per video, 0 means no limit",
},
},
"description": """
Generates thumbnails for video files to provide visual previews.
@ -27,5 +28,5 @@
- Requires `ffmpeg` to be installed and accessible via the system's PATH.
- Handles videos without pre-existing duration metadata by probing with `ffmpeg`.
- Skips enrichment for non-video media files.
"""
""",
}
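
A sketch of how `thumbnails_per_minute` and `max_thumbnails` can interact to decide how many frames to extract; the capping logic is an assumption based on the help texts above, not a copy of the enricher's implementation:

    def number_of_thumbnails(duration_s: float, thumbnails_per_minute: int = 60, max_thumbnails: int = 16) -> int:
        """Scale the thumbnail count with video length, capped by max_thumbnails (0 means no cap)."""
        count = max(1, int(duration_s / 60 * thumbnails_per_minute))
        if max_thumbnails > 0:
            count = min(count, max_thumbnails)
        return count

    print(number_of_thumbnails(30))  # 30s video -> capped at 16
    print(number_of_thumbnails(8))   # 8s video  -> 8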


@ -6,7 +6,9 @@ visual snapshots of the video's keyframes, helping users preview content
and identify important moments without watching the entire video.
"""
import ffmpeg, os
import ffmpeg
import os
from loguru import logger
from auto_archiver.core import Enricher
@ -36,7 +38,9 @@ class ThumbnailEnricher(Enricher):
if duration is None:
try:
probe = ffmpeg.probe(m.filename)
duration = float(next(stream for stream in probe['streams'] if stream['codec_type'] == 'video')['duration'])
duration = float(
next(stream for stream in probe["streams"] if stream["codec_type"] == "video")["duration"]
)
to_enrich.media[m_id].set("duration", duration)
except Exception as e:
logger.error(f"error getting duration of video {m.filename}: {e}")
@ -48,11 +52,13 @@ class ThumbnailEnricher(Enricher):
thumbnails_media = []
for index, timestamp in enumerate(timestamps):
output_path = os.path.join(folder, f"out{index}.jpg")
ffmpeg.input(m.filename, ss=timestamp).filter('scale', 512, -1).output(output_path, vframes=1, loglevel="quiet").run()
ffmpeg.input(m.filename, ss=timestamp).filter("scale", 512, -1).output(
output_path, vframes=1, loglevel="quiet"
).run()
try:
thumbnails_media.append(Media(
filename=output_path)
thumbnails_media.append(
Media(filename=output_path)
.set("id", f"thumbnail_{index}")
.set("timestamp", "%.3fs" % timestamp)
)


@ -1 +0,0 @@
from .tiktok_tikwm_extractor import TiktokTikwmExtractor


@ -1,23 +0,0 @@
{
"name": "Tiktok Tikwm Extractor",
"type": ["extractor"],
"requires_setup": False,
"dependencies": {
"python": ["loguru", "requests"],
"bin": []
},
"description": """
Uses an unofficial TikTok video download platform's API to download videos: https://tikwm.com/
This extractor complements the generic_extractor which can already get TikTok videos, but this one can extract special videos like those marked as sensitive.
### Features
- Downloads the video and, if possible, also the video cover.
- Stores extra metadata about the post like author information, and more as returned by tikwm.com.
### Notes
- If tikwm.com is down, this extractor will not work.
- If tikwm.com changes their API, this extractor may break.
- If no video is found, this extractor will consider the extraction failed.
"""
}


@ -1,75 +0,0 @@
import re
import requests
from loguru import logger
from datetime import datetime, timezone
from yt_dlp.extractor.tiktok import TikTokIE
from auto_archiver.core import Extractor
from auto_archiver.core import Metadata, Media
class TiktokTikwmExtractor(Extractor):
"""
Extractor for TikTok that uses an unofficial API and can capture content that requires a login, like sensitive content.
"""
TIKWM_ENDPOINT = "https://www.tikwm.com/api/?url={url}"
def download(self, item: Metadata) -> Metadata:
url = item.get_url()
if not re.match(TikTokIE._VALID_URL, url):
return False
endpoint = TiktokTikwmExtractor.TIKWM_ENDPOINT.format(url=url)
r = requests.get(endpoint)
if r.status_code != 200:
logger.error(f"unexpected status code '{r.status_code}' from tikwm.com for {url=}:")
return False
try:
json_response = r.json()
except ValueError:
logger.error(f"failed to parse JSON response from tikwm.com for {url=}")
return False
if not json_response.get('msg') == 'success' or not (api_data := json_response.get('data', {})):
logger.error(f"failed to get a valid response from tikwm.com for {url=}: {json_response}")
return False
# tries to get the non-watermarked version first
video_url = api_data.pop("play", api_data.pop("wmplay", None))
if not video_url:
logger.error(f"no valid video URL found in response from tikwm.com for {url=}")
return False
# prepare result, start by downloading video
result = Metadata()
# get the cover if possible
cover_url = api_data.pop("origin_cover", api_data.pop("cover", api_data.pop("ai_dynamic_cover", None)))
if cover_url and (cover_downloaded := self.download_from_url(cover_url)):
result.add_media(Media(cover_downloaded))
# get the video or fail
video_downloaded = self.download_from_url(video_url, f"vid_{api_data.get('id', '')}")
if not video_downloaded:
logger.error(f"failed to download video from {video_url}")
return False
video_media = Media(video_downloaded)
if duration := api_data.pop("duration", None):
video_media.set("duration", duration)
result.add_media(video_media)
# add remaining metadata
result.set_title(api_data.pop("title", ""))
if created_at := api_data.pop("create_time", None):
result.set_timestamp(datetime.fromtimestamp(created_at, tz=timezone.utc))
if (author := api_data.pop("author", None)):
result.set("author", author)
result.set("api_data", api_data)
return result.success("tikwm")


@ -3,14 +3,7 @@
"type": ["enricher"],
"requires_setup": True,
"dependencies": {
"python": [
"loguru",
"slugify",
"tsp_client",
"asn1crypto",
"certvalidator",
"certifi"
],
"python": ["loguru", "slugify", "tsp_client", "asn1crypto", "certvalidator", "certifi"],
},
"configs": {
"tsa_urls": {
@ -20,10 +13,8 @@
"http://timestamp.identrust.com",
# "https://timestamp.entrust.net/TSS/RFC3161sha2TS", # not valid for timestamping
# "https://timestamp.sectigo.com", # wait 15 seconds between each request.
# [Adobe: European Union Trusted Lists].
# "https://timestamp.sectigo.com/qualified", # wait 15 seconds between each request.
# [Windows Cert Store]
"http://timestamp.globalsign.com/tsa/r6advanced1",
# [Adobe: European Union Trusted Lists] and [Windows Cert Store]
@ -50,5 +41,5 @@
### Notes
- Should be run after the `hash_enricher` to ensure file hashes are available.
- Requires internet access to interact with the configured TSAs.
"""
""",
}
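
A condensed sketch of one timestamping round trip, reusing the same `tsp_client` calls the enricher below makes; the import line and the data being signed are assumptions (the enricher's own imports sit outside this hunk, and the real input is the concatenated media hashes):

    # Assumption: these names mirror the enricher's imports, which are not visible in this hunk;
    # the exact import path may differ between tsp_client versions.
    from tsp_client import TSPSigner, SigningSettings, DigestAlgorithm

    tsa_url = "http://timestamp.identrust.com"  # one of the default TSAs listed above
    data_to_sign = "placeholder-digest-1\nplaceholder-digest-2"

    signing_settings = SigningSettings(tsp_server=tsa_url, digest_algorithm=DigestAlgorithm.SHA256)
    signer = TSPSigner()
    tsr_bytes = signer.sign(message=data_to_sign.encode("utf8"), signing_settings=signing_settings)

    with open("timestamp_token.tsr", "wb") as f:
        f.write(tsr_bytes)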


@ -11,6 +11,7 @@ import certifi
from auto_archiver.core import Enricher
from auto_archiver.core import Metadata, Media
class TimestampingEnricher(Enricher):
"""
Uses several RFC3161 Time Stamp Authorities to generate a timestamp token that will be preserved. This can be used to prove that a certain file existed at a certain time, useful for legal purposes, for example, to prove that a certain file was not tampered with after a certain date.
@ -25,7 +26,9 @@ class TimestampingEnricher(Enricher):
logger.debug(f"RFC3161 timestamping existing files for {url=}")
# create a new text file with the existing media hashes
hashes = [m.get("hash").replace("SHA-256:", "").replace("SHA3-512:", "") for m in to_enrich.media if m.get("hash")]
hashes = [
m.get("hash").replace("SHA-256:", "").replace("SHA3-512:", "") for m in to_enrich.media if m.get("hash")
]
if not len(hashes):
logger.warning(f"No hashes found in {url=}")
@ -41,11 +44,12 @@ class TimestampingEnricher(Enricher):
timestamp_tokens = []
from slugify import slugify
for tsa_url in self.tsa_urls:
try:
signing_settings = SigningSettings(tsp_server=tsa_url, digest_algorithm=DigestAlgorithm.SHA256)
signer = TSPSigner()
message = bytes(data_to_sign, encoding='utf8')
message = bytes(data_to_sign, encoding="utf8")
# send TSQ and get TSR from the TSA server
signed = signer.sign(message=message, signing_settings=signing_settings)
# fail if there's any issue with the certificates, uses certifi list of trusted CAs
@ -54,7 +58,8 @@ class TimestampingEnricher(Enricher):
cert_chain = self.download_and_verify_certificate(signed)
# continue with saving the timestamp token
tst_fn = os.path.join(tmp_dir, f"timestamp_token_{slugify(tsa_url)}")
with open(tst_fn, "wb") as f: f.write(signed)
with open(tst_fn, "wb") as f:
f.write(signed)
timestamp_tokens.append(Media(filename=tst_fn).set("tsa", tsa_url).set("cert_chain", cert_chain))
except Exception as e:
logger.warning(f"Error while timestamping {url=} with {tsa_url=}: {e}")
@ -75,7 +80,7 @@ class TimestampingEnricher(Enricher):
tst = ContentInfo.load(signed)
trust_roots = []
with open(certifi.where(), 'rb') as f:
with open(certifi.where(), "rb") as f:
for _, _, der_bytes in pem.unarmor(f.read(), multiple=True):
trust_roots.append(der_bytes)
context = ValidationContext(trust_roots=trust_roots)
@ -87,7 +92,7 @@ class TimestampingEnricher(Enricher):
intermediate_certs.append(certificates[i].dump())
validator = CertificateValidator(first_cert, intermediate_certs=intermediate_certs, validation_context=context)
path = validator.validate_usage({'digital_signature'}, extended_key_usage={'time_stamping'})
path = validator.validate_usage({"digital_signature"}, extended_key_usage={"time_stamping"})
cert_chain = []
for cert in path:


@ -3,15 +3,22 @@
"type": ["extractor"],
"requires_setup": True,
"dependencies": {
"python": ["requests",
"python": [
"requests",
"loguru",
"pytwitter",
"slugify",],
"bin": [""]
"slugify",
],
"bin": [""],
},
"configs": {
"bearer_token": {"default": None, "help": "[deprecated: see bearer_tokens] twitter API bearer_token which is enough for archiving, if not provided you will need consumer_key, consumer_secret, access_token, access_secret"},
"bearer_tokens": {"default": [], "help": " a list of twitter API bearer_token which is enough for archiving, if not provided you will need consumer_key, consumer_secret, access_token, access_secret, if provided you can still add those for better rate limits. CSV of bearer tokens if provided via the command line",
"bearer_token": {
"default": None,
"help": "[deprecated: see bearer_tokens] twitter API bearer_token which is enough for archiving, if not provided you will need consumer_key, consumer_secret, access_token, access_secret",
},
"bearer_tokens": {
"default": [],
"help": " a list of twitter API bearer_token which is enough for archiving, if not provided you will need consumer_key, consumer_secret, access_token, access_secret, if provided you can still add those for better rate limits. CSV of bearer tokens if provided via the command line",
},
"consumer_key": {"default": None, "help": "twitter API consumer_key"},
"consumer_secret": {"default": None, "help": "twitter API consumer_secret"},
@ -39,6 +46,5 @@
- **Access Token and Secret**: Complements the consumer key for enhanced API capabilities.
Credentials can be obtained by creating a Twitter developer account at [Twitter Developer Platform](https://developer.twitter.com/en).
"""
,
""",
}


@ -11,8 +11,8 @@ from slugify import slugify
from auto_archiver.core import Extractor
from auto_archiver.core import Metadata, Media
class TwitterApiExtractor(Extractor):
class TwitterApiExtractor(Extractor):
valid_url: re.Pattern = re.compile(r"(?:twitter|x).com\/(?:\#!\/)?(\w+)\/status(?:es)?\/(\d+)")
def setup(self) -> None:
@ -23,9 +23,17 @@ class TwitterApiExtractor(Extractor):
if self.bearer_token:
self.apis.append(Api(bearer_token=self.bearer_token))
if self.consumer_key and self.consumer_secret and self.access_token and self.access_secret:
self.apis.append(Api(consumer_key=self.consumer_key, consumer_secret=self.consumer_secret,
access_token=self.access_token, access_secret=self.access_secret))
assert self.api_client is not None, "Missing Twitter API configurations, please provide either AND/OR (consumer_key, consumer_secret, access_token, access_secret) to use this archiver, you can provide both for better rate-limit results."
self.apis.append(
Api(
consumer_key=self.consumer_key,
consumer_secret=self.consumer_secret,
access_token=self.access_token,
access_secret=self.access_secret,
)
)
assert self.api_client is not None, (
"Missing Twitter API configurations, please provide either AND/OR (consumer_key, consumer_secret, access_token, access_secret) to use this archiver, you can provide both for better rate-limit results."
)
@property # getter .mimetype
def api_client(self) -> str:
@ -33,20 +41,20 @@ class TwitterApiExtractor(Extractor):
def sanitize_url(self, url: str) -> str:
# expand URL if t.co and clean tracker GET params
if 'https://t.co/' in url:
if "https://t.co/" in url:
try:
r = requests.get(url, timeout=30)
logger.debug(f'Expanded url {url} to {r.url}')
logger.debug(f"Expanded url {url} to {r.url}")
url = r.url
except:
logger.error(f'Failed to expand url {url}')
except Exception:
logger.error(f"Failed to expand url {url}")
return url
def download(self, item: Metadata) -> Metadata:
# call download retry until success or no more apis
while self.api_index < len(self.apis):
if res := self.download_retry(item): return res
if res := self.download_retry(item):
return res
self.api_index += 1
self.api_index = 0
return False
@ -54,7 +62,8 @@ class TwitterApiExtractor(Extractor):
def get_username_tweet_id(self, url):
# detect URLs that we definitely cannot handle
matches = self.valid_url.findall(url)
if not len(matches): return False, False
if not len(matches):
return False, False
username, tweet_id = matches[0] # only one URL supported
logger.debug(f"Found {username=} and {tweet_id=} in {url=}")
@ -65,10 +74,16 @@ class TwitterApiExtractor(Extractor):
url = item.get_url()
# detect URLs that we definitely cannot handle
username, tweet_id = self.get_username_tweet_id(url)
if not username: return False
if not username:
return False
try:
tweet = self.api_client.get_tweet(tweet_id, expansions=["attachments.media_keys"], media_fields=["type", "duration_ms", "url", "variants"], tweet_fields=["attachments", "author_id", "created_at", "entities", "id", "text", "possibly_sensitive"])
tweet = self.api_client.get_tweet(
tweet_id,
expansions=["attachments.media_keys"],
media_fields=["type", "duration_ms", "url", "variants"],
tweet_fields=["attachments", "author_id", "created_at", "entities", "id", "text", "possibly_sensitive"],
)
logger.debug(tweet)
except Exception as e:
logger.error(f"Could not get tweet: {e}")
@ -88,29 +103,35 @@ class TwitterApiExtractor(Extractor):
mimetype = "image/jpeg"
elif hasattr(m, "variants"):
variant = self.choose_variant(m.variants)
if not variant: continue
if not variant:
continue
media.set("src", variant.url)
mimetype = variant.content_type
else:
continue
logger.info(f"Found media {media}")
ext = mimetypes.guess_extension(mimetype)
media.filename = self.download_from_url(media.get("src"), f'{slugify(url)}_{i}{ext}')
media.filename = self.download_from_url(media.get("src"), f"{slugify(url)}_{i}{ext}")
result.add_media(media)
result.set_content(json.dumps({
result.set_content(
json.dumps(
{
"id": tweet.data.id,
"text": tweet.data.text,
"created_at": tweet.data.created_at,
"author_id": tweet.data.author_id,
"geo": tweet.data.geo,
"lang": tweet.data.lang,
"media": urls
}, ensure_ascii=False, indent=4))
"media": urls,
},
ensure_ascii=False,
indent=4,
)
)
return result.success("twitter-api")
def choose_variant(self, variants):
"""
Chooses the highest quality variant possible out of a list of variants
"""


@ -7,10 +7,8 @@
"python": ["loguru", "vk_url_scraper"],
},
"configs": {
"username": {"required": True,
"help": "valid VKontakte username"},
"password": {"required": True,
"help": "valid VKontakte password"},
"username": {"required": True, "help": "valid VKontakte username"},
"password": {"required": True, "help": "valid VKontakte password"},
"session_file": {
"default": "secrets/vk_config.v2.json",
"help": "valid VKontakte password",


@ -18,11 +18,13 @@ class VkExtractor(Extractor):
def download(self, item: Metadata) -> Metadata:
url = item.get_url()
if "vk.com" not in item.netloc: return False
if "vk.com" not in item.netloc:
return False
# some urls can contain multiple wall/photo/... parts and all will be fetched
vk_scrapes = self.vks.scrape(url)
if not len(vk_scrapes): return False
if not len(vk_scrapes):
return False
logger.debug(f"VK: got {len(vk_scrapes)} scraped instances")
result = Metadata()


@ -4,32 +4,36 @@
"entry_point": "wacz_extractor_enricher::WaczExtractorEnricher",
"requires_setup": True,
"dependencies": {
"python": [
"loguru",
"jsonlines",
"warcio"
],
"python": ["loguru", "jsonlines", "warcio"],
# TODO?
"bin": [
"docker"
]
"bin": ["docker"],
},
"configs": {
"profile": {"default": None, "help": "browsertrix-profile (for profile generation see https://github.com/webrecorder/browsertrix-crawler#creating-and-using-browser-profiles)."},
"profile": {
"default": None,
"help": "browsertrix-profile (for profile generation see https://github.com/webrecorder/browsertrix-crawler#creating-and-using-browser-profiles).",
},
"docker_commands": {"default": None, "help": "if a custom docker invocation is needed"},
"timeout": {"default": 120,
"timeout": {"default": 120, "help": "timeout for WACZ generation in seconds", "type": "int"},
"extract_media": {
"default": False,
"type": "bool",
"help": "If enabled all the images/videos/audio present in the WACZ archive will be extracted into separate Media and appear in the html report. The .wacz file will be kept untouched.",
},
"extract_screenshot": {
"default": True,
"type": "bool",
"help": "If enabled the screenshot captured by browsertrix will be extracted into separate Media and appear in the html report. The .wacz file will be kept untouched.",
},
"socks_proxy_host": {
"default": None,
"help": "SOCKS proxy host for browsertrix-crawler, use in combination with socks_proxy_port. eg: user:password@host",
},
"socks_proxy_port": {
"default": None,
"type": "int",
"help": "timeout for WACZ generation in seconds", "type": "int"},
"extract_media": {"default": False,
"type": 'bool',
"help": "If enabled all the images/videos/audio present in the WACZ archive will be extracted into separate Media and appear in the html report. The .wacz file will be kept untouched."
"help": "SOCKS proxy port for browsertrix-crawler, use in combination with socks_proxy_host. eg 1234",
},
"extract_screenshot": {"default": True,
"type": 'bool',
"help": "If enabled the screenshot captured by browsertrix will be extracted into separate Media and appear in the html report. The .wacz file will be kept untouched."
},
"socks_proxy_host": {"default": None, "help": "SOCKS proxy host for browsertrix-crawler, use in combination with socks_proxy_port. eg: user:password@host"},
"socks_proxy_port": {"default": None, "type":"int", "help": "SOCKS proxy port for browsertrix-crawler, use in combination with socks_proxy_host. eg 1234"},
"proxy_server": {"default": None, "help": "SOCKS server proxy URL, in development"},
},
"description": """
@ -45,5 +49,5 @@
### Notes
- Requires Docker for running `browsertrix-crawler` .
- Configurable via parameters for timeout, media extraction, screenshots, and proxy settings.
"""
""",
}
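
For orientation, this is roughly the `docker run` invocation the module assembles when Docker is used (the crawl URL, host path and collection name are placeholders; the flag list mirrors the `cmd` built in the code further below):

    collection = "c0a1b2c3"                      # placeholder: a random collection id
    browsertrix_home_host = "/tmp/browsertrix"   # placeholder: host folder mounted into the crawler

    docker_cmd = [
        "docker", "run", "--rm",
        "-v", f"{browsertrix_home_host}:/crawls/",
        "webrecorder/browsertrix-crawler",
        "crawl",
        "--url", "https://example.com",
        "--scopeType", "page",
        "--generateWACZ",
        "--text", "to-pages",
        "--screenshot", "fullPage",
        "--collection", collection,
        "--id", collection,
        "--saveState", "never",
        "--behaviors", "autoscroll,autoplay,autofetch,siteSpecific",
        "--behaviorTimeout", "120",
        "--timeout", "120",
        "--diskUtilization", "99",
    ]
    print(" ".join(docker_cmd))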


@ -1,6 +1,8 @@
import jsonlines
import mimetypes
import os, shutil, subprocess
import os
import shutil
import subprocess
from zipfile import ZipFile
from loguru import logger
from warcio.archiveiterator import ArchiveIterator
@ -19,13 +21,12 @@ class WaczExtractorEnricher(Enricher, Extractor):
"""
def setup(self) -> None:
self.use_docker = os.environ.get('WACZ_ENABLE_DOCKER') or not os.environ.get('RUNNING_IN_DOCKER')
self.docker_in_docker = os.environ.get('WACZ_ENABLE_DOCKER') and os.environ.get('RUNNING_IN_DOCKER')
self.use_docker = os.environ.get("WACZ_ENABLE_DOCKER") or not os.environ.get("RUNNING_IN_DOCKER")
self.docker_in_docker = os.environ.get("WACZ_ENABLE_DOCKER") and os.environ.get("RUNNING_IN_DOCKER")
self.cwd_dind = f"/crawls/crawls{random_str(8)}"
self.browsertrix_home_host = os.environ.get('BROWSERTRIX_HOME_HOST')
self.browsertrix_home_container = os.environ.get('BROWSERTRIX_HOME_CONTAINER') or self.browsertrix_home_host
self.browsertrix_home_host = os.environ.get("BROWSERTRIX_HOME_HOST")
self.browsertrix_home_container = os.environ.get("BROWSERTRIX_HOME_CONTAINER") or self.browsertrix_home_host
# create crawls folder if not exists, so it can be safely removed in cleanup
if self.docker_in_docker:
os.makedirs(self.cwd_dind, exist_ok=True)
@ -55,18 +56,29 @@ class WaczExtractorEnricher(Enricher, Extractor):
cmd = [
"crawl",
"--url", url,
"--scopeType", "page",
"--url",
url,
"--scopeType",
"page",
"--generateWACZ",
"--text", "to-pages",
"--screenshot", "fullPage",
"--collection", collection,
"--id", collection,
"--saveState", "never",
"--behaviors", "autoscroll,autoplay,autofetch,siteSpecific",
"--behaviorTimeout", str(self.timeout),
"--timeout", str(self.timeout),
"--diskUtilization", "99",
"--text",
"to-pages",
"--screenshot",
"fullPage",
"--collection",
collection,
"--id",
collection,
"--saveState",
"never",
"--behaviors",
"autoscroll,autoplay,autofetch,siteSpecific",
"--behaviorTimeout",
str(self.timeout),
"--timeout",
str(self.timeout),
"--diskUtilization",
"99",
# "--blockAds" # note: this has been known to cause issues on cloudflare protected sites
]
@ -80,7 +92,14 @@ class WaczExtractorEnricher(Enricher, Extractor):
if self.docker_commands:
cmd = self.docker_commands + cmd
else:
cmd = ["docker", "run", "--rm", "-v", f"{browsertrix_home_host}:/crawls/", "webrecorder/browsertrix-crawler"] + cmd
cmd = [
"docker",
"run",
"--rm",
"-v",
f"{browsertrix_home_host}:/crawls/",
"webrecorder/browsertrix-crawler",
] + cmd
if self.profile:
profile_fn = os.path.join(browsertrix_home_container, "profile.tar.gz")
@ -109,7 +128,6 @@ class WaczExtractorEnricher(Enricher, Extractor):
logger.error(f"WACZ generation failed: {e}")
return False
if self.docker_in_docker:
wacz_fn = os.path.join(self.cwd_dind, "collections", collection, f"{collection}.wacz")
elif self.use_docker:
@ -138,11 +156,10 @@ class WaczExtractorEnricher(Enricher, Extractor):
logger.info(f"Parsing pages.jsonl {jsonl_fn=}")
with jsonlines.open(jsonl_fn) as reader:
for obj in reader:
if 'title' in obj:
to_enrich.set_title(obj['title'])
if 'text' in obj:
to_enrich.set_content(obj['text'])
if "title" in obj:
to_enrich.set_title(obj["title"])
if "text" in obj:
to_enrich.set_content(obj["text"])
return True
@ -155,36 +172,41 @@ class WaczExtractorEnricher(Enricher, Extractor):
# unzipping the .wacz
tmp_dir = self.tmp_dir
unzipped_dir = os.path.join(tmp_dir, "unzipped")
with ZipFile(wacz_filename, 'r') as z_obj:
with ZipFile(wacz_filename, "r") as z_obj:
z_obj.extractall(path=unzipped_dir)
# if warc is split into multiple gzip chunks, merge those
warc_dir = os.path.join(unzipped_dir, "archive")
warc_filename = os.path.join(tmp_dir, "merged.warc")
with open(warc_filename, 'wb') as outfile:
with open(warc_filename, "wb") as outfile:
for filename in sorted(os.listdir(warc_dir)):
if filename.endswith('.gz'):
if filename.endswith(".gz"):
chunk_file = os.path.join(warc_dir, filename)
with open(chunk_file, 'rb') as infile:
with open(chunk_file, "rb") as infile:
shutil.copyfileobj(infile, outfile)
# get media out of .warc
counter = 0
seen_urls = set()
import json
with open(warc_filename, 'rb') as warc_stream:
with open(warc_filename, "rb") as warc_stream:
for record in ArchiveIterator(warc_stream):
# only include fetched resources
if record.rec_type == "resource" and record.content_type == "image/png" and self.extract_screenshot: # screenshots
if (
record.rec_type == "resource" and record.content_type == "image/png" and self.extract_screenshot
): # screenshots
fn = os.path.join(tmp_dir, f"warc-file-{counter}.png")
with open(fn, "wb") as outf: outf.write(record.raw_stream.read())
with open(fn, "wb") as outf:
outf.write(record.raw_stream.read())
m = Media(filename=fn)
to_enrich.add_media(m, "browsertrix-screenshot")
counter += 1
if not self.extract_media: continue
if not self.extract_media:
continue
if record.rec_type != 'response': continue
record_url = record.rec_headers.get_header('WARC-Target-URI')
if record.rec_type != "response":
continue
record_url = record.rec_headers.get_header("WARC-Target-URI")
if not UrlUtil.is_relevant_url(record_url):
logger.debug(f"Skipping irrelevant URL {record_url} but it's still present in the WACZ.")
continue
@ -194,8 +216,10 @@ class WaczExtractorEnricher(Enricher, Extractor):
# filter by media mimetypes
content_type = record.http_headers.get("Content-Type")
if not content_type: continue
if not any(x in content_type for x in ["video", "image", "audio"]): continue
if not content_type:
continue
if not any(x in content_type for x in ["video", "image", "audio"]):
continue
# create local file and add media
ext = mimetypes.guess_extension(content_type)
@ -203,7 +227,8 @@ class WaczExtractorEnricher(Enricher, Extractor):
fn = os.path.join(tmp_dir, warc_fn)
record_url_best_qual = UrlUtil.twitter_best_quality_url(record_url)
with open(fn, "wb") as outf: outf.write(record.raw_stream.read())
with open(fn, "wb") as outf:
outf.write(record.raw_stream.read())
m = Media(filename=fn)
m.set("src", record_url)
@ -213,10 +238,14 @@ class WaczExtractorEnricher(Enricher, Extractor):
m.filename = self.download_from_url(record_url_best_qual, warc_fn)
m.set("src", record_url_best_qual)
m.set("src_alternative", record_url)
except Exception as e: logger.warning(f"Unable to download best quality URL for {record_url=} got error {e}, using original in WARC.")
except Exception as e:
logger.warning(
f"Unable to download best quality URL for {record_url=} got error {e}, using original in WARC."
)
# remove bad videos
if m.is_video() and not m.is_valid_video(): continue
if m.is_video() and not m.is_valid_video():
continue
to_enrich.add_media(m, warc_fn)
counter += 1


@ -1,11 +1,13 @@
import json
from loguru import logger
import time, requests
import time
import requests
from auto_archiver.core import Extractor, Enricher
from auto_archiver.utils import url as UrlUtil
from auto_archiver.core import Metadata
class WaybackExtractorEnricher(Enricher, Extractor):
"""
Submits the current URL to the webarchive and returns a job_id or completed archive.
@ -22,8 +24,10 @@ class WaybackExtractorEnricher(Enricher, Extractor):
def enrich(self, to_enrich: Metadata) -> bool:
proxies = {}
if self.proxy_http: proxies["http"] = self.proxy_http
if self.proxy_https: proxies["https"] = self.proxy_https
if self.proxy_http:
proxies["http"] = self.proxy_http
if self.proxy_https:
proxies["https"] = self.proxy_https
url = to_enrich.get_url()
if UrlUtil.is_auth_wall(url):
@ -36,15 +40,12 @@ class WaybackExtractorEnricher(Enricher, Extractor):
logger.info(f"Wayback enricher had already been executed: {to_enrich.get('wayback')}")
return True
ia_headers = {
"Accept": "application/json",
"Authorization": f"LOW {self.key}:{self.secret}"
}
post_data = {'url': url}
ia_headers = {"Accept": "application/json", "Authorization": f"LOW {self.key}:{self.secret}"}
post_data = {"url": url}
if self.if_not_archived_within:
post_data["if_not_archived_within"] = self.if_not_archived_within
# see https://docs.google.com/document/d/1Nsv52MvSjbLb2PCpHlat0gkzw0EvtSgpKHu4mk0MnrA for more options
r = requests.post('https://web.archive.org/save/', headers=ia_headers, data=post_data, proxies=proxies)
r = requests.post("https://web.archive.org/save/", headers=ia_headers, data=post_data, proxies=proxies)
if r.status_code != 200:
logger.error(em := f"Internet archive failed with status of {r.status_code}: {r.json()}")
@ -53,15 +54,14 @@ class WaybackExtractorEnricher(Enricher, Extractor):
# check job status
try:
job_id = r.json().get('job_id')
job_id = r.json().get("job_id")
if not job_id:
logger.error(f"Wayback failed with {r.json()}")
return False
except json.decoder.JSONDecodeError as e:
except json.decoder.JSONDecodeError:
logger.error(f"Expected a JSON with job_id from Wayback and got {r.text}")
return False
# waits at most timeout seconds until job is completed, otherwise only enriches the job_id information
start_time = time.time()
wayback_url = False
@ -69,17 +69,19 @@ class WaybackExtractorEnricher(Enricher, Extractor):
while not wayback_url and time.time() - start_time <= self.timeout:
try:
logger.debug(f"GETting status for {job_id=} on {url=} ({attempt=})")
r_status = requests.get(f'https://web.archive.org/save/status/{job_id}', headers=ia_headers, proxies=proxies)
r_status = requests.get(
f"https://web.archive.org/save/status/{job_id}", headers=ia_headers, proxies=proxies
)
r_json = r_status.json()
if r_status.status_code == 200 and r_json['status'] == 'success':
if r_status.status_code == 200 and r_json["status"] == "success":
wayback_url = f"https://web.archive.org/web/{r_json['timestamp']}/{r_json['original_url']}"
elif r_status.status_code != 200 or r_json['status'] != 'pending':
elif r_status.status_code != 200 or r_json["status"] != "pending":
logger.error(f"Wayback failed with {r_json}")
return False
except requests.exceptions.RequestException as e:
logger.warning(f"RequestException: fetching status for {url=} due to: {e}")
break
except json.decoder.JSONDecodeError as e:
except json.decoder.JSONDecodeError:
logger.error(f"Expected a JSON from Wayback and got {r.text} for {url=}")
break
except Exception as e:
@ -91,6 +93,8 @@ class WaybackExtractorEnricher(Enricher, Extractor):
if wayback_url:
to_enrich.set("wayback", wayback_url)
else:
to_enrich.set("wayback", {"job_id": job_id, "check_status": f'https://web.archive.org/save/status/{job_id}'})
to_enrich.set(
"wayback", {"job_id": job_id, "check_status": f"https://web.archive.org/save/status/{job_id}"}
)
to_enrich.set("check wayback", f"https://web.archive.org/web/*/{url}")
return True
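
Stripped of the enricher plumbing, the Save Page Now exchange above is one authenticated POST to request a capture followed by polling the status endpoint until the job succeeds. A minimal sketch of that flow, without the timeout and retry handling the enricher adds (the API key and secret are placeholders):

    import time

    import requests

    API_KEY, API_SECRET = "IA_KEY", "IA_SECRET"  # placeholders
    url = "https://example.com"

    headers = {"Accept": "application/json", "Authorization": f"LOW {API_KEY}:{API_SECRET}"}
    r = requests.post("https://web.archive.org/save/", headers=headers, data={"url": url})
    job_id = r.json()["job_id"]

    wayback_url = None
    while wayback_url is None:
        status = requests.get(f"https://web.archive.org/save/status/{job_id}", headers=headers).json()
        if status["status"] == "success":
            wayback_url = f"https://web.archive.org/web/{status['timestamp']}/{status['original_url']}"
        elif status["status"] != "pending":
            raise RuntimeError(f"Save Page Now failed: {status}")
        else:
            time.sleep(5)

    print(wayback_url)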


@ -6,19 +6,26 @@
"python": ["s3_storage", "loguru", "requests"],
},
"configs": {
"api_endpoint": {"required": True,
"help": "WhisperApi api endpoint, eg: https://whisperbox-api.com/api/v1, a deployment of https://github.com/bellingcat/whisperbox-transcribe."},
"api_key": {"required": True,
"help": "WhisperApi api key for authentication"},
"include_srt": {"default": False,
"api_endpoint": {
"required": True,
"help": "WhisperApi api endpoint, eg: https://whisperbox-api.com/api/v1, a deployment of https://github.com/bellingcat/whisperbox-transcribe.",
},
"api_key": {"required": True, "help": "WhisperApi api key for authentication"},
"include_srt": {
"default": False,
"type": "bool",
"help": "Whether to include a subtitle SRT (SubRip Subtitle file) for the video (can be used in video players)."},
"timeout": {"default": 90,
"help": "Whether to include a subtitle SRT (SubRip Subtitle file) for the video (can be used in video players).",
},
"timeout": {
"default": 90,
"type": "int",
"help": "How many seconds to wait at most for a successful job completion."},
"action": {"default": "translate",
"help": "How many seconds to wait at most for a successful job completion.",
},
"action": {
"default": "translate",
"help": "which Whisper operation to execute",
"choices": ["transcribe", "translate", "language_detection"]},
"choices": ["transcribe", "translate", "language_detection"],
},
},
"description": """
Integrates with a Whisper API service to transcribe, translate, or detect the language of audio and video files.
@ -35,5 +42,5 @@
- Only compatible with S3-compatible storage systems for media file accessibility.
- This stores the media files in S3 prior to enriching them, as Whisper requires public URLs to access the media files.
- Handles multiple jobs and retries for failed or incomplete processing.
"""
""",
}
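
The enricher drives a small REST workflow against the Whisper service: create a job, poll it, fetch its artifacts, then delete it. A compressed sketch of that loop; the endpoint, key and job payload field names are placeholders or assumptions, while the routes and status values mirror the code below:

    import time

    import requests

    API = "https://whisperbox-api.com/api/v1"              # example endpoint from the config help
    HEADERS = {"Authorization": "Bearer WHISPER_API_KEY"}  # placeholder key

    # Assumption: payload field names; the real module builds this from the public S3 URL
    # of the media file and the configured 'action'.
    payload = {"url": "https://bucket.example/video.mp4", "type": "translate"}
    job_id = requests.post(f"{API}/jobs", json=payload, headers=HEADERS).json()["id"]

    artifacts = []
    while True:
        job = requests.get(f"{API}/jobs/{job_id}", headers=HEADERS).json()
        if job["status"] == "success":
            artifacts = requests.get(f"{API}/jobs/{job_id}/artifacts", headers=HEADERS).json()
            requests.delete(f"{API}/jobs/{job_id}", headers=HEADERS)
            break
        if job["status"] == "error":
            raise RuntimeError(job["meta"]["error"])
        time.sleep(3)  # still processing

    for artifact in artifacts:
        print("\n".join(d["text"].strip() for d in artifact["data"]))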


@ -1,10 +1,12 @@
import traceback
import requests, time
import requests
import time
from loguru import logger
from auto_archiver.core import Enricher
from auto_archiver.core import Metadata, Media
class WhisperEnricher(Enricher):
"""
Connects with a Whisper API service to get texts out of audio
@ -13,15 +15,15 @@ class WhisperEnricher(Enricher):
"""
def setup(self) -> None:
self.stores = self.config['steps']['storages']
self.stores = self.config["steps"]["storages"]
self.s3 = self.module_factory.get_module("s3_storage", self.config)
if not "s3_storage" in self.stores:
logger.error("WhisperEnricher: To use the WhisperEnricher you need to use S3Storage so files are accessible publicly to the whisper service being called.")
if "s3_storage" not in self.stores:
logger.error(
"WhisperEnricher: To use the WhisperEnricher you need to use S3Storage so files are accessible publicly to the whisper service being called."
)
return
def enrich(self, to_enrich: Metadata) -> None:
url = to_enrich.get_url()
logger.debug(f"WHISPER[{self.action}]: iterating media items for {url=}.")
@ -36,20 +38,26 @@ class WhisperEnricher(Enricher):
logger.debug(f"JOB SUBMITTED: {job_id=} for {m.key=}")
to_enrich.media[i].set("whisper_model", {"job_id": job_id})
except Exception as e:
logger.error(f"Failed to submit whisper job for {m.filename=} with error {e}\n{traceback.format_exc()}")
logger.error(
f"Failed to submit whisper job for {m.filename=} with error {e}\n{traceback.format_exc()}"
)
job_results = self.check_jobs(job_results)
for i, m in enumerate(to_enrich.media):
if m.is_video() or m.is_audio():
job_id = to_enrich.media[i].get("whisper_model", {}).get("job_id")
if not job_id: continue
to_enrich.media[i].set("whisper_model", {
if not job_id:
continue
to_enrich.media[i].set(
"whisper_model",
{
"job_id": job_id,
"job_status_check": f"{self.api_endpoint}/jobs/{job_id}",
"job_artifacts_check": f"{self.api_endpoint}/jobs/{job_id}/artifacts",
**(job_results[job_id] if job_results[job_id] else {"result": "incomplete or failed job"})
})
**(job_results[job_id] if job_results[job_id] else {"result": "incomplete or failed job"}),
},
)
# append the extracted text to the content of the post so it gets written to the DBs like gsheets text column
if job_results[job_id]:
for k, v in job_results[job_id].items():
@ -57,7 +65,6 @@ class WhisperEnricher(Enricher):
to_enrich.set_content(f"\n[automatic video transcript]: {v}")
def submit_job(self, media: Media):
s3_url = self.s3.get_cdn_url(media)
assert s3_url in media.urls, f"Could not find S3 url ({s3_url}) in list of stored media urls "
payload = {
@ -66,10 +73,14 @@ class WhisperEnricher(Enricher):
# "language": "string" # may be a config
}
logger.debug(f"calling API with {payload=}")
response = requests.post(f'{self.api_endpoint}/jobs', json=payload, headers={'Authorization': f'Bearer {self.api_key}'})
assert response.status_code == 201, f"calling the whisper api {self.api_endpoint} returned a non-success code: {response.status_code}"
response = requests.post(
f"{self.api_endpoint}/jobs", json=payload, headers={"Authorization": f"Bearer {self.api_key}"}
)
assert response.status_code == 201, (
f"calling the whisper api {self.api_endpoint} returned a non-success code: {response.status_code}"
)
logger.debug(response.json())
return response.json()['id']
return response.json()["id"]
def check_jobs(self, job_results: dict):
start_time = time.time()
@ -77,24 +88,33 @@ class WhisperEnricher(Enricher):
while not all_completed and (time.time() - start_time) <= self.timeout:
all_completed = True
for job_id in job_results:
if job_results[job_id] != False: continue
if job_results[job_id] is not False:
continue
all_completed = False # at least one not ready
try: job_results[job_id] = self.check_job(job_id)
try:
job_results[job_id] = self.check_job(job_id)
except Exception as e:
logger.error(f"Failed to check {job_id=} with error {e}\n{traceback.format_exc()}")
if not all_completed: time.sleep(3)
if not all_completed:
time.sleep(3)
return job_results
def check_job(self, job_id):
r = requests.get(f'{self.api_endpoint}/jobs/{job_id}', headers={'Authorization': f'Bearer {self.api_key}'})
r = requests.get(f"{self.api_endpoint}/jobs/{job_id}", headers={"Authorization": f"Bearer {self.api_key}"})
assert r.status_code == 200, f"Job status did not respond with 200, instead with: {r.status_code}"
j = r.json()
logger.debug(f"Checked job {job_id=} with status='{j['status']}'")
if j['status'] == "processing": return False
elif j['status'] == "error": return f"Error: {j['meta']['error']}"
elif j['status'] == "success":
r_res = requests.get(f'{self.api_endpoint}/jobs/{job_id}/artifacts', headers={'Authorization': f'Bearer {self.api_key}'})
assert r_res.status_code == 200, f"Job artifacts did not respond with 200, instead with: {r_res.status_code}"
if j["status"] == "processing":
return False
elif j["status"] == "error":
return f"Error: {j['meta']['error']}"
elif j["status"] == "success":
r_res = requests.get(
f"{self.api_endpoint}/jobs/{job_id}/artifacts", headers={"Authorization": f"Bearer {self.api_key}"}
)
assert r_res.status_code == 200, (
f"Job artifacts did not respond with 200, instead with: {r_res.status_code}"
)
logger.success(r_res.json())
result = {}
for art_id, artifact in enumerate(r_res.json()):
@ -102,12 +122,16 @@ class WhisperEnricher(Enricher):
full_text = []
for i, d in enumerate(artifact.get("data")):
subtitle.append(f"{i + 1}\n{d.get('start')} --> {d.get('end')}\n{d.get('text').strip()}")
full_text.append(d.get('text').strip())
if not len(subtitle): continue
if self.include_srt: result[f"artifact_{art_id}_subtitle"] = "\n".join(subtitle)
full_text.append(d.get("text").strip())
if not len(subtitle):
continue
if self.include_srt:
result[f"artifact_{art_id}_subtitle"] = "\n".join(subtitle)
result[f"artifact_{art_id}_text"] = "\n".join(full_text)
# call /delete endpoint on timely success
r_del = requests.delete(f'{self.api_endpoint}/jobs/{job_id}', headers={'Authorization': f'Bearer {self.api_key}'})
r_del = requests.delete(
f"{self.api_endpoint}/jobs/{job_id}", headers={"Authorization": f"Bearer {self.api_key}"}
)
logger.debug(f"DELETE whisper {job_id=} result: {r_del.status_code}")
return result
return False


@ -1,7 +1,8 @@
"""Auto Archiver Utilities."""
# we need to explicitly expose the available imports here
from .misc import *
from .webdriver import Webdriver
# handy utils from ytdlp
from yt_dlp.utils import (clean_html, traverse_obj, strip_or_none, url_or_none)
from yt_dlp.utils import clean_html, traverse_obj, strip_or_none, url_or_none


@ -16,22 +16,23 @@ def mkdir_if_not_exists(folder):
def expand_url(url):
# expand short URL links
if 'https://t.co/' in url:
if "https://t.co/" in url:
try:
r = requests.get(url)
logger.debug(f'Expanded url {url} to {r.url}')
logger.debug(f"Expanded url {url} to {r.url}")
return r.url
except:
logger.error(f'Failed to expand url {url}')
except Exception:
logger.error(f"Failed to expand url {url}")
return url
def getattr_or(o: object, prop: str, default=None):
try:
res = getattr(o, prop)
if res is None: raise
if res is None:
raise
return res
except:
except Exception:
return default
@ -66,7 +67,8 @@ def calculate_file_hash(filename: str, hash_algo = hashlib.sha256, chunksize: in
with open(filename, "rb") as f:
while True:
buf = f.read(chunksize)
if not buf: break
if not buf:
break
hash.update(buf)
return hash.hexdigest()
@ -95,12 +97,17 @@ def get_timestamp(ts, utc=True, iso=True, dayfirst=True) -> str | datetime | Non
Use dayfirst to signify between date formats which put the date vs month first:
e.g. DD/MM/YYYY vs MM/DD/YYYY
"""
if not ts: return
if not ts:
return
try:
if isinstance(ts, str): ts = parse_dt(ts, dayfirst=dayfirst)
if isinstance(ts, (int, float)): ts = datetime.fromtimestamp(ts)
if utc: ts = ts.replace(tzinfo=timezone.utc)
if iso: return ts.isoformat()
if isinstance(ts, str):
ts = parse_dt(ts, dayfirst=dayfirst)
if isinstance(ts, (int, float)):
ts = datetime.fromtimestamp(ts)
if utc:
ts = ts.replace(tzinfo=timezone.utc)
if iso:
return ts.isoformat()
return ts
except Exception as e:
logger.error(f"Unable to parse timestamp {ts}: {e}")


@ -14,7 +14,6 @@ def check_url_or_raise(url: str) -> bool | ValueError:
Blocks localhost, private, reserved, and link-local IPs and all non-http/https schemes.
"""
if not (url.startswith("http://") or url.startswith("https://")):
raise ValueError(f"Invalid URL scheme for url {url}")
@ -45,15 +44,18 @@ def check_url_or_raise(url: str) -> bool | ValueError:
return True
def domain_for_url(url: str) -> str:
"""
SECURITY: parse the domain using urllib to avoid any potential security issues
"""
return urlparse(url).netloc
def clean(url: str) -> str:
return url
def is_auth_wall(url: str) -> bool:
"""
checks if URL is behind an authentication wall meaning steps like wayback, wacz, ... may not work
@ -64,13 +66,15 @@ def is_auth_wall(url: str) -> bool:
return False
def remove_get_parameters(url: str) -> str:
# http://example.com/file.mp4?t=1 -> http://example.com/file.mp4
# useful for mimetypes to work
parsed_url = urlparse(url)
new_url = urlunparse(parsed_url._replace(query=''))
new_url = urlunparse(parsed_url._replace(query=""))
return new_url
def is_relevant_url(url: str) -> bool:
"""
Detect if a detected media URL is recurring and therefore irrelevant to a specific archive. Useful, for example, for the enumeration of the media files in WARC files which include profile pictures, favicons, etc.
@ -78,42 +82,59 @@ def is_relevant_url(url: str) -> bool:
clean_url = remove_get_parameters(url)
# favicons
if "favicon" in url: return False
if "favicon" in url:
return False
# ignore icons
if clean_url.endswith(".ico"): return False
if clean_url.endswith(".ico"):
return False
# ignore SVGs
if remove_get_parameters(url).endswith(".svg"): return False
if remove_get_parameters(url).endswith(".svg"):
return False
# twitter profile pictures
if "twimg.com/profile_images" in url: return False
if "twimg.com" in url and "/default_profile_images" in url: return False
if "twimg.com/profile_images" in url:
return False
if "twimg.com" in url and "/default_profile_images" in url:
return False
# instagram profile pictures
if "https://scontent.cdninstagram.com/" in url and "150x150" in url: return False
if "https://scontent.cdninstagram.com/" in url and "150x150" in url:
return False
# instagram recurring images
if "https://static.cdninstagram.com/rsrc.php/" in url: return False
if "https://static.cdninstagram.com/rsrc.php/" in url:
return False
# telegram
if "https://telegram.org/img/emoji/" in url: return False
if "https://telegram.org/img/emoji/" in url:
return False
# youtube
if "https://www.youtube.com/s/gaming/emoji/" in url: return False
if "https://yt3.ggpht.com" in url and "default-user=" in url: return False
if "https://www.youtube.com/s/search/audio/" in url: return False
if "https://www.youtube.com/s/gaming/emoji/" in url:
return False
if "https://yt3.ggpht.com" in url and "default-user=" in url:
return False
if "https://www.youtube.com/s/search/audio/" in url:
return False
# ok
if " https://ok.ru/res/i/" in url: return False
if " https://ok.ru/res/i/" in url:
return False
# vk
if "https://vk.com/emoji/" in url: return False
if "vk.com/images/" in url: return False
if "vk.com/images/reaction/" in url: return False
if "https://vk.com/emoji/" in url:
return False
if "vk.com/images/" in url:
return False
if "vk.com/images/reaction/" in url:
return False
# wikipedia
if "wikipedia.org/static" in url: return False
if "wikipedia.org/static" in url:
return False
return True
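A few example calls against the patterns checked above (assumes is_relevant_url as defined in this file):

is_relevant_url("https://example.com/favicon.ico")                   # False: favicon / .ico
is_relevant_url("https://pbs.twimg.com/profile_images/123/me.jpg")   # False: twitter profile picture
is_relevant_url("https://example.com/uploads/video.mp4?t=1")         # True: actual media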
def twitter_best_quality_url(url: str) -> str:
"""
some twitter image URLs point to a less-than best quality

View file

@ -1,4 +1,5 @@
"""This Webdriver class acts as a context manager for the selenium webdriver."""
from __future__ import annotations
import os
@ -19,15 +20,14 @@ from loguru import logger
class CookieSettingDriver(webdriver.Firefox):
facebook_accept_cookies: bool
cookies: str
cookiejar: MozillaCookieJar
def __init__(self, cookies, cookiejar, facebook_accept_cookies, *args, **kwargs):
if os.environ.get('RUNNING_IN_DOCKER'):
if os.environ.get("RUNNING_IN_DOCKER"):
# Selenium doesn't provide a linux-aarch64 driver, so we need to set the geckodriver path manually
kwargs['service'] = webdriver.FirefoxService(executable_path='/usr/local/bin/geckodriver')
kwargs["service"] = webdriver.FirefoxService(executable_path="/usr/local/bin/geckodriver")
super(CookieSettingDriver, self).__init__(*args, **kwargs)
self.cookies = cookies
@ -38,42 +38,43 @@ class CookieSettingDriver(webdriver.Firefox):
if self.cookies or self.cookiejar:
# set up the driver to make it not 'cookie averse' (needs a context/URL)
# get the 'robots.txt' file which should be quick and easy
robots_url = urlunparse(urlparse(url)._replace(path='/robots.txt', query='', fragment=''))
robots_url = urlunparse(urlparse(url)._replace(path="/robots.txt", query="", fragment=""))
super(CookieSettingDriver, self).get(robots_url)
if self.cookies:
# an explicit cookie is set for this site, use that first
for cookie in self.cookies.split(";"):
# each entry is a "name=value" pair; split once so values containing "=" survive
name, value = cookie.strip().split("=", 1)
self.add_cookie({'name': name, 'value': value})
self.add_cookie({"name": name, "value": value})
elif self.cookiejar:
domain = urlparse(url).netloc.removeprefix("www.")  # drop only a literal leading "www."
for cookie in self.cookiejar:
if domain in cookie.domain:
try:
self.add_cookie({
'name': cookie.name,
'value': cookie.value,
'path': cookie.path,
'domain': cookie.domain,
'secure': bool(cookie.secure),
'expiry': cookie.expires
})
self.add_cookie(
{
"name": cookie.name,
"value": cookie.value,
"path": cookie.path,
"domain": cookie.domain,
"secure": bool(cookie.secure),
"expiry": cookie.expires,
}
)
except Exception as e:
logger.warning(f"Failed to add cookie to webdriver: {e}")
if self.facebook_accept_cookies:
try:
logger.debug(f'Trying fb click accept cookie popup.')
logger.debug("Trying fb click accept cookie popup.")
super(CookieSettingDriver, self).get("http://www.facebook.com")
essential_only = self.find_element(By.XPATH, "//span[contains(text(), 'Decline optional cookies')]")
essential_only.click()
logger.debug(f'fb click worked')
logger.debug("fb click worked")
# a linux server needs a short sleep here, otherwise the facebook cookie won't have taken effect and the popup will reappear on the next page
time.sleep(2)
except Exception as e:
logger.warning(f'Failed on fb accept cookies.', e)
logger.warning("Failed on fb accept cookies.", e)
# now get the actual URL
super(CookieSettingDriver, self).get(url)
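For context, the cookiejar iterated in the elif branch earlier in this hunk is annotated as a MozillaCookieJar; a hedged sketch of loading one from a Netscape-format cookies.txt (the file name is an example, not taken from the repo config):

from http.cookiejar import MozillaCookieJar

cookiejar = MozillaCookieJar("cookies.txt")  # example path
cookiejar.load(ignore_discard=True, ignore_expires=True)
for cookie in cookiejar:
    print(cookie.domain, cookie.name, cookie.value, cookie.expires)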
@ -87,9 +88,14 @@ class CookieSettingDriver(webdriver.Firefox):
pass
else:
# for all other sites, try and use some common button text to reject/accept cookies
for text in ["Refuse non-essential cookies", "Decline optional cookies", "Reject additional cookies", "Reject all", "Accept all cookies"]:
for text in [
"Refuse non-essential cookies",
"Decline optional cookies",
"Reject additional cookies",
"Reject all",
"Accept all cookies",
]:
try:
xpath = f"//*[contains(translate(text(), 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz'), '{text.lower()}')]"
WebDriverWait(self, 5).until(EC.element_to_be_clickable((By.XPATH, xpath))).click()
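The XPath built above uses translate() to lower-case the element text so the match is case-insensitive; a brief illustration with one of the button labels:

text = "Reject all"
xpath = f"//*[contains(translate(text(), 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz'), '{text.lower()}')]"
# matches any element whose text contains 'reject all' regardless of case, e.g. 'REJECT ALL' or 'Reject All'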
@ -99,9 +105,16 @@ class CookieSettingDriver(webdriver.Firefox):
class Webdriver:
def __init__(self, width: int, height: int, timeout_seconds: int,
facebook_accept_cookies: bool = False, http_proxy: str = "",
print_options: dict = {}, auth: dict = {}) -> webdriver:
def __init__(
self,
width: int,
height: int,
timeout_seconds: int,
facebook_accept_cookies: bool = False,
http_proxy: str = "",
print_options: dict = {},
auth: dict = {},
) -> webdriver:
self.width = width
self.height = height
self.timeout_seconds = timeout_seconds
@ -116,20 +129,26 @@ class Webdriver:
def __enter__(self) -> webdriver:
options = webdriver.FirefoxOptions()
options.add_argument("--headless")
options.add_argument(f'--proxy-server={self.http_proxy}')
options.set_preference('network.protocol-handler.external.tg', False)
options.add_argument(f"--proxy-server={self.http_proxy}")
options.set_preference("network.protocol-handler.external.tg", False)
# if the facebook cookie popup is present, force the browser to English, since it's then easier to click the 'Decline optional cookies' option
if self.facebook_accept_cookies:
options.add_argument('--lang=en')
options.add_argument("--lang=en")
try:
self.driver = CookieSettingDriver(cookies=self.auth.get('cookies'), cookiejar=self.auth.get('cookies_jar'),
facebook_accept_cookies=self.facebook_accept_cookies, options=options)
self.driver = CookieSettingDriver(
cookies=self.auth.get("cookies"),
cookiejar=self.auth.get("cookies_jar"),
facebook_accept_cookies=self.facebook_accept_cookies,
options=options,
)
self.driver.set_window_size(self.width, self.height)
self.driver.set_page_load_timeout(self.timeout_seconds)
self.driver.print_options = self.print_options
except selenium_exceptions.TimeoutException as e:
logger.error(f"failed to get new webdriver, possibly due to insufficient system resources or timeout settings: {e}")
logger.error(
f"failed to get new webdriver, possibly due to insufficient system resources or timeout settings: {e}"
)
return self.driver
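A hedged usage sketch of the context manager; the argument values are illustrative only and the page/output names are hypothetical.

with Webdriver(width=1280, height=1024, timeout_seconds=120, http_proxy="", print_options={}, auth={}) as driver:
    driver.get("https://example.com")        # CookieSettingDriver.get() handles cookies first
    driver.save_screenshot("example.png")    # standard selenium API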

View file

@ -2,6 +2,7 @@
TODO: This is a placeholder to replicate previous versioning.
"""
from importlib.metadata import version as get_version
VERSION_SHORT = get_version("auto_archiver")

Some files were not shown because too many files have changed in this diff.