Merge branch 'main' into add-leaderboard-metadata

pull/968/head
Andrey 2024-01-25 04:51:05 +02:00
commit b2e67ffec6
37 zmienionych plików z 2189 dodań i 480 usunięć

Wyświetl plik

@ -0,0 +1,77 @@
#!/usr/bin/env bash

# Deployment script of monitoring services - intended to run on Moonstream crawlers server

# Abort immediately on any command failure or use of an unset variable.
# (Hoisted to the top so failures in the setup section below are also caught.)
set -eu

# Colors
C_RESET='\033[0m'
C_RED='\033[1;31m'
C_GREEN='\033[1;32m'
C_YELLOW='\033[1;33m'

# Logs
# NOTE: the timestamp is captured once, when each prefix is defined,
# not per log message.
PREFIX_INFO="${C_GREEN}[INFO]${C_RESET} [$(date +%d-%m\ %T)]"
PREFIX_WARN="${C_YELLOW}[WARN]${C_RESET} [$(date +%d-%m\ %T)]"
PREFIX_CRIT="${C_RED}[CRIT]${C_RESET} [$(date +%d-%m\ %T)]"

# Main
AWS_DEFAULT_REGION="${AWS_DEFAULT_REGION:-us-east-1}"
SECRETS_DIR="${SECRETS_DIR:-/home/ubuntu/moonstream-secrets}"
PARAMETERS_ENV_MONITORING_PATH="${SECRETS_DIR}/monitoring.env"
# Quote both substitutions so a path containing spaces cannot word-split.
SCRIPT_DIR="$(realpath "$(dirname "$0")")"

# Service files
MONITORING_CRAWLERS_SERVICE_FILE="monitoring-crawlers.service"

echo
echo
echo -e "${PREFIX_INFO} Install checkenv"
HOME=/home/ubuntu /usr/local/go/bin/go install github.com/bugout-dev/checkenv@latest

echo
echo
echo -e "${PREFIX_INFO} Copy monitoring binary from AWS S3"
aws s3 cp s3://bugout-binaries/prod/monitoring/monitoring "/home/ubuntu/monitoring"
chmod +x "/home/ubuntu/monitoring"
chown ubuntu:ubuntu "/home/ubuntu/monitoring"

echo
echo
echo -e "${PREFIX_INFO} Retrieving monitoring deployment parameters"
AWS_DEFAULT_REGION="${AWS_DEFAULT_REGION}" /home/ubuntu/go/bin/checkenv show aws_ssm+service:true,monitoring:true > "${PARAMETERS_ENV_MONITORING_PATH}"
# Secrets file: owner read/write, group read only.
chmod 0640 "${PARAMETERS_ENV_MONITORING_PATH}"

echo
echo
echo -e "${PREFIX_INFO} Add instance local IP to monitoring parameters"
echo "AWS_LOCAL_IPV4=$(ec2metadata --local-ipv4)" >> "${PARAMETERS_ENV_MONITORING_PATH}"

echo
echo
echo -e "${PREFIX_INFO} Add AWS default region to monitoring parameters"
echo "AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION}" >> "${PARAMETERS_ENV_MONITORING_PATH}"

echo
echo
echo -e "${PREFIX_INFO} Prepare monitoring configuration"
if [ ! -d "/home/ubuntu/.monitoring" ]; then
  mkdir -p /home/ubuntu/.monitoring
  echo -e "${PREFIX_WARN} Created monitoring configuration directory"
fi
cp "${SCRIPT_DIR}/monitoring-crawlers-config.json" /home/ubuntu/.monitoring/monitoring-crawlers-config.json

echo
echo
if [ ! -d "/home/ubuntu/.config/systemd/user/" ]; then
  mkdir -p /home/ubuntu/.config/systemd/user/
  echo -e "${PREFIX_WARN} Created user systemd directory"
fi

echo
echo
echo -e "${PREFIX_INFO} Replacing existing systemd crawlers monitoring service definition with ${MONITORING_CRAWLERS_SERVICE_FILE}"
chmod 644 "${SCRIPT_DIR}/${MONITORING_CRAWLERS_SERVICE_FILE}"
cp "${SCRIPT_DIR}/${MONITORING_CRAWLERS_SERVICE_FILE}" "/home/ubuntu/.config/systemd/user/${MONITORING_CRAWLERS_SERVICE_FILE}"
# Talk to the ubuntu user's systemd instance (uid 1000) from a root/cloud-init context.
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user daemon-reload
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user restart "${MONITORING_CRAWLERS_SERVICE_FILE}"

Wyświetl plik

@ -29,7 +29,6 @@ MOONCRAWL_SERVICE_FILE="mooncrawl.service"
LEADERBOARDS_WORKER_SERVICE_FILE="leaderboards-worker.service"
LEADERBOARDS_WORKER_TIMER_FILE="leaderboards-worker.timer"
# Ethereum service files
ETHEREUM_SYNCHRONIZE_SERVICE_FILE="ethereum-synchronize.service"
ETHEREUM_TRENDING_SERVICE_FILE="ethereum-trending.service"
@ -48,8 +47,6 @@ ETHEREUM_HISTORICAL_CRAWL_EVENTS_TIMER_FILE="ethereum-historical-crawl-events.ti
POLYGON_SYNCHRONIZE_SERVICE="polygon-synchronize.service"
POLYGON_MISSING_SERVICE_FILE="polygon-missing.service"
POLYGON_MISSING_TIMER_FILE="polygon-missing.timer"
POLYGON_STATISTICS_SERVICE_FILE="polygon-statistics.service"
POLYGON_STATISTICS_TIMER_FILE="polygon-statistics.timer"
POLYGON_MOONWORM_CRAWLER_SERVICE_FILE="polygon-moonworm-crawler.service"
POLYGON_STATE_SERVICE_FILE="polygon-state.service"
POLYGON_STATE_TIMER_FILE="polygon-state.timer"
@ -57,8 +54,6 @@ POLYGON_STATE_CLEAN_SERVICE_FILE="polygon-state-clean.service"
POLYGON_STATE_CLEAN_TIMER_FILE="polygon-state-clean.timer"
POLYGON_METADATA_SERVICE_FILE="polygon-metadata.service"
POLYGON_METADATA_TIMER_FILE="polygon-metadata.timer"
POLYGON_CU_REPORTS_TOKENONOMICS_SERVICE_FILE="polygon-cu-reports-tokenonomics.service"
POLYGON_CU_REPORTS_TOKENONOMICS_TIMER_FILE="polygon-cu-reports-tokenonomics.timer"
POLYGON_CU_NFT_DASHBOARD_SERVICE_FILE="polygon-cu-nft-dashboard.service"
POLYGON_CU_NFT_DASHBOARD_TIMER_FILE="polygon-cu-nft-dashboard.timer"
POLYGON_HISTORICAL_CRAWL_TRANSACTIONS_SERVICE_FILE="polygon-historical-crawl-transactions.service"
@ -87,8 +82,6 @@ MUMBAI_HISTORICAL_CRAWL_EVENTS_TIMER_FILE="mumbai-historical-crawl-events.timer"
XDAI_SYNCHRONIZE_SERVICE="xdai-synchronize.service"
XDAI_MISSING_SERVICE_FILE="xdai-missing.service"
XDAI_MISSING_TIMER_FILE="xdai-missing.timer"
XDAI_STATISTICS_SERVICE_FILE="xdai-statistics.service"
XDAI_STATISTICS_TIMER_FILE="xdai-statistics.timer"
XDAI_MOONWORM_CRAWLER_SERVICE_FILE="xdai-moonworm-crawler.service"
XDai_HISTORICAL_CRAWL_TRANSACTIONS_SERVICE_FILE="xdai-historical-crawl-transactions.service"
XDai_HISTORICAL_CRAWL_TRANSACTIONS_TIMER_FILE="xdai-historical-crawl-transactions.timer"
@ -99,8 +92,6 @@ XDai_HISTORICAL_CRAWL_EVENTS_TIMER_FILE="xdai-historical-crawl-events.timer"
WYRM_SYNCHRONIZE_SERVICE="wyrm-synchronize.service"
WYRM_MISSING_SERVICE_FILE="wyrm-missing.service"
WYRM_MISSING_TIMER_FILE="wyrm-missing.timer"
WYRM_STATISTICS_SERVICE_FILE="wyrm-statistics.service"
WYRM_STATISTICS_TIMER_FILE="wyrm-statistics.timer"
WYRM_MOONWORM_CRAWLER_SERVICE_FILE="wyrm-moonworm-crawler.service"
WYRM_HISTORICAL_CRAWL_TRANSACTIONS_SERVICE_FILE="wyrm-historical-crawl-transactions.service"
WYRM_HISTORICAL_CRAWL_TRANSACTIONS_TIMER_FILE="wyrm-historical-crawl-transactions.timer"
@ -116,6 +107,10 @@ ZKSYNC_ERA_HISTORICAL_CRAWL_TRANSACTIONS_SERVICE_FILE="zksync-era-historical-cra
ZKSYNC_ERA_HISTORICAL_CRAWL_TRANSACTIONS_TIMER_FILE="zksync-era-historical-crawl-transactions.timer"
ZKSYNC_ERA_HISTORICAL_CRAWL_EVENTS_SERVICE_FILE="zksync-era-historical-crawl-events.service"
ZKSYNC_ERA_HISTORICAL_CRAWL_EVENTS_TIMER_FILE="zksync-era-historical-crawl-events.timer"
ZKSYNC_ERA_STATE_SERVICE_FILE="zksync-era-state.service"
ZKSYNC_ERA_STATE_TIMER_FILE="zksync-era-state.timer"
ZKSYNC_ERA_STATE_CLEAN_SERVICE_FILE="zksync-era-state-clean.service"
ZKSYNC_ERA_STATE_CLEAN_TIMER_FILE="zksync-era-state-clean.timer"
# ZkSync Era testnet
ZKSYNC_ERA_TESTNET_SYNCHRONIZE_SERVICE="zksync-era-testnet-synchronize.service"
@ -129,14 +124,6 @@ ZKSYNC_ERA_TESTNET_HISTORICAL_CRAWL_EVENTS_TIMER_FILE="zksync-era-testnet-histor
set -eu
echo
echo
echo -e "${PREFIX_INFO} Building executable Ethereum transaction pool crawler script with Go"
EXEC_DIR=$(pwd)
cd "${APP_CRAWLERS_DIR}/txpool"
HOME=/home/ubuntu /usr/local/go/bin/go build -o "${APP_CRAWLERS_DIR}/txpool/txpool" "${APP_CRAWLERS_DIR}/txpool/main.go"
cd "${EXEC_DIR}"
echo
echo
echo -e "${PREFIX_INFO} Upgrading Python pip and setuptools"
@ -154,8 +141,11 @@ HOME=/home/ubuntu /usr/local/go/bin/go install github.com/bugout-dev/checkenv@la
echo
echo
echo -e "${PREFIX_INFO} Retrieving addition deployment parameters"
mkdir -p "${SECRETS_DIR}"
echo -e "${PREFIX_INFO} Retrieving deployment parameters"
if [ ! -d "${SECRETS_DIR}" ]; then
mkdir -p "${SECRETS_DIR}"
echo -e "${PREFIX_WARN} Created new secrets directory"
fi
AWS_DEFAULT_REGION="${AWS_DEFAULT_REGION}" /home/ubuntu/go/bin/checkenv show aws_ssm+moonstream:true > "${PARAMETERS_ENV_PATH}"
chmod 0640 "${PARAMETERS_ENV_PATH}"
@ -164,6 +154,13 @@ echo
echo -e "${PREFIX_INFO} Add instance local IP to parameters"
echo "AWS_LOCAL_IPV4=$(ec2metadata --local-ipv4)" >> "${PARAMETERS_ENV_PATH}"
echo
echo
if [ ! -d "/home/ubuntu/.config/systemd/user/" ]; then
mkdir -p /home/ubuntu/.config/systemd/user/
echo -e "${PREFIX_WARN} Created user systemd directory"
fi
echo
echo
echo -e "${PREFIX_INFO} Replacing existing Moonstream crawlers HTTP API server service definition with ${MOONCRAWL_SERVICE_FILE}"
@ -251,14 +248,6 @@ cp "${SCRIPT_DIR}/${POLYGON_MISSING_TIMER_FILE}" "/home/ubuntu/.config/systemd/u
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user daemon-reload
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user restart --no-block "${POLYGON_MISSING_TIMER_FILE}"
echo
echo
echo -e "${PREFIX_INFO} Replacing existing Polygon statistics dashbord service and timer with: ${POLYGON_STATISTICS_SERVICE_FILE}, ${POLYGON_STATISTICS_TIMER_FILE}"
chmod 644 "${SCRIPT_DIR}/${POLYGON_STATISTICS_SERVICE_FILE}" "${SCRIPT_DIR}/${POLYGON_STATISTICS_TIMER_FILE}"
cp "${SCRIPT_DIR}/${POLYGON_STATISTICS_SERVICE_FILE}" "/home/ubuntu/.config/systemd/user/${POLYGON_STATISTICS_SERVICE_FILE}"
cp "${SCRIPT_DIR}/${POLYGON_STATISTICS_TIMER_FILE}" "/home/ubuntu/.config/systemd/user/${POLYGON_STATISTICS_TIMER_FILE}"
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user daemon-reload
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user restart --no-block "${POLYGON_STATISTICS_TIMER_FILE}"
echo
echo
@ -295,14 +284,6 @@ cp "${SCRIPT_DIR}/${POLYGON_METADATA_TIMER_FILE}" "/home/ubuntu/.config/systemd/
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user daemon-reload
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user restart --no-block "${POLYGON_METADATA_TIMER_FILE}"
echo
echo
echo -e "${PREFIX_INFO} Replacing existing Polygon CU reports tokenonomics service and timer with: ${POLYGON_CU_REPORTS_TOKENONOMICS_SERVICE_FILE}, ${POLYGON_CU_REPORTS_TOKENONOMICS_TIMER_FILE}"
chmod 644 "${SCRIPT_DIR}/${POLYGON_CU_REPORTS_TOKENONOMICS_SERVICE_FILE}" "${SCRIPT_DIR}/${POLYGON_CU_REPORTS_TOKENONOMICS_TIMER_FILE}"
cp "${SCRIPT_DIR}/${POLYGON_CU_REPORTS_TOKENONOMICS_SERVICE_FILE}" "/home/ubuntu/.config/systemd/user/${POLYGON_CU_REPORTS_TOKENONOMICS_SERVICE_FILE}"
cp "${SCRIPT_DIR}/${POLYGON_CU_REPORTS_TOKENONOMICS_TIMER_FILE}" "/home/ubuntu/.config/systemd/user/${POLYGON_CU_REPORTS_TOKENONOMICS_TIMER_FILE}"
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user daemon-reload
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user restart --no-block "${POLYGON_CU_REPORTS_TOKENONOMICS_TIMER_FILE}"
echo
echo
@ -418,14 +399,6 @@ cp "${SCRIPT_DIR}/${XDAI_MISSING_TIMER_FILE}" "/home/ubuntu/.config/systemd/user
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user daemon-reload
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user restart --no-block "${XDAI_MISSING_TIMER_FILE}"
echo
echo
echo -e "${PREFIX_INFO} Replacing existing XDai statistics dashbord service and timer with: ${XDAI_STATISTICS_SERVICE_FILE}, ${XDAI_STATISTICS_TIMER_FILE}"
chmod 644 "${SCRIPT_DIR}/${XDAI_STATISTICS_SERVICE_FILE}" "${SCRIPT_DIR}/${XDAI_STATISTICS_TIMER_FILE}"
cp "${SCRIPT_DIR}/${XDAI_STATISTICS_SERVICE_FILE}" "/home/ubuntu/.config/systemd/user/${XDAI_STATISTICS_SERVICE_FILE}"
cp "${SCRIPT_DIR}/${XDAI_STATISTICS_TIMER_FILE}" "/home/ubuntu/.config/systemd/user/${XDAI_STATISTICS_TIMER_FILE}"
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user daemon-reload
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user restart --no-block "${XDAI_STATISTICS_TIMER_FILE}"
echo
echo
@ -470,15 +443,6 @@ cp "${SCRIPT_DIR}/${WYRM_MISSING_TIMER_FILE}" "/home/ubuntu/.config/systemd/user
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user daemon-reload
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user restart --no-block "${WYRM_MISSING_TIMER_FILE}"
echo
echo
echo -e "${PREFIX_INFO} Replacing existing Wyrm statistics dashbord service and timer with: ${WYRM_STATISTICS_SERVICE_FILE}, ${WYRM_STATISTICS_TIMER_FILE}"
chmod 644 "${SCRIPT_DIR}/${WYRM_STATISTICS_SERVICE_FILE}" "${SCRIPT_DIR}/${WYRM_STATISTICS_TIMER_FILE}"
cp "${SCRIPT_DIR}/${WYRM_STATISTICS_SERVICE_FILE}" "/home/ubuntu/.config/systemd/user/${WYRM_STATISTICS_SERVICE_FILE}"
cp "${SCRIPT_DIR}/${WYRM_STATISTICS_TIMER_FILE}" "/home/ubuntu/.config/systemd/user/${WYRM_STATISTICS_TIMER_FILE}"
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user daemon-reload
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user restart --no-block "${WYRM_STATISTICS_TIMER_FILE}"
echo
echo
echo -e "${PREFIX_INFO} Replacing existing Wyrm moonworm crawler service definition with ${WYRM_MOONWORM_CRAWLER_SERVICE_FILE}"
@ -549,6 +513,23 @@ cp "${SCRIPT_DIR}/${ZKSYNC_ERA_HISTORICAL_CRAWL_EVENTS_TIMER_FILE}" "/home/ubunt
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user daemon-reload
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user restart --no-block "${ZKSYNC_ERA_HISTORICAL_CRAWL_EVENTS_TIMER_FILE}"
echo
echo
echo -e "${PREFIX_INFO} Replacing existing ZkSync Era state service and timer with: ${ZKSYNC_ERA_STATE_SERVICE_FILE}, ${ZKSYNC_ERA_STATE_TIMER_FILE}"
chmod 644 "${SCRIPT_DIR}/${ZKSYNC_ERA_STATE_SERVICE_FILE}" "${SCRIPT_DIR}/${ZKSYNC_ERA_STATE_TIMER_FILE}"
cp "${SCRIPT_DIR}/${ZKSYNC_ERA_STATE_SERVICE_FILE}" "/home/ubuntu/.config/systemd/user/${ZKSYNC_ERA_STATE_SERVICE_FILE}"
cp "${SCRIPT_DIR}/${ZKSYNC_ERA_STATE_TIMER_FILE}" "/home/ubuntu/.config/systemd/user/${ZKSYNC_ERA_STATE_TIMER_FILE}"
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user daemon-reload
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user restart --no-block "${ZKSYNC_ERA_STATE_TIMER_FILE}"
echo
echo
echo -e "${PREFIX_INFO} Replacing existing ZkSync Era state clean service and timer with: ${ZKSYNC_ERA_STATE_CLEAN_SERVICE_FILE}, ${ZKSYNC_ERA_STATE_CLEAN_TIMER_FILE}"
chmod 644 "${SCRIPT_DIR}/${ZKSYNC_ERA_STATE_CLEAN_SERVICE_FILE}" "${SCRIPT_DIR}/${ZKSYNC_ERA_STATE_CLEAN_TIMER_FILE}"
cp "${SCRIPT_DIR}/${ZKSYNC_ERA_STATE_CLEAN_SERVICE_FILE}" "/home/ubuntu/.config/systemd/user/${ZKSYNC_ERA_STATE_CLEAN_SERVICE_FILE}"
cp "${SCRIPT_DIR}/${ZKSYNC_ERA_STATE_CLEAN_TIMER_FILE}" "/home/ubuntu/.config/systemd/user/${ZKSYNC_ERA_STATE_CLEAN_TIMER_FILE}"
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user daemon-reload
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user restart --no-block "${ZKSYNC_ERA_STATE_CLEAN_TIMER_FILE}"
# ZkSync Era testnet
echo
@ -602,4 +583,4 @@ chmod 644 "${SCRIPT_DIR}/${LEADERBOARDS_WORKER_SERVICE_FILE}" "${SCRIPT_DIR}/${L
cp "${SCRIPT_DIR}/${LEADERBOARDS_WORKER_SERVICE_FILE}" "/home/ubuntu/.config/systemd/user/${LEADERBOARDS_WORKER_SERVICE_FILE}"
cp "${SCRIPT_DIR}/${LEADERBOARDS_WORKER_TIMER_FILE}" "/home/ubuntu/.config/systemd/user/${LEADERBOARDS_WORKER_TIMER_FILE}"
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user daemon-reload
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user restart --no-block "${LEADERBOARDS_WORKER_TIMER_FILE}"
XDG_RUNTIME_DIR="/run/user/1000" systemctl --user restart --no-block "${LEADERBOARDS_WORKER_TIMER_FILE}"

Wyświetl plik

@ -0,0 +1,7 @@
{
"notification_pagerduty": true,
"notification_telegram": true,
"notification_sendgrid": true,
"notification_humbug": true,
"silent": []
}

Wyświetl plik

@ -0,0 +1,17 @@
[Unit]
# Watchdog unit: runs the monitoring binary against moonworm crawler services.
Description=Monitor crawlers systemd state
# At most 3 start attempts within a 300s window before systemd gives up.
StartLimitIntervalSec=300
StartLimitBurst=3
After=network.target
[Service]
Restart=on-failure
RestartSec=15s
WorkingDirectory=/home/ubuntu/
# AWS_LOCAL_IPV4 is appended to this file by the deployment script.
EnvironmentFile=/home/ubuntu/moonstream-secrets/monitoring.env
ExecStart=/home/ubuntu/monitoring -plugin systemd -host "${AWS_LOCAL_IPV4}" -port 7171 -healthcheck -server -threshold 3 -config /home/ubuntu/.monitoring/monitoring-crawlers-config.json -service ethereum-moonworm-crawler.service -service mumbai-moonworm-crawler.service -service polygon-moonworm-crawler.service -service zksync-era-moonworm-crawler.service
CPUWeight=90
SyslogIdentifier=monitoring-crawlers
[Install]
# NOTE(review): this unit is deployed into a *user* systemd instance
# (~/.config/systemd/user/), where multi-user.target does not exist;
# default.target is the conventional WantedBy for user units — confirm
# whether this unit is ever `systemctl --user enable`d.
WantedBy=multi-user.target

Wyświetl plik

@ -1,11 +0,0 @@
[Unit]
Description=Runs custom crawler for CU tokenonomics
After=network.target
[Service]
Type=oneshot
WorkingDirectory=/home/ubuntu/moonstream/crawlers/mooncrawl
EnvironmentFile=/home/ubuntu/moonstream-secrets/app.env
ExecStart=/home/ubuntu/moonstream-env/bin/python -m mooncrawl.reports_crawler.cli cu-reports --moonstream-token "${MOONSTREAM_PUBLIC_QUERIES_DATA_ACCESS_TOKEN}" queries run-tokenonomics
CPUWeight=60
SyslogIdentifier=polygon-cu-reports-tokenonomics

Wyświetl plik

@ -1,9 +0,0 @@
[Unit]
Description=Runs custom crawler for CU tokenonomics
[Timer]
OnBootSec=60s
OnUnitActiveSec=60m
[Install]
WantedBy=timers.target

Wyświetl plik

@ -1,11 +0,0 @@
[Unit]
Description=Update Polygon statistics dashboards
After=network.target
[Service]
Type=oneshot
WorkingDirectory=/home/ubuntu/moonstream/crawlers/mooncrawl
EnvironmentFile=/home/ubuntu/moonstream-secrets/app.env
ExecStart=/home/ubuntu/moonstream-env/bin/python -m mooncrawl.stats_worker.dashboard --access-id "${NB_CONTROLLER_ACCESS_ID}" generate --blockchain polygon
CPUWeight=60
SyslogIdentifier=polygon-statistics

Wyświetl plik

@ -1,9 +0,0 @@
[Unit]
Description=Update Polygon statistics dashboards each 6 hours
[Timer]
OnBootSec=20s
OnUnitActiveSec=6h
[Install]
WantedBy=timers.target

Wyświetl plik

@ -1,9 +0,0 @@
[Unit]
Description=Update Wyrm statistics dashboards each 6 hours
[Timer]
OnBootSec=25s
OnUnitActiveSec=6h
[Install]
WantedBy=timers.target

Wyświetl plik

@ -1,11 +0,0 @@
[Unit]
Description=Update XDai statistics dashboards
After=network.target
[Service]
Type=oneshot
WorkingDirectory=/home/ubuntu/moonstream/crawlers/mooncrawl
EnvironmentFile=/home/ubuntu/moonstream-secrets/app.env
ExecStart=/home/ubuntu/moonstream-env/bin/python -m mooncrawl.stats_worker.dashboard --access-id "${NB_CONTROLLER_ACCESS_ID}" generate --blockchain xdai
CPUWeight=60
SyslogIdentifier=xdai-statistics

Wyświetl plik

@ -1,9 +0,0 @@
[Unit]
Description=Update XDai statistics dashboards each 6 hours
[Timer]
OnBootSec=25s
OnUnitActiveSec=6h
[Install]
WantedBy=timers.target

Wyświetl plik

@ -1,11 +1,11 @@
[Unit]
Description=Update Wyrm statistics dashboards
Description=Execute state clean labels crawler
After=network.target
[Service]
Type=oneshot
WorkingDirectory=/home/ubuntu/moonstream/crawlers/mooncrawl
EnvironmentFile=/home/ubuntu/moonstream-secrets/app.env
ExecStart=/home/ubuntu/moonstream-env/bin/python -m mooncrawl.stats_worker.dashboard --access-id "${NB_CONTROLLER_ACCESS_ID}" generate --blockchain wyrm
ExecStart=/home/ubuntu/moonstream-env/bin/python -m mooncrawl.state_crawler.cli --access-id "${NB_CONTROLLER_ACCESS_ID}" clean-state-labels --blockchain zksync_era -N 10000
CPUWeight=60
SyslogIdentifier=wyrm-statistics
SyslogIdentifier=zksync-era-state-clean

Wyświetl plik

@ -0,0 +1,9 @@
[Unit]
Description=Execute Zksync Era state clean labels crawler each 25m
[Timer]
# First run 50s after boot, then every 25 minutes after each activation.
OnBootSec=50s
OnUnitActiveSec=25m
[Install]
WantedBy=timers.target

Wyświetl plik

@ -0,0 +1,11 @@
[Unit]
# One-shot job (driven by zksync-era-state.timer): runs the mooncrawl
# state crawler for the zksync_era blockchain.
Description=Execute state crawler
After=network.target
[Service]
Type=oneshot
WorkingDirectory=/home/ubuntu/moonstream/crawlers/mooncrawl
# Provides NB_CONTROLLER_ACCESS_ID and MOONSTREAM_PUBLIC_QUERIES_DATA_ACCESS_TOKEN.
EnvironmentFile=/home/ubuntu/moonstream-secrets/app.env
ExecStart=/home/ubuntu/moonstream-env/bin/python -m mooncrawl.state_crawler.cli --access-id "${NB_CONTROLLER_ACCESS_ID}" crawl-jobs --moonstream-token "${MOONSTREAM_PUBLIC_QUERIES_DATA_ACCESS_TOKEN}" --blockchain zksync_era --jobs-file /home/ubuntu/moonstream/crawlers/mooncrawl/mooncrawl/state_crawler/jobs/zksync-era-jobs.json
CPUWeight=60
SyslogIdentifier=zksync-era-state

Wyświetl plik

@ -0,0 +1,9 @@
[Unit]
Description=Execute Zksync Era state crawler each 10m
[Timer]
# First run 15s after boot, then every 10 minutes after each activation.
OnBootSec=15s
OnUnitActiveSec=10m
[Install]
WantedBy=timers.target

Wyświetl plik

@ -7,12 +7,14 @@ import uuid
import requests # type: ignore
from bugout.data import BugoutSearchResult
from .utils import get_results_for_moonstream_query
from .utils import get_results_for_moonstream_query, leaderboard_push_batch
from ..settings import (
MOONSTREAM_ADMIN_ACCESS_TOKEN,
MOONSTREAM_LEADERBOARD_GENERATOR_JOURNAL_ID,
MOONSTREAM_API_URL,
MOONSTREAM_ENGINE_URL,
MOONSTREAM_LEADERBOARD_GENERATOR_BATCH_SIZE,
MOONSTREAM_LEADERBOARD_GENERATOR_PUSH_TIMEOUT_SECONDS,
)
from ..settings import bugout_client as bc
@ -35,10 +37,15 @@ def handle_leaderboards(args: argparse.Namespace) -> None:
### get leaderboard journal
leaderboard_push_batch_size = args.leaderboard_push_batch_size
leaderboard_push_timeout_seconds = args.leaderboard_push_timeout_seconds
query = "#leaderboard #status:active"
if args.leaderboard_id: # way to run only one leaderboard
query += f" #leaderboard_id:{args.leaderboard_id}"
if args.leaderboard_id: # way to run only one leaderboard without status:active
query = f"#leaderboard #leaderboard_id:{args.leaderboard_id}"
try:
leaderboards = bc.search(
token=MOONSTREAM_ADMIN_ACCESS_TOKEN,
@ -97,6 +104,7 @@ def handle_leaderboards(args: argparse.Namespace) -> None:
MOONSTREAM_API_URL,
args.max_retries,
args.interval,
args.query_api_retries,
)
except Exception as e:
logger.error(f"Could not get results for query {query_name}: error: {e}")
@ -115,26 +123,33 @@ def handle_leaderboards(args: argparse.Namespace) -> None:
"Content-Type": "application/json",
}
try:
leaderboard_api_response = requests.put(
leaderboard_push_api_url,
json=query_results["data"],
headers=leaderboard_api_headers,
timeout=10,
if len(query_results["data"]) > leaderboard_push_batch_size:
logger.info(
f"Pushing {len(query_results['data'])} scores to leaderboard {leaderboard_id} in batches of {leaderboard_push_batch_size}"
)
except Exception as e:
logger.error(
f"Could not push results to leaderboard API: {e} for leaderboard {leaderboard_id}"
leaderboard_push_batch(
leaderboard_id,
leaderboard_data,
query_results["data"],
leaderboard_api_headers,
leaderboard_push_batch_size,
timeout=leaderboard_push_timeout_seconds,
)
continue
try:
leaderboard_api_response.raise_for_status()
except requests.exceptions.HTTPError as http_error:
logger.error(
f"Could not push results to leaderboard API: {http_error.response.text} with status code {http_error.response.status_code}"
)
continue
else:
try:
leaderboard_api_response = requests.put(
leaderboard_push_api_url,
json=query_results["data"],
headers=leaderboard_api_headers,
timeout=leaderboard_push_timeout_seconds,
)
leaderboard_api_response.raise_for_status()
except requests.exceptions.HTTPError as http_error:
logger.error(
f"Could not push results to leaderboard API: {http_error.response.text} with status code {http_error.response.status_code}"
)
continue
### get leaderboard from leaderboard API
@ -188,6 +203,12 @@ def main():
default=12,
help="Number of times to retry requests for Moonstream Query results",
)
leaderboard_generator_parser.add_argument(
"--query-api-retries",
type=int,
default=3,
help="Number of times to retry updating Moonstream Query data",
)
leaderboard_generator_parser.add_argument(
"--interval",
type=float,
@ -206,6 +227,18 @@ def main():
required=True,
help="Moonstream Access Token to use for Moonstream Query API requests",
)
leaderboard_generator_parser.add_argument(
"--leaderboard-push-batch-size",
type=int,
default=MOONSTREAM_LEADERBOARD_GENERATOR_BATCH_SIZE,
help="Number of scores to push to leaderboard API at once",
)
leaderboard_generator_parser.add_argument(
"--leaderboard-push-timeout-seconds",
type=int,
default=MOONSTREAM_LEADERBOARD_GENERATOR_PUSH_TIMEOUT_SECONDS,
help="Timeout for leaderboard API requests",
)
leaderboard_generator_parser.set_defaults(func=handle_leaderboards)

Wyświetl plik

@ -3,12 +3,17 @@ import json
import logging
import os
import time
from typing import Any, Dict, Optional
from typing import Any, Dict, Optional, List
import requests # type: ignore
from ..settings import MOONSTREAM_API_URL
from ..settings import (
MOONSTREAM_API_URL,
MOONSTREAM_ENGINE_URL,
MOONSTREAM_LEADERBOARD_GENERATOR_BATCH_SIZE,
MOONSTREAM_LEADERBOARD_GENERATOR_PUSH_TIMEOUT_SECONDS,
)
logging.basicConfig()
@ -23,6 +28,7 @@ def get_results_for_moonstream_query(
api_url: str = MOONSTREAM_API_URL,
max_retries: int = 100,
interval: float = 30.0,
query_api_retries: int = 3,
) -> Optional[Dict[str, Any]]:
"""
@ -65,11 +71,11 @@ def get_results_for_moonstream_query(
success = False
attempts = 0
while not success and attempts < max_retries:
attempts += 1
while not success and attempts < query_api_retries:
response = requests.post(
request_url, json=request_body, headers=headers, timeout=10
)
attempts += 1
response.raise_for_status()
response_body = response.json()
data_url = response_body["url"]
@ -100,3 +106,115 @@ def get_results_for_moonstream_query(
keep_going = num_retries <= max_retries
return result
def get_data_from_url(url, timeout: int = 10):
    """
    Fetch and parse a JSON document from the given URL.

    :param url: URL to GET.
    :param timeout: request timeout in seconds (backward-compatible addition;
        previously the request had no timeout and could hang indefinitely).
    :return: parsed JSON body of the response.
    :raises Exception: when the response status code is not 200.
    """
    response = requests.get(url, timeout=timeout)
    if response.status_code == 200:
        return response.json()
    else:
        raise Exception(f"Failed to get data: HTTP {response.status_code}")
def send_data_to_endpoint(chunks, endpoint_url, headers, timeout=10):
    """
    PUT each chunk of score data to endpoint_url, one request per chunk.

    Logs and re-raises the first HTTP error encountered, aborting any
    remaining chunks.
    """
    for index, batch in enumerate(chunks):
        try:
            logger.info(f"Pushing chunk {index} to leaderboard API")
            put_response = requests.put(
                endpoint_url,
                headers=headers,
                json=batch,
                timeout=timeout,
            )
            put_response.raise_for_status()
        except requests.exceptions.HTTPError as http_error:
            logger.error(
                f"Could not push results to leaderboard API: {http_error.response.text} with status code {http_error.response.status_code}"
            )
            raise http_error
def leaderboard_push_batch(
    leaderboard_id: str,
    leaderboard_config: Dict[str, Any],
    data: List[Dict[str, Any]],
    headers: Dict[str, str],
    batch_size: int = MOONSTREAM_LEADERBOARD_GENERATOR_BATCH_SIZE,
    timeout: int = 10,
) -> None:
    """
    Push leaderboard data to the leaderboard API in batches.

    Flow: create an unpublished leaderboard version, push the scores to it
    in chunks of batch_size, publish the version, then delete the previous
    version. Each step logs and returns early on HTTP failure, so a partial
    push never publishes.

    :param leaderboard_id: leaderboard identifier in the Engine API.
    :param leaderboard_config: leaderboard entry metadata; only
        'normalize_addresses' is read here.
    :param data: full list of score records to upload.
    :param headers: auth/content-type headers for every request.
    :param batch_size: number of scores per PUT request.
    :param timeout: per-request timeout in seconds.
    """
    ## first step create leaderboard version
    leaderboard_version_api_url = (
        f"{MOONSTREAM_ENGINE_URL}/leaderboard/{leaderboard_id}/versions"
    )
    json_data = {
        "publish": False,
    }
    # Fix: honor the caller-provided timeout (was hard-coded to 10 here
    # and in the publish step, despite the function accepting a timeout).
    leaderboard_api_response = requests.post(
        leaderboard_version_api_url, json=json_data, headers=headers, timeout=timeout
    )
    try:
        leaderboard_api_response.raise_for_status()
    except requests.exceptions.HTTPError as http_error:
        logger.error(
            f"Could not create leaderboard version: {http_error.response.text} with status code {http_error.response.status_code}"
        )
        return

    leaderboard_version_id = leaderboard_api_response.json()["version"]

    ## second step push data to leaderboard version
    leaderboard_version_push_api_url = f"{MOONSTREAM_ENGINE_URL}/leaderboard/{leaderboard_id}/versions/{leaderboard_version_id}/scores?normalize_addresses={leaderboard_config['normalize_addresses']}&overwrite=false"
    chunks = [data[x : x + batch_size] for x in range(0, len(data), batch_size)]

    # send_data_to_endpoint raises on HTTP error, so a failed chunk aborts
    # before the version is published.
    send_data_to_endpoint(
        chunks, leaderboard_version_push_api_url, headers, timeout=timeout
    )

    ## third step publish leaderboard version
    leaderboard_version_publish_api_url = f"{MOONSTREAM_ENGINE_URL}/leaderboard/{leaderboard_id}/versions/{leaderboard_version_id}"
    json_data = {
        "publish": True,
    }
    try:
        leaderboard_api_response = requests.put(
            leaderboard_version_publish_api_url,
            json=json_data,
            headers=headers,
            timeout=timeout,
        )
        leaderboard_api_response.raise_for_status()
    except requests.exceptions.HTTPError as http_error:
        logger.error(
            f"Could not publish leaderboard version: {http_error.response.text} with status code {http_error.response.status_code}"
        )
        return

    ## delete leaderboard version -1
    # NOTE(review): assumes version ids are consecutive integers so the
    # previous version is leaderboard_version_id - 1 — confirm against the
    # Engine API's versioning contract.
    try:
        leaderboard_version_delete_api_url = f"{MOONSTREAM_ENGINE_URL}/leaderboard/{leaderboard_id}/versions/{leaderboard_version_id - 1}"
        leaderboard_api_response = requests.delete(
            leaderboard_version_delete_api_url,
            headers=headers,
            timeout=timeout,
        )
        leaderboard_api_response.raise_for_status()
    except requests.exceptions.HTTPError as http_error:
        logger.error(
            f"Could not delete leaderboard version: {http_error.response.text} with status code {http_error.response.status_code}"
        )
        return

Wyświetl plik

@ -241,6 +241,7 @@ multicall_contracts: Dict[AvailableBlockchainType, str] = {
AvailableBlockchainType.POLYGON: "0xc8E51042792d7405184DfCa245F2d27B94D013b6",
AvailableBlockchainType.MUMBAI: "0xe9939e7Ea7D7fb619Ac57f648Da7B1D425832631",
AvailableBlockchainType.ETHEREUM: "0x5BA1e12693Dc8F9c48aAD8770482f4739bEeD696",
AvailableBlockchainType.ZKSYNC_ERA: "0xF9cda624FBC7e059355ce98a31693d299FACd963",
}
@ -321,3 +322,7 @@ if MOONSTREAM_LEADERBOARD_GENERATOR_JOURNAL_ID == "":
raise ValueError(
"MOONSTREAM_LEADERBOARD_GENERATOR_JOURNAL_ID environment variable must be set"
)
MOONSTREAM_LEADERBOARD_GENERATOR_BATCH_SIZE = 12000
MOONSTREAM_LEADERBOARD_GENERATOR_PUSH_TIMEOUT_SECONDS = 60

Wyświetl plik

@ -90,7 +90,7 @@
"query_url": "twilight_tactics_players",
"blockchain": "mumbai",
"params": {
"season_id": "1"
"address": "0x665B8Db5b9E3b396e2Ccb0Bd768dc74fC47Ec20D"
},
"keys": [
"player"

Wyświetl plik

@ -1,223 +1,313 @@
[
{
"type": "function",
"stateMutability": "view",
"inputs": [
{
"type": "function",
"stateMutability": "view",
"inputs": [
{
"internalType": "uint256",
"name": "tokenId",
"type": "uint256",
"value": {
"type": "function",
"name": "totalSupply",
"outputs": [
{
"internalType": "uint256",
"name": "tokenId",
"type": "uint256",
"value": {
"type": "function",
"name": "totalSupply",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"address": "0xdC0479CC5BbA033B3e7De9F178607150B3AbCe1f",
"inputs": []
}
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"name": "tokenURI",
"outputs": [
],
"address": "0xdC0479CC5BbA033B3e7De9F178607150B3AbCe1f",
"inputs": []
}
}
],
"name": "tokenURI",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"address": "0xdC0479CC5BbA033B3e7De9F178607150B3AbCe1f"
},
{
"type": "function",
"stateMutability": "view",
"inputs": [
{
"internalType": "uint256",
"name": "tokenId",
"type": "uint256",
"value": {
"type": "queryAPI",
"query_url": "template_erc721_minting",
"blockchain": "polygon",
"params": {
"address": "0xA2a13cE1824F3916fC84C65e559391fc6674e6e8"
},
"keys": [
"token_id"
]
}
}
],
"name": "tokenURI",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"address": "0xA2a13cE1824F3916fC84C65e559391fc6674e6e8"
},
{
"type": "function",
"stateMutability": "view",
"inputs": [
{
"internalType": "uint256",
"name": "tokenId",
"type": "uint256",
"value": {
"type": "function",
"name": "totalSupply",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"address": "0xdC0479CC5BbA033B3e7De9F178607150B3AbCe1f"
},
{
"type": "function",
"stateMutability": "view",
"inputs": [
{
"internalType": "uint256",
"name": "tokenId",
"type": "uint256",
"value": {
"type": "queryAPI",
"query_url": "template_erc721_minting",
"blockchain": "polygon",
"params": {
"address": "0xA2a13cE1824F3916fC84C65e559391fc6674e6e8"
},
"keys": [
"token_id"
]
}
}
],
"name": "tokenURI",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"address": "0xA2a13cE1824F3916fC84C65e559391fc6674e6e8"
},
{
"type": "function",
"stateMutability": "view",
"inputs": [
{
"internalType": "uint256",
"name": "tokenId",
"type": "uint256",
"value": {
"type": "function",
"name": "totalSupply",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"address": "0x20B807b9AF56977EF475C089A0e7977540743560",
"inputs": []
}
}
],
"name": "tokenURI",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"address": "0x20B807b9AF56977EF475C089A0e7977540743560"
},
{
"type": "function",
"stateMutability": "view",
"inputs": [
{
"internalType": "uint256",
"name": "tokenId",
"type": "uint256",
"value": {
"type": "queryAPI",
"query_url": "template_erc1155_token_ids",
"blockchain": "polygon",
"params": {
"address": "0xd4D53d8D61adc3B8114C1cd17B89393640db9733"
},
"keys": [
"token_id"
]
}
}
],
"name": "uri",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"address": "0xd4D53d8D61adc3B8114C1cd17B89393640db9733"
},
{
"type": "function",
"stateMutability": "view",
"inputs": [
{
"internalType": "uint256",
"name": "tokenId",
"type": "uint256",
"value": {
"type": "queryAPI",
"query_url": "template_erc1155_token_ids",
"blockchain": "polygon",
"params": {
"address": "0x74d4567fd8B0b873B61FA180618a82183012F369"
},
"keys": [
"token_id"
]
}
}
],
"name": "uri",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"address": "0x74d4567fd8B0b873B61FA180618a82183012F369"
},
{
"type": "function",
"stateMutability": "view",
"inputs": [
{
"internalType": "uint256",
"name": "tokenId",
"type": "uint256",
"value": {
"type": "queryAPI",
"query_url": "template_erc1155_token_ids",
"blockchain": "polygon",
"params": {
"address": "0x44b3f42e2BF34F62868Ff9e9dAb7C2F807ba97Cb"
},
"keys": [
"token_id"
]
}
}
],
"name": "uri",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"address": "0x44b3f42e2BF34F62868Ff9e9dAb7C2F807ba97Cb"
},
{
"type": "function",
"stateMutability": "view",
"inputs": [
{
"internalType": "uint256",
"name": "tokenId",
"type": "uint256",
"value": {
"type": "queryAPI",
"query_url": "template_erc721_minting",
"blockchain": "polygon",
"params": {
"address": "0xa7D50EE3D7485288107664cf758E877a0D351725"
},
"keys": [
"token_id"
]
}
}
],
"name": "tokenURI",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"address": "0xa7D50EE3D7485288107664cf758E877a0D351725"
}
],
"address": "0x20B807b9AF56977EF475C089A0e7977540743560",
"inputs": []
}
}
],
"name": "tokenURI",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"address": "0x20B807b9AF56977EF475C089A0e7977540743560"
},
{
"type": "function",
"stateMutability": "view",
"inputs": [
{
"internalType": "uint256",
"name": "tokenId",
"type": "uint256",
"value": {
"type": "queryAPI",
"query_url": "template_erc1155_token_ids",
"blockchain": "polygon",
"params": {
"address": "0xd4D53d8D61adc3B8114C1cd17B89393640db9733"
},
"keys": [
"token_id"
]
}
}
],
"name": "uri",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"address": "0xd4D53d8D61adc3B8114C1cd17B89393640db9733"
},
{
"type": "function",
"stateMutability": "view",
"inputs": [
{
"internalType": "uint256",
"name": "tokenId",
"type": "uint256",
"value": {
"type": "queryAPI",
"query_url": "template_erc1155_token_ids",
"blockchain": "polygon",
"params": {
"address": "0x74d4567fd8B0b873B61FA180618a82183012F369"
},
"keys": [
"token_id"
]
}
}
],
"name": "uri",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"address": "0x74d4567fd8B0b873B61FA180618a82183012F369"
},
{
"type": "function",
"stateMutability": "view",
"inputs": [
{
"internalType": "uint256",
"name": "tokenId",
"type": "uint256",
"value": {
"type": "queryAPI",
"query_url": "template_erc1155_token_ids",
"blockchain": "polygon",
"params": {
"address": "0x44b3f42e2BF34F62868Ff9e9dAb7C2F807ba97Cb"
},
"keys": [
"token_id"
]
}
}
],
"name": "uri",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"address": "0x44b3f42e2BF34F62868Ff9e9dAb7C2F807ba97Cb"
},
{
"type": "function",
"stateMutability": "view",
"inputs": [
{
"internalType": "uint256",
"name": "tokenId",
"type": "uint256",
"value": {
"type": "queryAPI",
"query_url": "template_erc721_minting",
"blockchain": "polygon",
"params": {
"address": "0xa7D50EE3D7485288107664cf758E877a0D351725"
},
"keys": [
"token_id"
]
}
}
],
"name": "tokenURI",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"address": "0xa7D50EE3D7485288107664cf758E877a0D351725"
},
{
"inputs": [
{
"internalType": "uint16",
"name": "seasonId",
"type": "uint16",
"value": {
"type": "queryAPI",
"query_url": "twilight_seasons",
"blockchain": "polygon",
"params": {
"address": "0xe570fAC6513A8018145aB398Ea71988C688F22C4"
},
"keys": [
"season_id"
]
}
},
{
"internalType": "address",
"name": "user",
"type": "address",
"value": {
"type": "queryAPI",
"query_url": "twilight_tactics_players",
"blockchain": "polygon",
"params": {
"address": "0xe570fAC6513A8018145aB398Ea71988C688F22C4"
},
"keys": [
"player"
]
}
}
],
"name": "twtGetSeasonalDominationPointsByAccount",
"address": "0xe570fAC6513A8018145aB398Ea71988C688F22C4",
"outputs": [
{
"internalType": "uint56[5]",
"name": "shadowcornDominationPoints",
"type": "uint56[5]"
},
{
"internalType": "uint56[5]",
"name": "unicornDominationPoints",
"type": "uint56[5]"
}
],
"stateMutability": "view",
"type": "function",
"selector": "0x0b4ef829"
},
{
"inputs": [
{
"internalType": "uint16",
"name": "seasonId",
"type": "uint16",
"value": {
"type": "queryAPI",
"query_url": "twilight_seasons",
"blockchain": "polygon",
"params": {
"address": "0xe570fAC6513A8018145aB398Ea71988C688F22C4"
},
"keys": [
"season_id"
]
}
}
],
"name": "twtGetSeasonalDominationPointsForAllRegions",
"address": "0xe570fAC6513A8018145aB398Ea71988C688F22C4",
"outputs": [
{
"internalType": "uint56[5]",
"name": "shadowcornDominationPoints",
"type": "uint56[5]"
},
{
"internalType": "uint56[5]",
"name": "unicornDominationPoints",
"type": "uint56[5]"
}
],
"stateMutability": "view",
"type": "function",
"selector": "0xbddb218c"
}
]

Wyświetl plik

@ -0,0 +1,90 @@
[
{
"constant": true,
"inputs": [],
"name": "getReserves",
"outputs": [
{
"internalType": "uint",
"name": "",
"type": "uint256"
},
{
"internalType": "uint",
"name": "",
"type": "uint256"
}
],
"payable": false,
"address": "0x80115c708E12eDd42E504c1cD52Aea96C547c05c",
"stateMutability": "view",
"type": "function"
},
{
"constant": true,
"inputs": [],
"name": "getReserves",
"outputs": [
{
"internalType": "uint112",
"name": "_reserve0",
"type": "uint112"
},
{
"internalType": "uint112",
"name": "_reserve1",
"type": "uint112"
},
{
"internalType": "uint32",
"name": "_blockTimestampLast",
"type": "uint32"
}
],
"payable": false,
"address": "0xb85feb6aF3412d690DFDA280b73EaED73a2315bC",
"stateMutability": "view",
"type": "function"
},
{
"constant": true,
"inputs": [],
"name": "getReserves",
"outputs": [
{
"internalType": "uint112",
"name": "_reserve0",
"type": "uint112"
},
{
"internalType": "uint112",
"name": "_reserve1",
"type": "uint112"
},
{
"internalType": "uint32",
"name": "_blockTimestampLast",
"type": "uint32"
}
],
"payable": false,
"address": "0xDFAaB828f5F515E104BaaBa4d8D554DA9096f0e4",
"stateMutability": "view",
"type": "function"
},
{
"constant": true,
"inputs": [],
"name": "totalSupply",
"outputs": [
{
"name": "",
"type": "uint256"
}
],
"payable": false,
"stateMutability": "view",
"type": "function",
"address": "0xDFAaB828f5F515E104BaaBa4d8D554DA9096f0e4"
}
]

Wyświetl plik

@ -23,7 +23,7 @@ from ..settings import MOONSTREAM_S3_QUERIES_BUCKET_PREFIX
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
QUERY_REGEX = re.compile("[\[\]@#$%^&?;`/]")
QUERY_REGEX = re.compile(r"[\[\]@#$%^&?;`]|/\*|\*/")
class QueryNotValid(Exception):

Wyświetl plik

@ -62,4 +62,4 @@ class TestQueries(unittest.TestCase):
queries.query_validation("OR(1=1)#")
with self.assertRaises(queries.QueryNotValid):
queries.query_validation("/etc/hosts")
queries.query_validation("0/**/or/**/1")

Wyświetl plik

@ -1,43 +1,9 @@
# lootbox
## `client`
Use lootboxes in your game economy with ready to use contracts
This repository contains a lightweight Python client for the Engine API.
## Deployment
Deployment with local signer server
To use, for example, with Leaderboard API:
```bash
MOONSTREAM_SIGNING_SERVER_IP=127.0.0.1 ./dev.sh
```
## Run frontend
Do from root directory workspace directory:
Engine:
Run dev
```
yarn workspace engine run dev
```
Build
```
yarn workspace engine run build
```
Player:
Run dev
```
yarn workspace player run dev
```
Build
```
yarn workspace player run build
python -m client.leaderboards -h
```

Wyświetl plik

@ -0,0 +1,154 @@
"""Added leaderboard_versions table and corresponding constraints
Revision ID: cc80e886e153
Revises: 040f2dfde5a5
Create Date: 2023-11-08 16:16:39.265150
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "cc80e886e153"
down_revision = "040f2dfde5a5"
branch_labels = None
depends_on = None
def upgrade():
    """Introduce versioned leaderboards.

    Creates the leaderboard_versions table, attaches leaderboard_scores rows
    to a version via a new leaderboard_version_number column, then backfills
    an implicit version 0 for every existing leaderboard so the new column
    can be made non-nullable.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "leaderboard_versions",
        sa.Column("leaderboard_id", sa.UUID(), nullable=False),
        sa.Column("version_number", sa.DECIMAL(), nullable=False),
        sa.Column("published", sa.Boolean(), nullable=False),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            server_default=sa.text("TIMEZONE('utc', statement_timestamp())"),
            nullable=False,
        ),
        sa.Column(
            "updated_at",
            sa.DateTime(timezone=True),
            server_default=sa.text("TIMEZONE('utc', statement_timestamp())"),
            nullable=False,
        ),
        # Versions disappear together with their parent leaderboard.
        sa.ForeignKeyConstraint(
            ["leaderboard_id"],
            ["leaderboards.id"],
            name=op.f("fk_leaderboard_versions_leaderboard_id_leaderboards"),
            ondelete="CASCADE",
        ),
        sa.PrimaryKeyConstraint(
            "leaderboard_id", "version_number", name=op.f("pk_leaderboard_versions")
        ),
        sa.UniqueConstraint(
            "leaderboard_id",
            "version_number",
            name=op.f("uq_leaderboard_versions_leaderboard_id"),
        ),
    )
    op.create_index(
        op.f("ix_leaderboard_versions_created_at"),
        "leaderboard_versions",
        ["created_at"],
        unique=False,
    )
    # Nullable at first; backfilled below, then tightened to NOT NULL.
    op.add_column(
        "leaderboard_scores",
        sa.Column("leaderboard_version_number", sa.DECIMAL(), nullable=True),
    )
    # Scores become unique per (leaderboard, address, version) instead of
    # per (leaderboard, address).
    op.drop_constraint(
        "uq_leaderboard_scores_leaderboard_id", "leaderboard_scores", type_="unique"
    )
    op.create_unique_constraint(
        op.f("uq_leaderboard_scores_leaderboard_id"),
        "leaderboard_scores",
        ["leaderboard_id", "address", "leaderboard_version_number"],
    )
    # Repoint the scores FK from leaderboards to leaderboard_versions.
    op.drop_constraint(
        "fk_leaderboard_scores_leaderboard_id_leaderboards",
        "leaderboard_scores",
        type_="foreignkey",
    )
    op.create_foreign_key(
        op.f("fk_leaderboard_scores_leaderboard_id_leaderboard_versions"),
        "leaderboard_scores",
        "leaderboard_versions",
        ["leaderboard_id", "leaderboard_version_number"],
        ["leaderboard_id", "version_number"],
        ondelete="CASCADE",
    )
    # ### end Alembic commands ###

    # Insert version 0 for all existing leaderboards
    op.execute(
        """
        INSERT INTO leaderboard_versions (leaderboard_id, version_number, published)
        SELECT id, 0, true FROM leaderboards
        """
    )

    # Set the leaderboard_version_number for all existing scores to the version 0
    op.execute(
        """
        UPDATE leaderboard_scores SET leaderboard_version_number = 0
        """
    )

    # Alter leaderboard_scores to make leaderboard_version_number non-nullable
    op.alter_column(
        "leaderboard_scores",
        "leaderboard_version_number",
        nullable=False,
    )
def downgrade():
    """Revert to the un-versioned leaderboard_scores schema.

    Destructive: only scores belonging to each leaderboard's latest published
    version are kept, because the restored (leaderboard_id, address) unique
    constraint cannot hold one address across multiple versions.
    """
    # Delete every score that does not belong to the latest published version
    # of its leaderboard; those rows would violate the restored constraint.
    op.execute(
        """
        WITH latest_version_for_leaderboard AS (
            SELECT leaderboard_id, MAX(version_number) AS latest_version
            FROM leaderboard_versions WHERE published = true
            GROUP BY leaderboard_id
        )
        DELETE FROM leaderboard_scores WHERE
            (leaderboard_id, leaderboard_version_number) NOT IN (
                SELECT
                    leaderboard_id,
                    latest_version AS leaderboard_version_number
                FROM
                    latest_version_for_leaderboard
            )
        """
    )
    # ### commands auto generated by Alembic - please adjust! ###
    # Point the scores FK back at leaderboards directly.
    op.drop_constraint(
        op.f("fk_leaderboard_scores_leaderboard_id_leaderboard_versions"),
        "leaderboard_scores",
        type_="foreignkey",
    )
    op.create_foreign_key(
        "fk_leaderboard_scores_leaderboard_id_leaderboards",
        "leaderboard_scores",
        "leaderboards",
        ["leaderboard_id"],
        ["id"],
        ondelete="CASCADE",
    )
    # Restore uniqueness of (leaderboard_id, address).
    op.drop_constraint(
        op.f("uq_leaderboard_scores_leaderboard_id"),
        "leaderboard_scores",
        type_="unique",
    )
    op.create_unique_constraint(
        "uq_leaderboard_scores_leaderboard_id",
        "leaderboard_scores",
        ["leaderboard_id", "address"],
    )
    op.drop_column("leaderboard_scores", "leaderboard_version_number")
    op.drop_index(
        op.f("ix_leaderboard_versions_created_at"), table_name="leaderboard_versions"
    )
    op.drop_table("leaderboard_versions")
    # ### end Alembic commands ###

Wyświetl plik

Wyświetl plik

@ -0,0 +1,234 @@
import argparse
import json
import os
import sys
from typing import Optional
import uuid
import requests
LEADERBOARD_API_URL = os.environ.get(
"LEADERBOARD_API_URL", "http://localhost:7191/leaderboard/"
)
def moonstream_access_token(value: Optional[str]) -> uuid.UUID:
    """Argparse type converter for the Moonstream API access token.

    Falls back to the MOONSTREAM_ACCESS_TOKEN environment variable when no
    value is supplied, and validates that the token is a well-formed UUID.

    Raises:
        argparse.ArgumentTypeError: when the token is missing or not a UUID.
            (ArgumentTypeError is used instead of ValueError so that argparse
            surfaces the specific message to the user; a plain ValueError is
            swallowed and replaced by a generic "invalid value" error.)
    """
    if value is None:
        value = os.environ.get("MOONSTREAM_ACCESS_TOKEN")
    if value is None:
        raise argparse.ArgumentTypeError(
            "Moonstream access token is required: either via -A/--authorization, or via the MOONSTREAM_ACCESS_TOKEN environment variable"
        )
    try:
        value_uuid = uuid.UUID(value)
    except Exception:
        raise argparse.ArgumentTypeError(
            "Moonstream access token must be a valid UUID"
        )
    return value_uuid
def requires_authorization(parser: argparse.ArgumentParser) -> None:
    # Attach the standard -A/--authorization argument to a subcommand parser.
    # The value is validated and converted to a UUID by moonstream_access_token.
    parser.add_argument(
        "-A",
        "--authorization",
        type=moonstream_access_token,
        required=False,
        # NOTE(review): when MOONSTREAM_ACCESS_TOKEN is unset, this default is
        # None, and argparse does not run the type converter on non-string
        # defaults — args.authorization then stays None and requests go out
        # with "Bearer None". Confirm whether that is intended.
        default=os.environ.get("MOONSTREAM_ACCESS_TOKEN"),
        help="Moonstream API access token (if not provided, must be specified using the MOONSTREAM_ACCESS_TOKEN environment variable)",
    )
def handle_get(args: argparse.Namespace) -> None:
    """Fetch leaderboard positions and print the API response as JSON."""
    query_params = {
        "leaderboard_id": str(args.id),
        "limit": str(args.limit),
        "offset": str(args.offset),
    }
    if args.version is not None:
        query_params["version"] = str(args.version)

    resp = requests.get(LEADERBOARD_API_URL, params=query_params)
    resp.raise_for_status()

    print(json.dumps(resp.json()))
def handle_create(args: argparse.Namespace) -> None:
    """Create a new leaderboard and print the API response as JSON."""
    request_headers = {
        "Authorization": f"Bearer {str(args.authorization)}",
        "Content-Type": "application/json",
    }
    payload = {"title": args.title, "description": args.description}

    resp = requests.post(LEADERBOARD_API_URL, headers=request_headers, json=payload)
    resp.raise_for_status()

    print(json.dumps(resp.json()))
def handle_versions(args: argparse.Namespace) -> None:
    """List all versions of a leaderboard and print them as JSON."""
    resp = requests.get(
        f"{LEADERBOARD_API_URL}{args.id}/versions",
        headers={"Authorization": f"Bearer {str(args.authorization)}"},
    )
    resp.raise_for_status()

    print(json.dumps(resp.json()))
def handle_create_version(args: argparse.Namespace) -> None:
    """Create a new leaderboard version and print the API response as JSON."""
    resp = requests.post(
        f"{LEADERBOARD_API_URL}{args.id}/versions",
        headers={
            "Authorization": f"Bearer {str(args.authorization)}",
            "Content-Type": "application/json",
        },
        json={"publish": args.publish},
    )
    resp.raise_for_status()

    print(json.dumps(resp.json()))
def handle_publish(args: argparse.Namespace) -> None:
    """Set the published flag on an existing leaderboard version."""
    resp = requests.put(
        f"{LEADERBOARD_API_URL}{args.id}/versions/{args.version}",
        headers={
            "Authorization": f"Bearer {str(args.authorization)}",
            "Content-Type": "application/json",
        },
        json={"publish": args.publish},
    )
    resp.raise_for_status()

    print(json.dumps(resp.json()))
def handle_upload_scores(args: argparse.Namespace) -> None:
    """Upload scores from a file (or stdin) to a leaderboard.

    When a version is given the scores go to that version's endpoint;
    otherwise the plain scores endpoint is used. Existing entries are
    overwritten and addresses are sent verbatim (no normalization).
    """
    if args.version is None:
        url = f"{LEADERBOARD_API_URL}{args.id}/scores"
    else:
        url = f"{LEADERBOARD_API_URL}{args.id}/versions/{args.version}/scores"

    query_params = {
        "overwrite": "true",
        "normalize_addresses": "false",
    }
    request_headers = {
        "Authorization": f"Bearer {str(args.authorization)}",
        "Content-Type": "application/json",
    }

    scores_source = args.scores if args.scores is not None else sys.stdin
    with scores_source as ifp:
        payload = json.load(ifp)

    resp = requests.put(url, headers=request_headers, params=query_params, json=payload)
    resp.raise_for_status()

    print(json.dumps(resp.json()))
def generate_cli() -> argparse.ArgumentParser:
    """Build the argument parser for the Leaderboard API client.

    Each subcommand maps to one API endpoint; dispatch happens through the
    "func" default set on every subparser (see __main__ below).
    """
    parser = argparse.ArgumentParser(description="HTTP client for Leaderboard API")
    # With no subcommand, print usage instead of failing on a missing "func".
    parser.set_defaults(func=lambda _: parser.print_help())
    subparsers = parser.add_subparsers()

    # GET /leaderboard/?leaderboard_id=<id>&limit=<limit>&offset=<offset>&version=<version>
    get_parser = subparsers.add_parser("get")
    get_parser.add_argument("-i", "--id", type=uuid.UUID, required=True)
    get_parser.add_argument("-l", "--limit", type=int, default=10)
    get_parser.add_argument("-o", "--offset", type=int, default=0)
    get_parser.add_argument("-v", "--version", type=int, default=None)
    get_parser.set_defaults(func=handle_get)

    # POST /leaderboard/
    create_parser = subparsers.add_parser("create")
    create_parser.add_argument(
        "-t", "--title", type=str, required=True, help="Title for leaderboard"
    )
    create_parser.add_argument(
        "-d",
        "--description",
        type=str,
        required=False,
        default="",
        help="Description for leaderboard",
    )
    requires_authorization(create_parser)
    create_parser.set_defaults(func=handle_create)

    # GET /leaderboard/<id>/versions
    versions_parser = subparsers.add_parser("versions")
    versions_parser.add_argument("-i", "--id", type=uuid.UUID, required=True)
    requires_authorization(versions_parser)
    versions_parser.set_defaults(func=handle_versions)

    # POST /leaderboard/<id>/versions
    create_version_parser = subparsers.add_parser("create-version")
    create_version_parser.add_argument("-i", "--id", type=uuid.UUID, required=True)
    create_version_parser.add_argument(
        "--publish",
        action="store_true",
        help="Set this flag to publish the version immediately upon creation",
    )
    requires_authorization(create_version_parser)
    create_version_parser.set_defaults(func=handle_create_version)

    # PUT /leaderboard/<id>/versions/<version>
    publish_parser = subparsers.add_parser("publish")
    publish_parser.add_argument("-i", "--id", type=uuid.UUID, required=True)
    publish_parser.add_argument("-v", "--version", type=int, required=True)
    publish_parser.add_argument(
        "--publish", action="store_true", help="Set to publish, leave to unpublish"
    )
    requires_authorization(publish_parser)
    publish_parser.set_defaults(func=handle_publish)

    # PUT /leaderboard/<id>/scores and PUT /leaderboard/<id>/versions/<version>/scores
    upload_scores_parser = subparsers.add_parser("upload-scores")
    upload_scores_parser.add_argument("-i", "--id", type=uuid.UUID, required=True)
    upload_scores_parser.add_argument(
        "-v",
        "--version",
        type=int,
        required=False,
        default=None,
        help="Specify a version to upload scores to (if not specified a new version is created)",
    )
    upload_scores_parser.add_argument(
        "-s",
        "--scores",
        type=argparse.FileType("r"),
        required=False,
        default=None,
        help="Path to scores file. If not provided, reads from stdin.",
    )
    upload_scores_parser.set_defaults(func=handle_upload_scores)
    requires_authorization(upload_scores_parser)

    return parser
if __name__ == "__main__":
    # CLI entry point: parse arguments and dispatch to the selected handler.
    cli_args = generate_cli().parse_args()
    cli_args.func(cli_args)

Wyświetl plik

@ -0,0 +1,10 @@
[
{
"address": "0x0000000000000000000000000000000000000000",
"score": 19,
"points_data": {
"secondary_score_1": 7,
"secondary_score_2": 29
}
}
]

Wyświetl plik

@ -11,7 +11,7 @@ from hexbytes import HexBytes
import requests # type: ignore
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.orm import Session
from sqlalchemy import func, text, or_
from sqlalchemy import func, text, or_, and_, Subquery
from sqlalchemy.engine import Row
from web3 import Web3
from web3.types import ChecksumAddress
@ -31,6 +31,7 @@ from .models import (
DropperClaim,
Leaderboard,
LeaderboardScores,
LeaderboardVersion,
)
from . import signatures
from .settings import (
@ -98,6 +99,10 @@ class LeaderboardConfigAlreadyInactive(Exception):
pass
class LeaderboardVersionNotFound(Exception):
pass
BATCH_SIGNATURE_PAGE_SIZE = 500
logger = logging.getLogger(__name__)
@ -987,25 +992,72 @@ def refetch_drop_signatures(
return claimant_objects
def get_leaderboard_total_count(db_session: Session, leaderboard_id) -> int:
def leaderboard_version_filter(
    db_session: Session,
    leaderboard_id: uuid.UUID,
    version_number: Optional[int] = None,
) -> Union[Subquery, int]:
    """Resolve which leaderboard version queries should target.

    An explicitly requested version number is returned unchanged; otherwise
    a scalar subquery selecting the highest published version number for the
    leaderboard is returned, to be compared inside the caller's query.
    """
    if version_number is not None:
        return version_number

    return (
        db_session.query(func.max(LeaderboardVersion.version_number))
        .filter(
            LeaderboardVersion.leaderboard_id == leaderboard_id,
            LeaderboardVersion.published == True,
        )
        .scalar_subquery()
    )
def get_leaderboard_total_count(
db_session: Session, leaderboard_id, version_number: Optional[int] = None
) -> int:
"""
Get the total number of claimants in the leaderboard
Get the total number of position in the leaderboard
"""
return (
db_session.query(LeaderboardScores)
.filter(LeaderboardScores.leaderboard_id == leaderboard_id)
.count()
latest_version = leaderboard_version_filter(
db_session=db_session,
leaderboard_id=leaderboard_id,
version_number=version_number,
)
total_count = (
db_session.query(func.count(LeaderboardScores.id))
.join(
LeaderboardVersion,
and_(
LeaderboardVersion.leaderboard_id == LeaderboardScores.leaderboard_id,
LeaderboardVersion.version_number
== LeaderboardScores.leaderboard_version_number,
),
)
.filter(
LeaderboardVersion.published == True,
LeaderboardVersion.version_number == latest_version,
)
.filter(LeaderboardScores.leaderboard_id == leaderboard_id)
).scalar()
return total_count
def get_leaderboard_info(
db_session: Session, leaderboard_id: uuid.UUID
db_session: Session, leaderboard_id: uuid.UUID, version_number: Optional[int] = None
) -> Row[Tuple[uuid.UUID, str, str, int, Optional[datetime]]]:
"""
Get the leaderboard from the database with users count
"""
leaderboard = (
latest_version = leaderboard_version_filter(
db_session=db_session,
leaderboard_id=leaderboard_id,
version_number=version_number,
)
query = (
db_session.query(
Leaderboard.id,
Leaderboard.title,
@ -1014,15 +1066,34 @@ def get_leaderboard_info(
func.max(LeaderboardScores.updated_at).label("last_update"),
)
.join(
LeaderboardScores,
LeaderboardScores.leaderboard_id == Leaderboard.id,
LeaderboardVersion,
and_(
LeaderboardVersion.leaderboard_id == Leaderboard.id,
LeaderboardVersion.published == True,
),
isouter=True,
)
.join(
LeaderboardScores,
and_(
LeaderboardScores.leaderboard_id == Leaderboard.id,
LeaderboardScores.leaderboard_version_number
== LeaderboardVersion.version_number,
),
isouter=True,
)
.filter(
or_(
LeaderboardVersion.published == None,
LeaderboardVersion.version_number == latest_version,
)
)
.filter(Leaderboard.id == leaderboard_id)
.group_by(Leaderboard.id, Leaderboard.title, Leaderboard.description)
.one()
)
leaderboard = query.one()
return leaderboard
@ -1106,19 +1177,48 @@ def get_leaderboards(
def get_position(
db_session: Session, leaderboard_id, address, window_size, limit: int, offset: int
db_session: Session,
leaderboard_id,
address,
window_size,
limit: int,
offset: int,
version_number: Optional[int] = None,
) -> List[Row[Tuple[str, int, int, int, Any]]]:
"""
Return position by address with window size
"""
query = db_session.query(
LeaderboardScores.address,
LeaderboardScores.score,
LeaderboardScores.points_data.label("points_data"),
func.rank().over(order_by=LeaderboardScores.score.desc()).label("rank"),
func.row_number().over(order_by=LeaderboardScores.score.desc()).label("number"),
).filter(LeaderboardScores.leaderboard_id == leaderboard_id)
latest_version = leaderboard_version_filter(
db_session=db_session,
leaderboard_id=leaderboard_id,
version_number=version_number,
)
query = (
db_session.query(
LeaderboardScores.address,
LeaderboardScores.score,
LeaderboardScores.points_data.label("points_data"),
func.rank().over(order_by=LeaderboardScores.score.desc()).label("rank"),
func.row_number()
.over(order_by=LeaderboardScores.score.desc())
.label("number"),
)
.join(
LeaderboardVersion,
and_(
LeaderboardVersion.leaderboard_id == LeaderboardScores.leaderboard_id,
LeaderboardVersion.version_number
== LeaderboardScores.leaderboard_version_number,
),
)
.filter(
LeaderboardVersion.published == True,
LeaderboardVersion.version_number == latest_version,
)
.filter(LeaderboardScores.leaderboard_id == leaderboard_id)
)
ranked_leaderboard = query.cte(name="ranked_leaderboard")
@ -1157,12 +1257,63 @@ def get_position(
return query.all()
def get_leaderboard_score(
    db_session: Session,
    leaderboard_id,
    address,
    version_number: Optional[int] = None,
) -> Optional[LeaderboardScores]:
    """Fetch one address's score on the requested (or latest published) version."""
    target_version = leaderboard_version_filter(
        db_session=db_session,
        leaderboard_id=leaderboard_id,
        version_number=version_number,
    )

    score_query = (
        db_session.query(LeaderboardScores)
        .join(
            LeaderboardVersion,
            and_(
                LeaderboardVersion.leaderboard_id == LeaderboardScores.leaderboard_id,
                LeaderboardVersion.version_number
                == LeaderboardScores.leaderboard_version_number,
            ),
        )
        .filter(
            LeaderboardVersion.published == True,
            LeaderboardVersion.version_number == target_version,
            LeaderboardScores.leaderboard_id == leaderboard_id,
            LeaderboardScores.address == address,
        )
    )

    return score_query.one_or_none()
def get_leaderboard_positions(
db_session: Session, leaderboard_id, limit: int, offset: int
db_session: Session,
leaderboard_id,
limit: int,
offset: int,
version_number: Optional[int] = None,
) -> List[Row[Tuple[uuid.UUID, str, int, str, int]]]:
"""
Get the leaderboard positions
"""
# get public leaderboard scores with max version
latest_version = leaderboard_version_filter(
db_session=db_session,
leaderboard_id=leaderboard_id,
version_number=version_number,
)
# Main query
query = (
db_session.query(
LeaderboardScores.id,
@ -1171,8 +1322,17 @@ def get_leaderboard_positions(
LeaderboardScores.points_data,
func.rank().over(order_by=LeaderboardScores.score.desc()).label("rank"),
)
.join(
LeaderboardVersion,
and_(
LeaderboardVersion.leaderboard_id == LeaderboardScores.leaderboard_id,
LeaderboardVersion.version_number
== LeaderboardScores.leaderboard_version_number,
),
)
.filter(LeaderboardScores.leaderboard_id == leaderboard_id)
.order_by(text("rank asc, id asc"))
.filter(LeaderboardVersion.published == True)
.filter(LeaderboardVersion.version_number == latest_version)
)
if limit:
@ -1185,18 +1345,39 @@ def get_leaderboard_positions(
def get_qurtiles(
db_session: Session, leaderboard_id
db_session: Session, leaderboard_id, version_number: Optional[int] = None
) -> Tuple[Row[Tuple[str, float, int]], ...]:
"""
Get the leaderboard qurtiles
https://docs.sqlalchemy.org/en/14/core/functions.html#sqlalchemy.sql.functions.percentile_disc
"""
query = db_session.query(
LeaderboardScores.address.label("address"),
LeaderboardScores.score.label("score"),
func.rank().over(order_by=LeaderboardScores.score.desc()).label("rank"),
).filter(LeaderboardScores.leaderboard_id == leaderboard_id)
latest_version = leaderboard_version_filter(
db_session=db_session,
leaderboard_id=leaderboard_id,
version_number=version_number,
)
query = (
db_session.query(
LeaderboardScores.address,
LeaderboardScores.score,
func.rank().over(order_by=LeaderboardScores.score.desc()).label("rank"),
)
.join(
LeaderboardVersion,
and_(
LeaderboardVersion.leaderboard_id == LeaderboardScores.leaderboard_id,
LeaderboardVersion.version_number
== LeaderboardScores.leaderboard_version_number,
),
)
.filter(
LeaderboardVersion.published == True,
LeaderboardVersion.version_number == latest_version,
)
.filter(LeaderboardScores.leaderboard_id == leaderboard_id)
)
ranked_leaderboard = query.cte(name="ranked_leaderboard")
@ -1220,17 +1401,41 @@ def get_qurtiles(
return q1, q2, q3
def get_ranks(db_session: Session, leaderboard_id) -> List[Row[Tuple[int, int, int]]]:
def get_ranks(
db_session: Session, leaderboard_id, version_number: Optional[int] = None
) -> List[Row[Tuple[int, int, int]]]:
"""
Get the leaderboard rank buckets(rank, size, score)
"""
query = db_session.query(
LeaderboardScores.id,
LeaderboardScores.address,
LeaderboardScores.score,
LeaderboardScores.points_data,
func.rank().over(order_by=LeaderboardScores.score.desc()).label("rank"),
).filter(LeaderboardScores.leaderboard_id == leaderboard_id)
latest_version = leaderboard_version_filter(
db_session=db_session,
leaderboard_id=leaderboard_id,
version_number=version_number,
)
query = (
db_session.query(
LeaderboardScores.id,
LeaderboardScores.address,
LeaderboardScores.score,
LeaderboardScores.points_data,
func.rank().over(order_by=LeaderboardScores.score.desc()).label("rank"),
)
.join(
LeaderboardVersion,
and_(
LeaderboardVersion.leaderboard_id == LeaderboardScores.leaderboard_id,
LeaderboardVersion.version_number
== LeaderboardScores.leaderboard_version_number,
),
)
.filter(
LeaderboardVersion.published == True,
LeaderboardVersion.version_number == latest_version,
)
.filter(LeaderboardScores.leaderboard_id == leaderboard_id)
)
ranked_leaderboard = query.cte(name="ranked_leaderboard")
@ -1248,10 +1453,18 @@ def get_rank(
rank: int,
limit: Optional[int] = None,
offset: Optional[int] = None,
version_number: Optional[int] = None,
) -> List[Row[Tuple[uuid.UUID, str, int, str, int]]]:
"""
Get bucket in leaderboard by rank
"""
latest_version = leaderboard_version_filter(
db_session=db_session,
leaderboard_id=leaderboard_id,
version_number=version_number,
)
query = (
db_session.query(
LeaderboardScores.id,
@ -1260,6 +1473,18 @@ def get_rank(
LeaderboardScores.points_data,
func.rank().over(order_by=LeaderboardScores.score.desc()).label("rank"),
)
.join(
LeaderboardVersion,
and_(
LeaderboardVersion.leaderboard_id == LeaderboardScores.leaderboard_id,
LeaderboardVersion.version_number
== LeaderboardScores.leaderboard_version_number,
),
)
.filter(
LeaderboardVersion.published == True,
LeaderboardVersion.version_number == latest_version,
)
.filter(LeaderboardScores.leaderboard_id == leaderboard_id)
.order_by(text("rank asc, id asc"))
)
@ -1433,7 +1658,7 @@ def add_scores(
db_session: Session,
leaderboard_id: uuid.UUID,
scores: List[Score],
overwrite: bool = False,
version_number: int,
normalize_addresses: bool = True,
):
"""
@ -1453,16 +1678,6 @@ def add_scores(
raise DuplicateLeaderboardAddressError("Dublicated addresses", duplicates)
if overwrite:
db_session.query(LeaderboardScores).filter(
LeaderboardScores.leaderboard_id == leaderboard_id
).delete()
try:
db_session.commit()
except:
db_session.rollback()
raise LeaderboardDeleteScoresError("Error deleting leaderboard scores")
for score in scores:
leaderboard_scores.append(
{
@ -1470,13 +1685,18 @@ def add_scores(
"address": normalizer_fn(score.address),
"score": score.score,
"points_data": score.points_data,
"leaderboard_version_number": version_number,
}
)
insert_statement = insert(LeaderboardScores).values(leaderboard_scores)
result_stmt = insert_statement.on_conflict_do_update(
index_elements=[LeaderboardScores.address, LeaderboardScores.leaderboard_id],
index_elements=[
LeaderboardScores.address,
LeaderboardScores.leaderboard_id,
LeaderboardScores.leaderboard_version_number,
],
set_=dict(
score=insert_statement.excluded.score,
points_data=insert_statement.excluded.points_data,
@ -1492,7 +1712,7 @@ def add_scores(
return leaderboard_scores
# leadrboard access actions
# leaderboard access actions
def create_leaderboard_resource(
@ -1731,3 +1951,159 @@ def check_leaderboard_resource_permissions(
return True
return False
def get_leaderboard_version(
    db_session: Session, leaderboard_id: uuid.UUID, version_number: int
) -> LeaderboardVersion:
    """Fetch a single leaderboard version row; .one() errors if it is absent."""
    return (
        db_session.query(LeaderboardVersion)
        .filter(
            LeaderboardVersion.leaderboard_id == leaderboard_id,
            LeaderboardVersion.version_number == version_number,
        )
        .one()
    )
def create_leaderboard_version(
    db_session: Session,
    leaderboard_id: uuid.UUID,
    version_number: Optional[int] = None,
    publish: bool = False,
) -> LeaderboardVersion:
    """Insert a new leaderboard version row and commit it.

    When no version number is supplied, the next number after the current
    maximum for this leaderboard is used (0 when no versions exist yet).
    """
    if version_number is None:
        current_max = (
            db_session.query(func.max(LeaderboardVersion.version_number))
            .filter(LeaderboardVersion.leaderboard_id == leaderboard_id)
            .one()
        )[0]
        version_number = 0 if current_max is None else current_max + 1

    new_version = LeaderboardVersion(
        leaderboard_id=leaderboard_id,
        version_number=version_number,
        published=publish,
    )
    db_session.add(new_version)
    db_session.commit()

    return new_version
def change_publish_leaderboard_version_status(
    db_session: Session, leaderboard_id: uuid.UUID, version_number: int, published: bool
) -> LeaderboardVersion:
    """Set the published flag of a leaderboard version and persist it."""
    version = (
        db_session.query(LeaderboardVersion)
        .filter(
            LeaderboardVersion.leaderboard_id == leaderboard_id,
            LeaderboardVersion.version_number == version_number,
        )
        .one()
    )

    version.published = published
    db_session.commit()

    return version
def get_leaderboard_versions(
    db_session: Session, leaderboard_id: uuid.UUID
) -> List[LeaderboardVersion]:
    """Return every version row belonging to the given leaderboard."""
    versions_query = db_session.query(LeaderboardVersion).filter(
        LeaderboardVersion.leaderboard_id == leaderboard_id
    )
    return versions_query.all()
def delete_leaderboard_version(
    db_session: Session, leaderboard_id: uuid.UUID, version_number: int
) -> LeaderboardVersion:
    """Delete one leaderboard version and return the removed row."""
    version = (
        db_session.query(LeaderboardVersion)
        .filter(
            LeaderboardVersion.leaderboard_id == leaderboard_id,
            LeaderboardVersion.version_number == version_number,
        )
        .one()
    )

    db_session.delete(version)
    db_session.commit()

    return version
def get_leaderboard_version_scores(
    db_session: Session,
    leaderboard_id: uuid.UUID,
    version_number: int,
    limit: int,
    offset: int,
) -> List[LeaderboardScores]:
    """
    Get the leaderboard scores by version number
    """
    # NOTE(review): this returns the Query object itself (no .all()), and the
    # selected entities are labeled columns plus a computed "rank", not
    # LeaderboardScores instances — the declared return type looks inaccurate.
    # Confirm how callers consume the result before changing either.
    query = (
        db_session.query(
            LeaderboardScores.id,
            LeaderboardScores.address.label("address"),
            LeaderboardScores.score.label("score"),
            LeaderboardScores.points_data.label("points_data"),
            func.rank().over(order_by=LeaderboardScores.score.desc()).label("rank"),
        )
        .filter(LeaderboardScores.leaderboard_id == leaderboard_id)
        .filter(LeaderboardScores.leaderboard_version_number == version_number)
    )

    # A limit/offset of 0 is treated the same as "not set".
    if limit:
        query = query.limit(limit)

    if offset:
        query = query.offset(offset)

    return query
def delete_previous_versions(
db_session: Session,
leaderboard_id: uuid.UUID,
threshold_version_number: int,
) -> int:
"""
Delete old leaderboard versions
"""
versions_to_delete = (
db_session.query(LeaderboardVersion)
.filter(LeaderboardVersion.leaderboard_id == leaderboard_id)
.filter(LeaderboardVersion.version_number < threshold_version_number)
)
num_deleted = versions_to_delete.delete(synchronize_session=False)
db_session.commit()
return num_deleted

Wyświetl plik

@ -470,3 +470,15 @@ class LeaderboardConfigUpdate(BaseModel):
query_name: Optional[str] = None
params: Dict[str, int]
normalize_addresses: Optional[bool] = None
class LeaderboardVersion(BaseModel):
leaderboard_id: UUID
version: int
published: bool
created_at: datetime
updated_at: datetime
class LeaderboardVersionRequest(BaseModel):
publish: bool

Wyświetl plik

@ -14,6 +14,7 @@ from sqlalchemy import (
MetaData,
String,
UniqueConstraint,
ForeignKeyConstraint,
)
from sqlalchemy.dialects.postgresql import JSONB, UUID
from sqlalchemy.ext.compiler import compiles
@ -361,9 +362,45 @@ class Leaderboard(Base): # type: ignore
)
class LeaderboardVersion(Base): # type: ignore
__tablename__ = "leaderboard_versions"
__table_args__ = (UniqueConstraint("leaderboard_id", "version_number"),)
leaderboard_id = Column(
UUID(as_uuid=True),
ForeignKey("leaderboards.id", ondelete="CASCADE"),
primary_key=True,
nullable=False,
)
version_number = Column(DECIMAL, primary_key=True, nullable=False)
published = Column(Boolean, default=False, nullable=False)
created_at = Column(
DateTime(timezone=True),
server_default=utcnow(),
nullable=False,
index=True,
)
updated_at = Column(
DateTime(timezone=True),
server_default=utcnow(),
onupdate=utcnow(),
nullable=False,
)
class LeaderboardScores(Base): # type: ignore
__tablename__ = "leaderboard_scores"
__table_args__ = (UniqueConstraint("leaderboard_id", "address"),)
__table_args__ = (
UniqueConstraint("leaderboard_id", "address", "leaderboard_version_number"),
ForeignKeyConstraint(
["leaderboard_id", "leaderboard_version_number"],
[
"leaderboard_versions.leaderboard_id",
"leaderboard_versions.version_number",
],
ondelete="CASCADE",
),
)
id = Column(
UUID(as_uuid=True),
@ -374,7 +411,10 @@ class LeaderboardScores(Base): # type: ignore
)
leaderboard_id = Column(
UUID(as_uuid=True),
ForeignKey("leaderboards.id", ondelete="CASCADE"),
nullable=False,
)
leaderboard_version_number = Column(
DECIMAL,
nullable=False,
)
address = Column(VARCHAR(256), nullable=False, index=True)

Wyświetl plik

@ -48,7 +48,7 @@ AuthHeader = Header(
)
leaderboad_whitelist = {
leaderboard_whitelist = {
f"/leaderboard/{DOCS_TARGET_PATH}": "GET",
"/leaderboard/openapi.json": "GET",
"/leaderboard/info": "GET",
@ -60,6 +60,7 @@ leaderboad_whitelist = {
"/leaderboard/": "GET",
"/leaderboard/rank": "GET",
"/leaderboard/ranks": "GET",
"/leaderboard/scores": "GET",
"/scores/changes": "GET",
"/leaderboard/docs": "GET",
"/leaderboard/openapi.json": "GET",
@ -76,7 +77,7 @@ app = FastAPI(
)
app.add_middleware(ExtractBearerTokenMiddleware, whitelist=leaderboad_whitelist)
app.add_middleware(ExtractBearerTokenMiddleware, whitelist=leaderboard_whitelist)
app.add_middleware(
CORSMiddleware,
@ -106,7 +107,8 @@ async def leaderboard(
limit: int = Query(10),
offset: int = Query(0),
db_session: Session = Depends(db.yield_db_session),
) -> Any:
version: Optional[str] = Query(None, description="Version of the leaderboard."),
) -> List[data.LeaderboardPosition]:
"""
Returns the leaderboard positions.
"""
@ -124,7 +126,7 @@ async def leaderboard(
raise EngineHTTPException(status_code=500, detail="Internal server error")
leaderboard_positions = actions.get_leaderboard_positions(
db_session, leaderboard_id, limit, offset
db_session, leaderboard_id, limit, offset, version
)
if len(leaderboard.columns_names) > 0:
result = [
@ -152,7 +154,10 @@ async def leaderboard(
@app.post(
"", response_model=data.LeaderboardCreatedResponse, tags=["Authorized Endpoints"]
"",
response_model=data.LeaderboardCreatedResponse,
tags=["Authorized Endpoints"],
include_in_schema=False,
)
@app.post(
"/", response_model=data.LeaderboardCreatedResponse, tags=["Authorized Endpoints"]
@ -388,6 +393,7 @@ async def get_leaderboards(
)
async def count_addresses(
leaderboard_id: UUID = Query(..., description="Leaderboard ID"),
version: Optional[int] = Query(None, description="Version of the leaderboard."),
db_session: Session = Depends(db.yield_db_session),
) -> data.CountAddressesResponse:
"""
@ -406,7 +412,7 @@ async def count_addresses(
logger.error(f"Error while getting leaderboard: {e}")
raise EngineHTTPException(status_code=500, detail="Internal server error")
count = actions.get_leaderboard_total_count(db_session, leaderboard_id)
count = actions.get_leaderboard_total_count(db_session, leaderboard_id, version)
return data.CountAddressesResponse(count=count)
@ -417,12 +423,13 @@ async def count_addresses(
async def leadeboard_info(
leaderboard_id: UUID = Query(..., description="Leaderboard ID"),
db_session: Session = Depends(db.yield_db_session),
version: Optional[int] = Query(None, description="Version of the leaderboard."),
) -> data.LeaderboardInfoResponse:
"""
Returns leaderboard info.
"""
try:
leaderboard = actions.get_leaderboard_info(db_session, leaderboard_id)
leaderboard = actions.get_leaderboard_info(db_session, leaderboard_id, version)
except NoResultFound as e:
raise EngineHTTPException(
status_code=404,
@ -476,6 +483,7 @@ async def get_scores_changes(
async def quartiles(
leaderboard_id: UUID = Query(..., description="Leaderboard ID"),
db_session: Session = Depends(db.yield_db_session),
version: Optional[int] = Query(None, description="Version of the leaderboard."),
) -> data.QuartilesResponse:
"""
Returns the quartiles of the leaderboard.
@ -493,7 +501,8 @@ async def quartiles(
raise EngineHTTPException(status_code=500, detail="Internal server error")
try:
q1, q2, q3 = actions.get_qurtiles(db_session, leaderboard_id)
q1, q2, q3 = actions.get_qurtiles(db_session, leaderboard_id, version)
except actions.LeaderboardIsEmpty:
raise EngineHTTPException(status_code=204, detail="Leaderboard is empty.")
except Exception as e:
@ -544,6 +553,7 @@ async def position(
normalize_addresses: bool = Query(
True, description="Normalize addresses to checksum."
),
version: Optional[int] = Query(None, description="Version of the leaderboard."),
db_session: Session = Depends(db.yield_db_session),
) -> Union[List[data.LeaderboardPosition], List[data.LeaderboardUnformattedPosition]]:
"""
@ -567,7 +577,13 @@ async def position(
address = Web3.toChecksumAddress(address)
positions = actions.get_position(
db_session, leaderboard_id, address, window_size, limit, offset
db_session,
leaderboard_id,
address,
window_size,
limit,
offset,
version,
)
if len(leaderboard.columns_names) > 0:
@ -607,6 +623,7 @@ async def rank(
rank: int = Query(1, description="Rank to get."),
limit: Optional[int] = Query(None),
offset: Optional[int] = Query(None),
version: Optional[int] = Query(None, description="Version of the leaderboard."),
db_session: Session = Depends(db.yield_db_session),
) -> Union[List[data.LeaderboardPosition], List[data.LeaderboardUnformattedPosition]]:
"""
@ -626,7 +643,12 @@ async def rank(
raise EngineHTTPException(status_code=500, detail="Internal server error")
leaderboard_rank = actions.get_rank(
db_session, leaderboard_id, rank, limit=limit, offset=offset
db_session,
leaderboard_id,
rank,
limit=limit,
offset=offset,
version_number=version,
)
if len(leaderboard.columns_names) > 0:
@ -655,6 +677,7 @@ async def rank(
@app.get("/ranks", response_model=List[data.RanksResponse], tags=["Public Endpoints"])
async def ranks(
leaderboard_id: UUID = Query(..., description="Leaderboard ID"),
version: Optional[int] = Query(None, description="Version of the leaderboard."),
db_session: Session = Depends(db.yield_db_session),
) -> List[data.RanksResponse]:
"""
@ -673,7 +696,7 @@ async def ranks(
logger.error(f"Error while getting leaderboard: {e}")
raise EngineHTTPException(status_code=500, detail="Internal server error")
ranks = actions.get_ranks(db_session, leaderboard_id)
ranks = actions.get_ranks(db_session, leaderboard_id, version)
results = [
data.RanksResponse(
score=rank.score,
@ -685,6 +708,57 @@ async def ranks(
return results
@app.get(
"/scores",
response_model=data.LeaderboardScore,
tags=["Public Endpoints"],
)
async def leaderboard_score(
address: str = Query(..., description="Address to get position for."),
leaderboard_id: UUID = Query(..., description="Leaderboard ID"),
version: Optional[int] = Query(None, description="Version of the leaderboard."),
normalize_addresses: bool = Query(
True, description="Normalize addresses to checksum."
),
db_session: Session = Depends(db.yield_db_session),
) -> data.LeaderboardScore:
"""
Returns the leaderboard posotion for the given address.
"""
### Check if leaderboard exists
try:
actions.get_leaderboard_by_id(db_session, leaderboard_id)
except NoResultFound as e:
raise EngineHTTPException(
status_code=404,
detail="Leaderboard not found.",
)
except Exception as e:
logger.error(f"Error while getting leaderboard: {e}")
raise EngineHTTPException(status_code=500, detail="Internal server error")
if normalize_addresses:
address = Web3.toChecksumAddress(address)
score = actions.get_leaderboard_score(
db_session,
leaderboard_id,
address,
version,
)
if score is None:
raise EngineHTTPException(status_code=204, detail="Score not found.")
return data.LeaderboardScore(
leaderboard_id=score.leaderboard_id,
address=score.address,
score=score.score,
points_data=score.points_data,
)
@app.put(
"/{leaderboard_id}/scores",
response_model=List[data.LeaderboardScore],
@ -696,10 +770,6 @@ async def leaderboard_push_scores(
scores: List[data.Score] = Body(
..., description="Scores to put to the leaderboard."
),
overwrite: bool = Query(
False,
description="If enabled, this will delete all current scores and replace them with the new scores provided.",
),
normalize_addresses: bool = Query(
True, description="Normalize addresses to checksum."
),
@ -727,13 +797,22 @@ async def leaderboard_push_scores(
status_code=403, detail="You don't have access to this leaderboard."
)
try:
new_version = actions.create_leaderboard_version(
db_session=db_session,
leaderboard_id=leaderboard_id,
)
except Exception as e:
logger.error(f"Error while creating leaderboard version: {e}")
raise EngineHTTPException(status_code=500, detail="Internal server error")
try:
leaderboard_points = actions.add_scores(
db_session=db_session,
leaderboard_id=leaderboard_id,
scores=scores,
overwrite=overwrite,
normalize_addresses=normalize_addresses,
version_number=new_version.version_number,
)
except actions.DuplicateLeaderboardAddressError as e:
raise EngineHTTPException(
@ -750,6 +829,27 @@ async def leaderboard_push_scores(
logger.error(f"Score update failed with error: {e}")
raise EngineHTTPException(status_code=500, detail="Score update failed.")
try:
actions.change_publish_leaderboard_version_status(
db_session=db_session,
leaderboard_id=leaderboard_id,
version_number=new_version.version_number,
published=True,
)
except Exception as e:
logger.error(f"Error while updating leaderboard version: {e}")
raise EngineHTTPException(status_code=500, detail="Internal server error")
try:
actions.delete_previous_versions(
db_session=db_session,
leaderboard_id=leaderboard_id,
threshold_version_number=new_version.version_number,
)
except Exception as e:
logger.error(f"Error while deleting leaderboard versions: {e}")
raise EngineHTTPException(status_code=500, detail="Internal server error")
result = [
data.LeaderboardScore(
leaderboard_id=score["leaderboard_id"],
@ -973,3 +1073,422 @@ async def leaderboard_config_deactivate(
raise EngineHTTPException(status_code=500, detail="Internal server error")
return True
@app.get(
"/{leaderboard_id}/versions",
response_model=List[data.LeaderboardVersion],
tags=["Authorized Endpoints"],
)
async def leaderboard_versions_list(
request: Request,
leaderboard_id: UUID = Path(..., description="Leaderboard ID"),
db_session: Session = Depends(db.yield_db_session),
Authorization: str = AuthHeader,
) -> List[data.LeaderboardVersion]:
"""
Get leaderboard versions list.
"""
token = request.state.token
try:
access = actions.check_leaderboard_resource_permissions(
db_session=db_session,
leaderboard_id=leaderboard_id,
token=token,
)
except NoResultFound as e:
raise EngineHTTPException(
status_code=404,
detail="Leaderboard version not found.",
)
if not access:
raise EngineHTTPException(
status_code=403, detail="You don't have access to this leaderboard version."
)
try:
leaderboard_versions = actions.get_leaderboard_versions(
db_session=db_session,
leaderboard_id=leaderboard_id,
)
except Exception as e:
logger.error(f"Error while getting leaderboard versions list: {e}")
raise EngineHTTPException(status_code=500, detail="Internal server error")
result = [
data.LeaderboardVersion(
leaderboard_id=version.leaderboard_id,
version=version.version_number,
published=version.published,
created_at=version.created_at,
updated_at=version.updated_at,
)
for version in leaderboard_versions
]
return result
@app.get(
"/{leaderboard_id}/versions/{version}",
response_model=data.LeaderboardVersion,
tags=["Authorized Endpoints"],
)
async def leaderboard_version_handler(
request: Request,
leaderboard_id: UUID = Path(..., description="Leaderboard ID"),
version: int = Path(..., description="Version of the leaderboard."),
db_session: Session = Depends(db.yield_db_session),
Authorization: str = AuthHeader,
) -> data.LeaderboardVersion:
"""
Get leaderboard version.
"""
token = request.state.token
try:
access = actions.check_leaderboard_resource_permissions(
db_session=db_session,
leaderboard_id=leaderboard_id,
token=token,
)
except NoResultFound as e:
raise EngineHTTPException(
status_code=404,
detail="Leaderboard not found.",
)
if not access:
raise EngineHTTPException(
status_code=403, detail="You don't have access to this leaderboard."
)
try:
leaderboard_version = actions.get_leaderboard_version(
db_session=db_session,
leaderboard_id=leaderboard_id,
version_number=version,
)
except NoResultFound as e:
raise EngineHTTPException(
status_code=404,
detail="Leaderboard version not found.",
)
except Exception as e:
logger.error(f"Error while getting leaderboard version: {e}")
raise EngineHTTPException(status_code=500, detail="Internal server error")
return data.LeaderboardVersion(
leaderboard_id=leaderboard_version.leaderboard_id,
version=leaderboard_version.version_number,
published=leaderboard_version.published,
created_at=leaderboard_version.created_at,
updated_at=leaderboard_version.updated_at,
)
@app.post(
"/{leaderboard_id}/versions",
response_model=data.LeaderboardVersion,
tags=["Authorized Endpoints"],
)
async def create_leaderboard_version(
request: Request,
leaderboard_id: UUID = Path(..., description="Leaderboard ID"),
db_session: Session = Depends(db.yield_db_session),
request_body: data.LeaderboardVersionRequest = Body(
...,
description="JSON object specifying whether to publish or unpublish version.",
),
Authorization: str = AuthHeader,
) -> data.LeaderboardVersion:
"""
Create leaderboard version.
"""
token = request.state.token
try:
access = actions.check_leaderboard_resource_permissions(
db_session=db_session,
leaderboard_id=leaderboard_id,
token=token,
)
except NoResultFound as e:
raise EngineHTTPException(
status_code=404,
detail="Leaderboard not found.",
)
if not access:
raise EngineHTTPException(
status_code=403, detail="You don't have access to this leaderboard."
)
try:
new_version = actions.create_leaderboard_version(
db_session=db_session,
leaderboard_id=leaderboard_id,
publish=request_body.publish,
)
except Exception as e:
logger.error(f"Error while creating leaderboard version: {e}")
raise EngineHTTPException(status_code=500, detail="Internal server error")
return data.LeaderboardVersion(
leaderboard_id=new_version.leaderboard_id,
version=new_version.version_number,
published=new_version.published,
created_at=new_version.created_at,
updated_at=new_version.updated_at,
)
@app.put(
"/{leaderboard_id}/versions/{version}",
response_model=data.LeaderboardVersion,
tags=["Authorized Endpoints"],
)
async def update_leaderboard_version_handler(
request: Request,
leaderboard_id: UUID = Path(..., description="Leaderboard ID"),
version: int = Path(..., description="Version of the leaderboard."),
request_body: data.LeaderboardVersionRequest = Body(
...,
description="JSON object specifying whether to publish or unpublish version.",
),
db_session: Session = Depends(db.yield_db_session),
Authorization: str = AuthHeader,
) -> data.LeaderboardVersion:
"""
Update leaderboard version.
"""
token = request.state.token
try:
access = actions.check_leaderboard_resource_permissions(
db_session=db_session,
leaderboard_id=leaderboard_id,
token=token,
)
except NoResultFound as e:
raise EngineHTTPException(
status_code=404,
detail="Leaderboard version not found.",
)
if not access:
raise EngineHTTPException(
status_code=403, detail="You don't have access to this leaderboard version."
)
try:
leaderboard_version = actions.change_publish_leaderboard_version_status(
db_session=db_session,
leaderboard_id=leaderboard_id,
version_number=version,
published=request_body.publish,
)
except Exception as e:
logger.error(f"Error while updating leaderboard version: {e}")
raise EngineHTTPException(status_code=500, detail="Internal server error")
return data.LeaderboardVersion(
leaderboard_id=leaderboard_version.leaderboard_id,
version=leaderboard_version.version_number,
published=leaderboard_version.published,
created_at=leaderboard_version.created_at,
updated_at=leaderboard_version.updated_at,
)
@app.delete(
"/{leaderboard_id}/versions/{version}",
response_model=data.LeaderboardVersion,
tags=["Authorized Endpoints"],
)
async def delete_leaderboard_version_handler(
request: Request,
leaderboard_id: UUID = Path(..., description="Leaderboard ID"),
version: int = Path(..., description="Version of the leaderboard."),
db_session: Session = Depends(db.yield_db_session),
Authorization: str = AuthHeader,
) -> data.LeaderboardVersion:
"""
Delete leaderboard version.
"""
token = request.state.token
try:
access = actions.check_leaderboard_resource_permissions(
db_session=db_session, leaderboard_id=leaderboard_id, token=token
)
except NoResultFound as e:
raise EngineHTTPException(
status_code=404,
detail="Leaderboard version not found.",
)
if not access:
raise EngineHTTPException(
status_code=403, detail="You don't have access to this leaderboard version."
)
try:
leaderboard_version = actions.delete_leaderboard_version(
db_session=db_session,
leaderboard_id=leaderboard_id,
version_number=version,
)
except NoResultFound as e:
raise EngineHTTPException(
status_code=404,
detail="Leaderboard version not found.",
)
except Exception as e:
logger.error(f"Error while deleting leaderboard version: {e}")
raise EngineHTTPException(status_code=500, detail="Internal server error")
return data.LeaderboardVersion(
leaderboard_id=leaderboard_version.leaderboard_id,
version=leaderboard_version.version_number,
published=leaderboard_version.published,
created_at=leaderboard_version.created_at,
updated_at=leaderboard_version.updated_at,
)
@app.get(
"/{leaderboard_id}/versions/{version}/scores",
response_model=List[data.LeaderboardPosition],
tags=["Authorized Endpoints"],
)
async def leaderboard_version_scores_handler(
request: Request,
leaderboard_id: UUID = Path(..., description="Leaderboard ID"),
version: int = Path(..., description="Version of the leaderboard."),
limit: int = Query(10),
offset: int = Query(0),
db_session: Session = Depends(db.yield_db_session),
Authorization: str = AuthHeader,
) -> List[data.LeaderboardPosition]:
"""
Get leaderboard version scores.
"""
token = request.state.token
try:
access = actions.check_leaderboard_resource_permissions(
db_session=db_session, leaderboard_id=leaderboard_id, token=token
)
except NoResultFound as e:
raise EngineHTTPException(
status_code=404,
detail="Leaderboard version not found.",
)
if not access:
raise EngineHTTPException(
status_code=403, detail="You don't have access to this leaderboard version."
)
try:
leaderboard_version_scores = actions.get_leaderboard_version_scores(
db_session=db_session,
leaderboard_id=leaderboard_id,
version_number=version,
limit=limit,
offset=offset,
)
except Exception as e:
logger.error(f"Error while getting leaderboard version scores: {e}")
raise EngineHTTPException(status_code=500, detail="Internal server error")
result = [
data.LeaderboardPosition(
address=score.address,
score=score.score,
rank=score.rank,
points_data=score.points_data,
)
for score in leaderboard_version_scores
]
return result
@app.put(
"/{leaderboard_id}/versions/{version}/scores",
response_model=List[data.LeaderboardScore],
tags=["Authorized Endpoints"],
)
async def leaderboard_version_push_scores_handler(
request: Request,
leaderboard_id: UUID = Path(..., description="Leaderboard ID"),
version: int = Path(..., description="Version of the leaderboard."),
scores: List[data.Score] = Body(
..., description="Scores to put to the leaderboard version."
),
normalize_addresses: bool = Query(
True, description="Normalize addresses to checksum."
),
db_session: Session = Depends(db.yield_db_session),
Authorization: str = AuthHeader,
) -> List[data.LeaderboardScore]:
"""
Put the leaderboard version to the database.
"""
token = request.state.token
try:
access = actions.check_leaderboard_resource_permissions(
db_session=db_session, leaderboard_id=leaderboard_id, token=token
)
except NoResultFound as e:
raise EngineHTTPException(
status_code=404,
detail="Leaderboard version not found.",
)
if not access:
raise EngineHTTPException(
status_code=403, detail="You don't have access to this leaderboard version."
)
try:
leaderboard_version = actions.get_leaderboard_version(
db_session=db_session,
leaderboard_id=leaderboard_id,
version_number=version,
)
except NoResultFound as e:
raise EngineHTTPException(
status_code=404,
detail="Leaderboard version not found.",
)
except Exception as e:
logger.error(f"Error while getting leaderboard version: {e}")
raise EngineHTTPException(status_code=500, detail="Internal server error")
try:
leaderboard_points = actions.add_scores(
db_session=db_session,
leaderboard_id=leaderboard_id,
scores=scores,
normalize_addresses=normalize_addresses,
version_number=leaderboard_version.version_number,
)
except actions.DuplicateLeaderboardAddressError as e:
raise EngineHTTPException(
status_code=409,
detail=f"Duplicates in push to database is disallowed.\n List of duplicates:{e.duplicates}.\n Please handle duplicates manualy.",
)
except Exception as e:
logger.error(f"Score update failed with error: {e}")
raise EngineHTTPException(status_code=500, detail="Score update failed.")
result = [
data.LeaderboardScore(
leaderboard_id=score["leaderboard_id"],
address=score["address"],
score=score["score"],
points_data=score["points_data"],
)
for score in leaderboard_points
]
return result

Wyświetl plik

@ -32,6 +32,7 @@ from ..settings import (
MOONSTREAM_APPLICATION_ID,
MOONSTREAM_CRAWLERS_SERVER_PORT,
MOONSTREAM_CRAWLERS_SERVER_URL,
MOONSTREAM_INTERNAL_REQUEST_TIMEOUT_SECONDS,
MOONSTREAM_QUERIES_JOURNAL_ID,
MOONSTREAM_QUERY_TEMPLATE_CONTEXT_TYPE,
MOONSTREAM_S3_QUERIES_BUCKET,
@ -473,7 +474,7 @@ async def update_query_data_handler(
if request_update.blockchain
else None,
},
timeout=5,
timeout=MOONSTREAM_INTERNAL_REQUEST_TIMEOUT_SECONDS,
)
except Exception as e:
logger.error(f"Error interaction with crawlers: {str(e)}")

Wyświetl plik

@ -259,3 +259,17 @@ supportsInterface_abi = [
"type": "function",
}
]
MOONSTREAM_INTERNAL_REQUEST_TIMEOUT_SECONDS_RAW = os.environ.get(
"MOONSTREAM_INTERNAL_REQUEST_TIMEOUT_SECONDS"
)
MOONSTREAM_INTERNAL_REQUEST_TIMEOUT_SECONDS = 10
try:
if MOONSTREAM_INTERNAL_REQUEST_TIMEOUT_SECONDS_RAW is not None:
MOONSTREAM_INTERNAL_REQUEST_TIMEOUT_SECONDS = int(
MOONSTREAM_INTERNAL_REQUEST_TIMEOUT_SECONDS_RAW
)
except:
raise Exception(
f"Could not parse MOONSTREAM_INTERNAL_REQUEST_TIMEOUT_SECONDS as int: {MOONSTREAM_INTERNAL_REQUEST_TIMEOUT_SECONDS_RAW}"
)

Wyświetl plik

@ -198,7 +198,7 @@ func (bpool *BlockchainPool) HealthCheck() {
for _, b := range bpool.Blockchains {
var timeout time.Duration
getLatestBlockReq := `{"jsonrpc":"2.0","method":"eth_getBlockByNumber","params":["latest", false],"id":1}`
if b.Blockchain == "starknet" {
if b.Blockchain == "starknet" || b.Blockchain == "starknet-goerli" {
getLatestBlockReq = `{"jsonrpc":"2.0","method":"starknet_getBlockWithTxHashes","params":["latest"],"id":"0"}`
timeout = NB_HEALTH_CHECK_CALL_TIMEOUT * 2
}
@ -241,7 +241,7 @@ func (bpool *BlockchainPool) HealthCheck() {
}
var blockNumber uint64
if b.Blockchain == "starknet" {
if b.Blockchain == "starknet" || b.Blockchain == "starknet-goerli" {
blockNumber = statusResponse.Result.BlockNumber
} else {
blockNumberHex := strings.Replace(statusResponse.Result.Number, "0x", "", -1)

Wyświetl plik

@ -38,7 +38,7 @@ var (
NB_CONNECTION_RETRIES = 2
NB_CONNECTION_RETRIES_INTERVAL = time.Millisecond * 10
NB_HEALTH_CHECK_INTERVAL = time.Millisecond * 5000
NB_HEALTH_CHECK_INTERVAL = os.Getenv("NB_HEALTH_CHECK_INTERVAL")
NB_HEALTH_CHECK_CALL_TIMEOUT = time.Second * 2
NB_CACHE_CLEANING_INTERVAL = time.Second * 10

Wyświetl plik

@ -12,6 +12,7 @@ import (
"net/http/httputil"
"net/url"
"os"
"strconv"
"strings"
"time"
@ -28,7 +29,11 @@ var (
// initHealthCheck runs a routine for check status of the nodes every 5 seconds
func initHealthCheck(debug bool) {
t := time.NewTicker(NB_HEALTH_CHECK_INTERVAL)
healthCheckInterval, convErr := strconv.Atoi(NB_HEALTH_CHECK_INTERVAL)
if convErr != nil {
healthCheckInterval = 5
}
t := time.NewTicker(time.Second * time.Duration(healthCheckInterval))
for {
select {
case <-t.C: