Jobs now part of backend and portal installer work.

portal
jprochazka 2024-07-15 17:58:08 -04:00
parent 0a8d253833
commit fdd18f5cad
15 changed files with 302 additions and 618 deletions

View file

@@ -0,0 +1,190 @@
## FUNCTIONS
cancel_setup() {
echo -e "\e[91m \e[5mINSTALLATION HALTED!\e[25m"
echo -e " Setup has been halted at the request of the user."
echo -e ""
echo -e "\e[93m ------------------------------------------------------------------------------"
echo -e "\e[92m ADS-B Receiver Project Portal setup halted.\e[39m"
echo -e ""
exit 1
}
prompt_for_input() {
    if [[ -z $title ]]; then title="Input"; fi
    if [[ -z $message ]]; then message="Enter a value."; fi
    input=""
    while [[ -z $input ]]; do
        input=$(whiptail \
            --backtitle "${RECEIVER_PROJECT_TITLE}" \
            --title "${title}" \
            --inputbox "${message}" \
            8 78 "${default_value}" \
            3>&1 1>&2 2>&3)
        exit_status=$?
        if [[ $exit_status -ne 0 ]]; then
            cancel_setup
        fi
        title="${title} (REQUIRED)"
    done
}
prompt_for_password() {
    if [[ -z $title ]]; then title="Password"; fi
    if [[ -z $message ]]; then message="Enter the password."; fi
    password=""
    while [[ -z $password ]]; do
        password=$(whiptail \
            --backtitle "${RECEIVER_PROJECT_TITLE}" \
            --title "${title}" \
            --passwordbox "${message}" \
            8 78 \
            3>&1 1>&2 2>&3)
        exit_status=$?
        if [[ $exit_status -ne 0 ]]; then
            cancel_setup
        fi
        title="${title} (REQUIRED)"
    done
}
prompt_for_username() {
    if [[ -z $title ]]; then title="Username"; fi
    if [[ -z $message ]]; then message="Enter the username."; fi
    user=""
    while [[ -z $user ]]; do
        user=$(whiptail \
            --backtitle "${RECEIVER_PROJECT_TITLE}" \
            --title "${title}" \
            --inputbox "${message}" \
            8 78 \
            3>&1 1>&2 2>&3)
        exit_status=$?
        if [[ $exit_status -ne 0 ]]; then
            cancel_setup
        fi
        title="${title} (REQUIRED)"
    done
}
## MYSQL
setup_mysql() {
    # Gather information
    mysql_host="localhost"
    database_exists="false"
    if [[ "${PORTAL_LOCAL_MYSQL_SERVER}" = "false" ]]; then
        mysql_host=$(whiptail \
            --backtitle "${RECEIVER_PROJECT_TITLE}" \
            --title "MySQL Database Server Hostname" \
            --nocancel \
            --inputbox "\nWhat is the remote MySQL server's hostname?" \
            10 60 \
            3>&1 1>&2 2>&3)
        if whiptail \
            --backtitle "${RECEIVER_PROJECT_TITLE}" \
            --title "Does MySQL Database Exist" \
            --yesno "Does the database already exist on the host?" \
            7 80; then
            database_exists="true"
        fi
    fi

    # Check for and install if needed all MariaDB packages needed to host the database locally
    if [[ "${mysql_host}" = "localhost" || "${mysql_host}" = "127.0.0.1" ]]; then
        CheckPackage mariadb-server
        CheckPackage mariadb-client
    fi
    whiptail \
        --backtitle "${RECEIVER_PROJECT_TITLE}" \
        --title "MySQL Secure Installation" \
        --msgbox "The mysql_secure_installation script will now be executed. Follow the on screen instructions to complete the MariaDB (MySQL) server setup." \
        12 78
    echo -e "\e[94m Executing the mysql_secure_installation script...\e[97m"
    sudo mysql_secure_installation
    echo ""

    # Get MySQL administrative user credentials
    if [[ "${mysql_host}" = "localhost" || "${database_exists}" = "false" ]]; then
        whiptail \
            --backtitle "${RECEIVER_PROJECT_TITLE}" \
            --title "Create Remote MySQL Database" \
            --msgbox "This script will attempt to create the MySQL database for you.\nPlease supply credentials for the root user or another account granted permission to create a new database." \
            9 78
        title="MySQL Administrative Username"
        message="Enter the MySQL administrator username"
        default_value="root"
        prompt_for_input
        admin_user=$input
        title="MySQL Administrator Password"
        message="Enter the MySQL password for username ${admin_user}"
        prompt_for_password
        admin_password_one=$password
        title="Confirm The MySQL Administrator Password"
        message="Reenter the MySQL password for username ${admin_user}"
        prompt_for_password
        admin_password_two=$password
        while [[ "${admin_password_one}" != "${admin_password_two}" ]]; do
            admin_password_one=""
            admin_password_two=""
            title="MySQL Administrator Passwords Did Not Match"
            message="Enter the MySQL password for username ${admin_user}"
            prompt_for_password
            admin_password_one=$password
            title="Confirm The MySQL Administrator Password"
            message="Reenter the MySQL password for username ${admin_user}"
            prompt_for_password
            admin_password_two=$password
        done
    fi

    # Get MySQL database name and database user credentials
    title="MySQL Database Name"
    message="Enter the name of the database to be used"
    if [[ "${database_exists}" = "false" ]]; then
        message="Enter the name of the database to be created"
    fi
    default_value="adsbportal"
    prompt_for_input
    database_name=$input
    title="MySQL Database Username"
    message="Enter the username associated with the database ${database_name}"
    if [[ "${database_exists}" = "false" ]]; then
        message="Enter the username to be added to the database ${database_name}"
    fi
    default_value="adsbuser"
    prompt_for_input
    database_username=$input
    title="Database Password"
    message="Enter the password assigned to the username ${database_username}"
    prompt_for_password
    database_password_one=$password
    title="Confirm The Database Password"
    message="Reenter the password assigned to username ${database_username}"
    prompt_for_password
    database_password_two=$password
    while [[ "${database_password_one}" != "${database_password_two}" ]]; do
        database_password_one=""
        database_password_two=""
        title="Database Username Passwords Did Not Match"
        message="Enter the password assigned to the username ${database_username}"
        prompt_for_password
        database_password_one=$password
        title="Confirm The Database Username Password"
        message="Reenter the password assigned to the username ${database_username}"
        prompt_for_password
        database_password_two=$password
    done
}
## POSTGRESQL
## SQLITE

View file

@@ -1,53 +1,18 @@
#!/bin/bash
#####################################################################################
# ADS-B RECEIVER #
#####################################################################################
# #
# This script is not meant to be executed directly. #
# Instead execute install.sh to begin the installation process. #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# #
# Copyright (c) 2015-2024 Joseph A. Prochazka #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to deal #
# in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
### VARIABLES
PORTAL_BUILD_DIRECTORY="${RECEIVER_BUILD_DIRECTORY}/portal"
collectd_config="/etc/collectd/collectd.conf"
collectd_cron_file="/etc/cron.d/adsb-receiver-performance-graphs"
dump1090_max_range_rrd_database="/var/lib/collectd/rrd/localhost/dump1090-localhost/dump1090_range-max_range.rrd"
dump1090_messages_local_rrd_database="/var/lib/collectd/rrd/localhost/dump1090-localhost/dump1090_messages-local_accepted.rrd"
COLLECTD_CONFIG="/etc/collectd/collectd.conf"
COLLECTD_CRON_FILE="/etc/cron.d/adsb-receiver-performance-graphs"
DUMP1090_MAX_RANGE_RRD="/var/lib/collectd/rrd/localhost/dump1090-localhost/dump1090_range-max_range.rrd"
DUMP1090_MESSAGES_LOCAL_RRD="/var/lib/collectd/rrd/localhost/dump1090-localhost/dump1090_messages-local_accepted.rrd"
### INCLUDE EXTERNAL SCRIPTS
source ${RECEIVER_BASH_DIRECTORY}/variables.sh
source ${RECEIVER_BASH_DIRECTORY}/functions.sh
if [[ "${RECEIVER_AUTOMATED_INSTALL}" = "true" ]] && [[ -s "${RECEIVER_CONFIGURATION_FILE}" ]] ; then
source ${RECEIVER_CONFIGURATION_FILE}
fi
### BEGIN SETUP
@@ -55,42 +20,30 @@ echo -e ""
echo -e "\e[95m Setting up collectd performance graphs...\e[97m"
echo -e ""
CheckPackage collectd-core
CheckPackage rrdtool
## CONFIRM INSTALLED PACKAGES
if [[ -z "${DUMP1090_INSTALLED}" ]] || [[ -z "${DUMP1090_FORK}" ]] ; then
echo -e "\e[94m Checking which dump1090 fork is installed...\e[97m"
if [[ $(dpkg-query -W -f='${STATUS}' dump1090-fa 2>/dev/null | grep -c "ok installed") -eq 1 ]] ; then
DUMP1090_FORK="fa"
DUMP1090_INSTALLED="true"
fi
echo -e "\e[94m Checking which dump1090 fork is installed...\e[97m"
if [[ $(dpkg-query -W -f='${STATUS}' dump1090-fa 2>/dev/null | grep -c "ok installed") -eq 1 ]] ; then
dump1090_fork="fa"
dump1090_is_installed="true"
fi
## MODIFY THE DUMP1090-MUTABILITY INIT SCRIPT TO MEASURE AND RETAIN NOISE DATA
if [[ "${DUMP1090_INSTALLED}" = "true" ]] && [[ "${DUMP1090_FORK}" = "mutability" ]] ; then
echo -e "\e[94m Modifying the dump1090-mutability configuration file to add noise measurements...\e[97m"
EXTRA_ARGS=`GetConfig "EXTRA_ARGS" "/etc/default/dump1090-mutability"`
EXTRA_ARGS=$(sed -e 's/^[[:space:]]*//' <<<"${EXTRA_ARGS} --measure-noise")
ChangeConfig "EXTRA_ARGS" "${EXTRA_ARGS}" "/etc/default/dump1090-mutability"
echo -e "\e[94m Reloading the systemd manager configuration...\e[97m"
sudo systemctl daemon-reload
echo -e "\e[94m Reloading dump1090-mutability...\e[97m"
sudo service dump1090-mutability force-reload
fi
## BACKUP AND REPLACE COLLECTD.CONF
# Check if the collectd config file exists and if so back it up.
if [[ -f "${COLLECTD_CONFIG}" ]] ; then
if [[ -f "${collectd_config}" ]] ; then
echo -e "\e[94m Backing up the current collectd.conf file...\e[97m"
sudo cp ${COLLECTD_CONFIG} ${COLLECTD_CONFIG}.bak
sudo cp ${collectd_config} ${collectd_config}.bak
fi
# Generate new collectd config.
echo -e "\e[94m Replacing the current collectd.conf file...\e[97m"
sudo tee ${COLLECTD_CONFIG} > /dev/null <<EOF
sudo tee ${collectd_config} > /dev/null <<EOF
# Config file for collectd(1).
##############################################################################
@@ -113,19 +66,19 @@ WriteThreads 1
EOF
# Dump1090 specific values.
if [[ "${DUMP1090_INSTALLED}" = "true" ]] ; then
sudo tee -a ${COLLECTD_CONFIG} > /dev/null <<EOF
if [[ "${dump1090_is_installed}" = "true" ]] ; then
sudo tee -a ${collectd_config} > /dev/null <<EOF
#----------------------------------------------------------------------------#
# Added types for dump1090. #
# Make sure the path to dump1090.db is correct. #
#----------------------------------------------------------------------------#
TypesDB "${PORTAL_BUILD_DIRECTORY}/graphs/dump1090.db" "/usr/share/collectd/types.db"
TypesDB "${RECEIVER_BUILD_DIRECTORY}/portal/graphs/dump1090.db" "/usr/share/collectd/types.db"
EOF
fi
# Config for all installations.
sudo tee -a ${COLLECTD_CONFIG} > /dev/null <<EOF
sudo tee -a ${collectd_config} > /dev/null <<EOF
##############################################################################
# Logging #
##############################################################################
@@ -190,7 +143,7 @@ EOF
# Raspberry Pi: b03112
if [[ "${RECEIVER_CPU_REVISION}" = "b03112" ]] ; then
sudo tee -a ${COLLECTD_CONFIG} > /dev/null <<EOF
sudo tee -a ${collectd_config} > /dev/null <<EOF
<Plugin table>
<Table "/sys/class/thermal/thermal_zone0/temp">
Instance localhost
@@ -212,8 +165,8 @@ EOF
fi
# Dump1090 specific values.
if [[ "${DUMP1090_INSTALLED}" = "true" ]] ; then
sudo tee -a ${COLLECTD_CONFIG} > /dev/null <<EOF
if [[ "${dump1090_is_installed}" = "true" ]] ; then
sudo tee -a ${collectd_config} > /dev/null <<EOF
#----------------------------------------------------------------------------#
# Configure the dump1090-tools python module. #
# #
@@ -222,7 +175,7 @@ if [[ "${DUMP1090_INSTALLED}" = "true" ]] ; then
# statistics will be loaded from http://localhost/dump1090/data/stats.json #
#----------------------------------------------------------------------------#
<Plugin python>
ModulePath "${PORTAL_BUILD_DIRECTORY}/graphs"
ModulePath "${RECEIVER_BUILD_DIRECTORY}/portal/graphs"
LogTraces true
Import "dump1090"
<Module dump1090>
@@ -236,7 +189,7 @@ EOF
fi
# Remaining config for all installations.
sudo tee -a ${COLLECTD_CONFIG} > /dev/null <<EOF
sudo tee -a ${collectd_config} > /dev/null <<EOF
<Chain "PostCache">
<Rule>
<Match regex>
@@ -252,32 +205,25 @@ sudo tee -a ${COLLECTD_CONFIG} > /dev/null <<EOF
</Chain>
EOF
## RELOAD COLLECTD
echo -e "\e[94m Reloading collectd so the new configuration is used...\e[97m"
sudo service collectd force-reload
## EDIT CRONTAB
if [[ ! -x "${PORTAL_BUILD_DIRECTORY}/graphs/make-collectd-graphs.sh" ]] ; then
echo -e "\e[94m Making the make-collectd-graphs.sh script executable...\e[97m"
chmod +x ${PORTAL_BUILD_DIRECTORY}/graphs/make-collectd-graphs.sh
fi
echo -e "\e[94m Making the make-collectd-graphs.sh script executable...\e[97m"
chmod +x ${RECEIVER_BUILD_DIRECTORY}/portal/graphs/make-collectd-graphs.sh
# The next block is temporary in order to ensure this file is
# deleted on older installations from before the project renaming.
if [[ -f "/etc/cron.d/adsb-feeder-performance-graphs" ]] ; then
echo -e "\e[94m Removing outdated performance graphs cron file...\e[97m"
sudo rm -f /etc/cron.d/adsb-feeder-performance-graphs
fi
if [[ -f "${COLLECTD_CRON_FILE}" ]] ; then
if [[ -f "${collectd_cron_file}" ]] ; then
echo -e "\e[94m Removing previously installed performance graphs cron file...\e[97m"
sudo rm -f ${COLLECTD_CRON_FILE}
sudo rm -f ${collectd_cron_file}
fi
echo -e "\e[94m Adding performance graphs cron file...\e[97m"
sudo tee ${COLLECTD_CRON_FILE} > /dev/null <<EOF
sudo tee ${collectd_cron_file} > /dev/null <<EOF
# Updates the portal's performance graphs.
#
# Every 5 minutes new hourly graphs are generated.
@@ -289,32 +235,33 @@ sudo tee ${COLLECTD_CRON_FILE} > /dev/null <<EOF
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
*/5 * * * * root bash ${PORTAL_BUILD_DIRECTORY}/graphs/make-collectd-graphs.sh 1h >/dev/null 2>&1
*/10 * * * * root bash ${PORTAL_BUILD_DIRECTORY}/graphs/make-collectd-graphs.sh 6h >/dev/null 2>&1
2,12,22,32,42,52 * * * * root bash ${PORTAL_BUILD_DIRECTORY}/graphs/make-collectd-graphs.sh 24h >/dev/null 2>&1
4,24,44 * * * * root bash ${PORTAL_BUILD_DIRECTORY}/graphs/make-collectd-graphs.sh 7d >/dev/null 2>&1
6 * * * * root bash ${PORTAL_BUILD_DIRECTORY}/graphs/make-collectd-graphs.sh 30d >/dev/null 2>&1
8 */12 * * * root bash ${PORTAL_BUILD_DIRECTORY}/graphs/make-collectd-graphs.sh 365d >/dev/null 2>&1
*/5 * * * * root bash ${RECEIVER_BUILD_DIRECTORY}/portal/graphs/make-collectd-graphs.sh 1h >/dev/null 2>&1
*/10 * * * * root bash ${RECEIVER_BUILD_DIRECTORY}/portal/graphs/make-collectd-graphs.sh 6h >/dev/null 2>&1
2,12,22,32,42,52 * * * * root bash ${RECEIVER_BUILD_DIRECTORY}/portal/graphs/make-collectd-graphs.sh 24h >/dev/null 2>&1
4,24,44 * * * * root bash ${RECEIVER_BUILD_DIRECTORY}/portal/graphs/make-collectd-graphs.sh 7d >/dev/null 2>&1
6 * * * * root bash ${RECEIVER_BUILD_DIRECTORY}/portal/graphs/make-collectd-graphs.sh 30d >/dev/null 2>&1
8 */12 * * * root bash ${RECEIVER_BUILD_DIRECTORY}/portal/graphs/make-collectd-graphs.sh 365d >/dev/null 2>&1
EOF
# Update max_range.rrd to remove the 500 km / ~270 nmi limit.
if [ -f "/var/lib/collectd/rrd/localhost/dump1090-localhost/dump1090_range-max_range.rrd" ]; then
if [[ `rrdinfo ${DUMP1090_MAX_RANGE_RRD} | grep -c "ds\[value\].max = 1.0000000000e+06"` -eq 0 ]] ; then
if [[ `rrdinfo ${dump1090_max_range_rrd_database} | grep -c "ds\[value\].max = 1.0000000000e+06"` -eq 0 ]] ; then
echo -e "\e[94m Removing 500km/270mi limit from max_range.rrd...\e[97m"
sudo rrdtool tune ${DUMP1090_MAX_RANGE_RRD} --maximum "value:1000000"
sudo rrdtool tune ${dump1090_max_range_rrd_database} --maximum "value:1000000"
fi
fi
# Increase size of weekly messages table to 8 days
if [ -f ${DUMP1090_MESSAGES_LOCAL_RRD} ]; then
if [[ `rrdinfo ${DUMP1090_MESSAGES_LOCAL_RRD} | grep -c "rra\[6\]\.rows = 1260"` -eq 1 ]] ; then
if [ -f ${dump1090_messages_local_rrd_database} ]; then
if [[ `rrdinfo ${dump1090_messages_local_rrd_database} | grep -c "rra\[6\]\.rows = 1260"` -eq 1 ]] ; then
echo -e "\e[94m Increasing weekly table size to 8 days in messages-local_accepted.rrd...\e[97m"
sudo rrdtool tune ${DUMP1090_MESSAGES_LOCAL_RRD} 'RRA#6:=1440' 'RRA#7:=1440' 'RRA#8:=1440'
sudo rrdtool tune ${dump1090_messages_local_rrd_database} 'RRA#6:=1440' 'RRA#7:=1440' 'RRA#8:=1440'
fi
fi
### SETUP COMPLETE
# Return to the project root directory.
echo -e "\e[94m Entering the ADS-B Receiver Project root directory...\e[97m"
cd ${RECEIVER_ROOT_DIRECTORY}
cd ${RECEIVER_ROOT_DIRECTORY}

View file

@@ -1,14 +1,11 @@
#!/bin/bash
## VARIABLES
PORTAL_BUILD_DIRECTORY="${RECEIVER_BUILD_DIRECTORY}/portal"
## INCLUDE EXTERNAL SCRIPTS
source ${RECEIVER_BASH_DIRECTORY}/variables.sh
source ${RECEIVER_BASH_DIRECTORY}/functions.sh
## BEGIN SETUP
clear
@@ -17,7 +14,7 @@ echo -e ""
echo -e "\e[92m Setting up the ADS-B Receiver Project Portal..."
echo -e "\e[93m ------------------------------------------------------------------------------\e[96m"
echo -e ""
whiptail --backtitle "${RECEIVER_PROJECT_TITLE}" --title "ADS-B Receiver Project Portal Setup" --yesno "The ADS-B Receiver Project Portal adds a web accessible portal to your receiver. The portal allows you to view performance graphs, system information, and live maps containing the current aircraft being tracked.\n\nBy enabling the portal's advanced features you can also view historical data on flights that have been seen in the past as well as more detailed information on each of these aircraft.\n\nTHE ADVANCED PORTAL FEATURES ARE STILL IN DEVELOPMENT\n\nIt is recommended that only those wishing to contribute to the development of these features, or those wishing to test them, enable them. Do not be surprised if you run into major bugs after enabling the advanced features at this time!\n\nDo you wish to continue with the ADS-B Receiver Project Portal setup?" 23 78
whiptail --backtitle "${RECEIVER_PROJECT_TITLE}" --title "ADS-B Receiver Project Portal Setup" --yesno "The ADS-B Receiver Project Portal adds a web accessible portal to your receiver. The portal allows you to view performance graphs, system information, and live maps containing the current aircraft being tracked.\n\nBy enabling the portal's advanced features you can also view historical data on flights that have been seen in the past as well as more detailed information on each of these aircraft.\n\nDo you wish to continue with the ADS-B Receiver Project Portal setup?" 23 78
CONTINUE_SETUP=$?
if [[ "${CONTINUE_SETUP}" = 1 ]] ; then
# Setup has been halted by the user.
@@ -33,41 +30,26 @@ if [[ "${CONTINUE_SETUP}" = 1 ]] ; then
exit 1
fi
## GATHER NEEDED INFORMATION FROM THE USER
# We will need to make sure Lighttpd is installed before we go any further.
echo -e "\e[95m Installing packages needed to fulfill dependencies...\e[97m"
echo -e ""
CheckPackage lighttpd
# Assign the Lighttpd document root directory to a variable.
RAW_DOCUMENT_ROOT=`/usr/sbin/lighttpd -f /etc/lighttpd/lighttpd.conf -p | grep server.document-root`
LIGHTTPD_DOCUMENT_ROOT=`sed 's/.*"\(.*\)"[^"]*$/\1/' <<< ${RAW_DOCUMENT_ROOT}`
## GATHER INSTALLATION INFORMATION FROM THE USER
# TODO: CHECK IF PORTAL IS INSTALLED
# Check if there is already an existing portal installation.
if [[ -f "${LIGHTTPD_DOCUMENT_ROOT}/classes/settings.class.php" ]] ; then
RECEIVER_PORTAL_INSTALLED="true"
else
RECEIVER_PORTAL_INSTALLED="false"
portal_installed="false"
if [[ -f "" ]] ; then
portal_installed="true"
fi
if [[ "${RECEIVER_PORTAL_INSTALLED}" = "true" ]] ; then
# Assign needed variables using the driver setting in settings.class.php.
DATABASEENGINE=`grep 'db_driver' ${LIGHTTPD_DOCUMENT_ROOT}/classes/settings.class.php | tail -n1 | cut -d\' -f2`
if [[ "${DATABASEENGINE}" = "xml" ]] ; then
ADVANCED="false"
else
ADVANCED="true"
fi
if [[ "${ADVANCED}" = "true" ]] ; then
case "${DATABASEENGINE}" in
"mysql") DATABASEENGINE="MySQL" ;;
"sqlite") DATABASEENGINE="SQLite" ;;
esac
DATABASEHOSTNAME=`grep 'db_host' ${LIGHTTPD_DOCUMENT_ROOT}/classes/settings.class.php | tail -n1 | cut -d\' -f2`
DATABASEUSER=`grep 'db_username' ${LIGHTTPD_DOCUMENT_ROOT}/classes/settings.class.php | tail -n1 | cut -d\' -f2`
DATABASEPASSWORD1=`grep 'db_password' ${LIGHTTPD_DOCUMENT_ROOT}/classes/settings.class.php | tail -n1 | cut -d\' -f2`
DATABASENAME=`grep 'db_database' ${LIGHTTPD_DOCUMENT_ROOT}/classes/settings.class.php | tail -n1 | cut -d\' -f2`
if [[ "${portal_installed}" = "true" ]] ; then
case "${DATABASEENGINE}" in
"mysql") DATABASEENGINE="MySQL" ;;
"postgresql") DATABASEENGINE="PostgreSQL" ;;
"sqlite") DATABASEENGINE="SQLite" ;;
esac
database_host_name=`grep 'db_host' ${LIGHTTPD_DOCUMENT_ROOT}/classes/settings.class.php | tail -n1 | cut -d\' -f2`
DATABASEUSER=`grep 'db_username' ${LIGHTTPD_DOCUMENT_ROOT}/classes/settings.class.php | tail -n1 | cut -d\' -f2`
DATABASEPASSWORD1=`grep 'db_password' ${LIGHTTPD_DOCUMENT_ROOT}/classes/settings.class.php | tail -n1 | cut -d\' -f2`
DATABASENAME=`grep 'db_database' ${LIGHTTPD_DOCUMENT_ROOT}/classes/settings.class.php | tail -n1 | cut -d\' -f2`
fi
@@ -94,10 +76,10 @@ else
esac
if [[ "${LOCALMYSQLSERVER}" = "false" ]] ; then
# Ask for the remote MySQL server's hostname.
DATABASEHOSTNAME_TITLE="MySQL Database Server Hostname"
while [[ -z "${DATABASEHOSTNAME}" ]] ; do
DATABASEHOSTNAME=$(whiptail --backtitle "${RECEIVER_PROJECT_TITLE}" --title "${DATABASEHOSTNAME_TITLE}" --nocancel --inputbox "\nWhat is the remote MySQL server's hostname?" 10 60 3>&1 1>&2 2>&3)
DATABASEHOSTNAME_TITLE="MySQL Database Server Hostname (REQUIRED)"
database_host_name_title="MySQL Database Server Hostname"
while [[ -z "${database_host_name}" ]] ; do
database_host_name=$(whiptail --backtitle "${RECEIVER_PROJECT_TITLE}" --title "${DATABASEHOSTNAME_TITLE}" --nocancel --inputbox "\nWhat is the remote MySQL server's hostname?" 10 60 3>&1 1>&2 2>&3)
database_host_name_title="MySQL Database Server Hostname (REQUIRED)"
done
# Ask if the remote MySQL database already exists.
@@ -121,7 +103,7 @@ else
DATABASEEXISTS="false"
# Since the MySQL database server will run locally, assign localhost as its hostname.
DATABASEHOSTNAME="localhost"
database_host_name="localhost"
fi
# Ask for the MySQL administrator credentials if the database does not already exist.
@@ -271,7 +253,7 @@ echo -e "\e[95m Setting up the web portal...\e[97m"
echo -e ""
# If this is an existing Lite installation being upgraded, back up the XML data files.
if [[ "${RECEIVER_PORTAL_INSTALLED}" = "true" ]] && [[ "${ADVANCED}" = "false" ]] ; then
if [[ "${portal_installed}" = "true" ]] && [[ "${ADVANCED}" = "false" ]] ; then
echo -e "\e[94m Backing up the file ${LIGHTTPD_DOCUMENT_ROOT}/data/administrators.xml...\e[97m"
sudo mv ${LIGHTTPD_DOCUMENT_ROOT}/data/administrators.xml ${LIGHTTPD_DOCUMENT_ROOT}/data/administrators.backup.xml
echo -e "\e[94m Backing up the file ${LIGHTTPD_DOCUMENT_ROOT}/data/blogPosts.xml...\e[97m"
@@ -292,10 +274,10 @@ if [ -f ${LIGHTTPD_DOCUMENT_ROOT}/index.lighttpd.html ]; then
fi
echo -e "\e[94m Placing portal files in Lighttpd's root directory...\e[97m"
sudo cp -R ${PORTAL_BUILD_DIRECTORY}/html/* ${LIGHTTPD_DOCUMENT_ROOT}
sudo cp -R ${RECEIVER_BUILD_DIRECTORY}/portal/html/* ${LIGHTTPD_DOCUMENT_ROOT}
# If this is an existing installation being upgraded, restore the original XML data files.
if [[ "${RECEIVER_PORTAL_INSTALLED}" = "true" ]] && [[ "${ADVANCED}" = "false" ]] ; then
if [[ "${portal_installed}" = "true" ]] && [[ "${ADVANCED}" = "false" ]] ; then
echo -e "\e[94m Restoring the backup copy of the file ${LIGHTTPD_DOCUMENT_ROOT}/data/administrators.xml...\e[97m"
sudo mv ${LIGHTTPD_DOCUMENT_ROOT}/data/administrators.backup.xml ${LIGHTTPD_DOCUMENT_ROOT}/data/administrators.xml
echo -e "\e[94m Restoring the backup copy of the file ${LIGHTTPD_DOCUMENT_ROOT}/data/blogPosts.xml...\e[97m"
@@ -365,7 +347,7 @@ if [[ ! -L "/etc/lighttpd/conf-enabled/87-adsb-portal.conf" ]] ; then
sudo ln -s /etc/lighttpd/conf-available/87-adsb-portal.conf /etc/lighttpd/conf-enabled/87-adsb-portal.conf
fi
if [[ "${RECEIVER_PORTAL_INSTALLED}" = "false" ]] ; then
if [[ "${portal_installed}" = "false" ]] ; then
echo -e "\e[94m Enabling the Lighttpd fastcgi-php module...\e[97m"
echo -e ""
sudo lighty-enable-mod fastcgi-php
@@ -383,20 +365,20 @@ fi
## SETUP THE MYSQL DATABASE
if [[ "${RECEIVER_PORTAL_INSTALLED}" = "false" ]] && [[ "${ADVANCED}" = "true" ]] && [[ "${DATABASEENGINE}" = "MySQL" ]] && [[ "${DATABASEEXISTS}" = "false" ]] ; then
if [[ "${portal_installed}" = "false" ]] && [[ "${ADVANCED}" = "true" ]] && [[ "${DATABASEENGINE}" = "MySQL" ]] && [[ "${DATABASEEXISTS}" = "false" ]] ; then
# If MariaDB is being used we will switch the plugin from unix_socket to mysql_native_password to keep things consistent with MySQL setups.
if [[ $(dpkg-query -W -f='${STATUS}' mariadb-server-10.1 2>/dev/null | grep -c "ok installed") -eq 1 ]] ; then
echo -e "\e[94m Switching the default MySQL plugin from unix_socket to mysql_native_password...\e[97m"
sudo mysql -u${DATABASEADMINUSER} -p${DATABASEADMINPASSWORD1} -h ${DATABASEHOSTNAME} -e "UPDATE mysql.user SET plugin = 'mysql_native_password' WHERE user = 'root' AND plugin = 'unix_socket';"
sudo mysql -u${DATABASEADMINUSER} -p${DATABASEADMINPASSWORD1} -h ${database_host_name} -e "UPDATE mysql.user SET plugin = 'mysql_native_password' WHERE user = 'root' AND plugin = 'unix_socket';"
echo -e "\e[94m Flushing privileges on the MySQL (MariaDB) server...\e[97m"
sudo mysql -u${DATABASEADMINUSER} -p${DATABASEADMINPASSWORD1} -h ${DATABASEHOSTNAME} -e "FLUSH PRIVILEGES;"
sudo mysql -u${DATABASEADMINUSER} -p${DATABASEADMINPASSWORD1} -h ${database_host_name} -e "FLUSH PRIVILEGES;"
echo -e "\e[94m Reloading MySQL (MariaDB)...\e[97m"
sudo service mysql force-reload
fi
# Attempt to log in with the supplied MySQL administrator credentials.
echo -e "\e[94m Attempting to log into the MySQL server using the supplied administrator credentials...\e[97m"
while ! sudo mysql -u${DATABASEADMINUSER} -p${DATABASEADMINPASSWORD1} -h ${DATABASEHOSTNAME} -e ";" ; do
while ! sudo mysql -u${DATABASEADMINUSER} -p${DATABASEADMINPASSWORD1} -h ${database_host_name} -e ";" ; do
echo -e "\e[94m Unable to log into the MySQL server using the supplied administrator credentials...\e[97m"
whiptail --backtitle "${RECEIVER_PROJECT_TITLE}" --title "Create Remote MySQL Database" --msgbox "The script was not able to log into the MySQL server using the administrator credentials you supplied. You will now be asked to reenter the MySQL server administrator credentials." 9 78
DATABASEADMINPASSWORD1=""
@@ -438,20 +420,20 @@ if [[ "${RECEIVER_PORTAL_INSTALLED}" = "false" ]] && [[ "${ADVANCED}" = "true" ]
# Create the database user and database using the information supplied by the user.
echo -e "\e[94m Creating the MySQL database \"${DATABASENAME}\"...\e[97m"
sudo mysql -u${DATABASEADMINUSER} -p${DATABASEADMINPASSWORD1} -h ${DATABASEHOSTNAME} -e "CREATE DATABASE ${DATABASENAME};"
sudo mysql -u${DATABASEADMINUSER} -p${DATABASEADMINPASSWORD1} -h ${database_host_name} -e "CREATE DATABASE ${DATABASENAME};"
echo -e "\e[94m Creating the MySQL user \"${DATABASEUSER}\"...\e[97m"
if [[ "${LOCALMYSQLSERVER}" = "false" ]] ; then
# If the database resides on a remote server, allow the newly created user to access it remotely.
sudo mysql -u${DATABASEADMINUSER} -p${DATABASEADMINPASSWORD1} -h ${DATABASEHOSTNAME} -e "CREATE USER '${DATABASEUSER}'@'%' IDENTIFIED BY \"${DATABASEPASSWORD1}\";"
sudo mysql -u${DATABASEADMINUSER} -p${DATABASEADMINPASSWORD1} -h ${database_host_name} -e "CREATE USER '${DATABASEUSER}'@'%' IDENTIFIED BY \"${DATABASEPASSWORD1}\";"
else
# Since this is a local database we will restrict this login to localhost logins only.
sudo mysql -u${DATABASEADMINUSER} -p${DATABASEADMINPASSWORD1} -h ${DATABASEHOSTNAME} -e "CREATE USER '${DATABASEUSER}'@'localhost' IDENTIFIED BY \"${DATABASEPASSWORD1}\";"
sudo mysql -u${DATABASEADMINUSER} -p${DATABASEADMINPASSWORD1} -h ${database_host_name} -e "CREATE USER '${DATABASEUSER}'@'localhost' IDENTIFIED BY \"${DATABASEPASSWORD1}\";"
fi
echo -e "\e[94m Granting priviledges on the MySQL database \"DATABASENAME\" to the user \"${DATABASEUSER}\"...\e[97m"
sudo mysql -u${DATABASEADMINUSER} -p${DATABASEADMINPASSWORD1} -h ${DATABASEHOSTNAME} -e "GRANT ALL PRIVILEGES ON ${DATABASENAME}.* TO '${DATABASEUSER}'@'localhost';"
sudo mysql -u${DATABASEADMINUSER} -p${DATABASEADMINPASSWORD1} -h ${database_host_name} -e "GRANT ALL PRIVILEGES ON ${DATABASENAME}.* TO '${DATABASEUSER}'@'localhost';"
echo -e "\e[94m Flushing priviledges on the MySQL database server...\e[97m"
sudo mysql -u${DATABASEADMINUSER} -p${DATABASEADMINPASSWORD1} -h ${DATABASEHOSTNAME} -e "FLUSH PRIVILEGES;"
sudo mysql -u${DATABASEADMINUSER} -p${DATABASEADMINPASSWORD1} -h ${database_host_name} -e "FLUSH PRIVILEGES;"
fi
## SETUP THE PERFORMANCE GRAPHS USING THE SCRIPT GRAPHS.SH

View file

@@ -1,18 +0,0 @@
#!/bin/bash
## VARIABLES
python_path=`which python3`
## SETUP FLIGHT LOGGING
echo -e ""
echo -e "\e[95m Setting up portal flight logging and maintenance...\e[97m"
echo -e ""
# Create the cron jobs responsible for logging and maintenance.
echo -e "\e[94m Creating the portal script cron file...\e[97m"
sudo tee /etc/cron.d/adsb-receiver-flight-logging > /dev/null <<EOF
* * * * * root ${python_path} ${RECEIVER_BUILD_DIRECTORY}/portal/python/flights.py
0 0 * * * root ${python_path} ${RECEIVER_BUILD_DIRECTORY}/portal/python/maintenance.py
EOF

View file

@@ -1,6 +1,10 @@
from datetime import timedelta
from flask import Flask, render_template
from flask_apscheduler import APScheduler
from flask_jwt_extended import JWTManager
from backend.jobs.data_collection import data_collection_job
from backend.jobs.maintenance import maintenance_job
from backend.routes.flights import flights
from backend.routes.blog import blog
from backend.routes.flights import flights
from backend.routes.links import links
@@ -26,6 +30,16 @@ def create_app():
app.register_blueprint(tokens)
app.register_blueprint(users)
# /API/SCHEDULER
app.config["SCHEDULER_API_ENABLED"] = True
app.config["SCHEDULER_API_PREFIX"] = "/api/scheduler"
scheduler = APScheduler()
scheduler.add_job(id='data_collection', func=data_collection_job, trigger="interval", seconds=15)
scheduler.add_job(id='maintenance', func=maintenance_job, trigger="cron", hour=0)
scheduler.init_app(app)
scheduler.start()
# /API/DOCS
@app.route('/api/docs')
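Note: when create_app() is served by Flask's development server, the default reloader imports the application twice, which would register and start these APScheduler jobs in two processes. A minimal launcher sketch that avoids this; the run.py name, module path, and port are assumptions rather than part of this commit:

# run.py (hypothetical) -- start the app with the reloader disabled
# so the jobs registered in create_app() only run in one process.
from backend import create_app  # assumed module path

app = create_app()

if __name__ == "__main__":
    app.run(port=5000, use_reloader=False)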

View file

@@ -1,17 +1,21 @@
import fcntl
import json
import logging
import os
from datetime import datetime
from flask_apscheduler import APScheduler
from urllib.request import urlopen
from backend.db import create_connection
class AircraftProcessor(object):
scheduler = APScheduler()
connection = None
cursor = None
now = None
class DataProcessor(object):
# Log information to console
def log(self, string):
#print(f'[{datetime.now().strftime("%Y/%m/%d %H:%M:%S")}] {string}') # uncomment to enable debug logging
print(f'[{datetime.now().strftime("%Y/%m/%d %H:%M:%S")}] {string}') # comment out to disable debug logging
return
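Since logging is already imported in this module, the print-based helper could route through the standard logger instead of being commented in and out; a sketch of that alternative, not what this commit does:

# Sketch: route DataProcessor.log through the stdlib logger.
import logging

logging.basicConfig(
    level=logging.DEBUG,
    format='[%(asctime)s] %(message)s',
    datefmt='%Y/%m/%d %H:%M:%S',
)
logger = logging.getLogger(__name__)

class DataProcessor(object):
    def log(self, string):
        # Same timestamped output, toggled via the logging level
        # rather than by editing the source.
        logger.debug(string)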
# Read JSON supplied by dump1090
@@ -186,26 +190,13 @@ class AircraftProcessor(object):
return
if __name__ == "__main__":
processor = AircraftProcessor()
def data_collection_job():
    # The processor methods read the module-level connection, cursor,
    # and now, so assign the globals rather than local shadows.
    global connection, cursor, now
    processor = DataProcessor()
    processor.log("-- CHECKING IF FLIGHT RECORDER JOB IS ALREADY RUNNING")

    # Do not allow another instance of the job to run
    lock_file = open('/tmp/flights.py.lock', 'w')
    try:
        fcntl.flock(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except (IOError, OSError):
        processor.log("-- ANOTHER INSTANCE OF THIS JOB IS RUNNING")
        return
    lock_file.write('%d\n' % os.getpid())

    # Set up and begin the data collection job
    processor.log("-- BEGINNING FLIGHT RECORDER JOB")

    # Set up database connection
    connection = create_connection()
    cursor = connection.cursor()

    # Begin flight recording job
    now = datetime.now()
    processor.process_all_aircraft()
    processor.log("-- FLIGHT RECORD JOB COMPLETE")

View file

@@ -1,10 +1,14 @@
import fcntl
import logging
import os
from datetime import datetime, timedelta
from flask_apscheduler import APScheduler
from backend.db import create_connection
scheduler = APScheduler()
connection = None
cursor = None
now = None
class MaintenanceProcessor(object):
# Log information to console
@@ -132,24 +136,12 @@ class MaintenanceProcessor(object):
return
if __name__ == "__main__":
def maintenance_job():
    # begin_maintenance reads the module-level connection and cursor,
    # so assign the globals rather than local shadows.
    global connection, cursor
    processor = MaintenanceProcessor()

    # Set up and begin the maintenance job
    processor.log("-- BEGINNING PORTAL MAINTENANCE JOB")

    # Do not allow another instance of the job to run
    lock_file = open('/tmp/maintenance.py.lock', 'w')
    try:
        fcntl.flock(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except (IOError, OSError):
        processor.log("-- ANOTHER INSTANCE OF THIS JOB IS RUNNING")
        return

    # Set up database connection
    connection = create_connection()
    cursor = connection.cursor()

    # Begin maintenance job
    lock_file.write('%d\n' % os.getpid())
    processor.begin_maintenance()
    processor.log("-- PORTAL MAINTENANCE JOB COMPLETE")

View file

@@ -1,5 +1,4 @@
import logging
import yaml
from datetime import datetime
from flask import abort, Blueprint, jsonify, request

View file

@@ -1,5 +1,4 @@
import logging
import yaml
from flask import abort, Blueprint, jsonify, request
from flask_jwt_extended import jwt_required

View file

@@ -1,5 +1,4 @@
import logging
import yaml
from flask import abort, Blueprint, jsonify, request
from flask_jwt_extended import jwt_required

View file

@@ -1,5 +1,4 @@
import logging
import yaml
from flask import abort, Blueprint, jsonify, request
from flask_jwt_extended import jwt_required

View file

@@ -1,5 +1,4 @@
import logging
import yaml
from flask import abort, Blueprint, jsonify, request
from flask_jwt_extended import jwt_required

View file

@@ -1,5 +1,4 @@
import logging
import yaml
from flask import abort, Blueprint, jsonify, request
from flask_jwt_extended import jwt_required

View file

@@ -1,234 +0,0 @@
import fcntl
import json
import logging
import MySQLdb
import os
import sqlite3
from datetime import datetime
from time import sleep
from urllib.request import urlopen
class AircraftProcessor(object):
# Log information to console
def log(self, string):
#print(f'[{datetime.now().strftime("%Y/%m/%d %H:%M:%S")}] {string}') # uncomment to enable debug logging
return
# Create database connection
def create_connection(self):
self.log("Setting up database connection")
with open(os.path.dirname(os.path.realpath(__file__)) + '/config.json') as config_file:
config = json.load(config_file)
match config["database"]["type"].lower():
case 'mysql':
return MySQLdb.connect(
host=config["database"]["host"],
user=config["database"]["user"],
passwd=config["database"]["passwd"],
db=config["database"]["db"]
)
case 'sqlite':
return sqlite3.connect(config["database"]["db"])
# Read JSON supplied by dump1090
def read_json(self):
self.log("Reading aircraft.json")
try:
raw_json = urlopen('http://127.0.0.1/dump1090/data/aircraft.json')
json_object = json.load(raw_json)
return json_object
except:
logging.error("There was a problem consuming aircraft.json")
return
# Begin processing data retrieved from dump1090
def process_all_aircraft(self):
data = self.read_json()
aircraft_data = data["aircraft"]
if len(aircraft_data) == 0:
self.log(f'There is no aircraft data to process at this time')
return
self.log(f'Beginning to process {len(aircraft_data)} aircraft')
for aircraft in aircraft_data:
self.process_aircraft(aircraft)
connection.close()
return
# Process the aircraft
def process_aircraft(self, aircraft):
tracked=False
aircraft_id=None
try:
cursor.execute("SELECT COUNT(*) FROM adsb_aircraft WHERE icao = %s", (aircraft["hex"],))
if cursor.fetchone()[0] > 0:
tracked=True
except Exception as ex:
logging.error(f'Error encountered while checking if aircraft {aircraft["hex"]} has already been added', exc_info=ex)
return
if tracked:
self.log(f'Updating aircraft ICAO {aircraft["hex"]}')
try:
cursor.execute(
"UPDATE adsb_aircraft SET lastSeen = %s WHERE icao = %s",
(now, aircraft["hex"])
)
connection.commit()
cursor.execute(
"SELECT id FROM adsb_aircraft WHERE icao = %s",
(aircraft["hex"],)
)
aircraft_id = cursor.fetchone()[0]
except Exception as ex:
logging.error(f'Error encountered while trying to update aircraft {aircraft["hex"]}', exc_info=ex)
return
else:
self.log(f'Inserting aircraft ICAO {aircraft["hex"]}')
try:
cursor.execute(
"INSERT INTO adsb_aircraft (icao, firstSeen, lastSeen) VALUES (%s, %s, %s)",
(aircraft["hex"], now, now)
)
connection.commit()
aircraft_id = cursor.lastrowid
except Exception as ex:
logging.error(f'Error encountered while trying to insert aircraft {aircraft["hex"]}', exc_info=ex)
return
if 'flight' in aircraft:
self.process_flight(aircraft_id, aircraft)
else:
self.process_positions(aircraft_id, None, aircraft)
return
# Process the flight
def process_flight(self, aircraft_id, aircraft):
if 'flight' in aircraft:
flight = aircraft["flight"].strip()
tracked=False
try:
cursor.execute("SELECT COUNT(*) FROM adsb_flights WHERE flight = %s", (flight,))
if cursor.fetchone()[0] > 0:
tracked=True
except Exception as ex:
logging.error(f'Error encountered while checking if flight {flight} has already been added', exc_info=ex)
return
if tracked:
self.log(f' Updating flight {flight} assigned to aircraft ICAO {aircraft["hex"]}')
try:
cursor.execute(
"UPDATE adsb_flights SET lastSeen = %s WHERE flight = %s",
(now, flight)
)
connection.commit()
cursor.execute(
"SELECT id FROM adsb_flights WHERE flight = %s",
(flight,)
)
flight_id = cursor.fetchone()[0]
except Exception as ex:
logging.error(f'Error encountered while trying to update flight {flight}', exc_info=ex)
return
else:
self.log(f'Inserting flight {flight} assigned to aircraft ICAO {aircraft["hex"]}')
try:
cursor.execute(
"INSERT INTO adsb_flights (aircraft, flight, firstSeen, lastSeen) VALUES (%s, %s, %s, %s)",
(aircraft_id, flight, now, now)
)
connection.commit()
flight_id = cursor.lastrowid
except Exception as ex:
logging.error(f'Error encountered while trying to insert flight {flight}', exc_info=ex)
return
else:
self.log(f' Aircraft ICAO {aircraft["hex"]} was not assigned a flight')
self.process_positions(aircraft_id, flight_id, aircraft)
return
# Process positions
def process_positions(self, aircraft_id, flight_id, aircraft):
position_keys = ('lat', 'lon', 'alt_baro', 'gs', 'track', 'geom_rate', 'hex')
if (all(key in aircraft for key in position_keys)):
tracked=False
try:
cursor.execute("SELECT COUNT(*) FROM adsb_positions WHERE flight = %s AND message = %s", (flight_id, aircraft["messages"]))
if cursor.fetchone()[0] > 0:
tracked=True
except Exception as ex:
logging.error(f'Error encountered while checking if position has already been added for message ID {aircraft["messages"]} related to flight {flight_id}', exc_info=ex)
return
if tracked:
return
squawk = None
if 'squawk' in aircraft:
squawk = aircraft["squawk"]
altitude = aircraft["alt_baro"]
if 'alt_geom' in aircraft:
altitude = aircraft["alt_geom"]
try:
if flight_id is None:
self.log(f' Inserting position for aircraft ICAO {aircraft["hex"]}')
else:
self.log(f' Inserting position for aircraft ICAO {aircraft["hex"]} assigned flight {flight_id}')
cursor.execute(
"INSERT INTO adsb_positions (flight, time, message, squawk, latitude, longitude, track, altitude, verticleRate, speed, aircraft) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
(flight_id, now, aircraft["messages"], squawk, aircraft["lat"], aircraft["lon"], aircraft["track"], altitude, aircraft["geom_rate"], aircraft["gs"], aircraft_id)
)
connection.commit()
except Exception as ex:
logging.error(f'Error encountered while inserting position data for message ID {aircraft["messages"]} related to flight {flight_id}', exc_info=ex)
return
else:
self.log(f' Data required to insert position data for Aircraft ICAO {aircraft["hex"]} is not present')
return
if __name__ == "__main__":
processor = AircraftProcessor()
processor.log("-- CHECKING IF FLIGHT RECORDER JOB IS ALREADY RUNNING")
# Do not allow another instance of the job to run
lock_file = open('/tmp/flights.py.lock','w')
try:
fcntl.flock(lock_file, fcntl.LOCK_EX|fcntl.LOCK_NB)
except (IOError, OSError):
processor.log("-- ANOTHER INSTANCE OF THIS JOB IS RUNNING")
quit()
lock_file.write('%d\n'%os.getpid())
while True:
processor.log("-- BEGINING FLIGHT RECORDER JOB")
# Set up database connection
connection = processor.create_connection()
cursor = connection.cursor()
# Begin flight recording job
now = datetime.now()
processor.process_all_aircraft()
processor.log("-- FLIGHT RECORD JOB COMPLETE")
processor.log("SLEEPING 15 SECONDS BEFORE NEXT RUN")
sleep(15)

View file

@@ -1,174 +0,0 @@
import fcntl
import json
import logging
import MySQLdb
import os
import sqlite3
from datetime import datetime, timedelta
class MaintenanceProcessor(object):
# Log information to console
def log(self, string):
print(f'[{datetime.now().strftime("%Y/%m/%d %H:%M:%S")}] {string}') # comment out to disable debug logging
return
# Create database connection
def create_connection(self):
self.log("Setting up database connection")
with open(os.path.dirname(os.path.realpath(__file__)) + '/config.json') as config_file:
config = json.load(config_file)
match config["database"]["type"].lower():
case 'mysql':
return MySQLdb.connect(
host=config["database"]["host"],
user=config["database"]["user"],
passwd=config["database"]["passwd"],
db=config["database"]["db"]
)
case 'sqlite':
return sqlite3.connect(config["database"]["db"])
# Begin maintenance
def begin_maintenance(self):
self.log("Getting maintenance settings from the database")
purge_old_aircraft = False
try:
cursor.execute("SELECT value FROM adsb_settings WHERE name = 'purge_older_data'")
result = cursor.fetchone()[0]
purge_old_aircraft = result.lower() in ['true', '1']
except Exception as ex:
logging.error(f"Error encountered while getting value for setting purge_older_data", exc_info=ex)
return
if purge_old_aircraft:
cutoff_date = datetime.now() - timedelta(days = 20 * 365) # timedelta has no 'years' argument
try:
cursor.execute("SELECT value FROM adsb_settings WHERE name = 'days_to_save'")
days_to_save = cursor.fetchone()[0]
except Exception as ex:
logging.error(f"Error encountered while getting value for setting days_to_save", exc_info=ex)
return
cutoff_date = datetime.now() - timedelta(days = int(days_to_save))
self.purge_aircraft(cutoff_date)
self.purge_positions(cutoff_date)
else:
self.log("Maintenance is disabled")
connection.commit()
connection.close()
return
# Remove aircraft not seen since the specified date
def purge_aircraft(self, cutoff_date):
try:
cursor.execute("SELECT id FROM adsb_aircraft WHERE lastSeen < %s", (cutoff_date,))
aircraft_ids = cursor.fetchall()
except Exception as ex:
logging.error(f"Error encountered while getting aircraft IDs not seen since {cutoff_date}", exc_info=ex)
return
if len(aircraft_ids) > 0:
id = tuple(aircraft_ids)
aircraft_id_params = {'t': id}
try:
cursor.execute("DELETE FROM adsb_aircraft WHERE id IN %(t)s", aircraft_id_params)
except Exception as ex:
logging.error(f"Error deleting aircraft not seen since {cutoff_date}", exc_info=ex)
return
self.purge_flights_related_to_aircraft(aircraft_id_params, cutoff_date)
self.purge_positions_related_to_aircraft(aircraft_id_params, cutoff_date)
return
# Remove flights related to aircraft not seen since the specified date
def purge_flights_related_to_aircraft(self, aircraft_id_params, cutoff_date):
try:
cursor.execute("DELETE FROM adsb_flights WHERE aircraft = %(t)s", aircraft_id_params)
except Exception as ex:
logging.error(f"Error deleting flights related to aircraft not seen since {cutoff_date}", exc_info=ex)
return
return
# Remove positions related to aircraft not seen since the specified date
def purge_positions_related_to_aircraft(self, aircraft_id_params, cutoff_date):
try:
cursor.execute("DELETE FROM adsb_positions WHERE aircraft = %(t)s", aircraft_id_params)
except Exception as ex:
logging.error(f"Error deleting positions related to aircraft not seen since {cutoff_date}", exc_info=ex)
return
return
# Remove flights older than the specified date
def purge_flights(self, cutoff_date):
try:
cursor.execute("SELECT id FROM adsb_flights WHERE lastSeen < %s", (cutoff_date,))
flight_ids = cursor.fetchall()
except Exception as ex:
logging.error(f"Error encountered while getting aircraft IDs not seen since {cutoff_date}", exc_info=ex)
return
if len(flight_ids) > 0:
id = tuple(flight_ids)
flight_id_params = {'t': id}
try:
cursor.execute("DELETE FROM adsb_flights WHERE id IN %(t)s", flight_id_params)
except Exception as ex:
logging.error(f"Error deleting flights older than the cut off date of {cutoff_date}", exc_info=ex)
return
self.purge_positions_related_to_flights(flight_id_params, cutoff_date)
return
# Remove positions related to flights not seen since the specified date
def purge_positions_related_to_flights(self, flight_id_params, cutoff_date):
try:
cursor.execute("DELETE FROM adsb_positions WHERE flight = %(t)s", flight_id_params)
except Exception as ex:
logging.error(f"Error deleting positions related to flights not seen since {cutoff_date}", exc_info=ex)
return
return
# Remove positions older than the specified date
def purge_positions(self, cutoff_date):
try:
cursor.execute("DELETE FROM adsb_positions WHERE time < %s", (cutoff_date,))
except Exception as ex:
logging.error(f"Error deleting positions older than the cut off date of {cutoff_date}", exc_info=ex)
return
return
if __name__ == "__main__":
processor = MaintenanceProcessor()
processor.log("-- BEGINING PORTAL MAINTENANCE JOB")
# Do not allow another instance of the job to run
lock_file = open('/tmp/maintenance.py.lock','w')
try:
fcntl.flock(lock_file, fcntl.LOCK_EX|fcntl.LOCK_NB)
except (IOError, OSError):
processor.log("-- ANOTHER INSTANCE OF THIS JOB IS RUNNING")
quit()
# Set up database connection
connection = processor.create_connection()
cursor = connection.cursor()
# Begin maintenance job
lock_file.write('%d\n'%os.getpid())
processor.begin_maintenance()
processor.log("-- PORTAL MAINTENANCE JOB COMPLETE")