Compare commits
No commits in common. "77463904be879ff4c7f89067d394e0c5f7b6d695" and "f8953ad8990872550373d2c15b439637a46b18cf" have entirely different histories.
77463904be ... f8953ad899
@@ -4,7 +4,7 @@ from mastodon import Mastodon
 
 # === Configuration ===
-ROOT_DIR = r"/mnt/archives"  # Update this path to where your shows live
+ROOT_DIR = r"/mnt/convert/archives"  # Update this path to where your shows live
 ALLOWED_EXTENSIONS = {".mp3", ".wav", ".flac", ".m4a"}
 BANNER_FILENAMES = ["banner.jpg", "banner.png", "banner.jpeg"]
File diff suppressed because it is too large.

@@ -1,251 +0,0 @@
import os
import requests
import datetime
import paramiko

# ==== CONFIG ====
MASTODON_INSTANCE = "https://chatwithus.live"
MASTODON_TOKEN = "rimxBLi-eaJAcwagkmoj6UoW7Lc473tQY0cOM041Euw"
MASTODON_USER_ID = "114386383616633367"

DISK_WARN_THRESHOLD = 10   # percent free
INODE_WARN_THRESHOLD = 10  # percent free
LOG_FILES = ["/var/log/syslog", "/var/log/nginx/error.log"]
LOG_PATTERNS = ["ERROR", "FATAL", "disk full", "out of memory"]

NODES = [
    {
        "name": "shredder",
        "host": "38.102.127.171",
        "ssh_user": "doc",
        "services": [],
        "disks": ["/", "/mnt/raid5"],
        "type": "remote",
        "db": False,
        "raid": True
    },
    {
        "name": "mastodon",
        "host": "chatwithus.live",  # Now points directly to your Mastodon server
        "ssh_user": "root",
        "services": ["nginx", "mastodon-web"],
        "disks": ["/"],
        "type": "remote",
        "db": False,
        "raid": False
    },
    {
        "name": "db1",
        "host": "cluster.db1.genesishostingtechnologies.com",
        "ssh_user": "doc",
        "services": ["postgresql"],
        "disks": ["/", "/var/lib/postgresql"],
        "type": "remote",
        "db": True,
        "raid": False
    },
    {
        "name": "db2",
        "host": "cluster.db2.genesishostingtechnologies.com",
        "ssh_user": "doc",
        "services": ["postgresql"],
        "disks": ["/", "/var/lib/postgresql"],
        "type": "remote",
        "db": True,
        "raid": False
    },
]

# ==== Mastodon DM function ====
def mastodon_dm(message):
    url = f"{MASTODON_INSTANCE}/api/v1/statuses"
    headers = {"Authorization": f"Bearer {MASTODON_TOKEN}"}
    payload = {
        "status": message,
        "visibility": "direct",
        "in_reply_to_account_id": MASTODON_USER_ID
    }
    resp = requests.post(url, headers=headers, data=payload)
    if resp.status_code != 200:
        print(f"Failed to send Mastodon DM: {resp.text}")

# ==== SSH command runner ====
def ssh_command(host, user, cmd):
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(hostname=host, username=user, timeout=10)
    stdin, stdout, stderr = ssh.exec_command(cmd)
    out = stdout.read().decode().strip()
    ssh.close()
    return out

# ==== Robust remote disk check ====
def check_remote_disk(host, user, path, node_name):
    cmd = f"df --output=pcent {path} | tail -1 | tr -dc '0-9'"
    out = ssh_command(host, user, cmd)
    if not out:
        return f"[{node_name}] ERROR: Disk {path} not found or could not check disk usage."
    try:
        percent = int(out)
    except ValueError:
        return f"[{node_name}] ERROR: Could not parse disk usage for {path}. Output was: '{out}'"
    if percent > 90:
        return f"[{node_name}] WARNING: Only {100-percent}% disk free on {path}."
    # Inode check
    cmd_inode = f"df --output=ipcent {path} | tail -1 | tr -dc '0-9'"
    out_inode = ssh_command(host, user, cmd_inode)
    if not out_inode:
        return f"[{node_name}] ERROR: Disk {path} not found or could not check inode usage."
    try:
        percent_inode = int(out_inode)
    except ValueError:
        return f"[{node_name}] ERROR: Could not parse inode usage for {path}. Output was: '{out_inode}'"
    if percent_inode > 90:
        return f"[{node_name}] WARNING: Only {100-percent_inode}% inodes free on {path}."
    return None

# ==== SMART health check (for all disks) ====
def check_remote_smart(host, user, node_name):
    alerts = []
    # List block devices
    cmd_lsblk = "lsblk -ndo NAME,TYPE | awk '$2==\"disk\" {print $1}'"
    devs = ssh_command(host, user, cmd_lsblk)
    if not devs:
        alerts.append(f"[{node_name}] ERROR: Could not list block devices for SMART check.")
        return alerts
    for dev in devs.split():
        smart_cmd = f"sudo smartctl -H /dev/{dev}"
        out = ssh_command(host, user, smart_cmd)
        if "PASSED" in out:
            continue  # All good
        elif "FAILED" in out or "Pre-fail" in out or "SMART support is: Unavailable" in out:
            alerts.append(f"[{node_name}] CRITICAL: SMART health issue on /dev/{dev}!\n{out}")
        elif "Unknown" in out or not out:
            alerts.append(f"[{node_name}] ERROR: SMART status unknown on /dev/{dev}. Output: {out}")
        # Optionally scan for other SMART errors
    return alerts

# ==== Remote service check ====
def check_remote_service(host, user, service, node_name):
    cmd = f"systemctl is-active {service}"
    out = ssh_command(host, user, cmd)
    if out.strip() != "active":
        return f"[{node_name}] CRITICAL: Service {service} not running!"
    return None

# ==== Remote RAID md0 check (robust for all mdstat layouts) ====
def check_remote_raid_md0(host, user, node_name):
    try:
        import re
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(hostname=host, username=user, timeout=10)
        stdin, stdout, stderr = ssh.exec_command("cat /proc/mdstat")
        mdstat = stdout.read().decode()

        # Find the block for md0 and look for the [UU_] status
        lines = mdstat.splitlines()
        status = None
        inside_md0 = False
        for line in lines:
            if line.startswith("md0"):
                inside_md0 = True
            elif inside_md0:
                m = re.search(r"\[(U|_)+\]", line)
                if m:
                    status = m.group(0)
                    break
                # Stop searching if we hit a blank line or another array
                if line.strip() == "" or ":" in line:
                    break

        ssh.close()

        if status is None:
            return f"[{node_name}] CRITICAL: /dev/md0 RAID status string not found!"
        if "_" in status:
            return f"[{node_name}] WARNING: /dev/md0 RAID degraded! Status: {status}"
        # All U's means all disks up
        return None

    except Exception as e:
        return f"[{node_name}] ERROR: Could not check RAID health remotely: {e}"

# ==== Remote log scan ====
def check_remote_logs(host, user, node_name):
    alerts = []
    for log in LOG_FILES:
        cmd = f"tail -500 {log}"
        try:
            out = ssh_command(host, user, cmd)
            lines = out.split("\n")
            for pattern in LOG_PATTERNS:
                if any(pattern in line for line in lines):
                    alerts.append(f"[{node_name}] WARNING: Pattern '{pattern}' in {log}")
        except Exception as e:
            alerts.append(f"[{node_name}] ERROR: Could not read log {log}: {e}")
    return alerts

# ==== Remote PostgreSQL replication check ====
def check_replication(host, node_name):
    try:
        import psycopg2
        conn = psycopg2.connect(host=host, dbname="postgres", user="postgres", connect_timeout=5)
        cur = conn.cursor()
        cur.execute("SELECT pg_is_in_recovery();")
        is_replica = cur.fetchone()[0]
        if is_replica:
            cur.execute("SELECT EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))::INT;")
            lag = cur.fetchone()[0]
            if lag is None:
                return f"[{node_name}] CRITICAL: Standby not streaming! Replication down."
            elif lag > 10:
                return f"[{node_name}] WARNING: Replication lag is {lag} seconds."
        cur.close()
        conn.close()
    except Exception as e:
        return f"[{node_name}] ERROR: Could not check replication: {e}"
    return None

# ==== Main routine ====
def main():
    problems = []

    # Multi-node checks
    for node in NODES:
        # All checks via SSH
        for disk in node["disks"]:
            res = check_remote_disk(node["host"], node["ssh_user"], disk, node["name"])
            if res:
                problems.append(res)
        # SMART check for all disks on this node
        smart_alerts = check_remote_smart(node["host"], node["ssh_user"], node["name"])
        if smart_alerts:
            problems.extend(smart_alerts)
        for svc in node["services"]:
            res = check_remote_service(node["host"], node["ssh_user"], svc, node["name"])
            if res:
                problems.append(res)
        # Replication check
        if node.get("db"):
            res = check_replication(node["host"], node["name"])
            if res:
                problems.append(res)
        # RAID check, only for nodes with "raid": True
        if node.get("raid", False):
            raid_health = check_remote_raid_md0(node["host"], node["ssh_user"], node["name"])
            if raid_health:
                problems.append(raid_health)
        # Log scan
        logs = check_remote_logs(node["host"], node["ssh_user"], node["name"])
        if logs:
            problems.extend(logs)

    # Send DM if anything wrong
    if problems:
        now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        msg = f"🚨 Genesis Radio Multi-Node Healthcheck {now} 🚨\n" + "\n".join(problems)
        print(msg)
        mastodon_dm(msg)
    else:
        print("OK: All checks passed.")

if __name__ == "__main__":
    main()
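The disk and inode checks above shell out to GNU df and strip everything but digits before parsing with int(). The same pipelines can be sanity-checked locally before trusting them over SSH; a minimal sketch, assuming GNU coreutils (the --output flag is GNU-specific):

# Used-space percentage as bare digits — what check_remote_disk parses
df --output=pcent /mnt/raid5 | tail -1 | tr -dc '0-9'; echo

# Used-inode percentage, parsed the same way by the inode check
df --output=ipcent /mnt/raid5 | tail -1 | tr -dc '0-9'; echo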
@@ -1,102 +0,0 @@

#!/bin/bash

# ---- CONFIG ----
PG_REMOTE_USER="postgres"
PG_REMOTE_HOST="cluster.db2.genesishostingtechnologies.com"
PG_REMOTE_PORT="5432"
PG_LOCAL_PORT="5432"
DUMP_DIR="/tmp/pgbackup_verify"
BACKUP_TARGET="root@thevault.bounceme.net:/mnt/backup3/pgdumps"
CC_TARGET="doc@clustercontrol.sshjunkie.com:/home/doc/backups"
DBS=("mastodon_production" "giteaprod")
LOGFILE="$DUMP_DIR/verify_log_$(date +%Y%m%d_%H%M%S).txt"
mkdir -p "$DUMP_DIR"

# ==== Mastodon DM function ====
mastodon_alert() {
    local msg="$1"
    curl -sS -X POST "https://chatwithus.live/api/v1/statuses" \
        -H "Authorization: Bearer rimxBLi-eaJAcwagkmoj6UoW7Lc473tQY0cOM041Euw" \
        --data-urlencode "status=$msg" \
        --data "visibility=direct" \
        --data "in_reply_to_account_id=114386383616633367" >/dev/null
}

ALL_OK=true
UPLOAD_LIST=()

for DB in "${DBS[@]}"; do
    echo "=== [$(date)] Dumping $DB from $PG_REMOTE_HOST ===" | tee -a "$LOGFILE"
    DUMPFILE="$DUMP_DIR/${DB}_$(date +%Y%m%d_%H%M%S).sql"

    # Dump from remote
    pg_dump -h "$PG_REMOTE_HOST" -p "$PG_REMOTE_PORT" -U "$PG_REMOTE_USER" -d "$DB" > "$DUMPFILE"
    if [ $? -ne 0 ]; then
        echo "[FAIL] Failed to dump $DB! Skipping upload." | tee -a "$LOGFILE"
        ALL_OK=false
        mastodon_alert "🚨 Database backup/verify FAILED: Could not dump $DB from $PG_REMOTE_HOST on $(hostname) at $(date). See log: $LOGFILE"
        continue
    fi

    # Restore/verify on Krang
    TESTDB="verify_${DB}_$RANDOM"
    echo "Creating test database $TESTDB" | tee -a "$LOGFILE"
    sudo -u postgres createdb -p "$PG_LOCAL_PORT" "$TESTDB"
    if [ $? -ne 0 ]; then
        echo "[FAIL] Failed to create $TESTDB!" | tee -a "$LOGFILE"
        ALL_OK=false
        mastodon_alert "🚨 Database backup/verify FAILED: Could not create test DB $TESTDB on $(hostname) at $(date). See log: $LOGFILE"
        continue
    fi

    echo "Restoring to $TESTDB" | tee -a "$LOGFILE"
    sudo -u postgres psql -p "$PG_LOCAL_PORT" -d "$TESTDB" < "$DUMPFILE"
    if [ $? -ne 0 ]; then
        echo "[FAIL] Restore failed for $DB!" | tee -a "$LOGFILE"
        sudo -u postgres dropdb -p "$PG_LOCAL_PORT" "$TESTDB"
        ALL_OK=false
        mastodon_alert "🚨 Database backup/verify FAILED: Restore failed for $DB on $(hostname) at $(date). See log: $LOGFILE"
        continue
    fi

    # Quick table listing for sanity
    sudo -u postgres psql -p "$PG_LOCAL_PORT" -d "$TESTDB" -c "\dt" | tee -a "$LOGFILE"
    if [ $? -eq 0 ]; then
        echo "[PASS] $DB: Dump and restore OK." | tee -a "$LOGFILE"
        UPLOAD_LIST+=("$DUMPFILE")
    else
        echo "[FAIL] $DB: Test query failed!" | tee -a "$LOGFILE"
        ALL_OK=false
        mastodon_alert "🚨 Database backup/verify FAILED: Test query failed for $DB on $(hostname) at $(date). See log: $LOGFILE"
    fi

    sudo -u postgres dropdb -p "$PG_LOCAL_PORT" "$TESTDB"
    echo "Cleaned up $TESTDB" | tee -a "$LOGFILE"
    echo "" | tee -a "$LOGFILE"
done

if $ALL_OK && [ "${#UPLOAD_LIST[@]}" -eq "${#DBS[@]}" ]; then
    echo "All dumps verified, sending to $BACKUP_TARGET" | tee -a "$LOGFILE"
    scp "${UPLOAD_LIST[@]}" "$BACKUP_TARGET"
    if [ $? -eq 0 ]; then
        echo "Uploads to thevault successful." | tee -a "$LOGFILE"
        # -- NEW: Also upload to ClusterControl controller
        echo "Uploading to ClusterControl controller at $CC_TARGET" | tee -a "$LOGFILE"
        scp "${UPLOAD_LIST[@]}" "$CC_TARGET"
        if [ $? -eq 0 ]; then
            echo "Uploads to ClusterControl successful." | tee -a "$LOGFILE"
            rm -f "${UPLOAD_LIST[@]}"
        else
            echo "[WARN] Upload to ClusterControl controller failed!" | tee -a "$LOGFILE"
            mastodon_alert "⚠️ Database backup verified, but upload to ClusterControl at $CC_TARGET failed on $(hostname) at $(date). See log: $LOGFILE"
        fi
    else
        echo "[FAIL] Upload to thevault failed!" | tee -a "$LOGFILE"
        mastodon_alert "🚨 Database backup/verify FAILED: Upload to $BACKUP_TARGET failed on $(hostname) at $(date). See log: $LOGFILE"
    fi
else
    echo "Not all backups verified! Nothing uploaded." | tee -a "$LOGFILE"
    mastodon_alert "🚨 Database backup/verify FAILED: One or more DBs failed verification on $(hostname) at $(date). See log: $LOGFILE"
fi

echo "DONE. Log: $LOGFILE"
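Nothing in the script needs an interactive session, so a verify like this is typically driven from cron. A minimal sketch, assuming it is saved as /usr/local/bin/pg_verify_backups.sh (a hypothetical path, not named in this diff):

# m h dom mon dow  command — nightly at 02:30, appending output to a local log
30 2 * * * /usr/local/bin/pg_verify_backups.sh >> /var/log/pg_verify.log 2>&1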
@@ -1,20 +0,0 @@

#!/bin/bash
# Find all venvs, freeze their packages to requirements.txt

BASE_DIR="$HOME"  # Or wherever your projects are

echo "Scanning for venvs under $BASE_DIR ..."

find "$BASE_DIR" -type f -name "pyvenv.cfg" 2>/dev/null | while read cfg; do
    venv_dir="$(dirname "$cfg")"
    reqfile="$venv_dir/requirements.txt"
    echo "🔒 Freezing $venv_dir → $reqfile"
    "$venv_dir/bin/python" -m pip freeze > "$reqfile"
    if [ $? -eq 0 ]; then
        echo "✅ Done: $reqfile"
    else
        echo "❌ Failed to freeze $venv_dir"
    fi
done

echo "All venvs processed!"
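The frozen requirements.txt makes each venv reproducible: restoring one on another machine is two commands. A sketch, with ~/myproject standing in for any project directory (hypothetical path):

python3 -m venv ~/myproject/venv                      # recreate an empty venv
~/myproject/venv/bin/pip install -r requirements.txt  # replay the frozen package list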
@@ -1,48 +0,0 @@

#!/bin/bash

SRC_ROOT="/home/doc"
TARGET_DIR="/home/doc/genesis-tools/venvrequirements"
DRY_RUN=0

if [[ "$1" == "--dry-run" ]]; then
    DRY_RUN=1
    echo "Dry run mode enabled: No files will be created or copied."
fi

echo "Scanning for venvs in $SRC_ROOT ..."

found_any=0
for dir in "$SRC_ROOT"/*/; do
    venv_name=$(basename "$dir")
    req_file="${dir}requirements.txt"
    dest_file="$TARGET_DIR/requirements_${venv_name}.txt"

    # Only proceed if it's a directory and requirements.txt exists
    if [[ -d "$dir" && -f "$req_file" ]]; then
        found_any=1
        echo "Found: $req_file"
        echo "→ Would copy to: $dest_file"

        if [[ -f "$dest_file" ]]; then
            echo "  [SKIP] $dest_file already exists. Skipping."
        else
            if [[ "$DRY_RUN" -eq 1 ]]; then
                echo "  [DRY RUN] Would copy $req_file → $dest_file"
            else
                cp "$req_file" "$dest_file"
                echo "  [OK] Copied $req_file → $dest_file"
            fi
        fi
        echo ""
    fi
done

if [[ "$found_any" -eq 0 ]]; then
    echo "No requirements.txt files found in $SRC_ROOT!"
fi

if [[ "$DRY_RUN" -eq 1 ]]; then
    echo "All requirements processed. (Dry run mode)"
else
    echo "All requirements copied and organized."
fi
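Usage is a single optional flag; a quick sketch, assuming the script is saved as collect_requirements.sh (hypothetical name):

./collect_requirements.sh --dry-run   # preview: prints what would be copied, touches nothing
./collect_requirements.sh             # copy each requirements.txt into $TARGET_DIR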
@@ -3,30 +3,28 @@

 # Path to Snort's alert log (snort.alert.fast)
 SNORT_LOG="/var/log/snort/snort.alert.fast"

-# Database connection details
-DB_HOST="38.102.127.166"
-DB_USER="ipblocks_user"
-DB_PASS="rusty2281"
-DB_NAME="ipblocks"
+# Path to store blocked IPs (to avoid duplicates)
+BLOCKED_IPS="/tmp/blocked_ips.txt"

-# Function to insert blocked IP into the PostgreSQL database
+# Function to block an IP using iptables
 block_ip() {
     local ip=$1

-    # Remove port if included in the IP
-    ip=${ip%%:*}
-
-    # Insert the blocked IP into the PostgreSQL database (into the blocked_ip_log table)
-    PGPASSWORD="$DB_PASS" psql -U "$DB_USER" -h "$DB_HOST" -d "$DB_NAME" -c "INSERT INTO blocked_ip_log (ip_address) VALUES ('$ip');"
-
-    # Optionally print to confirm the insertion
-    echo "Blocked IP $ip inserted into the database log."
+    # Check if IP is already blocked
+    if ! grep -q "$ip" "$BLOCKED_IPS"; then
+        # Add IP to iptables block list
+        sudo iptables -I INPUT -s "$ip" -j DROP
+
+        # Log the blocked IP in the blocked_ips file
+        echo "$ip" >> "$BLOCKED_IPS"
+        echo "Blocked IP: $ip"
+    fi
 }

-# Ensure the log file exists and is readable
-if [ ! -f "$SNORT_LOG" ]; then
-    echo "Snort log file not found!"
-    exit 1
-fi
+# Ensure the blocked_ips.txt file exists and is writable
+if [ ! -f "$BLOCKED_IPS" ]; then
+    sudo touch "$BLOCKED_IPS"
+    sudo chmod 666 "$BLOCKED_IPS"
+fi

 # Monitor the snort.alert.fast file for new malicious IPs

@@ -37,12 +35,12 @@ tail -F "$SNORT_LOG" | while read line; do

     # Extract source and destination IP addresses from Snort logs
     if echo "$line" | grep -q "ICMP PING NMAP"; then
         # Extract source IP (before "->")
-        ip=$(echo "$line" | awk -F' -> ' '{print $1}' | awk '{print $NF}' | cut -d':' -f1)
+        ip=$(echo "$line" | awk -F' -> ' '{print $1}' | awk '{print $NF}')
         echo "Found Source IP: $ip"  # Debug: Show the IP being extracted
         block_ip "$ip"
     elif echo "$line" | grep -q "EXPLOIT"; then
         # Extract source IP (before "->")
-        ip=$(echo "$line" | awk -F' -> ' '{print $1}' | awk '{print $NF}' | cut -d':' -f1)
+        ip=$(echo "$line" | awk -F' -> ' '{print $1}' | awk '{print $NF}')
         echo "Found Source IP: $ip"  # Debug: Show the IP being extracted
         block_ip "$ip"
     fi
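The two-stage awk pipeline can be checked against a sample fast-alert line; a sketch with an illustrative (not real) alert using TEST-NET addresses:

line='04/23-12:00:01.000000 [**] [1:469:3] ICMP PING NMAP [**] [Priority: 2] {ICMP} 203.0.113.7 -> 198.51.100.2'
echo "$line" | awk -F' -> ' '{print $1}' | awk '{print $NF}'
# prints 203.0.113.7 — note that for TCP alerts the field is "ip:port", and the
# new version no longer strips the port (the old cut -d':' -f1 was dropped)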
@@ -7,10 +7,10 @@ from datetime import datetime

 # Function to record the radio show using ffmpeg
 def record_show(folder_name, duration, filename_prefix):
     # Set the working directory for the recording
-    working_directory = "home/doc/Genesis Radio"
+    working_directory = "/mnt/convert/Genesis Radio"

     # Ensure the folder exists in archives with the prefix as the folder name
-    archives_directory = "/mnt/archives"
+    archives_directory = "/mnt/convert/archives"
     target_folder = os.path.join(archives_directory, filename_prefix)
     if not os.path.exists(target_folder):
         os.makedirs(target_folder)
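record_show hands the actual capture to ffmpeg, and a stream recording of this shape typically reduces to one command. A sketch, with STREAM_URL and the show name as placeholders (the real values are not in this diff):

STREAM_URL="https://example.com/stream"   # placeholder, not from the diff
DURATION=3600                             # seconds to capture
ffmpeg -i "$STREAM_URL" -t "$DURATION" -acodec copy \
    "/mnt/convert/archives/MyShow/MyShow_$(date +%Y%m%d_%H%M%S).mp3"
# -acodec copy keeps the source encoding; re-encode instead if the container differs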
@@ -1,24 +0,0 @@

blinker==1.9.0
blurhash==1.1.4
certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
decorator==5.2.1
Flask==3.1.0
Flask-SQLAlchemy==3.1.1
greenlet==3.2.0
idna==3.10
itsdangerous==2.2.0
Jinja2==3.1.6
MarkupSafe==3.0.2
Mastodon.py==2.0.1
psycopg2-binary==2.9.10
python-dateutil==2.9.0.post0
python-magic==0.4.27
requests==2.32.3
six==1.17.0
SQLAlchemy==2.0.40
typing_extensions==4.13.2
urllib3==2.4.0
watchdog==6.0.0
Werkzeug==3.1.3
@@ -1,5 +0,0 @@

certifi==2025.1.31
charset-normalizer==3.4.1
idna==3.10
requests==2.32.3
urllib3==2.4.0
@@ -1,12 +0,0 @@

bcrypt==4.3.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
cryptography==44.0.2
idna==3.10
paramiko==3.5.1
psycopg2-binary==2.9.10
pycparser==2.22
PyNaCl==1.5.0
requests==2.32.3
urllib3==2.4.0
@@ -1,22 +0,0 @@

blinker==1.9.0
certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
Flask==3.1.0
idna==3.10
itsdangerous==2.2.0
Jinja2==3.1.6
linkify-it-py==2.0.3
markdown-it-py==3.0.0
MarkupSafe==3.0.2
mdit-py-plugins==0.4.2
mdurl==0.1.2
platformdirs==4.3.7
Pygments==2.19.1
requests==2.32.3
rich==14.0.0
textual==3.1.0
typing_extensions==4.13.2
uc-micro-py==1.0.3
urllib3==2.4.0
Werkzeug==3.1.3
@@ -1,9 +0,0 @@

APScheduler==3.11.0
blinker==1.9.0
click==8.1.8
Flask==3.1.0
itsdangerous==2.2.0
Jinja2==3.1.6
MarkupSafe==3.0.2
tzlocal==5.3.1
Werkzeug==3.1.3
@@ -1,33 +0,0 @@

alembic==1.13.2
blinker==1.8.2
blurhash==1.1.4
certifi==2024.7.4
charset-normalizer==3.3.2
click==8.1.7
decorator==5.1.1
dnspython==2.6.1
email_validator==2.2.0
Flask==3.0.3
Flask-Login==0.6.3
Flask-Migrate==4.0.7
Flask-SQLAlchemy==3.1.1
Flask-WTF==1.2.1
greenlet==3.0.3
idna==3.7
itsdangerous==2.2.0
Jinja2==3.1.4
Mako==1.3.5
MarkupSafe==2.1.5
Mastodon.py==1.8.1
psycopg2-binary==2.9.9
python-dateutil==2.9.0.post0
python-dotenv==1.0.1
python-magic==0.4.27
requests==2.32.3
schedule==1.2.2
six==1.16.0
SQLAlchemy==2.0.31
typing_extensions==4.12.2
urllib3==2.2.2
Werkzeug==3.0.3
WTForms==3.1.2