Auto commit from /home/doc/genesis-tools
This commit is contained in:
parent 78fed9b3ac
commit 7ea19b66d7

68 OPS.md Normal file
@@ -0,0 +1,68 @@
# 🚀 Genesis Radio - Healthcheck Response Runbook

## Purpose

When an alert fires (Critical or Warning), this guide tells you what to do, so that anyone can react quickly even if the admin is not available.

---

## 🛠️ How to Use

- Every Mastodon DM or Dashboard alert gives you a **timestamp**, **server name**, and **issue**.
- Look up the type of issue in the table below.
- Follow the recommended action immediately.

---

## 📋 Quick Response Table

| Type of Alert | Emoji | What it Means | Immediate Action |
|:---|:---|:---|:---|
| Critical Service Failure | 🛑 | A key service (like Mastodon, MinIO) is **down** | SSH into the server, try `systemctl restart <service>`. |
| Disk Filling Up | 📈 | Disk space critically low (under 10% free) | SSH in and delete old logs/backups. Free up space **immediately**. |
| Rclone Mount Error | 🐢 | Cache failed, mount not healthy | Restart the rclone mount process (usually `systemctl restart rclone@<mount>`, or remount manually). |
| PostgreSQL Replication Lag | 💥 | Database replicas are falling behind | Check database health. Restart replication if needed. Alert admin if lag is >5 minutes. |
| RAID Degraded | 🧨 | RAID array is degraded (missing a disk) | Open the server console. Identify the failed drive. Replace it if possible. Otherwise escalate immediately. |
| Log File Warnings | ⚠️ | Error patterns found in logs | Investigate. If the system is healthy, **log it for later**. If errors worsen, escalate. |
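
For the two most common cases (a down service and a full disk), the first-response commands usually look like the sketch below. This is a guide, not a script: service names, hosts, and log paths vary by node, so adjust before running.

```bash
# Critical service failure: restart and verify
ssh <user>@<server>
sudo systemctl restart <service>          # e.g. mastodon-web, nginx, minio.service
sudo systemctl status <service>           # confirm "active (running)"

# Disk filling up: find and clear the biggest offenders
df -h /                                   # confirm which filesystem is low
sudo du -xh /var/log | sort -h | tail     # largest log directories
sudo journalctl --vacuum-time=7d          # trim the systemd journal
# then delete or compress old backups/logs you know are safe to remove
```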

---

## 💻 If Dashboard Shows

- ✅ **All Green** = No action needed.
- ⚠️ **Warnings** = Investigate soon. Not urgent unless repeated.
- 🚨 **Criticals** = Drop everything and act immediately.

---

## 🛡️ Emergency Contacts

| Role | Name | Contact |
|:----|:-----|:--------|
| Primary Admin | (You) | [YOUR CONTACT INFO] |
| Secondary | Brice | [BRICE CONTACT INFO] |

(Replace placeholders with actual contact details.)

---

## ✍️ Example Cheat Sheet for Brice

**Sample Mastodon DM:**

> 🚨 Genesis Radio Critical Healthcheck 2025-04-28 14:22:33 🚨
> ⚡ 1 critical issue found:
> - 🛑 [mastodon] CRITICAL: Service mastodon-web not running!

**Brice should:**

1. SSH into the Mastodon server.
2. Run `systemctl restart mastodon-web`.
3. Confirm the service is running again.
4. If it fails or stays down, escalate to the admin.
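
As a concrete sketch of steps 1-4, assuming the Mastodon host and SSH user from the healthcheck config (chatwithus.live, root) - adjust if access is set up differently:

```bash
ssh root@chatwithus.live
systemctl restart mastodon-web
systemctl status mastodon-web --no-pager       # should show "active (running)"
journalctl -u mastodon-web -n 50 --no-pager    # recent logs if it will not stay up
# still failing? stop here and contact the primary admin
```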

---

# 🌟 TL;DR

- 🚨 Criticals: Act immediately.
- ⚠️ Warnings: Investigate soon.
- ✅ Healthy: No action needed.

---

**Stay sharp. Our uptime and service quality depend on quick, calm responses!** 🛡️💪

251 dbcheck/dbcheck1.py Normal file
@@ -0,0 +1,251 @@
import os
import requests
import datetime
import paramiko

# ==== CONFIG ====
MASTODON_INSTANCE = "https://chatwithus.live"
MASTODON_TOKEN = "rimxBLi-eaJAcwagkmoj6UoW7Lc473tQY0cOM041Euw"
MASTODON_USER_ID = "114386383616633367"

DISK_WARN_THRESHOLD = 10  # percent free
INODE_WARN_THRESHOLD = 10  # percent free
# NOTE: the disk/inode checks below currently warn at a hardcoded 90% used, not these constants.
LOG_FILES = ["/var/log/syslog", "/var/log/nginx/error.log"]
LOG_PATTERNS = ["ERROR", "FATAL", "disk full", "out of memory"]

NODES = [
    {
        "name": "shredder",
        "host": "38.102.127.171",
        "ssh_user": "doc",
        "services": ["minio.service"],
        "disks": ["/", "/mnt/raid5"],
        "type": "remote",
        "db": False,
        "raid": True
    },
    {
        "name": "mastodon",
        "host": "chatwithus.live",  # Now points directly to your Mastodon server
        "ssh_user": "root",
        "services": ["nginx", "mastodon-web"],
        "disks": ["/"],
        "type": "remote",
        "db": False,
        "raid": False
    },
    {
        "name": "db1",
        "host": "cluster.db1.genesishostingtechnologies.com",
        "ssh_user": "doc",
        "services": ["postgresql@16-main.service"],
        "disks": ["/", "/var/lib/postgresql"],
        "type": "remote",
        "db": True,
        "raid": False
    },
    {
        "name": "db2",
        "host": "cluster.db2.genesishostingtechnologies.com",
        "ssh_user": "doc",
        "services": ["postgresql@16-main.service"],
        "disks": ["/", "/var/lib/postgresql"],
        "type": "remote",
        "db": True,
        "raid": False
    },
]
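
# Field reference for NODES entries (documenting the keys used above and in main()):
#   name      - label used in alert messages, e.g. "[db1] CRITICAL: ..."
#   host      - hostname or IP reached over SSH (and PostgreSQL for db nodes)
#   ssh_user  - account used by ssh_command() via paramiko
#   services  - systemd units checked with "systemctl is-active"
#   disks     - mount points checked for free space and free inodes
#   db        - True enables the PostgreSQL replication check
#   raid      - True enables the /proc/mdstat check for /dev/md0
# ("type" is always "remote" here and is not currently read by the checks.)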

# ==== Mastodon DM function ====
def mastodon_dm(message):
    url = f"{MASTODON_INSTANCE}/api/v1/statuses"
    headers = {"Authorization": f"Bearer {MASTODON_TOKEN}"}
    payload = {
        "status": message,
        "visibility": "direct",
        "in_reply_to_account_id": MASTODON_USER_ID
    }
    resp = requests.post(url, headers=headers, data=payload)
    if resp.status_code != 200:
        print(f"Failed to send Mastodon DM: {resp.text}")

# ==== SSH command runner ====
def ssh_command(host, user, cmd):
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(hostname=host, username=user, timeout=10)
    stdin, stdout, stderr = ssh.exec_command(cmd)
    out = stdout.read().decode().strip()
    ssh.close()
    return out
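
# Note on the SSH helper above: AutoAddPolicy() accepts unknown host keys automatically,
# which keeps the checks zero-config but skips host-key verification. That is a reasonable
# trade-off on a trusted internal network; load known_hosts instead if these hosts are
# reached over the open internet.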

# ==== Robust Remote disk check ====
def check_remote_disk(host, user, path, node_name):
    cmd = f"df --output=pcent {path} | tail -1 | tr -dc '0-9'"
    out = ssh_command(host, user, cmd)
    if not out:
        return f"[{node_name}] ERROR: Disk {path} not found or could not check disk usage."
    try:
        percent = int(out)
    except ValueError:
        return f"[{node_name}] ERROR: Could not parse disk usage for {path}. Output was: '{out}'"
    if percent > 90:
        return f"[{node_name}] WARNING: Only {100-percent}% disk free on {path}."
    # Inode check
    cmd_inode = f"df --output=ipcent {path} | tail -1 | tr -dc '0-9'"
    out_inode = ssh_command(host, user, cmd_inode)
    if not out_inode:
        return f"[{node_name}] ERROR: Disk {path} not found or could not check inode usage."
    try:
        percent_inode = int(out_inode)
    except ValueError:
        return f"[{node_name}] ERROR: Could not parse inode usage for {path}. Output was: '{out_inode}'"
    if percent_inode > 90:
        return f"[{node_name}] WARNING: Only {100-percent_inode}% inodes free on {path}."
    return None

# ==== SMART health check (for all disks) ====
def check_remote_smart(host, user, node_name):
    alerts = []
    # List block devices
    cmd_lsblk = "lsblk -ndo NAME,TYPE | awk '$2==\"disk\" {print $1}'"
    devs = ssh_command(host, user, cmd_lsblk)
    if not devs:
        alerts.append(f"[{node_name}] ERROR: Could not list block devices for SMART check.")
        return alerts
    for dev in devs.split():
        smart_cmd = f"sudo smartctl -H /dev/{dev}"
        out = ssh_command(host, user, smart_cmd)
        if "PASSED" in out:
            continue  # All good
        elif "FAILED" in out or "Pre-fail" in out or "SMART support is: Unavailable" in out:
            alerts.append(f"[{node_name}] CRITICAL: SMART health issue on /dev/{dev}!\n{out}")
        elif "Unknown" in out or not out:
            alerts.append(f"[{node_name}] ERROR: SMART status unknown on /dev/{dev}. Output: {out}")
        # Optionally scan for other SMART errors
    return alerts

# ==== Remote service check ====
def check_remote_service(host, user, service, node_name):
    cmd = f"systemctl is-active {service}"
    out = ssh_command(host, user, cmd)
    if out.strip() != "active":
        return f"[{node_name}] CRITICAL: Service {service} not running!"
    return None

# ==== Remote RAID md0 check (robust for all mdstat layouts) ====
def check_remote_raid_md0(host, user, node_name):
    try:
        import re
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(hostname=host, username=user, timeout=10)
        stdin, stdout, stderr = ssh.exec_command("cat /proc/mdstat")
        mdstat = stdout.read().decode()

        # Find the block for md0 and look for the [UU_] status
        lines = mdstat.splitlines()
        status = None
        inside_md0 = False
        for line in lines:
            if line.startswith("md0"):
                inside_md0 = True
            elif inside_md0:
                m = re.search(r"\[(U|_)+\]", line)
                if m:
                    status = m.group(0)
                    break
                # Stop searching if we hit a blank line or another array
                if line.strip() == "" or ":" in line:
                    break

        ssh.close()

        if status is None:
            return f"[{node_name}] CRITICAL: /dev/md0 RAID status string not found!"
        if "_" in status:
            return f"[{node_name}] WARNING: /dev/md0 RAID degraded! Status: {status}"
        # All U's means all disks up
        return None

    except Exception as e:
        return f"[{node_name}] ERROR: Could not check RAID health remotely: {e}"

# ==== Remote log scan ====
def check_remote_logs(host, user, node_name):
    alerts = []
    for log in LOG_FILES:
        cmd = f"tail -500 {log}"
        try:
            out = ssh_command(host, user, cmd)
            lines = out.split("\n")
            for pattern in LOG_PATTERNS:
                if any(pattern in line for line in lines):
                    alerts.append(f"[{node_name}] WARNING: Pattern '{pattern}' in {log}")
        except Exception as e:
            alerts.append(f"[{node_name}] ERROR: Could not read log {log}: {e}")
    return alerts

# ==== Remote PostgreSQL replication check ====
def check_replication(host, node_name):
    try:
        import psycopg2
        conn = psycopg2.connect(host=host, dbname="postgres", user="postgres", connect_timeout=5)
        cur = conn.cursor()
        cur.execute("SELECT pg_is_in_recovery();")
        is_replica = cur.fetchone()[0]
        if is_replica:
            cur.execute("SELECT EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))::INT;")
            lag = cur.fetchone()[0]
            if lag is None:
                return f"[{node_name}] CRITICAL: Standby not streaming! Replication down."
            elif lag > 10:
                return f"[{node_name}] WARNING: Replication lag is {lag} seconds."
        cur.close()
        conn.close()
    except Exception as e:
        return f"[{node_name}] ERROR: Could not check replication: {e}"
    return None
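
# About the replication check above: pg_is_in_recovery() is true on a standby, and
# pg_last_xact_replay_timestamp() is the commit time of the last WAL record replayed there,
# so now() minus that timestamp approximates replay lag in seconds. It is NULL when nothing
# has been replayed yet (treated here as replication being down), and on an idle primary the
# computed lag can grow even though the standby is fully caught up.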

# ==== Main routine ====
def main():
    problems = []

    # Multi-node checks
    for node in NODES:
        # All checks via SSH
        for disk in node["disks"]:
            res = check_remote_disk(node["host"], node["ssh_user"], disk, node["name"])
            if res: problems.append(res)
        # SMART check for all disks on this node
        smart_alerts = check_remote_smart(node["host"], node["ssh_user"], node["name"])
        if smart_alerts:
            problems.extend(smart_alerts)
        for svc in node["services"]:
            res = check_remote_service(node["host"], node["ssh_user"], svc, node["name"])
            if res: problems.append(res)
        # Replication check
        if node.get("db"):
            res = check_replication(node["host"], node["name"])
            if res: problems.append(res)
        # RAID check, only for nodes with "raid": True
        if node.get("raid", False):
            raid_health = check_remote_raid_md0(node["host"], node["ssh_user"], node["name"])
            if raid_health:
                problems.append(raid_health)
        # Log scan
        logs = check_remote_logs(node["host"], node["ssh_user"], node["name"])
        if logs:
            problems.extend(logs)

    # Send DM if anything wrong
    if problems:
        now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        msg = f"🚨 Genesis Radio Multi-Node Healthcheck {now} 🚨\n" + "\n".join(problems)
        print(msg)
        mastodon_dm(msg)
    else:
        print("OK: All checks passed.")

if __name__ == "__main__":
    main()

17 docker/archivecontrol/Dockerfile Normal file
@@ -0,0 +1,17 @@

# Use the official Python image
FROM python:3.12-slim

# Set the working directory inside the container
WORKDIR /app

# Copy the application code into the container
COPY . /app/

# Install dependencies from requirements.txt inside the container
RUN pip install --no-cache-dir -r requirements.txt

# Expose the port your app will run on (adjust as needed)
EXPOSE 5000

# Command to run the application (adjust script name)
CMD ["python", "recordit2.py"]
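# Build-and-run sketch (an assumption: this directory also holds recordit2.py and
# requirements.txt, and the host's /mnt/archives should be visible inside the container):
#   docker build -t archivecontrol .
#   docker run -d -p 5000:5000 -v /mnt/archives:/mnt/archives archivecontrol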

55 docker/archivecontrol/docker-compose.yml Normal file
@@ -0,0 +1,55 @@

version: '3'
services:
  # Archive Control app
  archivecontrol:
    build: ./archivecontrol
    ports:
      - "5000:5000"
    restart: always
    environment:
      - ENV=production

  # Archive List app
  archivelist:
    build: ./archivelist
    ports:
      - "5001:5000"
    restart: always
    environment:
      - ENV=production

  # DB Check app
  dbcheck:
    build: ./dbcheck
    ports:
      - "5002:5000"
    restart: always
    environment:
      - ENV=production

  # Hosting Toot app
  hostingtoot:
    build: ./hostingtoot
    ports:
      - "5003:5000"
    restart: always
    environment:
      - ENV=production

  # Radio Toot app
  radiotoot:
    build: ./radiotoot
    ports:
      - "5004:5000"
    restart: always
    environment:
      - ENV=production

  # Text app
  text:
    build: ./text
    ports:
      - "5005:5000"
    restart: always
    environment:
      - ENV=production
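# Usage sketch: from the directory that contains this file, "docker compose up -d --build"
# starts all six apps. Note the build paths (./archivecontrol, ./archivelist, ...) assume the
# app directories sit next to this compose file; as committed under docker/archivecontrol/,
# the file may need to live one level up (or the paths adjusted) for the builds to resolve.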

160 docker/archivecontrol/recordit2.py Normal file
@@ -0,0 +1,160 @@

import os
from flask import Flask, render_template_string, send_from_directory, abort
from mastodon import Mastodon

# === Configuration ===

ROOT_DIR = r"/mnt/archives"  # Update this path to where your shows live
ALLOWED_EXTENSIONS = {".mp3", ".wav", ".flac", ".m4a"}
BANNER_FILENAMES = ["banner.jpg", "banner.png", "banner.jpeg"]
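
# Expected layout under ROOT_DIR (an assumption based on how the routes below read it):
#   /mnt/archives/<show_key>/             one directory per show, e.g. "retro", "rockvault"
#   /mnt/archives/<show_key>/*.mp3        audio files served by /stream/<show>/<file>
#   /mnt/archives/<show_key>/banner.jpg   optional banner picked up by find_banner()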

# Friendly display names
DISPLAY_NAMES = {
    "80sdimension": "The 80s Dimension",
    "90slunch": "The 90s Lunch",
    "au": "Alternate Universe",
    "welch": "Bob Welch Retrospective",
    "bootycalls": "Booty Calls",
    "chaos": "The Chaos Bus",
    "mac": "Fleetwood Mac Retrospective",
    "gog": "The Good Ol Genesis",
    "housecalls": "House Calls",
    "pisces": "Pisces Playhouse",
    "retro": "The Retro Breakfast",
    "rockvault": "Rock Vault",
    "mayhem": "Rock and Roll Mayhem",
    "wakeman": "Rick Wakeman Retrospective",
    "sonicrevolt": "Sonic Revolt",
    "tunefuse": "TuneFuse",
    "wwo80s": "We Want Our 80s",
    "yacht": "Yacht Vibes Only",
    "yes": "Yes Retrospective",
}

# === URLs for File Hosting ===

BASE_URL = "http://server.genesis-radio.net:5020"  # This is the base URL where your files live (e.g., http://localhost:5000)

SERVER_URL = "http://genesis-radio.net"  # This is the general server URL if you need it for anything else

# === Flask App ===

app = Flask(__name__)

HOME_TEMPLATE = """
<!DOCTYPE html>
<html>
<head>
    <title>Genesis Radio Archives</title>
    <style>
        body { font-family: Arial, sans-serif; background-color: #111; color: #eee; margin: 2em; }
        h1 { font-size: 2em; color: #0ff; border-bottom: 2px solid #0ff; padding-bottom: 0.5em; }
        ul { list-style: none; padding-left: 0; }
        li { margin: 1em 0; }
        a { color: #0cf; font-size: 1.3em; text-decoration: none; font-weight: bold; }
        a:hover { text-decoration: underline; }
    </style>
</head>
<body>
    <h1>Genesis Radio: Show Archives</h1>
    <ul>
        {% for show in shows %}
        <li><a href="{{ url_for('show_page', show_name=show) }}">{{ display_names.get(show, show) }}</a></li>
        {% endfor %}
    </ul>
</body>
</html>
"""

SHOW_TEMPLATE = """
<!DOCTYPE html>
<html>
<head>
    <title>{{ show_name }} - Genesis Radio</title>
    <style>
        body { font-family: Arial, sans-serif; background-color: #111; color: #eee; margin: 2em; }
        a { color: #0cf; text-decoration: none; }
        a:hover { text-decoration: underline; }
        h1 { color: #0ff; font-size: 1.8em; margin-bottom: 1em; border-bottom: 2px solid #0ff; padding-bottom: 0.3em; }
        .back { margin-bottom: 1.5em; display: inline-block; color: #0cf; }
        .audio-block { margin-bottom: 2em; }
        p { font-weight: bold; color: #fff; }
        audio { width: 100%; }
        .banner { width: 100%; max-height: 250px; object-fit: cover; margin-bottom: 1em; }
    </style>
</head>
<body>
    <a href="{{ url_for('index') }}" class="back">← Back to shows</a>
    <h1>{{ display_names.get(show_name, show_name) }}</h1>
    {% if banner %}
    <img src="{{ url_for('show_banner', show_name=show_name, banner_name=banner) }}" class="banner">
    {% endif %}
    {% for file in files %}
    <div class="audio-block">
        <p>{{ file }}</p>
        <audio controls>
            <source src="{{ url_for('stream_file', show=show_name, filename=file) }}" type="audio/mpeg">
            Your browser does not support the audio element.
        </audio>
    </div>
    {% else %}
    <p>No audio files found for this show.</p>
    {% endfor %}
</body>
</html>
"""

# === Utility Functions ===

def list_shows(base_dir):
    return sorted([d for d in os.listdir(base_dir) if os.path.isdir(os.path.join(base_dir, d))])

def list_audio_files(show_dir):
    return sorted([
        f for f in os.listdir(show_dir)
        if os.path.splitext(f)[1].lower() in ALLOWED_EXTENSIONS
    ])

def find_banner(show_dir):
    for name in BANNER_FILENAMES:
        if os.path.isfile(os.path.join(show_dir, name)):
            return name
    return None

# === Flask Routes ===

@app.route("/")
def index():
    shows = list_shows(ROOT_DIR)
    return render_template_string(HOME_TEMPLATE, shows=shows, display_names=DISPLAY_NAMES)

@app.route("/show/<show_name>")
def show_page(show_name):
    show_path = os.path.join(ROOT_DIR, show_name)
    if not os.path.isdir(show_path):
        abort(404)
    files = list_audio_files(show_path)
    banner = find_banner(show_path)
    return render_template_string(SHOW_TEMPLATE, show_name=show_name, files=files, banner=banner, display_names=DISPLAY_NAMES)

@app.route("/stream/<show>/<path:filename>")
def stream_file(show, filename):
    safe_path = os.path.join(ROOT_DIR, show)
    if os.path.isfile(os.path.join(safe_path, filename)):
        return send_from_directory(safe_path, filename, as_attachment=False)
    else:
        abort(404)

@app.route("/banner/<show_name>/<banner_name>")
def show_banner(show_name, banner_name):
    show_path = os.path.join(ROOT_DIR, show_name)
    if os.path.isfile(os.path.join(show_path, banner_name)):
        return send_from_directory(show_path, banner_name)
    else:
        abort(404)

# === Start Everything ===

if __name__ == "__main__":
    app.run(debug=True, host='0.0.0.0', port=5000)

24 docker/archivecontrol/requirements.txt Normal file
@@ -0,0 +1,24 @@

blinker==1.9.0
blurhash==1.1.4
certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
decorator==5.2.1
Flask==3.1.0
Flask-SQLAlchemy==3.1.1
greenlet==3.2.0
idna==3.10
itsdangerous==2.2.0
Jinja2==3.1.6
MarkupSafe==3.0.2
Mastodon.py==2.0.1
psycopg2-binary==2.9.10
python-dateutil==2.9.0.post0
python-magic==0.4.27
requests==2.32.3
six==1.17.0
SQLAlchemy==2.0.40
typing_extensions==4.13.2
urllib3==2.4.0
watchdog==6.0.0
Werkzeug==3.1.3

File diff suppressed because it is too large
@@ -1,43 +1,218 @@
WARNING: password file "/home/doc/.pgpass" has group or world access; permissions should be u=rw (0600) or less
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 19:15:17 🚨
WARNING: password file "/home/doc/.pgpass" has group or world access; permissions should be u=rw (0600) or less
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 09:00:16 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db1] ERROR: SMART status unknown on /dev/sda. Output:
Exception (client): Error reading SSH protocol banner
[db2] WARNING: Replication lag is 103 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 19:30:17 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 135 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 19:45:17 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 24 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 20:00:19 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 20:15:33 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 109 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 20:30:21 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 46 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 20:45:29 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 21:00:18 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 21:15:21 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 38 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 21:30:36 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 21 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 21:45:32 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 22 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 22:00:30 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 30 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 22:15:31 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 14 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 22:30:24 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 55 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 22:45:37 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 23:00:21 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 21 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 23:15:21 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 19 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 23:30:16 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 68 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 23:45:16 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 77 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 00:00:18 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 18 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 00:15:16 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 14 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 00:30:16 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 82 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 00:45:17 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 135 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 01:00:14 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 14 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 01:15:15 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 127 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 01:30:14 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 100 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 01:45:14 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 35 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 02:00:14 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 13 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 02:15:14 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 11 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 02:30:17 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 02:45:16 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 27 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 03:00:16 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 03:15:19 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 137 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 03:30:15 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 134 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 03:45:32 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 51 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 04:00:19 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 19 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 04:15:26 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 04:30:28 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 67 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 04:45:24 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 05:00:31 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 29 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 05:15:30 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 66 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 05:30:30 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 37 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 05:45:23 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 21 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 06:00:26 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 26 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 06:15:37 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 20 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 06:30:35 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 13 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 06:45:32 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 49 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 07:00:31 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 11 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 07:18:00 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 29 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 07:30:33 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 11 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 07:45:33 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 16 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 08:00:39 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 08:15:39 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 08:30:31 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 62 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 08:45:37 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 13 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 09:00:21 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 21 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 09:15:21 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
[db1] ERROR: SMART status unknown on /dev/sda. Output:
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 09:30:21 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 09:45:21 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 20 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 10:00:25 🚨
[db1] ERROR: SMART status unknown on /dev/sda. Output:
[db2] WARNING: Replication lag is 17 seconds.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 13:15:16 🚨
[db2] WARNING: Replication lag is 62 seconds.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 14:15:18 🚨
[db2] WARNING: Replication lag is 63 seconds.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 15:15:19 🚨
[db2] WARNING: Replication lag is 69 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 15:30:19 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
[db2] WARNING: Replication lag is 105 seconds.
OK: All checks passed.
OK: All checks passed.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 16:15:18 🚨
[db2] WARNING: Replication lag is 135 seconds.
OK: All checks passed.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 16:45:19 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
Traceback (most recent call last):
-  File "/home/doc/dbcheck/lib/python3.12/site-packages/paramiko/transport.py", line 2369, in _check_banner
+  File "/home/doc/genesis-tools/miscellaneous/dbcheck1.py", line 257, in <module>
-    buf = self.packetizer.readline(timeout)
-          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/home/doc/dbcheck/lib/python3.12/site-packages/paramiko/packet.py", line 395, in readline
-    buf += self._read_timeout(timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/home/doc/dbcheck/lib/python3.12/site-packages/paramiko/packet.py", line 665, in _read_timeout
-    raise EOFError()
-EOFError

-During handling of the above exception, another exception occurred:

-Traceback (most recent call last):
-  File "/home/doc/dbcheck/lib/python3.12/site-packages/paramiko/transport.py", line 2185, in run
-    self._check_banner()
-  File "/home/doc/dbcheck/lib/python3.12/site-packages/paramiko/transport.py", line 2373, in _check_banner
-    raise SSHException(
-paramiko.ssh_exception.SSHException: Error reading SSH protocol banner

-Traceback (most recent call last):
-  File "/home/doc/dbcheck/lib/python3.12/site-packages/paramiko/transport.py", line 2369, in _check_banner
-    buf = self.packetizer.readline(timeout)
-          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/home/doc/dbcheck/lib/python3.12/site-packages/paramiko/packet.py", line 395, in readline
-    buf += self._read_timeout(timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/home/doc/dbcheck/lib/python3.12/site-packages/paramiko/packet.py", line 665, in _read_timeout
-    raise EOFError()
-EOFError

-During handling of the above exception, another exception occurred:

-Traceback (most recent call last):
-  File "/home/doc/genesis-tools/miscellaneous/dbcheck1.py", line 251, in <module>
    main()
  File "/home/doc/genesis-tools/miscellaneous/dbcheck1.py", line 218, in main
    res = check_remote_disk(node["host"], node["ssh_user"], disk, node["name"])
@@ -47,12 +222,173 @@ Traceback (most recent call last):
          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/doc/genesis-tools/miscellaneous/dbcheck1.py", line 76, in ssh_command
    ssh.connect(hostname=host, username=user, timeout=10)
-  File "/home/doc/dbcheck/lib/python3.12/site-packages/paramiko/client.py", line 451, in connect
+  File "/home/doc/dbcheck/lib/python3.12/site-packages/paramiko/client.py", line 386, in connect
-    t.start_client(timeout=timeout)
+    sock.connect(addr)
-  File "/home/doc/dbcheck/lib/python3.12/site-packages/paramiko/transport.py", line 773, in start_client
+TimeoutError: timed out
-    raise e
+OK: All checks passed.
-  File "/home/doc/dbcheck/lib/python3.12/site-packages/paramiko/transport.py", line 2185, in run
+OK: All checks passed.
-    self._check_banner()
+OK: All checks passed.
-  File "/home/doc/dbcheck/lib/python3.12/site-packages/paramiko/transport.py", line 2373, in _check_banner
+OK: All checks passed.
-    raise SSHException(
+OK: All checks passed.
-paramiko.ssh_exception.SSHException: Error reading SSH protocol banner
+OK: All checks passed.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 19:30:19 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
[db1] CRITICAL: Service postgresql@16-main.service not running!
[db1] ERROR: Could not check replication: connection to server at "cluster.db1.genesishostingtechnologies.com" (38.102.127.174), port 5432 failed: Connection refused
	Is the server running on that host and accepting TCP/IP connections?

[db2] WARNING: Replication lag is 90 seconds.
Failed to send Mastodon DM: <!DOCTYPE html>
<html lang='en'>
<head>
<meta content='text/html; charset=UTF-8' http-equiv='Content-Type'>
<meta charset='utf-8'>
<title>This page is not correct
 - Mastodon Glitch Edition</title>
<meta content='width=device-width,initial-scale=1' name='viewport'>
<link rel="stylesheet" crossorigin="anonymous" href="/packs/css/skins/glitch/mastodon-light-ba448ef4.chunk.css" media="not all and (prefers-color-scheme: dark)" integrity="sha256-P4BQPI06vGtRMsuyy44qvSrh1psmbDTSDhcI2ZZsDcI=" /><link rel="stylesheet" crossorigin="anonymous" href="/packs/css/skins/glitch/default-b4389951.chunk.css" media="(prefers-color-scheme: dark)" integrity="sha256-Ve2Dw/Y6VxR48zbYZoB8JdFFHsH9QiYGIZs5DLuH/U4=" />
<script src="/packs/js/common-8028e752d0f2aa93a5d6.js" crossorigin="anonymous" integrity="sha256-Yu9sY5Ca6EUBu337KcDfy59RIzPAUoLndPw2vudg2pg="></script>
<script src="/packs/js/flavours/glitch/error-10438b0fe55aa68f54dd.chunk.js" crossorigin="anonymous" integrity="sha256-DL1chTim8F/pYKgN8OlM4xXp5561mQ8KKtSZsRT9bwA="></script>
</head>
<body class='error'>
<div class='dialog'>
<div class='dialog__illustration'>
<img alt='Mastodon Glitch Edition' src='/oops.png'>
</div>
<div class='dialog__message'>
<h1>We're sorry, but something went wrong on our end.
</h1>
</div>
</div>
</body>
</html>

OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 20:30:16 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 20:45:16 🚨
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 21:00:17 🚨
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 21:15:17 🚨
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 21:30:15 🚨
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
[db2] WARNING: Replication lag is 66 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 21:45:19 🚨
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
[db2] WARNING: Replication lag is 106 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 22:00:15 🚨
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 22:15:32 🚨
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
[db2] WARNING: Replication lag is 95 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 22:30:30 🚨
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
[db2] WARNING: Replication lag is 79 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 22:45:38 🚨
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 23:00:28 🚨
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 23:15:47 🚨
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 23:30:29 🚨
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 23:45:34 🚨
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 00:00:23 🚨
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 00:15:15 🚨
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 00:30:16 🚨
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 00:45:17 🚨
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
[db2] WARNING: Replication lag is 77 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 01:00:15 🚨
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 01:15:15 🚨
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 01:30:16 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
[db2] WARNING: Replication lag is 72 seconds.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 02:30:18 🚨
[db2] WARNING: Replication lag is 89 seconds.
OK: All checks passed.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 03:00:16 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 03:15:18 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 03:30:17 🚨
[db2] WARNING: Replication lag is 135 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 03:45:17 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 04:00:16 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
OK: All checks passed.
OK: All checks passed.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 04:45:21 🚨
[db2] WARNING: Replication lag is 64 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 05:00:27 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 08:00:20 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 08:15:20 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 09:45:24 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 10:00:18 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
OK: All checks passed.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 10:30:16 🚨
[db2] WARNING: Replication lag is 75 seconds.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 12:00:31 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
OK: All checks passed.
OK: All checks passed.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 12:45:24 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
[db2] WARNING: Replication lag is 94 seconds.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 13:00:33 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
OK: All checks passed.
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 14:15:19 🚨
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
OK: All checks passed.
  File "/home/doc/genesis-tools/miscellaneous/dbcheck1.py", line 181
    f.write("<table border='1' cellpadding='5' style='border-collapse: collapse;'>
            ^
SyntaxError: unterminated string literal (detected at line 181)
@ -2,72 +2,38 @@ import os
|
|||||||
import requests
|
import requests
|
||||||
import datetime
|
import datetime
|
||||||
import paramiko
|
import paramiko
|
||||||
|
import time
|
||||||
|
|
||||||
# ==== CONFIG ====
|
# ==== CONFIG ====
|
||||||
MASTODON_INSTANCE = "https://chatwithus.live"
|
MASTODON_INSTANCE = "https://chatwithus.live"
|
||||||
MASTODON_TOKEN = "rimxBLi-eaJAcwagkmoj6UoW7Lc473tQY0cOM041Euw"
|
MASTODON_TOKEN = "rimxBLi-eaJAcwagkmoj6UoW7Lc473tQY0cOM041Euw"
|
||||||
MASTODON_USER_ID = "114386383616633367"
|
MASTODON_USER_ID = "114386383616633367"
|
||||||
|
HEALTHCHECK_HTML = "/var/www/html/healthcheck.html"
|
||||||
|
|
||||||
DISK_WARN_THRESHOLD = 10 # percent free
|
DISK_WARN_THRESHOLD = 10
|
||||||
INODE_WARN_THRESHOLD = 10 # percent free
|
INODE_WARN_THRESHOLD = 10
|
||||||
LOG_FILES = ["/var/log/syslog", "/var/log/nginx/error.log"]
|
LOG_FILES = ["/var/log/syslog", "/var/log/nginx/error.log"]
|
||||||
LOG_PATTERNS = ["ERROR", "FATAL", "disk full", "out of memory"]
|
LOG_PATTERNS = ["ERROR", "FATAL", "disk full", "out of memory"]
|
||||||
|
SUPPRESSED_PATTERNS = ["SomeKnownHarmlessMastodonError"]
|
||||||
|
|
||||||
NODES = [
|
NODES = [
|
||||||
{
|
{"name": "shredder", "host": "38.102.127.171", "ssh_user": "doc", "services": ["minio.service"], "disks": ["/", "/mnt/raid5"], "type": "remote", "db": False, "raid": True},
|
||||||
"name": "shredder",
|
{"name": "mastodon", "host": "chatwithus.live", "ssh_user": "root", "services": ["nginx", "mastodon-web"], "disks": ["/"], "type": "remote", "db": False, "raid": False},
|
||||||
"host": "38.102.127.171",
|
{"name": "db1", "host": "cluster.db1.genesishostingtechnologies.com", "ssh_user": "doc", "services": ["postgresql@16-main.service"], "disks": ["/", "/var/lib/postgresql"], "type": "remote", "db": True, "raid": False},
|
||||||
"ssh_user": "doc",
|
{"name": "db2", "host": "cluster.db2.genesishostingtechnologies.com", "ssh_user": "doc", "services": ["postgresql@16-main.service"], "disks": ["/", "/var/lib/postgresql"], "type": "remote", "db": True, "raid": False}
|
||||||
"services": ["minio.service"],
|
|
||||||
"disks": ["/", "/mnt/raid5"],
|
|
||||||
"type": "remote",
|
|
||||||
"db": False,
|
|
||||||
"raid": True
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "mastodon",
|
|
||||||
"host": "chatwithus.live", # Now points directly to your Mastodon server
|
|
||||||
"ssh_user": "root",
|
|
||||||
"services": ["nginx", "mastodon-web"],
|
|
||||||
"disks": ["/"],
|
|
||||||
"type": "remote",
|
|
||||||
"db": False,
|
|
||||||
"raid": False
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "db1",
|
|
||||||
"host": "cluster.db1.genesishostingtechnologies.com",
|
|
||||||
"ssh_user": "doc",
|
|
||||||
"services": ["postgresql@16-main.service"],
|
|
||||||
"disks": ["/", "/var/lib/postgresql"],
|
|
||||||
"type": "remote",
|
|
||||||
"db": True,
|
|
||||||
"raid": False
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "db2",
|
|
||||||
"host": "cluster.db2.genesishostingtechnologies.com",
|
|
||||||
"ssh_user": "doc",
|
|
||||||
"services": ["postgresql@16-main.service"],
|
|
||||||
"disks": ["/", "/var/lib/postgresql"],
|
|
||||||
"type": "remote",
|
|
||||||
"db": True,
|
|
||||||
"raid": False
|
|
||||||
},
|
|
||||||
]
|
]
|
||||||
|
|
||||||
# ==== Mastodon DM function ====
|
# ==== Mastodon DM function with retry ====
|
||||||
def mastodon_dm(message):
|
def mastodon_dm(message, retries=3):
|
||||||
url = f"{MASTODON_INSTANCE}/api/v1/statuses"
|
url = f"{MASTODON_INSTANCE}/api/v1/statuses"
|
||||||
headers = {"Authorization": f"Bearer {MASTODON_TOKEN}"}
|
headers = {"Authorization": f"Bearer {MASTODON_TOKEN}"}
|
||||||
payload = {
|
payload = {"status": message, "visibility": "direct", "in_reply_to_account_id": MASTODON_USER_ID}
|
||||||
"status": message,
|
for attempt in range(retries):
|
||||||
"visibility": "direct",
|
resp = requests.post(url, headers=headers, data=payload)
|
||||||
"in_reply_to_account_id": MASTODON_USER_ID
|
if resp.status_code == 200:
|
||||||
}
|
return
|
||||||
resp = requests.post(url, headers=headers, data=payload)
|
print(f"Failed to send Mastodon DM (attempt {attempt+1}): {resp.text}")
|
||||||
if resp.status_code != 200:
|
time.sleep(5)
|
||||||
print(f"Failed to send Mastodon DM: {resp.text}")
|
|
||||||
|
|
||||||
# ==== SSH command runner ====
|
# ==== SSH command runner ====
|
||||||
def ssh_command(host, user, cmd):
|
def ssh_command(host, user, cmd):
|
||||||
@ -79,97 +45,33 @@ def ssh_command(host, user, cmd):
|
|||||||
ssh.close()
|
ssh.close()
|
||||||
return out
|
return out
|
||||||
|
|
-# ==== Robust Remote disk check ====
-def check_remote_disk(host, user, path, node_name):
-    cmd = f"df --output=pcent {path} | tail -1 | tr -dc '0-9'"
-    out = ssh_command(host, user, cmd)
-    if not out:
-        return f"[{node_name}] ERROR: Disk {path} not found or could not check disk usage."
-    try:
-        percent = int(out)
-    except ValueError:
-        return f"[{node_name}] ERROR: Could not parse disk usage for {path}. Output was: '{out}'"
-    if percent > 90:
-        return f"[{node_name}] WARNING: Only {100-percent}% disk free on {path}."
-    # Inode check
-    cmd_inode = f"df --output=ipcent {path} | tail -1 | tr -dc '0-9'"
-    out_inode = ssh_command(host, user, cmd_inode)
-    if not out_inode:
-        return f"[{node_name}] ERROR: Disk {path} not found or could not check inode usage."
-    try:
-        percent_inode = int(out_inode)
-    except ValueError:
-        return f"[{node_name}] ERROR: Could not parse inode usage for {path}. Output was: '{out_inode}'"
-    if percent_inode > 90:
-        return f"[{node_name}] WARNING: Only {100-percent_inode}% inodes free on {path}."
-    return None
-
-# ==== SMART health check (for all disks) ====
-def check_remote_smart(host, user, node_name):
-    alerts = []
-    # List block devices
-    cmd_lsblk = "lsblk -ndo NAME,TYPE | awk '$2==\"disk\" {print $1}'"
-    devs = ssh_command(host, user, cmd_lsblk)
-    if not devs:
-        alerts.append(f"[{node_name}] ERROR: Could not list block devices for SMART check.")
-        return alerts
-    for dev in devs.split():
-        smart_cmd = f"sudo smartctl -H /dev/{dev}"
-        out = ssh_command(host, user, smart_cmd)
-        if "PASSED" in out:
-            continue  # All good
-        elif "FAILED" in out or "Pre-fail" in out or "SMART support is: Unavailable" in out:
-            alerts.append(f"[{node_name}] CRITICAL: SMART health issue on /dev/{dev}!\n{out}")
-        elif "Unknown" in out or not out:
-            alerts.append(f"[{node_name}] ERROR: SMART status unknown on /dev/{dev}. Output: {out}")
-        # Optionally scan for other SMART errors
-    return alerts
-
-# ==== Remote service check ====
-def check_remote_service(host, user, service, node_name):
-    cmd = f"systemctl is-active {service}"
-    out = ssh_command(host, user, cmd)
-    if out.strip() != "active":
-        return f"[{node_name}] CRITICAL: Service {service} not running!"
-    return None
-
-# ==== Remote RAID md0 check (robust for all mdstat layouts) ====
-def check_remote_raid_md0(host, user, node_name):
-    try:
-        import re
-        ssh = paramiko.SSHClient()
-        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-        ssh.connect(hostname=host, username=user, timeout=10)
-        stdin, stdout, stderr = ssh.exec_command("cat /proc/mdstat")
-        mdstat = stdout.read().decode()
-
-        # Find the block for md0 and look for the [UU_] status
-        lines = mdstat.splitlines()
-        status = None
-        inside_md0 = False
-        for line in lines:
-            if line.startswith("md0"):
-                inside_md0 = True
-            elif inside_md0:
-                m = re.search(r"\[(U|_)+\]", line)
-                if m:
-                    status = m.group(0)
-                    break
-                # Stop searching if we hit a blank line or another array
-                if line.strip() == "" or ":" in line:
-                    break
-
-        ssh.close()
-
-        if status is None:
-            return f"[{node_name}] CRITICAL: /dev/md0 RAID status string not found!"
-        if "_" in status:
-            return f"[{node_name}] WARNING: /dev/md0 RAID degraded! Status: {status}"
-        # All U's means all disks up
-        return None
-
-    except Exception as e:
-        return f"[{node_name}] ERROR: Could not check RAID health remotely: {e}"
+# ==== Emoji chooser ====
+def choose_emoji(line):
+    if "RAID" in line:
+        return "🧨"
+    if "disk" in line.lower():
+        return "📈"
+    if "rclone" in line.lower():
+        return "🐢"
+    if "Service" in line:
+        return "🛑"
+    if "Replication" in line:
+        return "💥"
+    return "⚠️"
+
+# ==== Check rclone health ====
+def check_rclone_health(node):
+    try:
+        result = ssh_command(node["host"], node["ssh_user"], "rclone rc vfs/stats")
+        if "error" in result.lower() or "failed" in result.lower():
+            return ("critical", f"[{node['name']}] ERROR: rclone health check failed. Output: {result}")
+        if "bytesUsed" in result:
+            bytes_used = int(result.split('"bytesUsed":')[1].split(',')[0].strip())
+            if bytes_used > 100000000000:
+                return ("warning", f"[{node['name']}] WARNING: rclone cache usage high: {bytes_used} bytes used.")
+    except Exception as e:
+        return ("critical", f"[{node['name']}] ERROR: Could not check rclone health: {str(e)}")
+    return None

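Note: `rclone rc vfs/stats` normally returns JSON, so the string-splitting above works but is fragile. A sketch of a stricter parse on the same output; the exact key layout ("diskCache" -> "bytesUsed") varies by rclone version and is an assumption here, not something confirmed by this commit:

import json

def parse_rclone_bytes_used(raw_stats):
    """Illustrative only: extract bytesUsed from `rclone rc vfs/stats` output."""
    try:
        stats = json.loads(raw_stats)
    except (json.JSONDecodeError, TypeError):
        return None
    # Assumed layout: cache usage under diskCache.bytesUsed, with a top-level
    # bytesUsed fallback to mirror the substring match used in the diff above.
    disk_cache = stats.get("diskCache", {})
    return disk_cache.get("bytesUsed", stats.get("bytesUsed"))
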
 # ==== Remote log scan ====
 def check_remote_logs(host, user, node_name):
@ -180,72 +82,108 @@ def check_remote_logs(host, user, node_name):
             out = ssh_command(host, user, cmd)
             lines = out.split("\n")
             for pattern in LOG_PATTERNS:
-                if any(pattern in line for line in lines):
-                    alerts.append(f"[{node_name}] WARNING: Pattern '{pattern}' in {log}")
+                for line in lines:
+                    if pattern in line and not any(suppress in line for suppress in SUPPRESSED_PATTERNS):
+                        alerts.append(f"[{node_name}] WARNING: Pattern '{pattern}' in {log}")
         except Exception as e:
             alerts.append(f"[{node_name}] ERROR: Could not read log {log}: {e}")
     return alerts

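Note: the reworked loop consults a SUPPRESSED_PATTERNS list that is defined outside this hunk, presumably next to LOG_PATTERNS in the config section at the top of the script. Purely as an illustration of the shape it needs, with placeholder entries that are not taken from the repo:

# Hypothetical example: substrings that mute an otherwise-matching log line
SUPPRESSED_PATTERNS = [
    "Connection reset by peer",
    "session closed for user",
]
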
-# ==== Remote PostgreSQL replication check ====
-def check_replication(host, node_name):
-    try:
-        import psycopg2
-        conn = psycopg2.connect(host=host, dbname="postgres", user="postgres", connect_timeout=5)
-        cur = conn.cursor()
-        cur.execute("SELECT pg_is_in_recovery();")
-        is_replica = cur.fetchone()[0]
-        if is_replica:
-            cur.execute("SELECT EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))::INT;")
-            lag = cur.fetchone()[0]
-            if lag is None:
-                return f"[{node_name}] CRITICAL: Standby not streaming! Replication down."
-            elif lag > 10:
-                return f"[{node_name}] WARNING: Replication lag is {lag} seconds."
-        cur.close()
-        conn.close()
-    except Exception as e:
-        return f"[{node_name}] ERROR: Could not check replication: {e}"
-    return None
-
-# ==== Main routine ====
+# ==== Main Routine ====
 def main():
-    problems = []
+    critical_problems = []
+    warning_problems = []
+    node_status = {}

-    # Multi-node checks
     for node in NODES:
-        # All checks via SSH
+        status = "✅ Healthy"
+
+        if "rclone" in node.get("services", []):
+            res = check_rclone_health(node)
+            if res:
+                level, msg = res
+                if level == "critical":
+                    critical_problems.append(msg)
+                    status = "🚨 Critical"
+                else:
+                    warning_problems.append(msg)
+                    if status != "🚨 Critical":
+                        status = "⚠️ Warning"

         for disk in node["disks"]:
             res = check_remote_disk(node["host"], node["ssh_user"], disk, node["name"])
-            if res: problems.append(res)
-        # SMART check for all disks on this node
-        smart_alerts = check_remote_smart(node["host"], node["ssh_user"], node["name"])
-        if smart_alerts:
-            problems.extend(smart_alerts)
+            if res:
+                if "CRITICAL" in res:
+                    critical_problems.append(res)
+                    status = "🚨 Critical"
+                elif "WARNING" in res and status != "🚨 Critical":
+                    warning_problems.append(res)
+                    status = "⚠️ Warning"

         for svc in node["services"]:
             res = check_remote_service(node["host"], node["ssh_user"], svc, node["name"])
-            if res: problems.append(res)
-        # Replication check
+            if res:
+                if "CRITICAL" in res:
+                    critical_problems.append(res)
+                    status = "🚨 Critical"
+                elif "WARNING" in res and status != "🚨 Critical":
+                    warning_problems.append(res)
+                    status = "⚠️ Warning"

         if node.get("db"):
             res = check_replication(node["host"], node["name"])
-            if res: problems.append(res)
-        # RAID check, only for nodes with "raid": True
+            if res:
+                critical_problems.append(res)
+                status = "🚨 Critical"

         if node.get("raid", False):
-            raid_health = check_remote_raid_md0(node["host"], node["ssh_user"], node["name"])
-            if raid_health:
-                problems.append(raid_health)
-        # Log scan
+            res = check_remote_raid_md0(node["host"], node["ssh_user"], node["name"])
+            if res:
+                if "CRITICAL" in res:
+                    critical_problems.append(res)
+                    status = "🚨 Critical"
+                elif status != "🚨 Critical":
+                    warning_problems.append(res)
+                    status = "⚠️ Warning"

         logs = check_remote_logs(node["host"], node["ssh_user"], node["name"])
         if logs:
-            problems.extend(logs)
+            warning_problems.extend(logs)
+            if status != "🚨 Critical":
+                status = "⚠️ Warning"
+
+        node_status[node["name"]] = status

-    # Send DM if anything wrong
-    if problems:
-        now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-        msg = f"🚨 Genesis Radio Multi-Node Healthcheck {now} 🚨\n" + "\n".join(problems)
-        print(msg)
-        mastodon_dm(msg)
-    else:
-        print("OK: All checks passed.")
+    now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+
+    if critical_problems:
+        formatted = "\n".join(f"- {choose_emoji(p)} {p}" for p in critical_problems)
+        msg = f"🚨 Genesis Radio Critical Healthcheck {now} 🚨\n⚡ {len(critical_problems)} critical issues found:\n{formatted}"
+        print(msg)
+        mastodon_dm(msg)
+
+    if warning_problems:
+        formatted = "\n".join(f"- {choose_emoji(p)} {p}" for p in warning_problems)
+        msg = f"⚠️ Genesis Radio Warning Healthcheck {now} ⚠️\n⚡ {len(warning_problems)} warnings found:\n{formatted}"
+        print(msg)
+        mastodon_dm(msg)
+
+    if not critical_problems and not warning_problems:
+        msg = f"✅ Genesis Radio Healthcheck {now}: All systems normal."
+        print(msg)
+        mastodon_dm(msg)
+
+    # Write healthcheck HTML dashboard
+    with open(HEALTHCHECK_HTML, "w") as f:
+        f.write("<html><head><title>Genesis Radio Healthcheck</title><meta http-equiv='refresh' content='60'></head><body>")
+        f.write(f"<h1>Genesis Radio System Health</h1>")
+        f.write(f"<p>Last Checked: {now}</p>")
+        f.write("<table border='1' cellpadding='5' style='border-collapse: collapse;'><tr><th>System</th><th>Status</th></tr>")
+        for node, status in node_status.items():
+            color = 'green' if 'Healthy' in status else ('orange' if 'Warning' in status else 'red')
+            f.write(f"<tr><td>{node}</td><td style='color:{color};'>{status}</td></tr>")
+        f.write("</table></body></html>")

 if __name__ == "__main__":
     main()

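Note: HEALTHCHECK_HTML is referenced by the dashboard writer above but defined outside this hunk, presumably in the config block at the top of the script. A minimal sketch of what that definition might look like; the path is a placeholder, not taken from this commit:

HEALTHCHECK_HTML = "/var/www/html/healthcheck.html"  # hypothetical location served to the dashboard
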
@ -1,102 +0,0 @@
#!/bin/bash

# ---- CONFIG ----
PG_REMOTE_USER="postgres"
PG_REMOTE_HOST="cluster.db2.genesishostingtechnologies.com"
PG_REMOTE_PORT="5432"
PG_LOCAL_PORT="5432"
DUMP_DIR="/tmp/pgbackup_verify"
BACKUP_TARGET="root@backup.sshjunkie.com:/mnt/backup/pgdumps"
CC_TARGET="doc@clustercontrol.sshjunkie.com:/home/doc/backups"
DBS=("mastodon_production" "giteaprod" "hostingtootdb" "radiotootdb")
LOGFILE="$DUMP_DIR/verify_log_$(date +%Y%m%d_%H%M%S).txt"
mkdir -p "$DUMP_DIR"

# ==== Mastodon DM function ====
mastodon_alert() {
    local msg="$1"
    curl -sS -X POST "https://chatwithus.live/api/v1/statuses" \
        -H "Authorization: Bearer rimxBLi-eaJAcwagkmoj6UoW7Lc473tQY0cOM041Euw" \
        --data-urlencode "status=$msg" \
        --data "visibility=direct" \
        --data "in_reply_to_account_id=114386383616633367" >/dev/null
}

ALL_OK=true
UPLOAD_LIST=()

for DB in "${DBS[@]}"; do
    echo "=== [$(date)] Dumping $DB from $PG_REMOTE_HOST ===" | tee -a "$LOGFILE"
    DUMPFILE="$DUMP_DIR/${DB}_$(date +%Y%m%d_%H%M%S).sql"

    # Dump from remote
    pg_dump -h "$PG_REMOTE_HOST" -p "$PG_REMOTE_PORT" -U "$PG_REMOTE_USER" -d "$DB" > "$DUMPFILE"
    if [ $? -ne 0 ]; then
        echo "[FAIL] Failed to dump $DB! Skipping upload." | tee -a "$LOGFILE"
        ALL_OK=false
        mastodon_alert "🚨 Database backup/verify FAILED: Could not dump $DB from $PG_REMOTE_HOST on $(hostname) at $(date). See log: $LOGFILE"
        continue
    fi

    # Restore/verify on Krang
    TESTDB="verify_${DB}_$RANDOM"
    echo "Creating test database $TESTDB" | tee -a "$LOGFILE"
    sudo -u postgres createdb -p "$PG_LOCAL_PORT" "$TESTDB"
    if [ $? -ne 0 ]; then
        echo "[FAIL] Failed to create $TESTDB!" | tee -a "$LOGFILE"
        ALL_OK=false
        mastodon_alert "🚨 Database backup/verify FAILED: Could not create test DB $TESTDB on $(hostname) at $(date). See log: $LOGFILE"
        continue
    fi

    echo "Restoring to $TESTDB" | tee -a "$LOGFILE"
    sudo -u postgres psql -p "$PG_LOCAL_PORT" -d "$TESTDB" < "$DUMPFILE"
    if [ $? -ne 0 ]; then
        echo "[FAIL] Restore failed for $DB!" | tee -a "$LOGFILE"
        sudo -u postgres dropdb -p "$PG_LOCAL_PORT" "$TESTDB"
        ALL_OK=false
        mastodon_alert "🚨 Database backup/verify FAILED: Restore failed for $DB on $(hostname) at $(date). See log: $LOGFILE"
        continue
    fi

    # Quick table listing for sanity
    sudo -u postgres psql -p "$PG_LOCAL_PORT" -d "$TESTDB" -c "\dt" | tee -a "$LOGFILE"
    if [ $? -eq 0 ]; then
        echo "[PASS] $DB: Dump and restore OK." | tee -a "$LOGFILE"
        UPLOAD_LIST+=("$DUMPFILE")
    else
        echo "[FAIL] $DB: Test query failed!" | tee -a "$LOGFILE"
        ALL_OK=false
        mastodon_alert "🚨 Database backup/verify FAILED: Test query failed for $DB on $(hostname) at $(date). See log: $LOGFILE"
    fi

    sudo -u postgres dropdb -p "$PG_LOCAL_PORT" "$TESTDB"
    echo "Cleaned up $TESTDB" | tee -a "$LOGFILE"
    echo "" | tee -a "$LOGFILE"
done

if $ALL_OK && [ "${#UPLOAD_LIST[@]}" -eq "${#DBS[@]}" ]; then
    echo "All dumps verified, sending to $BACKUP_TARGET" | tee -a "$LOGFILE"
    scp "${UPLOAD_LIST[@]}" "$BACKUP_TARGET"
    if [ $? -eq 0 ]; then
        echo "Uploads to thevault successful." | tee -a "$LOGFILE"
        # --NEW: Also upload to ClusterControl controller
        echo "Uploading to ClusterControl controller at $CC_TARGET" | tee -a "$LOGFILE"
        scp "${UPLOAD_LIST[@]}" "$CC_TARGET"
        if [ $? -eq 0 ]; then
            echo "Uploads to ClusterControl successful." | tee -a "$LOGFILE"
            rm -f "${UPLOAD_LIST[@]}"
        else
            echo "[WARN] Upload to ClusterControl controller failed!" | tee -a "$LOGFILE"
            mastodon_alert "⚠️ Database backup verified, but upload to ClusterControl at $CC_TARGET failed on $(hostname) at $(date). See log: $LOGFILE"
        fi
    else
        echo "[FAIL] Upload to thevault failed!" | tee -a "$LOGFILE"
        mastodon_alert "🚨 Database backup/verify FAILED: Upload to $BACKUP_TARGET failed on $(hostname) at $(date). See log: $LOGFILE"
    fi
else
    echo "Not all backups verified! Nothing uploaded." | tee -a "$LOGFILE"
    mastodon_alert "🚨 Database backup/verify FAILED: One or more DBs failed verification on $(hostname) at $(date). See log: $LOGFILE"
fi

echo "DONE. Log: $LOGFILE"

@ -1,24 +0,0 @@
#!/usr/bin/env bash

set -e

BIN_DIR="/usr/local/bin"

echo "📦 Deploying Genesis Tools..."

declare -a SCRIPTS=(
    "venv_launcher.sh"
    "KodakMomentV1.sh"
)

for script in "${SCRIPTS[@]}"; do
    if [[ -f "$script" ]]; then
        echo "🔁 Installing $script to $BIN_DIR..."
        cp -f "$script" "$BIN_DIR/${script%.sh}"
        chmod +x "$BIN_DIR/${script%.sh}"
    else
        echo "⚠️ Warning: $script not found in $(pwd)"
    fi
done

echo "✅ Deployment complete. Try running: venv_launcher --check"

@ -1,130 +0,0 @@
import os
import time
import re
from urllib.parse import quote
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from mastodon import Mastodon, MastodonError

# Configuration
SHOW_NAMES = {
    "retro": "The Retro Breakfast",
    "90slunch": "90s Lunch",
    "80sdimension": "80s Dimension",
    "au": "Alternate Universe",
    "bootycalls": "Booty Calls",
    "chaos": "Chaos",
    "gog": "Gog",
    "housecalls": "House Calls",
    "mac": "March of the Mac",
    "yes": "Yes Series",
    "welch": "Bob Welch Special",
    "wakeman": "Caped Crusader: The Rick Wakeman Retrospective",
    "mayhem": "Mayhem",
    "pisces": "Pisces Playhouse",
    "rockvault": "Rock Vault",
    "sonicrevolt": "Sonic Revolt",
    "tunefuse": "TuneFuse",
    "wwo80s": "WWO 80s",
    "yacht": "Yacht Rock",
}

BASE_URL = "http://www.server.genesis-radio.net:5020"
INSTANCE_URL = "https://chatwithus.live"
ACCESS_TOKEN = "y6cAV7FvTqtJzTHe8QoCO5JSlugIwHgy7zki6Lb5xns"

mastodon = Mastodon(
    access_token=ACCESS_TOKEN,
    api_base_url=INSTANCE_URL
)

# Keep track of files processed and the time they were processed
processed_files = {}
DEBOUNCE_TIME = 5  # Time in seconds to wait before processing the same file again

# Improved show name extraction based on directory aliasing
def extract_show_name(file_path):
    parent_dir = os.path.basename(os.path.dirname(file_path))
    return SHOW_NAMES.get(parent_dir, "Genesis Radio")

class FileEventHandler(FileSystemEventHandler):
    def on_created(self, event):
        if event.is_directory:
            return

        # Only process .mp3 files
        if not event.src_path.endswith('.mp3'):
            print(f"Skipping non-mp3 file: {event.src_path}")
            return

        current_time = time.time()  # Get the current time in seconds

        # If the file has been processed within the debounce window, skip it
        if event.src_path in processed_files:
            last_processed_time = processed_files[event.src_path]
            if current_time - last_processed_time < DEBOUNCE_TIME:
                print(f"Skipping duplicate event for file: {event.src_path}")
                return

        # Update the time of processing for this file
        processed_files[event.src_path] = current_time

        # Debugging: Confirm file creation detection
        print(f"File detected: {event.src_path}")

        file_path = event.src_path
        filename = os.path.basename(file_path)
        show_name = extract_show_name(file_path)

        # URL encode the filename and parent directory
        encoded_filename = quote(filename, safe='')
        parent_dir = os.path.basename(os.path.dirname(file_path))
        encoded_parent_dir = quote(parent_dir, safe='')

        # Construct the file URL to go to the new path format
        file_url = f"{BASE_URL}/show/{encoded_parent_dir}/{encoded_filename}"

        # Constructing a cleaner and more engaging Mastodon message
        message = f"🎉 New Archive Alert! 🎧 {show_name}'s latest episode is now available! 🎶\n\nTune in: {file_url}"

        # Debugging: Check the message before posting
        print(f"Message to post: {message}")

        try:
            mastodon.status_post(message)
            print("✅ Successfully posted.")
        except MastodonError as e:
            print(f"❌ Mastodon API Error: {e}")
            print(f"Full error: {e.args}")

if __name__ == "__main__":
    observer = Observer()
    handler = FileEventHandler()

    valid_directories = []
    for directory in SHOW_NAMES.keys():
        directory_path = os.path.join("/mnt/convert/archives", directory)
        if os.path.exists(directory_path):
            print(f"✅ Monitoring: {directory_path}")
            valid_directories.append(directory_path)
        else:
            print(f"❌ Skipping non-existent directory: {directory_path}")

    if not valid_directories:
        print("❌ No valid directories found to monitor. Exiting.")
        exit(1)

    for directory in valid_directories:
        observer.schedule(handler, directory, recursive=False)

    print("🔔 Genesis Radio Mastodon Notifier running. Press Ctrl+C to stop.")
    observer.start()

    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print("\n🔒 Shutting down observer...")
        observer.stop()

    observer.join()

@ -7,6 +7,7 @@ declare -A VENV_APPS=(
     [recordtheshow]="app.py"
     [radiotoot]="app.py"
     [hostingtoot]="app.py"
+    [dbcheck]="dbcheck1.py"
 )

 SCRIPT_BASE="/home/doc/genesis-tools"

@ -7,7 +7,7 @@ from datetime import datetime
 # Function to record the radio show using ffmpeg
 def record_show(folder_name, duration, filename_prefix):
     # Set the working directory for the recording
-    working_directory = "home/doc/Genesis Radio"
+    working_directory = "/home/doc/Genesis"

     # Ensure the folder exists in archives with the prefix as the folder name
     archives_directory = "/mnt/archives"

@ -234,13 +234,13 @@
             ]
         },
         "testshow": {
-            "recording": false,
+            "recording": true,
             "duration": 900,
             "schedule": [
                 {
-                    "day": "Wednesday",
-                    "time": "08:45"
+                    "day": "Saturday",
+                    "time": "14:17"
                 }
             ]
         }
     }