Compare commits: 3f187aebb1 ... 7ea19b66d7

4 commits: 7ea19b66d7, 78fed9b3ac, 16fd65acc4, 4924d79c07
OPS.md (new file, 68 lines)
@@ -0,0 +1,68 @@
# 🚀 Genesis Radio - Healthcheck Response Runbook

## Purpose

When an alert fires (Critical or Warning), this guide tells you what to do so that anyone can react quickly, even if the admin is not available.

---

## 🛠️ How to Use

- Every Mastodon DM or Dashboard alert gives you a **timestamp**, **server name**, and **issue**.
- Look up the type of issue in the table below.
- Follow the recommended action immediately.

---

## 📋 Quick Response Table

| Type of Alert | Emoji | What it Means | Immediate Action |
|:---|:---|:---|:---|
| Critical Service Failure | 🔚 | A key service (like Mastodon, MinIO) is **down** | SSH into the server, try `systemctl restart <service>`. |
| Disk Filling Up | 📈 | Disk space critically low (under 10% free) | SSH in and delete old logs/backups. Free up space **immediately**. |
| Rclone Mount Error | 🐢 | Cache failed, mount not healthy | Restart the rclone mount process (usually `systemctl restart rclone@<mount>`, or remount manually). |
| PostgreSQL Replication Lag | 💥 | Database replicas are falling behind | Check database health. Restart replication if needed. Alert admin if lag is >5 minutes. (See the lag query below.) |
| RAID Degraded | 🧸 | RAID array is degraded (missing a disk) | Open server console. Identify failed drive. Replace drive if possible. Otherwise escalate immediately. |
| Log File Warnings | ⚠️ | Error patterns found in logs | Investigate. If the system is healthy, **log it for later**. If errors worsen, escalate. |
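
For the replication-lag row, the quickest manual check mirrors the query the healthcheck script itself runs, assuming you can run `psql` as the `postgres` user on the affected replica (db1/db2):

```bash
# On the replica: seconds since the last replayed transaction (NULL means not streaming)
sudo -u postgres psql -c "SELECT EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))::INT AS lag_seconds;"
```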

---

## 💻 If Dashboard Shows

- ✅ **All Green** = No action needed.
- ⚠️ **Warnings** = Investigate soon. Not urgent unless repeated.
- 🚨 **Criticals** = Drop everything and act immediately.

---

## 🛡️ Emergency Contacts

| Role | Name | Contact |
|:----|:-----|:--------|
| Primary Admin | (You) | [YOUR CONTACT INFO] |
| Secondary | Brice | [BRICE CONTACT INFO] |

(Replace placeholders with actual contact details.)

---

## ✍️ Example Cheat Sheet for Brice

**Sample Mastodon DM:**

> 🚨 Genesis Radio Critical Healthcheck 2025-04-28 14:22:33 🚨
> ⚡ 1 critical issue found:
> - 🔚 [mastodon] CRITICAL: Service mastodon-web not running!

**Brice should:**

1. SSH into the Mastodon server.
2. Run `systemctl restart mastodon-web` (see the command sketch below).
3. Confirm the service is running again.
4. If it fails or stays down, escalate to admin.
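
A minimal command sketch for those steps, assuming root SSH access to `chatwithus.live` (the host and user configured for the Mastodon node in `dbcheck1.py`):

```bash
ssh root@chatwithus.live
systemctl restart mastodon-web
systemctl status mastodon-web --no-pager      # confirm it reports "active (running)"
journalctl -u mastodon-web -n 50 --no-pager   # recent log lines if it stays down
```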

---

# 🌟 TL;DR

- 🚨 Criticals: Act immediately.
- ⚠️ Warnings: Investigate soon.
- ✅ Healthy: No action needed.

---

**Stay sharp. Our uptime and service quality depend on quick, calm responses!** 🛡️💪
dbcheck/dbcheck1.py (new file, 251 lines)
@@ -0,0 +1,251 @@
import os
import requests
import datetime
import paramiko

# ==== CONFIG ====
MASTODON_INSTANCE = "https://chatwithus.live"
MASTODON_TOKEN = "rimxBLi-eaJAcwagkmoj6UoW7Lc473tQY0cOM041Euw"
MASTODON_USER_ID = "114386383616633367"

DISK_WARN_THRESHOLD = 10   # percent free
INODE_WARN_THRESHOLD = 10  # percent free
LOG_FILES = ["/var/log/syslog", "/var/log/nginx/error.log"]
LOG_PATTERNS = ["ERROR", "FATAL", "disk full", "out of memory"]

NODES = [
    {
        "name": "shredder",
        "host": "38.102.127.171",
        "ssh_user": "doc",
        "services": ["minio.service"],
        "disks": ["/", "/mnt/raid5"],
        "type": "remote",
        "db": False,
        "raid": True
    },
    {
        "name": "mastodon",
        "host": "chatwithus.live",  # Now points directly to your Mastodon server
        "ssh_user": "root",
        "services": ["nginx", "mastodon-web"],
        "disks": ["/"],
        "type": "remote",
        "db": False,
        "raid": False
    },
    {
        "name": "db1",
        "host": "cluster.db1.genesishostingtechnologies.com",
        "ssh_user": "doc",
        "services": ["postgresql@16-main.service"],
        "disks": ["/", "/var/lib/postgresql"],
        "type": "remote",
        "db": True,
        "raid": False
    },
    {
        "name": "db2",
        "host": "cluster.db2.genesishostingtechnologies.com",
        "ssh_user": "doc",
        "services": ["postgresql@16-main.service"],
        "disks": ["/", "/var/lib/postgresql"],
        "type": "remote",
        "db": True,
        "raid": False
    },
]

# ==== Mastodon DM function ====
def mastodon_dm(message):
    url = f"{MASTODON_INSTANCE}/api/v1/statuses"
    headers = {"Authorization": f"Bearer {MASTODON_TOKEN}"}
    payload = {
        "status": message,
        "visibility": "direct",
        "in_reply_to_account_id": MASTODON_USER_ID
    }
    resp = requests.post(url, headers=headers, data=payload)
    if resp.status_code != 200:
        print(f"Failed to send Mastodon DM: {resp.text}")

# ==== SSH command runner ====
def ssh_command(host, user, cmd):
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(hostname=host, username=user, timeout=10)
    stdin, stdout, stderr = ssh.exec_command(cmd)
    out = stdout.read().decode().strip()
    ssh.close()
    return out

# ==== Robust remote disk check ====
def check_remote_disk(host, user, path, node_name):
    cmd = f"df --output=pcent {path} | tail -1 | tr -dc '0-9'"
    out = ssh_command(host, user, cmd)
    if not out:
        return f"[{node_name}] ERROR: Disk {path} not found or could not check disk usage."
    try:
        percent = int(out)
    except ValueError:
        return f"[{node_name}] ERROR: Could not parse disk usage for {path}. Output was: '{out}'"
    if percent > 90:
        return f"[{node_name}] WARNING: Only {100-percent}% disk free on {path}."
    # Inode check
    cmd_inode = f"df --output=ipcent {path} | tail -1 | tr -dc '0-9'"
    out_inode = ssh_command(host, user, cmd_inode)
    if not out_inode:
        return f"[{node_name}] ERROR: Disk {path} not found or could not check inode usage."
    try:
        percent_inode = int(out_inode)
    except ValueError:
        return f"[{node_name}] ERROR: Could not parse inode usage for {path}. Output was: '{out_inode}'"
    if percent_inode > 90:
        return f"[{node_name}] WARNING: Only {100-percent_inode}% inodes free on {path}."
    return None

# ==== SMART health check (for all disks) ====
def check_remote_smart(host, user, node_name):
    alerts = []
    # List block devices
    cmd_lsblk = "lsblk -ndo NAME,TYPE | awk '$2==\"disk\" {print $1}'"
    devs = ssh_command(host, user, cmd_lsblk)
    if not devs:
        alerts.append(f"[{node_name}] ERROR: Could not list block devices for SMART check.")
        return alerts
    for dev in devs.split():
        smart_cmd = f"sudo smartctl -H /dev/{dev}"
        out = ssh_command(host, user, smart_cmd)
        if "PASSED" in out:
            continue  # All good
        elif "FAILED" in out or "Pre-fail" in out or "SMART support is: Unavailable" in out:
            alerts.append(f"[{node_name}] CRITICAL: SMART health issue on /dev/{dev}!\n{out}")
        elif "Unknown" in out or not out:
            alerts.append(f"[{node_name}] ERROR: SMART status unknown on /dev/{dev}. Output: {out}")
        # Optionally scan for other SMART errors
    return alerts

# ==== Remote service check ====
def check_remote_service(host, user, service, node_name):
    cmd = f"systemctl is-active {service}"
    out = ssh_command(host, user, cmd)
    if out.strip() != "active":
        return f"[{node_name}] CRITICAL: Service {service} not running!"
    return None

# ==== Remote RAID md0 check (robust for all mdstat layouts) ====
def check_remote_raid_md0(host, user, node_name):
    try:
        import re
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(hostname=host, username=user, timeout=10)
        stdin, stdout, stderr = ssh.exec_command("cat /proc/mdstat")
        mdstat = stdout.read().decode()

        # Find the block for md0 and look for the [UU_] status
        lines = mdstat.splitlines()
        status = None
        inside_md0 = False
        for line in lines:
            if line.startswith("md0"):
                inside_md0 = True
            elif inside_md0:
                m = re.search(r"\[(U|_)+\]", line)
                if m:
                    status = m.group(0)
                    break
                # Stop searching if we hit a blank line or another array
                if line.strip() == "" or ":" in line:
                    break

        ssh.close()

        if status is None:
            return f"[{node_name}] CRITICAL: /dev/md0 RAID status string not found!"
        if "_" in status:
            return f"[{node_name}] WARNING: /dev/md0 RAID degraded! Status: {status}"
        # All U's means all disks up
        return None

    except Exception as e:
        return f"[{node_name}] ERROR: Could not check RAID health remotely: {e}"

# ==== Remote log scan ====
def check_remote_logs(host, user, node_name):
    alerts = []
    for log in LOG_FILES:
        cmd = f"tail -500 {log}"
        try:
            out = ssh_command(host, user, cmd)
            lines = out.split("\n")
            for pattern in LOG_PATTERNS:
                if any(pattern in line for line in lines):
                    alerts.append(f"[{node_name}] WARNING: Pattern '{pattern}' in {log}")
        except Exception as e:
            alerts.append(f"[{node_name}] ERROR: Could not read log {log}: {e}")
    return alerts

# ==== Remote PostgreSQL replication check ====
def check_replication(host, node_name):
    try:
        import psycopg2
        conn = psycopg2.connect(host=host, dbname="postgres", user="postgres", connect_timeout=5)
        cur = conn.cursor()
        cur.execute("SELECT pg_is_in_recovery();")
        is_replica = cur.fetchone()[0]
        if is_replica:
            cur.execute("SELECT EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))::INT;")
            lag = cur.fetchone()[0]
            if lag is None:
                return f"[{node_name}] CRITICAL: Standby not streaming! Replication down."
            elif lag > 10:
                return f"[{node_name}] WARNING: Replication lag is {lag} seconds."
        cur.close()
        conn.close()
    except Exception as e:
        return f"[{node_name}] ERROR: Could not check replication: {e}"
    return None

# ==== Main routine ====
def main():
    problems = []

    # Multi-node checks
    for node in NODES:
        # All checks via SSH
        for disk in node["disks"]:
            res = check_remote_disk(node["host"], node["ssh_user"], disk, node["name"])
            if res: problems.append(res)
        # SMART check for all disks on this node
        smart_alerts = check_remote_smart(node["host"], node["ssh_user"], node["name"])
        if smart_alerts:
            problems.extend(smart_alerts)
        for svc in node["services"]:
            res = check_remote_service(node["host"], node["ssh_user"], svc, node["name"])
            if res: problems.append(res)
        # Replication check
        if node.get("db"):
            res = check_replication(node["host"], node["name"])
            if res: problems.append(res)
        # RAID check, only for nodes with "raid": True
        if node.get("raid", False):
            raid_health = check_remote_raid_md0(node["host"], node["ssh_user"], node["name"])
            if raid_health:
                problems.append(raid_health)
        # Log scan
        logs = check_remote_logs(node["host"], node["ssh_user"], node["name"])
        if logs:
            problems.extend(logs)

    # Send DM if anything wrong
    if problems:
        now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        msg = f"🚨 Genesis Radio Multi-Node Healthcheck {now} 🚨\n" + "\n".join(problems)
        print(msg)
        mastodon_dm(msg)
    else:
        print("OK: All checks passed.")

if __name__ == "__main__":
    main()
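
The script is a one-shot check: it prints, sends the DM, and exits, so something external has to schedule it. One plausible crontab entry, consistent with the 15-minute cadence and venv paths visible later in miscellaneous/dbcheck.log (the actual schedule is not part of this diff):

```bash
# Hypothetical schedule; interpreter and script paths taken from the traceback in dbcheck.log
*/15 * * * * /home/doc/dbcheck/bin/python /home/doc/genesis-tools/miscellaneous/dbcheck1.py >> /home/doc/genesis-tools/miscellaneous/dbcheck.log 2>&1
```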
docker/archivecontrol/Dockerfile (new file, 17 lines)
@@ -0,0 +1,17 @@
# Use the official Python image
FROM python:3.12-slim

# Set the working directory inside the container
WORKDIR /app

# Copy the application code into the container
COPY . /app/

# Install dependencies from requirements.txt inside the container
RUN pip install --no-cache-dir -r requirements.txt

# Expose the port your app will run on (adjust as needed)
EXPOSE 5000

# Command to run the application (adjust script name)
CMD ["python", "recordit2.py"]
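
A quick local smoke test of this image could look like the following sketch; the tag name and the bind mount of /mnt/archives (the ROOT_DIR that recordit2.py reads) are assumptions, and the build context must be docker/archivecontrol/ so that requirements.txt and recordit2.py get copied in:

```bash
cd docker/archivecontrol
docker build -t archivecontrol .
docker run --rm -p 5000:5000 -v /mnt/archives:/mnt/archives archivecontrol
```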
docker/archivecontrol/docker-compose.yml (new file, 55 lines)
@@ -0,0 +1,55 @@
version: '3'
services:
  # Archive Control app
  archivecontrol:
    build: ./archivecontrol
    ports:
      - "5000:5000"
    restart: always
    environment:
      - ENV=production

  # Archive List app
  archivelist:
    build: ./archivelist
    ports:
      - "5001:5000"
    restart: always
    environment:
      - ENV=production

  # DB Check app
  dbcheck:
    build: ./dbcheck
    ports:
      - "5002:5000"
    restart: always
    environment:
      - ENV=production

  # Hosting Toot app
  hostingtoot:
    build: ./hostingtoot
    ports:
      - "5003:5000"
    restart: always
    environment:
      - ENV=production

  # Radio Toot app
  radiotoot:
    build: ./radiotoot
    ports:
      - "5004:5000"
    restart: always
    environment:
      - ENV=production

  # Text app
  text:
    build: ./text
    ports:
      - "5005:5000"
    restart: always
    environment:
      - ENV=production
docker/archivecontrol/recordit2.py (new file, 160 lines)
@@ -0,0 +1,160 @@
import os
from flask import Flask, render_template_string, send_from_directory, abort
from mastodon import Mastodon

# === Configuration ===

ROOT_DIR = r"/mnt/archives"  # Update this path to where your shows live
ALLOWED_EXTENSIONS = {".mp3", ".wav", ".flac", ".m4a"}
BANNER_FILENAMES = ["banner.jpg", "banner.png", "banner.jpeg"]

# Friendly display names
DISPLAY_NAMES = {
    "80sdimension": "The 80s Dimension",
    "90slunch": "The 90s Lunch",
    "au": "Alternate Universe",
    "welch": "Bob Welch Retrospective",
    "bootycalls": "Booty Calls",
    "chaos": "The Chaos Bus",
    "mac": "Fleetwood Mac Retrospective",
    "gog": "The Good Ol Genesis",
    "housecalls": "House Calls",
    "pisces": "Pisces Playhouse",
    "retro": "The Retro Breakfast",
    "rockvault": "Rock Vault",
    "mayhem": "Rock and Roll Mayhem",
    "wakeman": "Rick Wakeman Retrospective",
    "sonicrevolt": "Sonic Revolt",
    "tunefuse": "TuneFuse",
    "wwo80s": "We Want Our 80s",
    "yacht": "Yacht Vibes Only",
    "yes": "Yes Retrospective",
}

# === URLs for File Hosting ===

BASE_URL = "http://server.genesis-radio.net:5020"  # This is the base URL where your files live (e.g., http://localhost:5000)

SERVER_URL = "http://genesis-radio.net"  # This is the general server URL if you need it for anything else

# === Flask App ===

app = Flask(__name__)

HOME_TEMPLATE = """
<!DOCTYPE html>
<html>
<head>
<title>Genesis Radio Archives</title>
<style>
body { font-family: Arial, sans-serif; background-color: #111; color: #eee; margin: 2em; }
h1 { font-size: 2em; color: #0ff; border-bottom: 2px solid #0ff; padding-bottom: 0.5em; }
ul { list-style: none; padding-left: 0; }
li { margin: 1em 0; }
a { color: #0cf; font-size: 1.3em; text-decoration: none; font-weight: bold; }
a:hover { text-decoration: underline; }
</style>
</head>
<body>
<h1>Genesis Radio: Show Archives</h1>
<ul>
{% for show in shows %}
<li><a href="{{ url_for('show_page', show_name=show) }}">{{ display_names.get(show, show) }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""

SHOW_TEMPLATE = """
<!DOCTYPE html>
<html>
<head>
<title>{{ show_name }} - Genesis Radio</title>
<style>
body { font-family: Arial, sans-serif; background-color: #111; color: #eee; margin: 2em; }
a { color: #0cf; text-decoration: none; }
a:hover { text-decoration: underline; }
h1 { color: #0ff; font-size: 1.8em; margin-bottom: 1em; border-bottom: 2px solid #0ff; padding-bottom: 0.3em; }
.back { margin-bottom: 1.5em; display: inline-block; color: #0cf; }
.audio-block { margin-bottom: 2em; }
p { font-weight: bold; color: #fff; }
audio { width: 100%; }
.banner { width: 100%; max-height: 250px; object-fit: cover; margin-bottom: 1em; }
</style>
</head>
<body>
<a href="{{ url_for('index') }}" class="back">← Back to shows</a>
<h1>{{ display_names.get(show_name, show_name) }}</h1>
{% if banner %}
<img src="{{ url_for('show_banner', show_name=show_name, banner_name=banner) }}" class="banner">
{% endif %}
{% for file in files %}
<div class="audio-block">
<p>{{ file }}</p>
<audio controls>
<source src="{{ url_for('stream_file', show=show_name, filename=file) }}" type="audio/mpeg">
Your browser does not support the audio element.
</audio>
</div>
{% else %}
<p>No audio files found for this show.</p>
{% endfor %}
</body>
</html>
"""

# === Utility Functions ===

def list_shows(base_dir):
    return sorted([d for d in os.listdir(base_dir) if os.path.isdir(os.path.join(base_dir, d))])

def list_audio_files(show_dir):
    return sorted([
        f for f in os.listdir(show_dir)
        if os.path.splitext(f)[1].lower() in ALLOWED_EXTENSIONS
    ])

def find_banner(show_dir):
    for name in BANNER_FILENAMES:
        if os.path.isfile(os.path.join(show_dir, name)):
            return name
    return None

# === Flask Routes ===

@app.route("/")
def index():
    shows = list_shows(ROOT_DIR)
    return render_template_string(HOME_TEMPLATE, shows=shows, display_names=DISPLAY_NAMES)

@app.route("/show/<show_name>")
def show_page(show_name):
    show_path = os.path.join(ROOT_DIR, show_name)
    if not os.path.isdir(show_path):
        abort(404)
    files = list_audio_files(show_path)
    banner = find_banner(show_path)
    return render_template_string(SHOW_TEMPLATE, show_name=show_name, files=files, banner=banner, display_names=DISPLAY_NAMES)

@app.route("/stream/<show>/<path:filename>")
def stream_file(show, filename):
    safe_path = os.path.join(ROOT_DIR, show)
    if os.path.isfile(os.path.join(safe_path, filename)):
        return send_from_directory(safe_path, filename, as_attachment=False)
    else:
        abort(404)

@app.route("/banner/<show_name>/<banner_name>")
def show_banner(show_name, banner_name):
    show_path = os.path.join(ROOT_DIR, show_name)
    if os.path.isfile(os.path.join(show_path, banner_name)):
        return send_from_directory(show_path, banner_name)
    else:
        abort(404)

# === Start Everything ===

if __name__ == "__main__":
    app.run(debug=True, host='0.0.0.0', port=5000)
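
Run directly (outside Docker) the app listens on port 5000, so a quick sanity check could be the following sketch; the "retro" show page is only an illustration and assumes a matching directory exists under /mnt/archives:

```bash
python recordit2.py &
curl http://localhost:5000/               # homepage: list of show directories
curl http://localhost:5000/show/retro     # one show page, assuming /mnt/archives/retro exists
```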
docker/archivecontrol/requirements.txt (new file, 24 lines)
@@ -0,0 +1,24 @@
blinker==1.9.0
blurhash==1.1.4
certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
decorator==5.2.1
Flask==3.1.0
Flask-SQLAlchemy==3.1.1
greenlet==3.2.0
idna==3.10
itsdangerous==2.2.0
Jinja2==3.1.6
MarkupSafe==3.0.2
Mastodon.py==2.0.1
psycopg2-binary==2.9.10
python-dateutil==2.9.0.post0
python-magic==0.4.27
requests==2.32.3
six==1.17.0
SQLAlchemy==2.0.40
typing_extensions==4.13.2
urllib3==2.4.0
watchdog==6.0.0
Werkzeug==3.1.3
@@ -1,5 +1,5 @@
-DB_USER=
-DB_PASSWORD=
-DB_NAME=
-DB_HOST_PRIMARY=
-MASTODON_ACCESS_TOKEN=
+DB_USER=hostingtootuser
+DB_PASSWORD=rusty2281
+DB_NAME=hostingtootdb
+DB_HOST_PRIMARY=38.102.127.174
+MASTODON_ACCESS_TOKEN=07w3Emdw-cv_TncysrNU8Ed_sHJhwtnvKmnLqKlHmKA
(One file diff suppressed because it is too large.)

miscellaneous/dbcheck.log (new file, 394 lines)
@@ -0,0 +1,394 @@
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 19:15:17 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 103 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 19:30:17 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 135 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 19:45:17 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 24 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 20:00:19 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 20:15:33 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 109 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 20:30:21 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 46 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 20:45:29 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 21:00:18 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 21:15:21 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 38 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 21:30:36 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 21 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 21:45:32 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 22 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 22:00:30 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 30 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 22:15:31 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 14 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 22:30:24 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 55 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 22:45:37 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 23:00:21 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 21 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 23:15:21 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 19 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 23:30:16 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 68 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-26 23:45:16 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 77 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 00:00:18 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 18 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 00:15:16 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 14 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 00:30:16 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 82 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 00:45:17 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 135 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 01:00:14 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 14 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 01:15:15 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 127 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 01:30:14 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 100 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 01:45:14 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 35 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 02:00:14 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 13 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 02:15:14 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 11 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 02:30:17 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 02:45:16 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 27 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 03:00:16 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 03:15:19 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 137 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 03:30:15 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 134 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 03:45:32 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 51 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 04:00:19 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 19 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 04:15:26 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 04:30:28 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 67 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 04:45:24 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 05:00:31 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 29 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 05:15:30 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 66 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 05:30:30 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 37 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 05:45:23 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 21 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 06:00:26 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 26 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 06:15:37 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 20 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 06:30:35 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 13 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 06:45:32 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 49 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 07:00:31 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 11 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 07:18:00 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 29 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 07:30:33 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 11 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 07:45:33 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 16 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 08:00:39 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 08:15:39 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 08:30:31 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 62 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 08:45:37 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 13 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 09:00:21 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 21 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 09:15:21 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 09:30:21 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 09:45:21 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 20 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 10:00:25 🚨
|
||||
[db1] ERROR: SMART status unknown on /dev/sda. Output:
|
||||
[db2] WARNING: Replication lag is 17 seconds.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 13:15:16 🚨
|
||||
[db2] WARNING: Replication lag is 62 seconds.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 14:15:18 🚨
|
||||
[db2] WARNING: Replication lag is 63 seconds.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 15:15:19 🚨
|
||||
[db2] WARNING: Replication lag is 69 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 15:30:19 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
[db2] WARNING: Replication lag is 105 seconds.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 16:15:18 🚨
|
||||
[db2] WARNING: Replication lag is 135 seconds.
|
||||
OK: All checks passed.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 16:45:19 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
Traceback (most recent call last):
|
||||
File "/home/doc/genesis-tools/miscellaneous/dbcheck1.py", line 257, in <module>
|
||||
main()
|
||||
File "/home/doc/genesis-tools/miscellaneous/dbcheck1.py", line 218, in main
|
||||
res = check_remote_disk(node["host"], node["ssh_user"], disk, node["name"])
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
File "/home/doc/genesis-tools/miscellaneous/dbcheck1.py", line 85, in check_remote_disk
|
||||
out = ssh_command(host, user, cmd)
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
File "/home/doc/genesis-tools/miscellaneous/dbcheck1.py", line 76, in ssh_command
|
||||
ssh.connect(hostname=host, username=user, timeout=10)
|
||||
File "/home/doc/dbcheck/lib/python3.12/site-packages/paramiko/client.py", line 386, in connect
|
||||
sock.connect(addr)
|
||||
TimeoutError: timed out
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 19:30:19 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
[db1] CRITICAL: Service postgresql@16-main.service not running!
|
||||
[db1] ERROR: Could not check replication: connection to server at "cluster.db1.genesishostingtechnologies.com" (38.102.127.174), port 5432 failed: Connection refused
|
||||
Is the server running on that host and accepting TCP/IP connections?
|
||||
|
||||
[db2] WARNING: Replication lag is 90 seconds.
|
||||
Failed to send Mastodon DM: <!DOCTYPE html>
|
||||
<html lang='en'>
|
||||
<head>
|
||||
<meta content='text/html; charset=UTF-8' http-equiv='Content-Type'>
|
||||
<meta charset='utf-8'>
|
||||
<title>This page is not correct
|
||||
- Mastodon Glitch Edition</title>
|
||||
<meta content='width=device-width,initial-scale=1' name='viewport'>
|
||||
<link rel="stylesheet" crossorigin="anonymous" href="/packs/css/skins/glitch/mastodon-light-ba448ef4.chunk.css" media="not all and (prefers-color-scheme: dark)" integrity="sha256-P4BQPI06vGtRMsuyy44qvSrh1psmbDTSDhcI2ZZsDcI=" /><link rel="stylesheet" crossorigin="anonymous" href="/packs/css/skins/glitch/default-b4389951.chunk.css" media="(prefers-color-scheme: dark)" integrity="sha256-Ve2Dw/Y6VxR48zbYZoB8JdFFHsH9QiYGIZs5DLuH/U4=" />
|
||||
<script src="/packs/js/common-8028e752d0f2aa93a5d6.js" crossorigin="anonymous" integrity="sha256-Yu9sY5Ca6EUBu337KcDfy59RIzPAUoLndPw2vudg2pg="></script>
|
||||
<script src="/packs/js/flavours/glitch/error-10438b0fe55aa68f54dd.chunk.js" crossorigin="anonymous" integrity="sha256-DL1chTim8F/pYKgN8OlM4xXp5561mQ8KKtSZsRT9bwA="></script>
|
||||
</head>
|
||||
<body class='error'>
|
||||
<div class='dialog'>
|
||||
<div class='dialog__illustration'>
|
||||
<img alt='Mastodon Glitch Edition' src='/oops.png'>
|
||||
</div>
|
||||
<div class='dialog__message'>
|
||||
<h1>We're sorry, but something went wrong on our end.
|
||||
</h1>
|
||||
</div>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 20:30:16 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 20:45:16 🚨
|
||||
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 21:00:17 🚨
|
||||
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 21:15:17 🚨
|
||||
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 21:30:15 🚨
|
||||
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
|
||||
[db2] WARNING: Replication lag is 66 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 21:45:19 🚨
|
||||
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
|
||||
[db2] WARNING: Replication lag is 106 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 22:00:15 🚨
|
||||
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 22:15:32 🚨
|
||||
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
|
||||
[db2] WARNING: Replication lag is 95 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 22:30:30 🚨
|
||||
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
|
||||
[db2] WARNING: Replication lag is 79 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 22:45:38 🚨
|
||||
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 23:00:28 🚨
|
||||
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 23:15:47 🚨
|
||||
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 23:30:29 🚨
|
||||
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-27 23:45:34 🚨
|
||||
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 00:00:23 🚨
|
||||
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 00:15:15 🚨
|
||||
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 00:30:16 🚨
|
||||
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 00:45:17 🚨
|
||||
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
|
||||
[db2] WARNING: Replication lag is 77 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 01:00:15 🚨
|
||||
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 01:15:15 🚨
|
||||
[shredder] WARNING: Pattern 'FATAL' in /var/log/syslog
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 01:30:16 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
[db2] WARNING: Replication lag is 72 seconds.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 02:30:18 🚨
|
||||
[db2] WARNING: Replication lag is 89 seconds.
|
||||
OK: All checks passed.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 03:00:16 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 03:15:18 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 03:30:17 🚨
|
||||
[db2] WARNING: Replication lag is 135 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 03:45:17 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 04:00:16 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 04:45:21 🚨
|
||||
[db2] WARNING: Replication lag is 64 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 05:00:27 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 08:00:20 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 08:15:20 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 09:45:24 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 10:00:18 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
OK: All checks passed.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 10:30:16 🚨
|
||||
[db2] WARNING: Replication lag is 75 seconds.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 12:00:31 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 12:45:24 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
[db2] WARNING: Replication lag is 94 seconds.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 13:00:33 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
OK: All checks passed.
|
||||
🚨 Genesis Radio Multi-Node Healthcheck 2025-04-28 14:15:19 🚨
|
||||
[mastodon] WARNING: Pattern 'ERROR' in /var/log/syslog
|
||||
OK: All checks passed.
|
||||
File "/home/doc/genesis-tools/miscellaneous/dbcheck1.py", line 181
|
||||
f.write("<table border='1' cellpadding='5' style='border-collapse: collapse;'>
|
||||
^
|
||||
SyntaxError: unterminated string literal (detected at line 181)
|
@ -2,72 +2,38 @@ import os
|
||||
import requests
|
||||
import datetime
|
||||
import paramiko
|
||||
import time
|
||||
|
||||
# ==== CONFIG ====
|
||||
MASTODON_INSTANCE = "https://chatwithus.live"
|
||||
MASTODON_TOKEN = "rimxBLi-eaJAcwagkmoj6UoW7Lc473tQY0cOM041Euw"
|
||||
MASTODON_USER_ID = "114386383616633367"
|
||||
HEALTHCHECK_HTML = "/var/www/html/healthcheck.html"
|
||||
|
||||
DISK_WARN_THRESHOLD = 10 # percent free
|
||||
INODE_WARN_THRESHOLD = 10 # percent free
|
||||
DISK_WARN_THRESHOLD = 10
|
||||
INODE_WARN_THRESHOLD = 10
|
||||
LOG_FILES = ["/var/log/syslog", "/var/log/nginx/error.log"]
|
||||
LOG_PATTERNS = ["ERROR", "FATAL", "disk full", "out of memory"]
|
||||
SUPPRESSED_PATTERNS = ["SomeKnownHarmlessMastodonError"]
|
||||
|
||||
NODES = [
|
||||
{
|
||||
"name": "shredder",
|
||||
"host": "38.102.127.171",
|
||||
"ssh_user": "doc",
|
||||
"services": [],
|
||||
"disks": ["/", "/mnt/raid5"],
|
||||
"type": "remote",
|
||||
"db": False,
|
||||
"raid": True
|
||||
},
|
||||
{
|
||||
"name": "mastodon",
|
||||
"host": "chatwithus.live", # Now points directly to your Mastodon server
|
||||
"ssh_user": "root",
|
||||
"services": ["nginx", "mastodon-web"],
|
||||
"disks": ["/"],
|
||||
"type": "remote",
|
||||
"db": False,
|
||||
"raid": False
|
||||
},
|
||||
{
|
||||
"name": "db1",
|
||||
"host": "cluster.db1.genesishostingtechnologies.com",
|
||||
"ssh_user": "doc",
|
||||
"services": ["postgresql"],
|
||||
"disks": ["/", "/var/lib/postgresql"],
|
||||
"type": "remote",
|
||||
"db": True,
|
||||
"raid": False
|
||||
},
|
||||
{
|
||||
"name": "db2",
|
||||
"host": "cluster.db2.genesishostingtechnologies.com",
|
||||
"ssh_user": "doc",
|
||||
"services": ["postgresql"],
|
||||
"disks": ["/", "/var/lib/postgresql"],
|
||||
"type": "remote",
|
||||
"db": True,
|
||||
"raid": False
|
||||
},
|
||||
{"name": "shredder", "host": "38.102.127.171", "ssh_user": "doc", "services": ["minio.service"], "disks": ["/", "/mnt/raid5"], "type": "remote", "db": False, "raid": True},
|
||||
{"name": "mastodon", "host": "chatwithus.live", "ssh_user": "root", "services": ["nginx", "mastodon-web"], "disks": ["/"], "type": "remote", "db": False, "raid": False},
|
||||
{"name": "db1", "host": "cluster.db1.genesishostingtechnologies.com", "ssh_user": "doc", "services": ["postgresql@16-main.service"], "disks": ["/", "/var/lib/postgresql"], "type": "remote", "db": True, "raid": False},
|
||||
{"name": "db2", "host": "cluster.db2.genesishostingtechnologies.com", "ssh_user": "doc", "services": ["postgresql@16-main.service"], "disks": ["/", "/var/lib/postgresql"], "type": "remote", "db": True, "raid": False}
|
||||
]
|
||||
|
||||
# ==== Mastodon DM function ====
|
||||
def mastodon_dm(message):
|
||||
# ==== Mastodon DM function with retry ====
|
||||
def mastodon_dm(message, retries=3):
|
||||
url = f"{MASTODON_INSTANCE}/api/v1/statuses"
|
||||
headers = {"Authorization": f"Bearer {MASTODON_TOKEN}"}
|
||||
payload = {
|
||||
"status": message,
|
||||
"visibility": "direct",
|
||||
"in_reply_to_account_id": MASTODON_USER_ID
|
||||
}
|
||||
resp = requests.post(url, headers=headers, data=payload)
|
||||
if resp.status_code != 200:
|
||||
print(f"Failed to send Mastodon DM: {resp.text}")
|
||||
payload = {"status": message, "visibility": "direct", "in_reply_to_account_id": MASTODON_USER_ID}
|
||||
for attempt in range(retries):
|
||||
resp = requests.post(url, headers=headers, data=payload)
|
||||
if resp.status_code == 200:
|
||||
return
|
||||
print(f"Failed to send Mastodon DM (attempt {attempt+1}): {resp.text}")
|
||||
time.sleep(5)
|
||||
|
||||
# ==== SSH command runner ====
|
||||
def ssh_command(host, user, cmd):
|
||||
@ -79,97 +45,33 @@ def ssh_command(host, user, cmd):
|
||||
ssh.close()
|
||||
return out
|
||||
|
||||
# ==== Robust Remote disk check ====
|
||||
def check_remote_disk(host, user, path, node_name):
|
||||
cmd = f"df --output=pcent {path} | tail -1 | tr -dc '0-9'"
|
||||
out = ssh_command(host, user, cmd)
|
||||
if not out:
|
||||
return f"[{node_name}] ERROR: Disk {path} not found or could not check disk usage."
|
||||
# ==== Emoji chooser ====
|
||||
def choose_emoji(line):
|
||||
if "RAID" in line:
|
||||
return "🧨"
|
||||
if "disk" in line.lower():
|
||||
return "📈"
|
||||
if "rclone" in line.lower():
|
||||
return "🐢"
|
||||
if "Service" in line:
|
||||
return "🛑"
|
||||
if "Replication" in line:
|
||||
return "💥"
|
||||
return "⚠️"
|
||||
|
||||
# ==== Check rclone health ====
|
||||
def check_rclone_health(node):
|
||||
try:
|
||||
percent = int(out)
|
||||
except ValueError:
|
||||
return f"[{node_name}] ERROR: Could not parse disk usage for {path}. Output was: '{out}'"
|
||||
if percent > 90:
|
||||
return f"[{node_name}] WARNING: Only {100-percent}% disk free on {path}."
|
||||
# Inode check
|
||||
cmd_inode = f"df --output=ipcent {path} | tail -1 | tr -dc '0-9'"
|
||||
out_inode = ssh_command(host, user, cmd_inode)
|
||||
if not out_inode:
|
||||
return f"[{node_name}] ERROR: Disk {path} not found or could not check inode usage."
|
||||
try:
|
||||
percent_inode = int(out_inode)
|
||||
except ValueError:
|
||||
return f"[{node_name}] ERROR: Could not parse inode usage for {path}. Output was: '{out_inode}'"
|
||||
if percent_inode > 90:
|
||||
return f"[{node_name}] WARNING: Only {100-percent_inode}% inodes free on {path}."
|
||||
return None
|
||||
|
||||
# ==== SMART health check (for all disks) ====
|
||||
def check_remote_smart(host, user, node_name):
|
||||
alerts = []
|
||||
# List block devices
|
||||
cmd_lsblk = "lsblk -ndo NAME,TYPE | awk '$2==\"disk\" {print $1}'"
|
||||
devs = ssh_command(host, user, cmd_lsblk)
|
||||
if not devs:
|
||||
alerts.append(f"[{node_name}] ERROR: Could not list block devices for SMART check.")
|
||||
return alerts
|
||||
for dev in devs.split():
|
||||
smart_cmd = f"sudo smartctl -H /dev/{dev}"
|
||||
out = ssh_command(host, user, smart_cmd)
|
||||
if "PASSED" in out:
|
||||
continue # All good
|
||||
elif "FAILED" in out or "Pre-fail" in out or "SMART support is: Unavailable" in out:
|
||||
alerts.append(f"[{node_name}] CRITICAL: SMART health issue on /dev/{dev}!\n{out}")
|
||||
elif "Unknown" in out or not out:
|
||||
alerts.append(f"[{node_name}] ERROR: SMART status unknown on /dev/{dev}. Output: {out}")
|
||||
# Optionally scan for other SMART errors
|
||||
return alerts
|
||||
|
||||
# ==== Remote service check ====
|
||||
def check_remote_service(host, user, service, node_name):
|
||||
cmd = f"systemctl is-active {service}"
|
||||
out = ssh_command(host, user, cmd)
|
||||
if out.strip() != "active":
|
||||
return f"[{node_name}] CRITICAL: Service {service} not running!"
|
||||
return None
|
||||
|
||||
# ==== Remote RAID md0 check (robust for all mdstat layouts) ====
|
||||
def check_remote_raid_md0(host, user, node_name):
|
||||
try:
|
||||
import re
|
||||
ssh = paramiko.SSHClient()
|
||||
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
|
||||
ssh.connect(hostname=host, username=user, timeout=10)
|
||||
stdin, stdout, stderr = ssh.exec_command("cat /proc/mdstat")
|
||||
mdstat = stdout.read().decode()
|
||||
|
||||
# Find the block for md0 and look for the [UU_] status
|
||||
lines = mdstat.splitlines()
|
||||
status = None
|
||||
inside_md0 = False
|
||||
for line in lines:
|
||||
if line.startswith("md0"):
|
||||
inside_md0 = True
|
||||
elif inside_md0:
|
||||
m = re.search(r"\[(U|_)+\]", line)
|
||||
if m:
|
||||
status = m.group(0)
|
||||
break
|
||||
# Stop searching if we hit a blank line or another array
|
||||
if line.strip() == "" or ":" in line:
|
||||
break
|
||||
|
||||
ssh.close()
|
||||
|
||||
if status is None:
|
||||
return f"[{node_name}] CRITICAL: /dev/md0 RAID status string not found!"
|
||||
if "_" in status:
|
||||
return f"[{node_name}] WARNING: /dev/md0 RAID degraded! Status: {status}"
|
||||
# All U's means all disks up
|
||||
return None
|
||||
|
||||
result = ssh_command(node["host"], node["ssh_user"], "rclone rc vfs/stats")
|
||||
if "error" in result.lower() or "failed" in result.lower():
|
||||
return ("critical", f"[{node['name']}] ERROR: rclone health check failed. Output: {result}")
|
||||
if "bytesUsed" in result:
|
||||
bytes_used = int(result.split('"bytesUsed":')[1].split(',')[0].strip())
|
||||
if bytes_used > 100000000000:
|
||||
return ("warning", f"[{node['name']}] WARNING: rclone cache usage high: {bytes_used} bytes used.")
|
||||
except Exception as e:
|
||||
return f"[{node_name}] ERROR: Could not check RAID health remotely: {e}"
|
||||
return ("critical", f"[{node['name']}] ERROR: Could not check rclone health: {str(e)}")
|
||||
return None
|
||||
|
||||
# ==== Remote log scan ====
|
||||
def check_remote_logs(host, user, node_name):
|
||||
@ -180,72 +82,108 @@ def check_remote_logs(host, user, node_name):
|
||||
out = ssh_command(host, user, cmd)
|
||||
lines = out.split("\n")
|
||||
for pattern in LOG_PATTERNS:
|
||||
if any(pattern in line for line in lines):
|
||||
alerts.append(f"[{node_name}] WARNING: Pattern '{pattern}' in {log}")
|
||||
for line in lines:
|
||||
if pattern in line and not any(suppress in line for suppress in SUPPRESSED_PATTERNS):
|
||||
alerts.append(f"[{node_name}] WARNING: Pattern '{pattern}' in {log}")
|
||||
except Exception as e:
|
||||
alerts.append(f"[{node_name}] ERROR: Could not read log {log}: {e}")
|
||||
return alerts
|
||||
|
||||
# ==== Remote PostgreSQL replication check ====
|
||||
def check_replication(host, node_name):
|
||||
try:
|
||||
import psycopg2
|
||||
conn = psycopg2.connect(host=host, dbname="postgres", user="postgres", connect_timeout=5)
|
||||
cur = conn.cursor()
|
||||
cur.execute("SELECT pg_is_in_recovery();")
|
||||
is_replica = cur.fetchone()[0]
|
||||
if is_replica:
|
||||
cur.execute("SELECT EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))::INT;")
|
||||
lag = cur.fetchone()[0]
|
||||
if lag is None:
|
||||
return f"[{node_name}] CRITICAL: Standby not streaming! Replication down."
|
||||
elif lag > 10:
|
||||
return f"[{node_name}] WARNING: Replication lag is {lag} seconds."
|
||||
cur.close()
|
||||
conn.close()
|
||||
except Exception as e:
|
||||
return f"[{node_name}] ERROR: Could not check replication: {e}"
|
||||
return None
|
||||
|
||||
# ==== Main routine ====
|
||||
# ==== Main Routine ====
|
||||
def main():
|
||||
problems = []
|
||||
critical_problems = []
|
||||
warning_problems = []
|
||||
node_status = {}
|
||||
|
||||
# Multi-node checks
|
||||
for node in NODES:
|
||||
# All checks via SSH
|
||||
status = "✅ Healthy"
|
||||
|
||||
if "rclone" in node.get("services", []):
|
||||
res = check_rclone_health(node)
|
||||
if res:
|
||||
level, msg = res
|
||||
if level == "critical":
|
||||
critical_problems.append(msg)
|
||||
status = "🚨 Critical"
|
||||
else:
|
||||
warning_problems.append(msg)
|
||||
if status != "🚨 Critical":
|
||||
status = "⚠️ Warning"
|
||||
|
||||
for disk in node["disks"]:
|
||||
res = check_remote_disk(node["host"], node["ssh_user"], disk, node["name"])
|
||||
if res: problems.append(res)
|
||||
# SMART check for all disks on this node
|
||||
smart_alerts = check_remote_smart(node["host"], node["ssh_user"], node["name"])
|
||||
if smart_alerts:
|
||||
problems.extend(smart_alerts)
|
||||
if res:
|
||||
if "CRITICAL" in res:
|
||||
critical_problems.append(res)
|
||||
status = "🚨 Critical"
|
||||
elif "WARNING" in res and status != "🚨 Critical":
|
||||
warning_problems.append(res)
|
||||
status = "⚠️ Warning"
|
||||
|
||||
for svc in node["services"]:
|
||||
res = check_remote_service(node["host"], node["ssh_user"], svc, node["name"])
|
||||
if res: problems.append(res)
|
||||
# Replication check
|
||||
if res:
|
||||
if "CRITICAL" in res:
|
||||
critical_problems.append(res)
|
||||
status = "🚨 Critical"
|
||||
elif "WARNING" in res and status != "🚨 Critical":
|
||||
warning_problems.append(res)
|
||||
status = "⚠️ Warning"
|
||||
|
||||
if node.get("db"):
|
||||
res = check_replication(node["host"], node["name"])
|
||||
if res: problems.append(res)
|
||||
# RAID check, only for nodes with "raid": True
|
||||
if res:
|
||||
critical_problems.append(res)
|
||||
status = "🚨 Critical"
|
||||
|
||||
if node.get("raid", False):
|
||||
raid_health = check_remote_raid_md0(node["host"], node["ssh_user"], node["name"])
|
||||
if raid_health:
|
||||
problems.append(raid_health)
|
||||
# Log scan
|
||||
res = check_remote_raid_md0(node["host"], node["ssh_user"], node["name"])
|
||||
if res:
|
||||
if "CRITICAL" in res:
|
||||
critical_problems.append(res)
|
||||
status = "🚨 Critical"
|
||||
elif status != "🚨 Critical":
|
||||
warning_problems.append(res)
|
||||
status = "⚠️ Warning"
|
||||
|
||||
        # Log scan
        logs = check_remote_logs(node["host"], node["ssh_user"], node["name"])
        if logs:
            problems.extend(logs)
            warning_problems.extend(logs)
            if status != "🚨 Critical":
                status = "⚠️ Warning"

        # Record per-node status for the dashboard; DMs are sent from the
        # aggregated critical/warning lists after the loop.
        node_status[node["name"]] = status

    now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    if critical_problems:
        formatted = "\n".join(f"- {choose_emoji(p)} {p}" for p in critical_problems)
        msg = f"🚨 Genesis Radio Critical Healthcheck {now} 🚨\n⚡ {len(critical_problems)} critical issues found:\n{formatted}"
        print(msg)
        mastodon_dm(msg)

    if warning_problems:
        formatted = "\n".join(f"- {choose_emoji(p)} {p}" for p in warning_problems)
        msg = f"⚠️ Genesis Radio Warning Healthcheck {now} ⚠️\n⚡ {len(warning_problems)} warnings found:\n{formatted}"
        print(msg)
        mastodon_dm(msg)

    if not critical_problems and not warning_problems:
        msg = f"✅ Genesis Radio Healthcheck {now}: All systems normal."
        print(msg)
        mastodon_dm(msg)

    # Write healthcheck HTML dashboard
    with open(HEALTHCHECK_HTML, "w") as f:
        f.write("<html><head><title>Genesis Radio Healthcheck</title><meta http-equiv='refresh' content='60'></head><body>")
        f.write("<h1>Genesis Radio System Health</h1>")
        f.write(f"<p>Last Checked: {now}</p>")
        f.write("<table border='1' cellpadding='5' style='border-collapse: collapse;'><tr><th>System</th><th>Status</th></tr>")
        for node, status in node_status.items():
            color = 'green' if 'Healthy' in status else ('orange' if 'Warning' in status else 'red')
            f.write(f"<tr><td>{node}</td><td style='color:{color};'>{status}</td></tr>")
        f.write("</table></body></html>")


if __name__ == "__main__":
    main()

@@ -6,9 +6,9 @@ PG_REMOTE_HOST="cluster.db2.genesishostingtechnologies.com"
 PG_REMOTE_PORT="5432"
 PG_LOCAL_PORT="5432"
 DUMP_DIR="/tmp/pgbackup_verify"
-BACKUP_TARGET="root@thevault.bounceme.net:/mnt/backup3/pgdumps"
+BACKUP_TARGET="root@backup.sshjunkie.com:/mnt/backup/pgdumps"
 CC_TARGET="doc@clustercontrol.sshjunkie.com:/home/doc/backups"
-DBS=("mastodon_production" "giteaprod")
+DBS=("mastodon_production" "giteaprod" "hostingtootdb" "radiotootdb")
 LOGFILE="$DUMP_DIR/verify_log_$(date +%Y%m%d_%H%M%S).txt"
 mkdir -p "$DUMP_DIR"
@@ -38,6 +38,10 @@ for DB in "${DBS[@]}"; do
        continue
    fi

    # Generate checksum for the dump file
    CHECKSUM_FILE="$DUMPFILE.sha256"
    sha256sum "$DUMPFILE" > "$CHECKSUM_FILE"

    # Restore/verify on Krang
    TESTDB="verify_${DB}_$RANDOM"
    echo "Creating test database $TESTDB" | tee -a "$LOGFILE"
@@ -49,6 +53,7 @@ for DB in "${DBS[@]}"; do
        continue
    fi

    # Restore the dump
    echo "Restoring to $TESTDB" | tee -a "$LOGFILE"
    sudo -u postgres psql -p "$PG_LOCAL_PORT" -d "$TESTDB" < "$DUMPFILE"
    if [ $? -ne 0 ]; then
@@ -59,6 +64,30 @@ for DB in "${DBS[@]}"; do
        continue
    fi

    # Sanity check: check row count of a critical table (e.g., 'users' table)
    ROW_COUNT=$(sudo -u postgres psql -p "$PG_LOCAL_PORT" -d "$TESTDB" -t -c "SELECT count(*) FROM users;")
    echo "Row count for 'users' table in $TESTDB: $ROW_COUNT" | tee -a "$LOGFILE"

    if [ "$ROW_COUNT" -le 0 ]; then
        echo "[FAIL] No rows in 'users' table after restore!" | tee -a "$LOGFILE"
        ALL_OK=false
        mastodon_alert "🚨 Database backup/verify FAILED: No rows found in 'users' table after restore for $DB on $(hostname) at $(date). See log: $LOGFILE"
        continue
    else
        echo "[PASS] Row count OK for 'users' table in $DB." | tee -a "$LOGFILE"
    fi

    # Verify checksum (re-hash the dump and compare against the stored checksum)
    if ! sha256sum -c "$CHECKSUM_FILE" >> "$LOGFILE" 2>&1; then
        echo "[FAIL] Checksum mismatch for $DB!" | tee -a "$LOGFILE"
        ALL_OK=false
        mastodon_alert "🚨 Database backup/verify FAILED: Checksum mismatch for $DB on $(hostname) at $(date). See log: $LOGFILE"
        continue
    else
        echo "[PASS] Checksum verified for $DB." | tee -a "$LOGFILE"
    fi

    # Quick table listing for sanity
    sudo -u postgres psql -p "$PG_LOCAL_PORT" -d "$TESTDB" -c "\dt" | tee -a "$LOGFILE"
    if [ $? -eq 0 ]; then
@@ -1,24 +0,0 @@
#!/usr/bin/env bash

set -e

BIN_DIR="/usr/local/bin"

echo "📦 Deploying Genesis Tools..."

declare -a SCRIPTS=(
    "venv_launcher.sh"
    "KodakMomentV1.sh"
)

for script in "${SCRIPTS[@]}"; do
    if [[ -f "$script" ]]; then
        echo "🔁 Installing $script to $BIN_DIR..."
        cp -f "$script" "$BIN_DIR/${script%.sh}"
        chmod +x "$BIN_DIR/${script%.sh}"
    else
        echo "⚠️ Warning: $script not found in $(pwd)"
    fi
done

echo "✅ Deployment complete. Try running: venv_launcher --check"
@@ -1,130 +0,0 @@
import os
import time
import re
from urllib.parse import quote
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from mastodon import Mastodon, MastodonError

# Configuration
SHOW_NAMES = {
    "retro": "The Retro Breakfast",
    "90slunch": "90s Lunch",
    "80sdimension": "80s Dimension",
    "au": "Alternate Universe",
    "bootycalls": "Booty Calls",
    "chaos": "Chaos",
    "gog": "Gog",
    "housecalls": "House Calls",
    "mac": "March of the Mac",
    "yes": "Yes Series",
    "welch": "Bob Welch Special",
    "wakeman": "Caped Crusader: The Rick Wakeman Retrospective",
    "mayhem": "Mayhem",
    "pisces": "Pisces Playhouse",
    "rockvault": "Rock Vault",
    "sonicrevolt": "Sonic Revolt",
    "tunefuse": "TuneFuse",
    "wwo80s": "WWO 80s",
    "yacht": "Yacht Rock",
}

BASE_URL = "http://www.server.genesis-radio.net:5020"
INSTANCE_URL = "https://chatwithus.live"
ACCESS_TOKEN = "y6cAV7FvTqtJzTHe8QoCO5JSlugIwHgy7zki6Lb5xns"

mastodon = Mastodon(
    access_token=ACCESS_TOKEN,
    api_base_url=INSTANCE_URL
)

# Keep track of files processed and the time they were processed
processed_files = {}
DEBOUNCE_TIME = 5  # Time in seconds to wait before processing the same file again

# Improved show name extraction based on directory aliasing
def extract_show_name(file_path):
    parent_dir = os.path.basename(os.path.dirname(file_path))
    return SHOW_NAMES.get(parent_dir, "Genesis Radio")

class FileEventHandler(FileSystemEventHandler):
    def on_created(self, event):
        if event.is_directory:
            return

        # Only process .mp3 files
        if not event.src_path.endswith('.mp3'):
            print(f"Skipping non-mp3 file: {event.src_path}")
            return

        current_time = time.time()  # Get the current time in seconds

        # If the file has been processed within the debounce window, skip it
        if event.src_path in processed_files:
            last_processed_time = processed_files[event.src_path]
            if current_time - last_processed_time < DEBOUNCE_TIME:
                print(f"Skipping duplicate event for file: {event.src_path}")
                return

        # Update the time of processing for this file
        processed_files[event.src_path] = current_time

        # Debugging: Confirm file creation detection
        print(f"File detected: {event.src_path}")

        file_path = event.src_path
        filename = os.path.basename(file_path)
        show_name = extract_show_name(file_path)

        # URL encode the filename and parent directory
        encoded_filename = quote(filename, safe='')
        parent_dir = os.path.basename(os.path.dirname(file_path))
        encoded_parent_dir = quote(parent_dir, safe='')

        # Construct the file URL to go to the new path format
        file_url = f"{BASE_URL}/show/{encoded_parent_dir}/{encoded_filename}"

        # Constructing a cleaner and more engaging Mastodon message
        message = f"🎉 New Archive Alert! 🎧 {show_name}'s latest episode is now available! 🎶\n\nTune in: {file_url}"

        # Debugging: Check the message before posting
        print(f"Message to post: {message}")

        try:
            mastodon.status_post(message)
            print("✅ Successfully posted.")
        except MastodonError as e:
            print(f"❌ Mastodon API Error: {e}")
            print(f"Full error: {e.args}")

if __name__ == "__main__":
    observer = Observer()
    handler = FileEventHandler()

    valid_directories = []
    for directory in SHOW_NAMES.keys():
        directory_path = os.path.join("/mnt/convert/archives", directory)
        if os.path.exists(directory_path):
            print(f"✅ Monitoring: {directory_path}")
            valid_directories.append(directory_path)
        else:
            print(f"❌ Skipping non-existent directory: {directory_path}")

    if not valid_directories:
        print("❌ No valid directories found to monitor. Exiting.")
        exit(1)

    for directory in valid_directories:
        observer.schedule(handler, directory, recursive=False)

    print("🔔 Genesis Radio Mastodon Notifier running. Press Ctrl+C to stop.")
    observer.start()

    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print("\n🔒 Shutting down observer...")
        observer.stop()

    observer.join()
@@ -3,8 +3,8 @@ set -euo pipefail
 
 ### CONFIG ###
 SOURCE_DIR="/home/doc/genesis-tools"
-DEST_HOST="root@thevault.bounceme.net"
-DEST_PATH="/mnt/backup3/images/genesis-tools"
+DEST_HOST="root@backup.sshjunkie.com"
+DEST_PATH="/mnt/backup/images/genesis-tools"
 REMOTE_LATEST_LINK="$DEST_PATH/latest"
 RETENTION_DAYS=7
 
@@ -4,7 +4,7 @@
 SNORT_LOG="/var/log/snort/snort.alert.fast"
 
 # Database connection details
-DB_HOST="38.102.127.166"
+DB_HOST="38.102.127.174"
 DB_USER="ipblocks_user"
 DB_PASS="rusty2281"
 DB_NAME="ipblocks"
@@ -6,7 +6,7 @@ from mastodon import Mastodon
 ICECAST_JSON_URL = "http://cast3.my-control-panel.com:7454/status-json.xsl"
 SONG_TRIGGER = "Spiderbait - Stevie"
 MASTODON_BASE_URL = "https://chatwithus.live"
-MASTODON_TOKEN = "rimxBLi-eaJAcwagkmoj6UoW7Lc473tQY0cOM041Euw"  # replace with your token
+MASTODON_TOKEN = "lKw-AO-vNZ0389uTlIlKxZlGJdNWPd6wdQ5dlHOAqyg"  # replace with your token
 TOOT_TEXT = "AH BUU BUU BUU BUU"
 
 # --- END CONFIG ---
@@ -7,6 +7,7 @@ declare -A VENV_APPS=(
     [recordtheshow]="app.py"
     [radiotoot]="app.py"
     [hostingtoot]="app.py"
+    [dbcheck]="dbcheck1.py"
 )
 
 SCRIPT_BASE="/home/doc/genesis-tools"
48
miscellaneous/ttsnews.py
Normal file
@@ -0,0 +1,48 @@
import feedparser
import requests
import os
from elevenlabs import ElevenLabs

def fetch_news(rss_url):
    """Fetch the latest news article from a satirical RSS feed."""
    feed = feedparser.parse(rss_url)
    if feed.entries:
        article_title = feed.entries[0].title
        article_summary = feed.entries[0].summary
        return article_title, article_summary
    return None, None

def generate_news_script(title, summary):
    """Generate a humorous news script from the full article."""
    if title and summary:
        script = f"Here is your latest Genesis Radio news update. "
        script += f"Today's story: {title}. {summary} "
        script += "For more news and entertainment keep it locked right here on Genesis Radio, Beginning with Great Music!"
        return script
    return "No new satirical news available at the moment."

def text_to_speech(text, output_file="latest_news.mp3"):
    """Convert text to speech using ElevenLabs and save as an MP3 file."""
    elevenlabs = ElevenLabs(api_key="sk_d2c55a2f1f71cd91fb498a986300e0aaf53879e54f53f5c0")
    audio = elevenlabs.generate(
        text=text,
        voice="David Hertel",  # Change to preferred ElevenLabs voice
        model="eleven_multilingual_v2"
    )

    audio_bytes = b"".join(audio)  # Convert generator to bytes

    with open(output_file, "wb") as f:
        f.write(audio_bytes)

    print(f"Satirical news update saved as {output_file}")

def main():
    rss_url = "https://www.theonion.com/rss"  # Satirical news source
    title, summary = fetch_news(rss_url)
    news_script = generate_news_script(title, summary)
    text_to_speech(news_script, "X:/rssnews/latest_news.mp3")  # Adjust path as needed

if __name__ == "__main__":
    main()
@@ -1,5 +1,5 @@
-DB_USER=
-DB_PASSWORD=
-DB_NAME=
-DB_HOST_PRIMARY=
-MASTODON_ACCESS_TOKEN=
+DB_USER=radiotootuser
+DB_PASSWORD=rusty2281
+DB_NAME=radiotootdb
+DB_HOST_PRIMARY=38.102.127.174
+MASTODON_ACCESS_TOKEN=07w3Emdw-cv_TncysrNU8Ed_sHJhwtnvKmnLqKlHmKA
BIN
radiotoot/__pycache__/app.cpython-312.pyc
Normal file
Binary file not shown.
@@ -188,6 +188,7 @@ def logout():
    logout_user()
    return redirect(url_for('login'))

# Route for the login page
@app.route('/login', methods=['GET', 'POST'])
def login():
    logger.debug("Rendering login page")
@@ -204,6 +205,35 @@ def login():
        flash('Invalid username or password')
    return render_template('login.html', form=form)

# Route for the registration page
@app.route('/register', methods=['GET', 'POST'])
def register():
    form = RegistrationForm()

    if form.validate_on_submit():
        # Get form data and hash the password
        username = form.username.data
        email = form.email.data
        password = form.password.data
        hashed_password = generate_password_hash(password)

        # Create a new user object
        new_user = User(
            username=username,
            email=email,
            password=hashed_password
        )

        # Add the new user to the database
        db.session.add(new_user)
        db.session.commit()

        # Flash a success message and redirect to login page
        flash('Your account has been created! You can now log in.', 'success')
        return redirect(url_for('login'))

    return render_template('register.html', form=form)

scheduler_lock = threading.Lock()

def schedule_toot(toot):
1
radiotoot/migrations/README
Normal file
@@ -0,0 +1 @@
Single-database configuration for Flask.
BIN
radiotoot/migrations/__pycache__/env.cpython-312.pyc
Normal file
Binary file not shown.
50
radiotoot/migrations/alembic.ini
Normal file
@@ -0,0 +1,50 @@
# A generic, single database configuration.

[alembic]
# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s

# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false


# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic,flask_migrate

[handlers]
keys = console

[formatters]
keys = generic

[logger_root]
level = WARN
handlers = console
qualname =

[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine

[logger_alembic]
level = INFO
handlers =
qualname = alembic

[logger_flask_migrate]
level = INFO
handlers =
qualname = flask_migrate

[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic

[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S
113
radiotoot/migrations/env.py
Normal file
@@ -0,0 +1,113 @@
import logging
from logging.config import fileConfig

from flask import current_app

from alembic import context

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
logger = logging.getLogger('alembic.env')


def get_engine():
    try:
        # this works with Flask-SQLAlchemy<3 and Alchemical
        return current_app.extensions['migrate'].db.get_engine()
    except (TypeError, AttributeError):
        # this works with Flask-SQLAlchemy>=3
        return current_app.extensions['migrate'].db.engine


def get_engine_url():
    try:
        return get_engine().url.render_as_string(hide_password=False).replace(
            '%', '%%')
    except AttributeError:
        return str(get_engine().url).replace('%', '%%')


# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
config.set_main_option('sqlalchemy.url', get_engine_url())
target_db = current_app.extensions['migrate'].db

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.


def get_metadata():
    if hasattr(target_db, 'metadatas'):
        return target_db.metadatas[None]
    return target_db.metadata


def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well. By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.

    """
    url = config.get_main_option("sqlalchemy.url")
    context.configure(
        url=url, target_metadata=get_metadata(), literal_binds=True
    )

    with context.begin_transaction():
        context.run_migrations()


def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """

    # this callback is used to prevent an auto-migration from being generated
    # when there are no changes to the schema
    # reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html
    def process_revision_directives(context, revision, directives):
        if getattr(config.cmd_opts, 'autogenerate', False):
            script = directives[0]
            if script.upgrade_ops.is_empty():
                directives[:] = []
                logger.info('No changes in schema detected.')

    conf_args = current_app.extensions['migrate'].configure_args
    if conf_args.get("process_revision_directives") is None:
        conf_args["process_revision_directives"] = process_revision_directives

    connectable = get_engine()

    with connectable.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=get_metadata(),
            **conf_args
        )

        with context.begin_transaction():
            context.run_migrations()


if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
24
radiotoot/migrations/script.py.mako
Normal file
@@ -0,0 +1,24 @@
"""${message}

Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}

"""
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}

# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
branch_labels = ${repr(branch_labels)}
depends_on = ${repr(depends_on)}


def upgrade():
    ${upgrades if upgrades else "pass"}


def downgrade():
    ${downgrades if downgrades else "pass"}
@@ -0,0 +1,84 @@
"""Add email column to user table

Revision ID: 27b841f29edb
Revises:
Create Date: 2025-04-24 18:14:20.471072

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = '27b841f29edb'
down_revision = None
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('toot', schema=None) as batch_op:
        batch_op.alter_column('message',
            existing_type=sa.VARCHAR(length=512),
            type_=sa.String(length=255),
            existing_nullable=False)
        batch_op.alter_column('toot_time',
            existing_type=sa.VARCHAR(length=8),
            type_=sa.String(length=5),
            existing_nullable=False)
        batch_op.alter_column('day',
            existing_type=sa.VARCHAR(length=10),
            type_=sa.String(length=9),
            existing_nullable=False)

    with op.batch_alter_table('user', schema=None) as batch_op:
        batch_op.add_column(sa.Column('email', sa.String(length=120), nullable=False))
        batch_op.add_column(sa.Column('password', sa.String(length=200), nullable=False))
        batch_op.alter_column('id',
            existing_type=sa.VARCHAR(length=36),
            type_=sa.Integer(),
            existing_nullable=False,
            autoincrement=True)
        batch_op.alter_column('username',
            existing_type=sa.VARCHAR(length=80),
            type_=sa.String(length=100),
            existing_nullable=False)
        batch_op.create_unique_constraint(None, ['email'])
        batch_op.drop_column('password_hash')

    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('user', schema=None) as batch_op:
        batch_op.add_column(sa.Column('password_hash', sa.VARCHAR(length=128), autoincrement=False, nullable=False))
        batch_op.drop_constraint(None, type_='unique')
        batch_op.alter_column('username',
            existing_type=sa.String(length=100),
            type_=sa.VARCHAR(length=80),
            existing_nullable=False)
        batch_op.alter_column('id',
            existing_type=sa.Integer(),
            type_=sa.VARCHAR(length=36),
            existing_nullable=False,
            autoincrement=True)
        batch_op.drop_column('password')
        batch_op.drop_column('email')

    with op.batch_alter_table('toot', schema=None) as batch_op:
        batch_op.alter_column('day',
            existing_type=sa.String(length=9),
            type_=sa.VARCHAR(length=10),
            existing_nullable=False)
        batch_op.alter_column('toot_time',
            existing_type=sa.String(length=5),
            type_=sa.VARCHAR(length=8),
            existing_nullable=False)
        batch_op.alter_column('message',
            existing_type=sa.String(length=255),
            type_=sa.VARCHAR(length=512),
            existing_nullable=False)

    # ### end Alembic commands ###
Binary file not shown.
@@ -1,25 +1,21 @@
 from flask_sqlalchemy import SQLAlchemy
-from flask_login import UserMixin
+from flask_login import UserMixin  # Import UserMixin for Flask-Login integration
 
 db = SQLAlchemy()
 
-class User(db.Model, UserMixin):
-    id = db.Column(db.String(36), primary_key=True)
-    username = db.Column(db.String(80), unique=True, nullable=False)
-    password_hash = db.Column(db.String(128), nullable=False)
-
-    def set_password(self, password):
-        from werkzeug.security import generate_password_hash
-        self.password_hash = generate_password_hash(password)
+class User(UserMixin, db.Model):  # Inherit from UserMixin!
+    id = db.Column(db.Integer, primary_key=True)
+    username = db.Column(db.String(100), unique=True, nullable=False)
+    email = db.Column(db.String(120), unique=True, nullable=False)
+    password_hash = db.Column(db.String(255), nullable=False)
 
     def check_password(self, password):
         from werkzeug.security import check_password_hash
         return check_password_hash(self.password_hash, password)
 
 
 class Toot(db.Model):
     id = db.Column(db.String(36), primary_key=True)
-    message = db.Column(db.String(512), nullable=False)
-    toot_time = db.Column(db.String(8), nullable=False)
-    day = db.Column(db.String(10), nullable=False)
+    message = db.Column(db.String(255), nullable=False)
+    toot_time = db.Column(db.String(5), nullable=False)
+    day = db.Column(db.String(9), nullable=False)
     suspended = db.Column(db.Boolean, default=False)
@@ -7,7 +7,7 @@ from datetime import datetime
 # Function to record the radio show using ffmpeg
 def record_show(folder_name, duration, filename_prefix):
     # Set the working directory for the recording
-    working_directory = "home/doc/Genesis Radio"
+    working_directory = "/home/doc/Genesis"
 
     # Ensure the folder exists in archives with the prefix as the folder name
     archives_directory = "/mnt/archives"
@@ -234,13 +234,13 @@
             ]
         },
         "testshow": {
-            "recording": false,
+            "recording": true,
             "duration": 900,
             "schedule": [
                 {
-                    "day": "Wednesday",
-                    "time": "08:45"
+                    "day": "Saturday",
+                    "time": "14:17"
                 }
             ]
         }
     }
 }