Auto-commit from giteapush.sh at 2025-05-18 09:55:34
This commit is contained in:
parent 62f54aac76
commit 2937929338
59
miscellaneous/bash/backup.sh
Executable file
@@ -0,0 +1,59 @@
#!/bin/bash

# Configuration
SRC_DIR="/home/mastodon/live"
DEST_DIR="/home/mastodon/backup"
PG_DB_NAME="mastodon_production"
PG_USER="mastodon"
PG_HOST="" # Leave empty for local socket connection
PG_PORT="5432"
TIMESTAMP=$(date +"%Y-%m-%d_%H-%M-%S")
BACKUP_DIR="${DEST_DIR}/mastodon_backup_${TIMESTAMP}"
LOG_FILE="${DEST_DIR}/backup_${TIMESTAMP}.log"

# Ensure the destination directory exists
mkdir -p "$BACKUP_DIR" || { echo "Failed to create backup directory"; exit 1; }

# Backup Mastodon files
echo "Starting rsync backup of Mastodon files..."
rsync -av --delete "$SRC_DIR" "$BACKUP_DIR/mastodon_files" >> "$LOG_FILE" 2>&1 || { echo "rsync failed"; exit 1; }

# Backup Nginx configuration files
echo "Starting backup of Nginx configuration files..."
rsync -av /etc/nginx "$BACKUP_DIR/nginx_configs" >> "$LOG_FILE" 2>&1 || { echo "rsync failed to backup Nginx configs"; exit 1; }

# Backup PostgreSQL database
echo "Starting PostgreSQL database backup..."
# Send the dump to the .sql file and only stderr to the log; a second stdout redirect here would divert the dump into the log.
pg_dump -U "$PG_USER" -d "$PG_DB_NAME" > "$BACKUP_DIR/mastodon_db_${TIMESTAMP}.sql" 2>> "$LOG_FILE" || { echo "pg_dump failed"; exit 1; }

# Compress the backup
echo "Compressing backup..."
tar -czf "${BACKUP_DIR}.tar.gz" -C "$DEST_DIR" "mastodon_backup_${TIMESTAMP}" >> "$LOG_FILE" 2>&1 || { echo "Compression failed"; exit 1; }

# Remove the uncompressed backup directory
echo "Removing uncompressed backup directory..."
ls -l "$BACKUP_DIR" >> "$LOG_FILE" 2>&1 # Debugging output
rm -rf "$BACKUP_DIR" >> "$LOG_FILE" 2>&1 || { echo "Failed to remove uncompressed backup directory"; exit 1; }

# Transfer backup to remote server
REMOTE_USER="root"
REMOTE_HOST="209.209.9.128"
REMOTE_DIR="/mnt/e"

echo "Transferring backup to remote server..." >> "$LOG_FILE" 2>&1
rsync -av "${DEST_DIR}/mastodon_backup_${TIMESTAMP}.tar.gz" "${REMOTE_USER}@${REMOTE_HOST}:${REMOTE_DIR}" >> "$LOG_FILE" 2>&1 || { echo "Remote rsync failed"; exit 1; }

# Remove local compressed backup file
echo "Removing local compressed backup file..." >> "$LOG_FILE" 2>&1
rm "${DEST_DIR}/mastodon_backup_${TIMESTAMP}.tar.gz" >> "$LOG_FILE" 2>&1 || { echo "Failed to remove local backup file"; exit 1; }

# Move log files to /home/mastodon/logs
LOG_DEST_DIR="/home/mastodon/logs"
mkdir -p "$LOG_DEST_DIR" >> "$LOG_FILE" 2>&1 || { echo "Failed to create log destination directory"; exit 1; }
mv "$LOG_FILE" "${LOG_DEST_DIR}/backup_${TIMESTAMP}.log" || { echo "Failed to move log file"; exit 1; }
# Point LOG_FILE at the moved log so the remaining steps do not recreate the old file in $DEST_DIR.
LOG_FILE="${LOG_DEST_DIR}/backup_${TIMESTAMP}.log"

# Clean up backup directory
echo "Cleaning up backup directory..." >> "$LOG_FILE" 2>&1
rm -rf "${DEST_DIR}"/* >> "$LOG_FILE" 2>&1 || { echo "Failed to clean up backup directory"; exit 1; }

echo "Backup completed: ${BACKUP_DIR}.tar.gz"
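Not part of the commit: if backup.sh is meant to run unattended, a root crontab entry along these lines would schedule it nightly (the installed path /home/mastodon/backup.sh and the 03:30 run time are assumptions for illustration):

# m h dom mon dow  command
30 3 * * * /home/mastodon/backup.sh >> /home/mastodon/logs/backup_cron.log 2>&1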
4
miscellaneous/bash/clean_media.sh
Executable file
@@ -0,0 +1,4 @@
#!/bin/bash
cd /home/mastodon/live
PATH=/home/mastodon/bin:/home/mastodon/.local/bin:/home/mastodon/.rbenv/plugins/ruby-build/bin:/home/mastodon/.rbenv/shims:/home/mastodon/.rbenv/bin:/usr/bin:/bin
RAILS_ENV=production bin/tootctl media remove --days=1 > log/media_remove.log 2>&1
3
miscellaneous/bash/clean_orphans.sh
Executable file
@@ -0,0 +1,3 @@
#!/bin/bash
cd /home/mastodon/live
PATH=/home/mastodon/bin:/home/mastodon/.local/bin:/home/mastodon/.rbenv/plugins/ruby-build/bin:/home/mastodon/.rbenv/shims:/home/mastodon/.rbenv/bin:/usr/bin:/bin
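As committed, clean_orphans.sh sets the working directory and PATH but never invokes a command. If the intent mirrors clean_media.sh, the missing final line would presumably look like the one below; this is an assumption based on the filename (tootctl media remove-orphans is the stock Mastodon task for orphaned media), not something the commit contains:

RAILS_ENV=production bin/tootctl media remove-orphans > log/orphan_remove.log 2>&1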
4
miscellaneous/bash/clean_previewcards.sh
Executable file
@@ -0,0 +1,4 @@
#!/bin/bash
cd /home/mastodon/live
PATH=/home/mastodon/bin:/home/mastodon/.local/bin:/home/mastodon/.rbenv/plugins/ruby-build/bin:/home/mastodon/.rbenv/shims:/home/mastodon/.rbenv/bin:/usr/bin:/bin
RAILS_ENV=production bin/tootctl preview-cards remove --days=14 > log/preview-cards_remove.log 2>&1
32
miscellaneous/bash/copydunkadunk.sh
Executable file
@@ -0,0 +1,32 @@
#!/bin/bash

set -euo pipefail

# Base path where your current datasets are mounted
BASE_PATH="/assets"

# Mapping of underscore-named folders to dash-named equivalents
declare -A BUCKETS=(
  ["assets_azuracast"]="assets-azuracast"
  ["assets_archives"]="assets-archives"
  ["assets_genesisassets"]="assets-genesisassets"
  ["assets_genesislibrary"]="assets-genesislibrary"
  ["assets_teamtalkdata"]="assets-teamtalkdata"
)

echo "=== Copying underscore-named folders to dash-named MinIO bucket folders ==="
for SRC in "${!BUCKETS[@]}"; do
  DEST="${BUCKETS[$SRC]}"
  echo "📦 Copying $SRC to $DEST ..."
  rsync -a --info=progress2 "$BASE_PATH/$SRC/" "$BASE_PATH/$DEST/"
  chown -R minio-user:minio-user "$BASE_PATH/$DEST"
done

echo ""
echo "✅ Done. You can now point MinIO at these dash-named paths:"
for DEST in "${BUCKETS[@]}"; do
  echo " /assets/$DEST"
done

echo "🔄 Then restart MinIO:"
echo " systemctl daemon-reload && systemctl restart minio"
2
miscellaneous/bash/deldirectories.sh
Executable file
@@ -0,0 +1,2 @@
#!/bin/bash
find /home/mastodon/backup/mastodon_backup* -mindepth 1 -type d -exec rm -rf {} +
60
miscellaneous/bash/do_the_needful.sh
Executable file
@@ -0,0 +1,60 @@
#!/bin/bash

# === CONFIG ===
SWAPPINESS_LEVEL=10
LOG_CLEANUP_LIMIT_DAYS=14
APACHE_SERVICES=("apache2" "httpd")
HOST=$(hostname)
TIMESTAMP=$(date "+%Y-%m-%d %H:%M:%S")

# === Telegram Config ===
BOT_TOKEN="8178867489:AAH0VjN7VnZSCIWasSz_y97iBLLjPJA751k"
CHAT_ID="1559582356"

echo "🔧 [$HOST] Starting health cleanup..."

# 1. Tune swappiness
echo "→ Setting vm.swappiness to $SWAPPINESS_LEVEL"
echo "vm.swappiness=$SWAPPINESS_LEVEL" | tee /etc/sysctl.d/99-swappiness.conf > /dev/null
sysctl -p /etc/sysctl.d/99-swappiness.conf > /dev/null

# 2. Disable Apache if not needed
apache_disabled=""
for svc in "${APACHE_SERVICES[@]}"; do
  if systemctl list-units --type=service --all | grep -q "$svc"; then
    echo "→ Apache service '$svc' detected"
    if ! ss -tulpn | grep -q ":80"; then
      echo " 🔕 Apache appears idle. Disabling..."
      systemctl disable --now "$svc"
      apache_disabled="yes"
    else
      echo " ⚠️ Apache is running and serving. Skipping stop."
    fi
  fi
done

# 3. Clean logs older than X days
echo "→ Cleaning logs older than $LOG_CLEANUP_LIMIT_DAYS days in /var/log"
find /var/log -type f -name "*.log" -mtime +$LOG_CLEANUP_LIMIT_DAYS -exec rm -f {} \;

# 4. Summary Info
MEM=$(free -h | grep Mem | awk '{print $4 " free"}')
SWAP=$(free -h | grep Swap | awk '{print $3 " used"}')
DISK=$(df -h / | awk 'NR==2 {print $4 " free"}')
LOAD=$(uptime | awk -F'load average:' '{print $2}' | xargs)

MSG="✅ [$HOST] Cleanup completed at $TIMESTAMP
Memory: $MEM
Swap: $SWAP
Disk: $DISK
Load: $LOAD"

if [ "$apache_disabled" == "yes" ]; then
  MSG="$MSG
Apache was detected and disabled ✅"
fi

# 5. Send Telegram message
curl -s -X POST https://api.telegram.org/bot$BOT_TOKEN/sendMessage \
  -d chat_id="$CHAT_ID" \
  -d text="$MSG"
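A quick way to sanity-check the Telegram side before relying on these alerts is to call the Bot API directly (getMe and sendMessage are standard Bot API methods; substitute the BOT_TOKEN and CHAT_ID values from the script):

curl -s "https://api.telegram.org/bot<BOT_TOKEN>/getMe"
curl -s -X POST "https://api.telegram.org/bot<BOT_TOKEN>/sendMessage" -d chat_id="<CHAT_ID>" -d text="test message"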
44
miscellaneous/bash/dr_mirror_to_linode.sh
Normal file
@@ -0,0 +1,44 @@
#!/bin/bash

# === CONFIG ===
ZFS_MOUNT="/assets"
LINODE_ALIAS="linode"
KRANG_BOT_TOKEN="your-bot-token-here"
CHAT_ID="your-chat-id-here"
MINIO_SERVICE="minio"
LOG_DIR="/home/doc/genesisdr" # <- customize this!

# === SETUP ===
mkdir -p "$LOG_DIR"
TIMESTAMP=$(date '+%Y-%m-%d_%H-%M-%S')
LOG_FILE="$LOG_DIR/mirror_$TIMESTAMP.log"

# === START LOGGING ===
exec > >(tee -a "$LOG_FILE") 2>&1

echo "🔐 Genesis DR MinIO Mirror Log — $TIMESTAMP"
echo "Log file: $LOG_FILE"
echo "Starting DR mirror from $ZFS_MOUNT to $LINODE_ALIAS"
echo "-------------------------------------------"

# === SYNC ===
mc mirror --overwrite "$ZFS_MOUNT" "$LINODE_ALIAS" --quiet
MIRROR_STATUS=$?

if [[ $MIRROR_STATUS -ne 0 ]]; then
  echo "❌ Mirror failed with exit code $MIRROR_STATUS"
  curl -s -X POST https://api.telegram.org/bot$KRANG_BOT_TOKEN/sendMessage \
    -d chat_id="$CHAT_ID" \
    -d text="❌ MinIO DR mirror to Linode FAILED. MinIO remains offline. Log: $LOG_FILE"
  exit 1
fi

echo "✅ Mirror complete. Starting MinIO..."
systemctl start "$MINIO_SERVICE"

curl -s -X POST https://api.telegram.org/bot$KRANG_BOT_TOKEN/sendMessage \
  -d chat_id="$CHAT_ID" \
  -d text="✅ MinIO DR mirror to Linode completed successfully. MinIO is online. Log: $LOG_FILE"

echo "🚀 All done."
echo "-------------------------------------------"
38
miscellaneous/bash/fix_queue.sh
Executable file
@@ -0,0 +1,38 @@
#!/bin/bash

# ===== CONFIG =====
USERNAME="$1"
RAILS_ENV=production
cd /home/mastodon/live || exit 1

if [[ -z "$USERNAME" ]]; then
  echo "❌ Usage: $0 <username>"
  exit 1
fi

echo "🔍 Looking up account ID for @$USERNAME..."
ACCOUNT_ID=$(sudo -u mastodon -H bundle exec rails runner "
acct = Account.find_by(username: '$USERNAME')
puts acct&.id || 'not_found'
")

if [[ "$ACCOUNT_ID" == "not_found" ]]; then
  echo "❌ Account @$USERNAME not found."
  exit 1
fi

echo "🗑️ Deleting Redis cache for home timeline..."
sudo -u mastodon -H redis-cli DEL feed:home:$ACCOUNT_ID

echo "🧱 Rebuilding timeline from followed accounts..."
sudo -u mastodon -H bundle exec rails runner "
acct = Account.find_by(username: '$USERNAME')
if acct
  FeedInsertWorker.push_bulk(acct.following.pluck(:id)) do |follower_id|
    [follower_id, acct.id]
  end
  puts '✅ Timeline repopulation enqueued.'
end
"

echo "✅ Done. Home timeline for @$USERNAME reset and rebuilt."
44
miscellaneous/bash/fix_queue2.sh
Executable file
@@ -0,0 +1,44 @@
#!/bin/bash

# ===== CONFIG =====
USERNAME="$1"
RAILS_ENV=production
cd /home/mastodon/live || exit 1

if [[ -z "$USERNAME" ]]; then
  echo "❌ Usage: $0 <username>"
  exit 1
fi

# Set full path for bundle
BUNDLE_PATH="/home/mastodon/.rbenv/shims/bundle"

# Set RAILS_ENV for the script execution
export RAILS_ENV=production

echo "🔍 Looking up account ID for @$USERNAME..."
ACCOUNT_ID=$(sudo -u mastodon -H $BUNDLE_PATH exec rails runner "
acct = Account.find_by(username: '$USERNAME')
puts acct&.id || 'not_found'
")

if [[ "$ACCOUNT_ID" == "not_found" ]]; then
  echo "❌ Account @$USERNAME not found."
  exit 1
fi

echo "🗑️ Deleting Redis cache for home timeline..."
sudo -u mastodon -H redis-cli DEL feed:home:$ACCOUNT_ID

echo "🧱 Rebuilding timeline from followed accounts..."
sudo -u mastodon -H $BUNDLE_PATH exec rails runner "
acct = Account.find_by(username: '$USERNAME')
if acct
  FeedInsertWorker.push_bulk(acct.following.pluck(:id)) do |follower_id|
    [follower_id, acct.id]
  end
  puts '✅ Timeline repopulation enqueued.'
end
"

echo "✅ Done. Home timeline for @$USERNAME reset and rebuilt."
41
miscellaneous/bash/fix_queue3.sh
Executable file
@@ -0,0 +1,41 @@
#!/bin/bash

# ===== CONFIG =====
USERNAME="$1"
RAILS_ENV=production
cd /home/mastodon/live || exit 1

if [[ -z "$USERNAME" ]]; then
  echo "❌ Usage: $0 <username>"
  exit 1
fi

# Set full path for bundle
BUNDLE_PATH="/home/mastodon/.rbenv/shims/bundle"

echo "🔍 Looking up account ID for @$USERNAME..."
ACCOUNT_ID=$(sudo -u mastodon -E env RAILS_ENV=production $BUNDLE_PATH exec rails runner "
acct = Account.find_by(username: '$USERNAME')
puts acct&.id || 'not_found'
")

if [[ "$ACCOUNT_ID" == "not_found" ]]; then
  echo "❌ Account @$USERNAME not found."
  exit 1
fi

echo "🗑️ Deleting Redis cache for home timeline..."
sudo -u mastodon -E env RAILS_ENV=production redis-cli DEL feed:home:$ACCOUNT_ID

echo "🧱 Rebuilding timeline from followed accounts..."
sudo -u mastodon -E env RAILS_ENV=production $BUNDLE_PATH exec rails runner "
acct = Account.find_by(username: '$USERNAME')
if acct
  FeedInsertWorker.push_bulk(acct.following.pluck(:id)) do |follower_id|
    [follower_id, acct.id]
  end
  puts '✅ Timeline repopulation enqueued.'
end
"

echo "✅ Done. Home timeline for @$USERNAME reset and rebuilt."
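All three fix_queue variants take the local account's username as their only argument and expect to be run by a user who can sudo to mastodon, for example (the username here is illustrative):

sudo ./fix_queue3.sh alice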
25
miscellaneous/bash/genesis_sync_progress.sh
Executable file
@@ -0,0 +1,25 @@
#!/bin/bash
# GenesisSync Progress Tracker - No hangs, no nonsense

SOURCE="/mnt/raid5/minio-data/linodeassets"
DEST="/assets/minio-data/mastodon"
LOG="/root/genesis_sync_progress.log"
INTERVAL=300 # in seconds

mkdir -p "$(dirname "$LOG")"

while true; do
  TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S')

  SRC_COUNT=$(rclone size "$SOURCE" --json | jq .objects)
  DST_COUNT=$(rclone size "$DEST" --json | jq .objects)

  # Also treat a zero source count as an error so the percentage math never divides by zero.
  if [[ -z "$SRC_COUNT" || -z "$DST_COUNT" || "$SRC_COUNT" -eq 0 ]]; then
    echo "[$TIMESTAMP] Error getting file counts. Retrying in $INTERVAL seconds..." | tee -a "$LOG"
  else
    PERCENT=$(( DST_COUNT * 100 / SRC_COUNT ))
    echo "[$TIMESTAMP] Synced: $DST_COUNT / $SRC_COUNT ($PERCENT%)" | tee -a "$LOG"
  fi

  sleep $INTERVAL
done
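The tracker loops forever, so it is presumably meant to run detached; one way to do that (an assumption, not part of the commit, with the script path shown only as an example) is:

nohup /root/genesis_sync_progress.sh >/dev/null 2>&1 &
tail -f /root/genesis_sync_progress.log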
72
miscellaneous/bash/migrationtoblock.sh
Executable file
@@ -0,0 +1,72 @@
#!/bin/bash

# === CONFIG ===
SRC="/mnt/raid5/minio-data/linodeassets"
DST="/mnt/mastodon-assets"
MOUNTPOINT="/home/mastodon/live/public/system"
LOGFILE="/var/log/mastodon_asset_migration_$(date +%Y%m%d_%H%M%S).log"

log() {
  echo "[$(date '+%F %T')] $*" | tee -a "$LOGFILE"
}

verify_sync() {
  local src_count=$(find "$SRC" -type f | wc -l)
  local dst_count=$(find "$DST" -type f | wc -l)
  local src_bytes=$(du -sb "$SRC" | awk '{print $1}')
  local dst_bytes=$(du -sb "$DST" | awk '{print $1}')

  echo "--- Verification Results ---" | tee -a "$LOGFILE"
  echo "Files: $src_count → $dst_count" | tee -a "$LOGFILE"
  echo "Bytes: $src_bytes → $dst_bytes" | tee -a "$LOGFILE"

  if [[ "$src_count" -ne "$dst_count" || "$src_bytes" -ne "$dst_bytes" ]]; then
    echo "❌ MISMATCH detected. Please review the rsync log." | tee -a "$LOGFILE"
  else
    echo "✅ Verified: source and destination match." | tee -a "$LOGFILE"
  fi
  echo "---------------------------" | tee -a "$LOGFILE"
}

# === PHASE 1: Live Sync ===
log "🚀 Starting Phase 1: Live rsync"
rsync -aAXv --progress "$SRC/" "$DST/" | tee -a "$LOGFILE"

# === Stop Mastodon ===
log "🛑 Stopping Mastodon services..."
systemctl stop mastodon-web mastodon-sidekiq mastodon-streaming || {
  log "❌ Failed to stop Mastodon services"; exit 1;
}

# === PHASE 2: Final Sync ===
log "🔁 Starting Phase 2: Final rsync with --delete"
rsync -aAXv --delete "$SRC/" "$DST/" | tee -a "$LOGFILE"

# === Bind Mount Cutover ===
log "🔗 Swapping in block storage as $MOUNTPOINT"
if [[ -d "$MOUNTPOINT" ]]; then
  mv "$MOUNTPOINT" "${MOUNTPOINT}.bak" || {
    log "❌ Could not move existing mountpoint"; exit 1;
  }
fi

mkdir -p "$MOUNTPOINT"
mount --bind "$DST" "$MOUNTPOINT"
grep -q "$MOUNTPOINT" /etc/fstab || echo "$DST $MOUNTPOINT none bind 0 0" >> /etc/fstab
log "[✓] Bind mount active and persisted"

# === Permissions ===
log "🔧 Fixing permissions on $DST"
chown -R mastodon:mastodon "$DST"

# === Restart Mastodon ===
log "🚀 Restarting Mastodon services..."
systemctl start mastodon-web mastodon-sidekiq mastodon-streaming || {
  log "❌ Failed to restart Mastodon services"; exit 1;
}

# === VERIFY ===
log "🧪 Verifying file count and byte totals"
verify_sync

log "🎉 Migration completed successfully. Mastodon is live on block storage."
74
miscellaneous/bash/p1.sh
Executable file
@@ -0,0 +1,74 @@
#!/bin/bash

# Configuration
SRC_DIR="/home/mastodon/live"
DEST_DIR="/home/mastodon/backup"
PG_DB_NAME="mastodon_production"
PG_USER="mastodon"
PG_HOST="38.102.127.174" # Use database IP
PG_PORT="5432"
TIMESTAMP=$(date +"%Y-%m-%d_%H-%M-%S")
BACKUP_DIR="${DEST_DIR}/mastodon_backup" # Removed the timestamp here for simplicity
LOG_FILE="$(pwd)/migration_checklist_${TIMESTAMP}.log" # Create log file in the same directory
REMOTE_USER="root"
REMOTE_HOST="38.102.127.167" # New server IP
REMOTE_DIR="/home/mastodon"

# Initialize the log file
echo "Migration checklist for real run on $(date)" > $LOG_FILE
echo "========================================" >> $LOG_FILE

# Step 1: Ensure necessary directories exist on the new server
echo "Checking if 'mastodon' user exists..." >> $LOG_FILE
id -u mastodon &>/dev/null || useradd -m mastodon

echo "Ensuring backup and log directories exist..." >> $LOG_FILE
mkdir -p /home/mastodon/mastodon_backup
mkdir -p /home/mastodon/logs

echo "Ensuring mastodon directory exists on remote server..." >> $LOG_FILE
mkdir -p "$DEST_DIR/mastodon_backup"

# Step 2: Check if the database is reachable
echo "Checking if the database is reachable..." >> $LOG_FILE
psql -U $PG_USER -h $PG_HOST -d $PG_DB_NAME -c 'SELECT 1;' || { echo "Database connection failed" >> $LOG_FILE; exit 1; }

# Step 3: Check if S3 storage is reachable
echo "Checking if S3 storage is reachable..." >> $LOG_FILE
curl --silent --head --fail 'https://chatwithus-live.us-east-1.linodeobjects.com' || echo 'S3 storage is not reachable' >> $LOG_FILE

# Step 4: Transfer files and directories
echo "Starting backup transfer..." >> $LOG_FILE

# Ensure the destination directory exists
mkdir -p $BACKUP_DIR

# Transfer Mastodon files from old server
rsync -avz --delete $SRC_DIR $BACKUP_DIR/mastodon_files # The '-z' flag compresses the data during transfer

# Transfer Nginx config
rsync -avz /etc/nginx $BACKUP_DIR/nginx_configs # Added compression for Nginx config transfer

# Backup PostgreSQL database
echo "Backing up PostgreSQL database..." >> $LOG_FILE
pg_dump -U $PG_USER -d $PG_DB_NAME > "$DEST_DIR/mastodon_db.sql"

# Ensure the backup directory is created (to be safe)
mkdir -p "$DEST_DIR/mastodon_backup"

# Compress the backup directory with tar (to reduce size)
echo "Creating backup archive..." >> $LOG_FILE
tar -czf "$DEST_DIR/mastodon_backup.tar.gz" -C "$DEST_DIR" mastodon_backup # Compress the backup directory

# Step 5: Transfer backup to new server
echo "Transferring backup to new server..." >> $LOG_FILE
rsync -avz ${DEST_DIR}/mastodon_backup.tar.gz ${REMOTE_USER}@${REMOTE_HOST}:${REMOTE_DIR} # Using compression during transfer

# Step 6: Remove local compressed backup file
rm ${DEST_DIR}/mastodon_backup.tar.gz

# Step 7: Move log files to /home/mastodon/logs
mv $LOG_FILE /home/mastodon/logs/backup_${TIMESTAMP}.log

# End of Part 1: Setup, checks, and transfer.
echo "Step 1-7 completed. Proceed with Part 2 to install Glitch-Soc." >> $LOG_FILE
74
miscellaneous/bash/p2.sh
Executable file
@@ -0,0 +1,74 @@
#!/bin/bash

# Function to print dry-run actions and log them
dry_run_echo() {
  if [ "$DRY_RUN" = true ]; then
    echo "Dry run: $1"
  else
    eval "$1"
    STATUS=$?
    if [ $STATUS -eq 0 ]; then
      echo "Success: $1"
    else
      echo "Failure: $1"
      echo "$1 failed" >> "$LOG_FILE"
      exit 1 # Optionally exit on failure
    fi
  fi
}
# Configuration
REMOTE_USER="root"
REMOTE_HOST="38.102.127.167" # New server IP
REMOTE_DIR="/home/mastodon"
PG_DB_NAME="mastodon_production"
PG_USER="mastodon"
PG_HOST="38.102.127.174"
PG_PORT="5432"
DRY_RUN=false # Set to true for dry-run, false for actual migration
TIMESTAMP=$(date +"%Y-%m-%d_%H-%M-%S") # Defined here because p2.sh runs standalone; without it the log name would be empty
LOG_FILE="$(pwd)/migration_checklist_${TIMESTAMP}.log" # Reuse the same log file

# Check if a dry run is requested
if [[ "$1" == "--dry-run" ]]; then
  DRY_RUN=true
  echo "Dry run mode activated."
else
  echo "Running the migration for real."
fi

# Step 1: Install Glitch-Soc dependencies on the new server
dry_run_echo "Installing dependencies for Glitch-Soc on the new server..."
dry_run_echo "ssh root@${REMOTE_HOST} 'apt update && apt upgrade -y && apt install -y git curl wget vim unzip sudo build-essential libpq-dev libssl-dev libreadline-dev zlib1g-dev libyaml-dev libcurl4-openssl-dev libffi-dev libgdbm-dev nginx postgresql postgresql-contrib nodejs yarn ruby-full certbot python3-certbot-nginx'"

# Step 2: Clone Glitch-Soc and install
dry_run_echo "Cloning Glitch-Soc repository..."
dry_run_echo "ssh root@${REMOTE_HOST} 'git clone https://github.com/glitch-soc/glitch-soc.git /home/mastodon/live'"

dry_run_echo "Installing Mastodon dependencies on the new server..."
dry_run_echo "ssh root@${REMOTE_HOST} 'cd /home/mastodon/live && bundle install --deployment'"

dry_run_echo "Running Mastodon asset precompilation..."
dry_run_echo "ssh root@${REMOTE_HOST} 'cd /home/mastodon/live && RAILS_ENV=production bundle exec rake assets:precompile'"

dry_run_echo "Setting up Mastodon services..."
dry_run_echo "ssh root@${REMOTE_HOST} 'systemctl enable mastodon-web mastodon-sidekiq mastodon-streaming && systemctl start mastodon-web mastodon-sidekiq mastodon-streaming'"

# Step 3: Test if Mastodon and Nginx are running correctly
dry_run_echo "Checking if Nginx and Mastodon are running..."
dry_run_echo "ssh root@${REMOTE_HOST} 'curl --silent --head --fail http://localhost' || echo 'Nginx or Mastodon is not responding'"
dry_run_echo "ssh root@${REMOTE_HOST} 'ps aux | grep mastodon' || echo 'Mastodon process is not running'"
dry_run_echo "ssh root@${REMOTE_HOST} 'systemctl status nginx' || echo 'Nginx is not running'"

# Step 4: Test Database and S3 access
dry_run_echo "Verifying database and object storage access on the new server..."
dry_run_echo "ssh root@${REMOTE_HOST} 'psql -U mastodon -h $PG_HOST -d $PG_DB_NAME -c \"SELECT 1;\"' || echo 'Database connection failed'"
dry_run_echo "ssh root@${REMOTE_HOST} 'curl --silent --head --fail \"https://chatwithus-live.us-east-1.linodeobjects.com\"' || echo 'S3 storage is not reachable'"

# Step 5: Clean up backup directories
dry_run_echo "Cleaning up backup directory on the new server..."
dry_run_echo "ssh root@${REMOTE_HOST} 'rm -rf /home/mastodon/backup/*'"

# Step 6: Final Check
dry_run_echo "Final check: Ensure DNS is updated and pointing to new IP."
dry_run_echo "Check DNS configuration and ensure it points to $REMOTE_HOST."

echo "Migration (Part 2) completed."
23
miscellaneous/bash/perms.sh
Executable file
@@ -0,0 +1,23 @@
#!/bin/bash
# Safe and resumable chmod script with progress output

TARGET_DIR="/mnt/raid5"
LOGFILE="$HOME/chmod_resume_$(date '+%Y%m%d-%H%M').log"
INTERVAL=500

echo "🔧 Starting permission normalization on $TARGET_DIR"
echo "Logging to $LOGFILE"
echo "Started at $(date)" >> "$LOGFILE"

i=0
# Feed the loop through process substitution: a piped "find | while" runs in a subshell,
# so the counter would read 0 in the summary line at the end.
while read -r dir; do
  chmod o+X "$dir"
  echo "✔️ $dir" >> "$LOGFILE"
  ((i++))
  if ((i % INTERVAL == 0)); then
    echo "⏳ Processed $i directories so far..."
  fi
done < <(find "$TARGET_DIR" -type d -not -perm -005)

echo "✅ Completed at $(date)" >> "$LOGFILE"
echo "✅ chmod finished. Total: $i directories."
68
miscellaneous/bash/restore.sh
Executable file
@@ -0,0 +1,68 @@
#!/bin/bash

# Configuration
REMOTE_SERVER="root@offsite.doctatortot.com"
REMOTE_BACKUP_DIR="/mnt/backup1/mastodon"
LOCAL_RESTORE_DIR="/home/mastodon/restore"
MASTODON_DIR="/home/mastodon/live"
PG_DB_NAME="mastodon_production"
PG_USER="mastodon"
PG_HOST="" # Leave empty for local socket connection
PG_PORT="5432"

# Create the local restore directory if it doesn't exist
mkdir -p "$LOCAL_RESTORE_DIR" || { echo "Failed to create restore directory"; exit 1; }

# Find the latest backup file on the remote server
echo "Finding the latest backup file on the remote server..."
LATEST_BACKUP=$(ssh $REMOTE_SERVER "ls -t $REMOTE_BACKUP_DIR/mastodon_backup_*.tar.gz | head -n 1")

if [ -z "$LATEST_BACKUP" ]; then
  echo "No backup files found on the remote server."
  exit 1
fi

echo "Latest backup file found: $LATEST_BACKUP"

# Transfer the latest backup file to the local server
echo "Transferring the latest backup file to the local server..."
scp "$REMOTE_SERVER:$LATEST_BACKUP" "$LOCAL_RESTORE_DIR" || { echo "Failed to transfer backup file"; exit 1; }

# Extract the backup file
BACKUP_FILE=$(basename "$LATEST_BACKUP")
echo "Extracting the backup file..."
tar -xzf "$LOCAL_RESTORE_DIR/$BACKUP_FILE" -C "$LOCAL_RESTORE_DIR" || { echo "Failed to extract backup file"; exit 1; }

# Stop Mastodon services
echo "Stopping Mastodon services..."
sudo systemctl stop mastodon-web mastodon-sidekiq mastodon-streaming || { echo "Failed to stop Mastodon services"; exit 1; }

# Restore Mastodon files
echo "Restoring Mastodon files..."
# The mastodon_backup_* glob must stay outside the quotes so the shell can expand it.
rsync -av --delete "$LOCAL_RESTORE_DIR"/mastodon_backup_*/mastodon_files/ "$MASTODON_DIR" || { echo "rsync failed"; exit 1; }

# Restore PostgreSQL database
echo "Restoring PostgreSQL database..."
PG_DUMP_FILE=$(find "$LOCAL_RESTORE_DIR" -name "mastodon_db_*.sql")
if [ -z "$PG_DUMP_FILE" ]; then
  echo "Database dump file not found."
  exit 1
fi

psql -U "$PG_USER" -d "$PG_DB_NAME" -f "$PG_DUMP_FILE" || { echo "psql restore failed"; exit 1; }

# Run database migrations
echo "Running database migrations..."
cd "$MASTODON_DIR" || { echo "Failed to enter $MASTODON_DIR"; exit 1; }
RAILS_ENV=production bundle exec rails db:migrate || { echo "Database migrations failed"; exit 1; }

# Start Mastodon services
echo "Starting Mastodon services..."
sudo systemctl start mastodon-web mastodon-sidekiq mastodon-streaming || { echo "Failed to start Mastodon services"; exit 1; }

# Clean up
echo "Cleaning up..."
rm -rf "$LOCAL_RESTORE_DIR"/mastodon_backup_* || { echo "Failed to clean up restore files"; exit 1; }
rm "$LOCAL_RESTORE_DIR/$BACKUP_FILE" || { echo "Failed to remove backup file"; exit 1; }

echo "Restore completed successfully."
24
miscellaneous/bash/sync.sh
Executable file
@@ -0,0 +1,24 @@
#!/bin/bash

# Setup alias (even if it already exists)
mc alias set minio http://localhost:9000 genesisadmin MutationXv3! || true

# Note: the $RECYCLE.BIN excludes are single-quoted so the shell does not expand $RECYCLE to an empty string.
echo "[*] Syncing genesisassets → Q:"
mc mirror \
  --overwrite \
  --remove \
  --exclude "/System Volume Information/**" \
  --exclude '/$RECYCLE.BIN/**' \
  --exclude "**/Thumbs.db" \
  minio/genesisassets /mnt/spl/qdrive || echo "[!] Q: sync completed with warnings"

echo "[*] Syncing genesislibrary → R:"
mc mirror \
  --overwrite \
  --remove \
  --exclude "/System Volume Information/**" \
  --exclude '/$RECYCLE.BIN/**' \
  --exclude "**/Thumbs.db" \
  minio/genesislibrary /mnt/spl/rdrive || echo "[!] R: sync completed with warnings"

echo "[✓] All syncs finished"
23
miscellaneous/bash/tothebank.sh
Executable file
@@ -0,0 +1,23 @@
#!/bin/bash
# Mastodon Media Audit: Find orphaned .part.* dirs & estimate space wasted
# For mounted MinIO (e.g., /assets/minio-data/mastodon)

TARGET="/assets/minio-data/mastodon/"
LOG="/tmp/mastodon_zombie_audit.log"
REPORT="/tmp/mastodon_zombie_report.txt"

echo "[*] Auditing .part.* zombie files under: $TARGET" | tee "$LOG"

# Find all part.1 or similar that are actually directories
find "$TARGET" -type d -name "part.*" > "$REPORT"

TOTAL=$(cat "$REPORT" | wc -l)
SIZE=$(du -shc $(cat "$REPORT" 2>/dev/null) 2>/dev/null | tail -1 | awk '{print $1}')

echo "[✔️] Found $TOTAL suspicious .part.* directories" | tee -a "$LOG"
echo "[📦] Estimated wasted space: $SIZE" | tee -a "$LOG"

echo "Top offenders:" | tee -a "$LOG"
du -sh $(cat "$REPORT" | head -n 20) 2>/dev/null | sort -hr | tee -a "$LOG"

echo -e "\n🚨 To delete these, run:\n sudo xargs rm -rf < $REPORT" | tee -a "$LOG"
88
miscellaneous/bash/upgrade.sh
Executable file
@@ -0,0 +1,88 @@
#!/bin/bash

# ---- CONFIGURATION ----
DOMAIN="your.mastodon.domain" # Replace this with your real domain
ACCOUNT_USERNAME="administration"
SCRIPT_PATH="/root/finish_upgrade.sh"
LOGFILE="/root/mastodon_upgrade_$(date +%F_%H-%M-%S).log"
exec > >(tee -a "$LOGFILE") 2>&1
set -e

echo "===== Mastodon 20.04 → 22.04 Upgrade Starter ====="

read -p "❗ Have you backed up your system and database? (yes/no): " confirmed
if [[ "$confirmed" != "yes" ]]; then
  echo "❌ Aborting. Please take a backup."
  exit 1
fi

echo "🔧 Updating system..."
apt update && apt upgrade -y
apt install update-manager-core curl -y

echo "🛑 Stopping Mastodon..."
systemctl stop mastodon-web mastodon-sidekiq mastodon-streaming

echo "🔁 Preparing post-reboot upgrade finalization..."

# ---- Create finish_upgrade.sh ----
cat << EOF > $SCRIPT_PATH
#!/bin/bash
LOGFILE="/root/mastodon_post_upgrade_\$(date +%F_%H-%M-%S).log"
exec > >(tee -a "\$LOGFILE") 2>&1
set -e

echo "===== Post-Reboot Finalization Script ====="

echo "🔄 Restarting Mastodon services..."
systemctl daemon-reexec
systemctl daemon-reload
systemctl start mastodon-web mastodon-sidekiq mastodon-streaming

echo "✅ Checking service status..."
systemctl status mastodon-web --no-pager
systemctl status mastodon-sidekiq --no-pager
systemctl status mastodon-streaming --no-pager

echo "🌐 Homepage check..."
if curl --silent --fail https://$DOMAIN >/dev/null; then
echo "✅ Homepage is reachable."
else
echo "❌ Homepage failed to load."
fi

echo "📣 Posting announcement toot..."
cd /home/mastodon/live
# The '\''-style escape below lets the apostrophe survive the single-quoted bash -c wrapper.
sudo -u mastodon -H bash -c '
RAILS_ENV=production bundle exec rails runner "
acct = Account.find_by(username: \\"$ACCOUNT_USERNAME\\")
if acct
PostStatusService.new.call(acct, text: \\"✅ Server upgrade to Ubuntu 22.04 complete. We'\''re back online!\\")
end
"'

echo "🧹 Cleaning up..."
apt autoremove -y && apt autoclean -y

echo "🚫 Removing rc.local to prevent rerun..."
rm -f /etc/rc.local
rm -f $SCRIPT_PATH

echo "✅ Post-upgrade steps complete."
EOF

chmod +x $SCRIPT_PATH

# ---- Set rc.local to run after reboot ----
cat << EOF > /etc/rc.local
#!/bin/bash
bash $SCRIPT_PATH
exit 0
EOF

chmod +x /etc/rc.local

echo ""
echo "🚀 Starting do-release-upgrade..."
sleep 3
do-release-upgrade
78
miscellaneous/bash/validate_zfs.sh
Normal file
@@ -0,0 +1,78 @@
#!/bin/bash

# CONFIG
ZFS_BASE="/mnt/zfs_minio"
BUCKETS=(
  "assets-azuracastassets"
  "assets-genesisassets"
  "assets-genesislibrary"
  "assets-genesisarchives"
  "assets-mastodon"
)
SAMPLE_COUNT=5
USER="minio-user"
GROUP="minio-user"

# COLORS
GREEN="\033[0;32m"
RED="\033[0;31m"
YELLOW="\033[1;33m"
NC="\033[0m"

echo "🔍 Validating migrated MinIO buckets..."
echo

for bucket in "${BUCKETS[@]}"; do
  OLD_PATH="${ZFS_BASE}/${bucket}"
  NEW_BUCKET=$(echo "$bucket" | tr '_' '-')
  NEW_PATH="${ZFS_BASE}/${NEW_BUCKET}"

  echo -e "${YELLOW}=== Bucket: $bucket → $NEW_BUCKET ===${NC}"

  if [[ ! -d "$OLD_PATH" || ! -d "$NEW_PATH" ]]; then
    echo -e "${RED}❌ Missing directory: ${OLD_PATH} or ${NEW_PATH}${NC}"
    echo
    continue
  fi

  # 1. File count check
  old_count=$(find "$OLD_PATH" -type f | wc -l)
  new_count=$(find "$NEW_PATH" -type f | wc -l)
  echo "📦 File count: $old_count (old) vs $new_count (new)"

  [[ "$old_count" -eq "$new_count" ]] && \
    echo -e "${GREEN}✅ File count matches${NC}" || \
    echo -e "${RED}❌ File count mismatch${NC}"

  # 2. Sample checksum
  echo "🔐 Verifying checksums for $SAMPLE_COUNT random files..."
  mismatch=0
  samples=$(find "$OLD_PATH" -type f | shuf -n "$SAMPLE_COUNT" 2>/dev/null)

  for file in $samples; do
    rel_path="${file#$OLD_PATH/}"
    old_sum=$(sha256sum "$OLD_PATH/$rel_path" | awk '{print $1}')
    new_sum=$(sha256sum "$NEW_PATH/$rel_path" | awk '{print $1}')

    if [[ "$old_sum" != "$new_sum" ]]; then
      echo -e "${RED}❌ Mismatch: $rel_path${NC}"
      ((mismatch++))
    else
      echo -e "${GREEN}✔ Match: $rel_path${NC}"
    fi
  done

  [[ "$mismatch" -eq 0 ]] && \
    echo -e "${GREEN}✅ All sample checksums match${NC}" || \
    echo -e "${RED}❌ $mismatch checksum mismatch(es) found${NC}"

  # 3. Ownership check
  ownership_issues=$(find "$NEW_PATH" ! -user "$USER" -o ! -group "$GROUP" | wc -l)
  [[ "$ownership_issues" -eq 0 ]] && \
    echo -e "${GREEN}✅ Ownership is correct${NC}" || \
    echo -e "${RED}❌ $ownership_issues ownership issues in $NEW_PATH${NC}"

  echo
done

echo -e "${YELLOW}📊 Validation complete. Review any ❌ issues before going live with MinIO.${NC}"
39
miscellaneous/bash/verify_minio.sh
Executable file
@@ -0,0 +1,39 @@
#!/bin/bash

set -euo pipefail

# CONFIG
ZFS_PATH="/assets/"
MINIO_USER="minio-user"
EXPECTED_BUCKETS=(
  "assets_azuracast"
  "assets_archives"
  "assets_genesisassets"
  "assets_genesislibrary"
  "assets_mastodon"
  "assets_teamtalkdata"
)

echo "=== Verifying ZFS MinIO Layout in $ZFS_PATH ==="

for BUCKET in "${EXPECTED_BUCKETS[@]}"; do
  BUCKET_PATH="$ZFS_PATH/$BUCKET"
  echo "- Checking: $BUCKET_PATH"

  if [ -d "$BUCKET_PATH" ]; then
    echo " ✅ Exists"
    OWNER=$(stat -c '%U' "$BUCKET_PATH")
    if [ "$OWNER" == "$MINIO_USER" ]; then
      echo " ✅ Ownership correct: $OWNER"
    else
      echo " ❌ Ownership incorrect: $OWNER"
    fi
  else
    echo " ❌ Missing bucket directory!"
  fi
done

echo ""
echo "If MinIO is already running, run the following to confirm bucket visibility:"
echo " mc alias set local http://localhost:9000 genesisadmin MutationXv3!"
echo " mc ls local/"
50
miscellaneous/bash/zfs_bootstrap.sh
Executable file
@@ -0,0 +1,50 @@
#!/bin/bash

set -euo pipefail

# CONFIGURATION
ORIG_MINIO_PATH="/assets"
NEW_ZFS_PATH="/zfs/disk1"
MINIO_BUCKETS=(
  "assets_azuracast"
  "assets_archives"
  "assets_genesisassets"
  "assets_genesislibrary"
  "assets_mastodon"
  "assets_teamtalkdata"
)
MINIO_USER="minio-user"
MINIO_SERVICE="minio"

echo "=== Step 1: Preparing new ZFS path ==="
mkdir -p "$NEW_ZFS_PATH"

for BUCKET in "${MINIO_BUCKETS[@]}"; do
  CLEAN_NAME="${BUCKET/assets_/}" # Remove 'assets_' prefix
  SRC="$ORIG_MINIO_PATH/$BUCKET/"
  DEST="$NEW_ZFS_PATH/$CLEAN_NAME/"

  echo "=== Step 2: Rsyncing $BUCKET → $CLEAN_NAME ==="
  rsync -a --info=progress2 "$SRC" "$DEST"

  echo "=== Step 3: Fixing ownership for: $CLEAN_NAME ==="
  chown -R "$MINIO_USER:$MINIO_USER" "$DEST"
done

echo "=== Step 4: Update MinIO service (manual step) ==="
echo "Set ExecStart in minio.service to:"
echo " /usr/local/bin/minio server $NEW_ZFS_PATH --console-address \":9001\""

echo "=== Step 5: Reload and restart MinIO ==="
echo "Run:"
echo " systemctl daemon-reload"
echo " systemctl restart $MINIO_SERVICE"

echo "=== Step 6: Validate with mc ==="
echo "Run:"
echo " mc alias set local http://localhost:9000 genesisadmin MutationXv3!"
echo " mc ls local/"

echo ""
echo "✅ All buckets (including teamtalkdata) are now synced to the ZFS backend."
echo "To roll back, revert minio.service ExecStart and restart MinIO."