# Commit notes (482 lines, 16 KiB, Bash):
# - Add lib/sysbackup.sh and lib/sysrestore.sh for system-level backup and
#   restore of WHM/cPanel config, packages, and cron jobs
# - Wire cmd_sysbackup and cmd_sysrestore into bin/gniza
# - Add --sysbackup flag to cmd_backup: runs system backup after all account
#   backups complete
# - Add SYSBACKUP schedule config key so cron jobs can include --sysbackup
#   automatically via build_cron_line()
# - Add "Include system backup" toggle to WHM schedule form
# - Revert sysbackup toggle from remotes.cgi (belongs in schedules)
# Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
#!/usr/bin/env bash

# gniza/lib/sysrestore.sh — System-level WHM restore: phased restore of configs, services, DNS
#
# This file is a library and is meant to be sourced (it uses a top-level
# `return`, which only works in sourced scripts). It calls log_info/log_warn/
# log_error/log_debug and transfer helpers (_is_rclone_mode, rclone_from_remote,
# _rsync_download, _system_snap_subpath, _system_snap_base) that are not
# defined here — presumably provided by sibling gniza libraries; verify when
# sourcing standalone.

# Source guard: if the library was already loaded, stop re-sourcing it.
[[ -n "${_GNIZA_SYSRESTORE_LOADED:-}" ]] && return 0
_GNIZA_SYSRESTORE_LOADED=1
|
|
|
|
# ── Download ─────────────────────────────────────────────────
|
|
|
|
download_system_snapshot() {
  # Fetch a system snapshot from the configured remote into a local staging dir.
  #   $1 - snapshot timestamp identifier
  #   $2 - local staging directory (created if missing)
  # Delegates to rclone or rsync depending on the configured transfer mode.
  # Returns 1 if the staging directory cannot be created; otherwise the
  # transfer helper's status.
  local ts="$1" dest="$2"

  if ! mkdir -p "$dest"; then
    log_error "Failed to create staging directory: $dest"
    return 1
  fi

  if _is_rclone_mode; then
    local snap_sub
    snap_sub=$(_system_snap_subpath)
    log_info "Downloading system snapshot $ts (rclone)..."
    rclone_from_remote "${snap_sub}/${ts}" "$dest"
    return
  fi

  local base
  base=$(_system_snap_base)
  log_info "Downloading system snapshot $ts (rsync)..."
  _rsync_download "$base/$ts/" "$dest/"
}
|
|
|
|
# ── Restore Helpers ──────────────────────────────────────────
|
|
|
|
_restore_file() {
  # Copy a single file from the staged snapshot back to its live location.
  #   $1 - staging directory (snapshot root containing files/)
  #   $2 - path relative to /, e.g. "etc/exim.conf"
  # Returns 0 when restored or absent from the backup, 1 on copy failure.
  local stage="$1" rel="$2"
  local source_path="$stage/files/$rel"

  if [[ ! -e "$source_path" ]]; then
    log_debug "Not in backup (skipping): /$rel"
    return 0
  fi

  # Make sure the destination's parent directory exists before copying.
  mkdir -p "$(dirname "/$rel")"
  if ! cp -a "$source_path" "/$rel"; then
    log_error "Failed to restore: /$rel"
    return 1
  fi
  log_info "Restored: /$rel"
}
|
|
|
|
_restore_dir() {
  # Copy a whole directory tree from the staged snapshot to its live location.
  #   $1 - staging directory (snapshot root containing files/)
  #   $2 - directory path relative to /, e.g. "etc/valiases"
  # Returns 0 when restored or absent from the backup, 1 on copy failure.
  local stage="$1" rel="$2"
  local source_path="$stage/files/$rel"

  if [[ ! -d "$source_path" ]]; then
    log_debug "Not in backup (skipping): /$rel"
    return 0
  fi

  mkdir -p "/$rel"
  # "$src/." copies the directory's contents (including dotfiles) into place.
  if ! cp -a "$source_path/." "/$rel/"; then
    log_error "Failed to restore directory: /$rel"
    return 1
  fi
  log_info "Restored directory: /$rel"
}
|
|
|
|
_restart_service() {
  # Restart a service, preferring cPanel's restartsrv_* wrapper, falling back
  # to systemctl.
  #   $1 - service name (e.g. "exim", "httpd", "mysql", "named")
  # Returns 1 when no restart method exists or the restart fails.
  local service="$1"
  local wrapper="/scripts/restartsrv_${service}"

  if [[ -x "$wrapper" ]]; then
    log_info "Restarting $service via $wrapper..."
    if ! "$wrapper" >/dev/null 2>&1; then
      log_warn "Failed to restart $service via $wrapper"
      return 1
    fi
    log_info "$service restarted successfully"
    return 0
  fi

  if command -v systemctl &>/dev/null; then
    log_info "Restarting $service via systemctl..."
    if ! systemctl restart "$service" 2>/dev/null; then
      log_warn "Failed to restart $service via systemctl"
      return 1
    fi
    log_info "$service restarted successfully"
    return 0
  fi

  log_warn "No restart method available for $service"
  return 1
}
|
|
|
|
# ── API Apply Helpers ────────────────────────────────────────
|
|
|
|
_apply_tweaksettings() {
  # Re-apply exported WHM tweak settings via `whmapi1 set_tweaksetting`.
  #   $1 - staging directory containing api/tweaksettings.json
  # A missing or unparseable export is logged and skipped (returns 0);
  # individual setting failures are counted and logged at debug level.
  local stage="$1"
  local export_file="$stage/api/tweaksettings.json"

  if [[ ! -f "$export_file" ]]; then
    log_warn "No tweaksettings.json found, skipping"
    return 0
  fi

  log_info "Applying tweak settings via whmapi1..."
  local applied=0 failed=0

  # Flatten the exported JSON into key=value lines (python3 for JSON parsing).
  local settings
  settings=$(python3 -c "
import sys, json
data = json.load(sys.stdin)
tw = data.get('data', {}).get('tweaksetting', {})
if isinstance(tw, dict):
    for k, v in tw.items():
        print(f'{k}={v}')
elif isinstance(tw, list):
    for item in tw:
        if isinstance(item, dict):
            for k, v in item.items():
                print(f'{k}={v}')
" < "$export_file" 2>/dev/null) || true

  if [[ -z "$settings" ]]; then
    log_warn "No tweak settings parsed from export"
    return 0
  fi

  # IFS='=' splits on the first '='; the remainder (including any further
  # '=' characters) lands in $value.
  local key value
  while IFS='=' read -r key value; do
    [[ -n "$key" ]] || continue
    if whmapi1 set_tweaksetting key="$key" value="$value" >/dev/null 2>&1; then
      applied=$((applied + 1))
    else
      failed=$((failed + 1))
      log_debug "Failed to set tweak setting: $key"
    fi
  done <<< "$settings"

  log_info "Applied $applied tweak setting(s) ($failed failed)"
}
|
|
|
|
_apply_packages() {
  # Recreate hosting packages from an exported whmapi1 package dump.
  #   $1 - staging directory containing api/packages.json
  # A missing or unparseable export is logged and skipped (returns 0).
  # Packages that already exist make addpkg fail and are counted as such.
  local stage="$1"
  local export_file="$stage/api/packages.json"

  if [[ ! -f "$export_file" ]]; then
    log_warn "No packages.json found, skipping"
    return 0
  fi

  log_info "Recreating hosting packages via whmapi1..."
  local created=0 failed=0

  # Emit one "name|key=val key=val ..." line per package (python3 for JSON).
  local packages
  packages=$(python3 -c "
import sys, json
data = json.load(sys.stdin)
pkgs = data.get('data', {}).get('pkg', [])
for p in pkgs:
    name = p.get('name', '')
    if not name:
        continue
    args = []
    for k, v in p.items():
        if k == 'name':
            continue
        args.append(f'{k}={v}')
    print(name + '|' + ' '.join(args))
" < "$export_file" 2>/dev/null) || true

  if [[ -z "$packages" ]]; then
    log_warn "No packages parsed from export"
    return 0
  fi

  local pkg_name pkg_args
  while IFS='|' read -r pkg_name pkg_args; do
    [[ -n "$pkg_name" ]] || continue
    # Intentionally unquoted: pkg_args is a space-separated key=value list
    # that must word-split into individual whmapi1 arguments.
    # shellcheck disable=SC2086
    if whmapi1 addpkg name="$pkg_name" $pkg_args >/dev/null 2>&1; then
      created=$((created + 1))
      log_debug "Created package: $pkg_name"
    else
      failed=$((failed + 1))
      log_debug "Failed to create package: $pkg_name (may already exist)"
    fi
  done <<< "$packages"

  log_info "Created $created package(s) ($failed failed/existing)"
}
|
|
|
|
_apply_dns_zones() {
  # Recreate DNS zones from exported JSON zone dumps.
  #   $1 - staging directory containing api/zones/*.zone (JSON exports)
  # Each zone is created with `whmapi1 adddns`, then restorable record types
  # are re-added with `whmapi1 addzonerecord`. Missing zones directory is a
  # clean skip. Always returns 0; per-zone failures are counted and logged.
  local stage_dir="$1"
  local zones_dir="$stage_dir/api/zones"

  if [[ ! -d "$zones_dir" ]]; then
    log_warn "No zones directory found, skipping DNS restore"
    return 0
  fi

  log_info "Restoring DNS zones via whmapi1..."
  local count=0
  local failed=0

  local zone_file
  for zone_file in "$zones_dir"/*.zone; do
    [[ -f "$zone_file" ]] || continue
    local domain; domain=$(basename "$zone_file" .zone)
    [[ -z "$domain" ]] && continue

    # Ensure the zone exists before adding records (failure usually just
    # means it is already present).
    if ! whmapi1 adddns domain="$domain" >/dev/null 2>&1; then
      log_debug "Zone may already exist: $domain"
    fi

    # Parse restorable record types out of the JSON export as
    # "type|name|address|ttl" lines. (Removed the unused `line` variable
    # the original parser assigned from record['Line'].)
    local records; records=$(python3 -c "
import sys, json
data = json.load(sys.stdin)
zone_data = data.get('data', {}).get('zone', [])
if isinstance(zone_data, list):
    for record in zone_data:
        rtype = record.get('type', '')
        name = record.get('name', '')
        if rtype in ('A', 'AAAA', 'CNAME', 'MX', 'TXT', 'SRV', 'CAA'):
            address = record.get('address', record.get('cname', record.get('txtdata', '')))
            ttl = record.get('ttl', '14400')
            print(f'{rtype}|{name}|{address}|{ttl}')
" < "$zone_file" 2>/dev/null) || true

    if [[ -z "$records" ]]; then
      log_debug "No parseable records for zone: $domain"
      continue
    fi

    local zone_count=0
    while IFS='|' read -r rtype rname address ttl; do
      [[ -z "$rtype" || -z "$rname" ]] && continue
      if whmapi1 addzonerecord domain="$domain" name="$rname" type="$rtype" address="$address" ttl="$ttl" >/dev/null 2>&1; then
        ((zone_count++)) || true
      fi
    done <<< "$records"

    # BUG FIX: the original never incremented `failed` (it always reported
    # "0 failed") and counted a zone as restored even when every record
    # failed to apply. Count a zone as failed when it had parseable records
    # but none could be added.
    if (( zone_count > 0 )); then
      ((count++)) || true
      log_debug "Restored zone $domain ($zone_count records)"
    else
      ((failed++)) || true
      log_warn "No records applied for zone: $domain"
    fi
  done

  log_info "Restored $count DNS zone(s) ($failed failed)"
}
|
|
|
|
# ── Phase Functions ──────────────────────────────────────────
|
|
|
|
_restore_phase1_foundation() {
  # Phase 1: core cPanel/WHM configuration — wwwacct.conf, cpanel.config,
  # package/feature definitions — plus API-driven tweak-setting and package
  # recreation.
  #   $1 - staging directory
  #   $2 - "true" for dry-run (log planned actions only)
  # Returns 1 when any individual restore step failed.
  local stage="$1"
  local dry="${2:-false}"
  local errors=0

  log_info "=== Phase 1: Foundation ==="

  if [[ "$dry" == "true" ]]; then
    local msg
    for msg in \
      "Would restore: /etc/wwwacct.conf" \
      "Would restore: /var/cpanel/cpanel.config" \
      "Would restore: /var/cpanel/packages/" \
      "Would restore: /var/cpanel/features/" \
      "Would restore: /etc/cpanel/" \
      "Would apply tweak settings via whmapi1" \
      "Would recreate packages via whmapi1"; do
      log_info "[DRY RUN] $msg"
    done
    return 0
  fi

  local path
  for path in etc/wwwacct.conf var/cpanel/cpanel.config; do
    _restore_file "$stage" "$path" || errors=$((errors + 1))
  done
  for path in var/cpanel/packages var/cpanel/features etc/cpanel; do
    _restore_dir "$stage" "$path" || errors=$((errors + 1))
  done

  # API-driven steps are best-effort and do not affect the error count.
  _apply_tweaksettings "$stage"
  _apply_packages "$stage"

  if (( errors > 0 )); then
    log_warn "Phase 1 completed with $errors error(s)"
    return 1
  fi
  log_info "Phase 1 completed successfully"
}
|
|
|
|
_restore_phase2_services() {
  # Phase 2: service configuration — Exim, Apache, PHP/EasyApache, MySQL and
  # BIND — restoring config files from the snapshot and restarting each
  # service as it is completed.
  #   $1 - staging directory
  #   $2 - "true" for dry-run (log planned actions only)
  # Returns 1 when any individual restore/restart step failed.
  local stage_dir="$1"
  local dry_run="${2:-false}"
  local errors=0

  log_info "=== Phase 2: Services ==="

  if [[ "$dry_run" == "true" ]]; then
    log_info "[DRY RUN] Would restore Exim config files + restart exim"
    log_info "[DRY RUN] Would restore Apache config files + rebuildhttpdconf + restart httpd"
    log_info "[DRY RUN] Would restore PHP/EasyApache config + restart httpd"
    log_info "[DRY RUN] Would restore MySQL config + restart mysql"
    log_info "[DRY RUN] Would restore BIND config + zone files + restart named"
    return 0
  fi

  # Exim
  log_info "--- Restoring Exim configuration ---"
  _restore_file "$stage_dir" "etc/exim.conf" || ((errors++)) || true
  _restore_file "$stage_dir" "etc/exim.conf.local" || ((errors++)) || true
  _restore_file "$stage_dir" "etc/exim.conf.localopts" || ((errors++)) || true
  _restore_file "$stage_dir" "etc/localdomains" || ((errors++)) || true
  _restore_file "$stage_dir" "etc/remotedomains" || ((errors++)) || true
  _restore_file "$stage_dir" "etc/secondarymx" || ((errors++)) || true
  _restore_dir "$stage_dir" "etc/valiases" || ((errors++)) || true
  _restore_dir "$stage_dir" "etc/vdomainaliases" || ((errors++)) || true
  _restore_dir "$stage_dir" "etc/vfilters" || ((errors++)) || true
  _restart_service "exim" || ((errors++)) || true

  # Apache
  log_info "--- Restoring Apache configuration ---"
  _restore_dir "$stage_dir" "etc/httpd/conf" || ((errors++)) || true
  _restore_dir "$stage_dir" "usr/local/apache/conf" || ((errors++)) || true
  _restore_dir "$stage_dir" "etc/httpd/conf.d" || ((errors++)) || true
  _restore_dir "$stage_dir" "etc/cpanel/ea4" || ((errors++)) || true
  # Regenerate httpd.conf from cPanel templates before restarting.
  if [[ -x /usr/local/cpanel/scripts/rebuildhttpdconf ]]; then
    log_info "Rebuilding Apache configuration..."
    /usr/local/cpanel/scripts/rebuildhttpdconf >/dev/null 2>&1 || log_warn "rebuildhttpdconf failed"
  fi
  _restart_service "httpd" || ((errors++)) || true

  # PHP / EasyApache
  log_info "--- Restoring PHP configuration ---"
  _restore_dir "$stage_dir" "var/cpanel/MultiPHP" || ((errors++)) || true
  # BUG FIX: the original discovered ea-php config dirs by parsing `ls -d`
  # output (SC2012), which breaks on paths containing whitespace. Iterate
  # the glob directly instead; an unmatched glob stays literal and fails
  # the -d guard, so "no ea-php dirs" is handled naturally.
  local ea_src rel
  for ea_src in "$stage_dir/files/opt/cpanel/ea-php"*/root/etc/; do
    [[ -d "$ea_src" ]] || continue
    # Relative path under files/, as expected by _restore_dir.
    rel="${ea_src#"$stage_dir/files/"}"
    _restore_dir "$stage_dir" "$rel" || ((errors++)) || true
  done

  # MySQL
  log_info "--- Restoring MySQL configuration ---"
  _restore_file "$stage_dir" "etc/my.cnf" || ((errors++)) || true
  _restore_file "$stage_dir" "root/.my.cnf" || ((errors++)) || true
  _restart_service "mysql" || ((errors++)) || true

  # BIND / Named
  log_info "--- Restoring BIND configuration ---"
  _restore_file "$stage_dir" "etc/named.conf" || ((errors++)) || true
  _restore_dir "$stage_dir" "var/named" || ((errors++)) || true
  _restart_service "named" || ((errors++)) || true

  if (( errors > 0 )); then
    log_warn "Phase 2 completed with $errors error(s)"
    return 1
  fi
  log_info "Phase 2 completed successfully"
}
|
|
|
|
_restore_phase3_security() {
  # Phase 3: networking and security state — IP assignments, CSF firewall,
  # root SSH keys, root crontab, and gniza's own configuration.
  #   $1 - staging directory
  #   $2 - "true" for dry-run (log planned actions only)
  # Returns 1 when any individual restore step failed.
  local stage="$1"
  local dry="${2:-false}"
  local errors=0

  log_info "=== Phase 3: Network & Security ==="

  if [[ "$dry" == "true" ]]; then
    log_info "[DRY RUN] Would restore IP configuration"
    log_info "[DRY RUN] Would restore CSF firewall config + csf -r"
    log_info "[DRY RUN] Would restore /root/.ssh/"
    log_info "[DRY RUN] Would restore root crontab"
    log_info "[DRY RUN] Would restore /etc/gniza/"
    return 0
  fi

  log_info "--- Restoring IP configuration ---"
  local f
  for f in etc/ips etc/reservedips etc/reservedipreasons etc/sysconfig/network etc/resolv.conf; do
    _restore_file "$stage" "$f" || errors=$((errors + 1))
  done

  log_info "--- Restoring CSF firewall ---"
  _restore_dir "$stage" "etc/csf" || errors=$((errors + 1))
  if command -v csf &>/dev/null; then
    log_info "Restarting CSF firewall..."
    if ! csf -r >/dev/null 2>&1; then
      log_warn "CSF restart failed"
      errors=$((errors + 1))
    fi
  fi

  log_info "--- Restoring root SSH keys ---"
  _restore_dir "$stage" "root/.ssh" || errors=$((errors + 1))
  # sshd refuses keys with loose permissions; enforce the standard modes.
  if [[ -d /root/.ssh ]]; then
    chmod 700 /root/.ssh 2>/dev/null || true
    chmod 600 /root/.ssh/* 2>/dev/null || true
    chmod 644 /root/.ssh/*.pub 2>/dev/null || true
  fi

  log_info "--- Restoring root crontab ---"
  _restore_file "$stage" "var/spool/cron/root" || errors=$((errors + 1))

  log_info "--- Restoring gniza configuration ---"
  _restore_dir "$stage" "etc/gniza" || errors=$((errors + 1))

  if (( errors > 0 )); then
    log_warn "Phase 3 completed with $errors error(s)"
    return 1
  fi
  log_info "Phase 3 completed successfully"
}
|
|
|
|
_restore_phase4_dns() {
  # Phase 4: recreate DNS zones through the WHM API.
  #   $1 - staging directory
  #   $2 - "true" for dry-run (log planned actions only)
  # Always returns 0 (zone failures are logged inside _apply_dns_zones).
  local stage="$1"
  local dry="${2:-false}"

  log_info "=== Phase 4: DNS via API ==="

  if [[ "$dry" == "true" ]]; then
    log_info "[DRY RUN] Would recreate DNS zones via whmapi1 adddns/addzonerecord"
    return 0
  fi

  _apply_dns_zones "$stage"
  log_info "Phase 4 completed"
}
|
|
|
|
# ── Restore Orchestrator ─────────────────────────────────────
|
|
|
|
run_system_restore() {
  # Orchestrate a phased system restore from a downloaded snapshot.
  #   $1 - staging directory (must contain files/ and/or api/)
  #   $2 - phase selection string, default "1,2,3,4" (substring match per phase)
  #   $3 - "true" for dry-run
  # Returns 1 if the snapshot is invalid or any selected phase reported errors.
  local stage="$1"
  local phases="${2:-1,2,3,4}"
  local dry="${3:-false}"
  local total_errors=0

  log_info "Starting system restore (phases: $phases, dry-run: $dry)"

  # A usable snapshot carries at least one of the two payload directories.
  if [[ ! -d "$stage/files" && ! -d "$stage/api" ]]; then
    log_error "System snapshot staging directory is empty or invalid: $stage"
    return 1
  fi

  # Run each requested phase in order, tallying phase-level failures.
  local phase
  for phase in 1 2 3 4; do
    [[ "$phases" == *"$phase"* ]] || continue
    case "$phase" in
      1) _restore_phase1_foundation "$stage" "$dry" || total_errors=$((total_errors + 1)) ;;
      2) _restore_phase2_services "$stage" "$dry" || total_errors=$((total_errors + 1)) ;;
      3) _restore_phase3_security "$stage" "$dry" || total_errors=$((total_errors + 1)) ;;
      4) _restore_phase4_dns "$stage" "$dry" || total_errors=$((total_errors + 1)) ;;
    esac
  done

  if (( total_errors > 0 )); then
    log_warn "System restore completed with $total_errors phase error(s)"
    return 1
  fi

  log_info "System restore completed successfully"
  return 0
}
|