Add sysbackup/sysrestore CLI commands and schedule integration
- Add lib/sysbackup.sh and lib/sysrestore.sh for system-level backup and restore of WHM/cPanel config, packages, and cron jobs - Wire cmd_sysbackup and cmd_sysrestore into bin/gniza - Add --sysbackup flag to cmd_backup: runs system backup after all account backups complete - Add SYSBACKUP schedule config key so cron jobs can include --sysbackup automatically via build_cron_line() - Add "Include system backup" toggle to WHM schedule form - Revert sysbackup toggle from remotes.cgi (belongs in schedules) Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
236
bin/gniza
236
bin/gniza
@@ -30,6 +30,8 @@ source "$LIB_DIR/restore.sh"
|
|||||||
source "$LIB_DIR/remotes.sh"
|
source "$LIB_DIR/remotes.sh"
|
||||||
source "$LIB_DIR/rclone.sh"
|
source "$LIB_DIR/rclone.sh"
|
||||||
source "$LIB_DIR/schedule.sh"
|
source "$LIB_DIR/schedule.sh"
|
||||||
|
source "$LIB_DIR/sysbackup.sh"
|
||||||
|
source "$LIB_DIR/sysrestore.sh"
|
||||||
|
|
||||||
# ── Argument parsing helpers ───────────────────────────────────
|
# ── Argument parsing helpers ───────────────────────────────────
|
||||||
|
|
||||||
@@ -100,6 +102,9 @@ cmd_backup() {
|
|||||||
local dry_run=false
|
local dry_run=false
|
||||||
has_flag dry-run "$@" && dry_run=true
|
has_flag dry-run "$@" && dry_run=true
|
||||||
|
|
||||||
|
local run_sysbackup=false
|
||||||
|
has_flag sysbackup "$@" && run_sysbackup=true
|
||||||
|
|
||||||
local single_account=""
|
local single_account=""
|
||||||
single_account=$(get_opt account "$@" 2>/dev/null) || true
|
single_account=$(get_opt account "$@" 2>/dev/null) || true
|
||||||
|
|
||||||
@@ -233,6 +238,20 @@ cmd_backup() {
|
|||||||
fi
|
fi
|
||||||
echo "============================================"
|
echo "============================================"
|
||||||
|
|
||||||
|
# Run system backup if --sysbackup was requested
|
||||||
|
if [[ "$run_sysbackup" == "true" ]]; then
|
||||||
|
echo ""
|
||||||
|
log_info "=== Running system backup (--sysbackup) ==="
|
||||||
|
# Release lock so sysbackup can acquire its own
|
||||||
|
release_lock
|
||||||
|
local sysbackup_args=()
|
||||||
|
[[ -n "$remote_flag" ]] && sysbackup_args+=(--remote="$remote_flag")
|
||||||
|
[[ "$dry_run" == "true" ]] && sysbackup_args+=(--dry-run)
|
||||||
|
# Run as subprocess so its exit doesn't kill our process
|
||||||
|
/usr/local/bin/gniza sysbackup "${sysbackup_args[@]}" || log_error "System backup failed"
|
||||||
|
acquire_lock
|
||||||
|
fi
|
||||||
|
|
||||||
# Send notification
|
# Send notification
|
||||||
send_backup_report "$total" "$succeeded" "$failed" "$duration" "$failed_accounts"
|
send_backup_report "$total" "$succeeded" "$failed" "$duration" "$failed_accounts"
|
||||||
|
|
||||||
@@ -1247,6 +1266,209 @@ CONF
|
|||||||
echo "Run 'gniza backup --remote=$name --dry-run' to test."
|
echo "Run 'gniza backup --remote=$name --dry-run' to test."
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# ── System Backup / Restore ───────────────────────────────────
|
||||||
|
|
||||||
|
# Transfer + finalize + retention for system backup on the current remote.
|
||||||
|
# Globals REMOTE_* must already be set via load_remote().
|
||||||
|
_sysbackup_to_current_remote() {
|
||||||
|
local stage_dir="$1"
|
||||||
|
local ts="$2"
|
||||||
|
local remote_label="$CURRENT_REMOTE_NAME"
|
||||||
|
|
||||||
|
clean_partial_system_snapshots
|
||||||
|
|
||||||
|
local prev; prev=$(get_latest_system_snapshot) || prev=""
|
||||||
|
|
||||||
|
if ! transfer_system_backup "$stage_dir" "$ts" "$prev"; then
|
||||||
|
log_error "[$remote_label] System backup transfer failed"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ! finalize_system_snapshot "$ts"; then
|
||||||
|
log_error "[$remote_label] System snapshot finalize failed"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
enforce_system_retention
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd_sysbackup() {
|
||||||
|
require_root
|
||||||
|
local config_file; config_file=$(get_opt config "$@" 2>/dev/null) || config_file="$DEFAULT_CONFIG_FILE"
|
||||||
|
load_config "$config_file"
|
||||||
|
validate_config || die "Invalid configuration"
|
||||||
|
init_logging
|
||||||
|
|
||||||
|
local dry_run=false
|
||||||
|
has_flag dry-run "$@" && dry_run=true
|
||||||
|
|
||||||
|
local remote_flag=""
|
||||||
|
remote_flag=$(get_opt remote "$@" 2>/dev/null) || true
|
||||||
|
|
||||||
|
acquire_lock
|
||||||
|
trap 'cleanup_system_stage; release_lock' EXIT
|
||||||
|
|
||||||
|
local remotes=""
|
||||||
|
remotes=$(get_target_remotes "$remote_flag") || die "Invalid remote specification"
|
||||||
|
|
||||||
|
_save_remote_globals
|
||||||
|
|
||||||
|
# Test connectivity upfront
|
||||||
|
while IFS= read -r rname; do
|
||||||
|
[[ -z "$rname" ]] && continue
|
||||||
|
load_remote "$rname" || die "Failed to load remote: $rname"
|
||||||
|
if _is_rclone_mode; then
|
||||||
|
test_rclone_connection || die "Cannot connect to remote '$rname' (${REMOTE_TYPE})"
|
||||||
|
else
|
||||||
|
test_ssh_connection || die "Cannot connect to remote '$rname' (${REMOTE_USER}@${REMOTE_HOST}:${REMOTE_PORT})"
|
||||||
|
fi
|
||||||
|
done <<< "$remotes"
|
||||||
|
_restore_remote_globals
|
||||||
|
|
||||||
|
local start_time; start_time=$(date +%s)
|
||||||
|
local ts; ts=$(timestamp)
|
||||||
|
local temp_dir="${TEMP_DIR:-$DEFAULT_TEMP_DIR}"
|
||||||
|
local stage_dir="$temp_dir/system/$ts"
|
||||||
|
|
||||||
|
if [[ "$dry_run" == "true" ]]; then
|
||||||
|
log_info "[DRY RUN] System backup preview"
|
||||||
|
log_info "[DRY RUN] Would export: packages, tweaksettings, DNS zones, IPs, PHP config"
|
||||||
|
log_info "[DRY RUN] Would stage: ${#_SYSBACKUP_PATHS[@]} system paths + ea-php configs"
|
||||||
|
while IFS= read -r rname; do
|
||||||
|
[[ -z "$rname" ]] && continue
|
||||||
|
load_remote "$rname"
|
||||||
|
log_info "[DRY RUN] [$rname] Would transfer system backup to ${REMOTE_HOST:-$REMOTE_TYPE}"
|
||||||
|
log_info "[DRY RUN] [$rname] Would finalize system snapshot: $ts"
|
||||||
|
log_info "[DRY RUN] [$rname] Would enforce retention: keep $RETENTION_COUNT"
|
||||||
|
done <<< "$remotes"
|
||||||
|
_restore_remote_globals
|
||||||
|
echo ""
|
||||||
|
echo "System backup dry run complete. No changes made."
|
||||||
|
exit "$EXIT_OK"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Stage ONCE
|
||||||
|
log_info "=== System Backup ==="
|
||||||
|
if ! run_system_backup "$stage_dir"; then
|
||||||
|
log_error "System backup staging failed"
|
||||||
|
cleanup_system_stage
|
||||||
|
exit "$EXIT_FATAL"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Transfer to each remote
|
||||||
|
local failed=0
|
||||||
|
local succeeded=0
|
||||||
|
while IFS= read -r rname; do
|
||||||
|
[[ -z "$rname" ]] && continue
|
||||||
|
load_remote "$rname"
|
||||||
|
log_info "--- Transferring system backup to remote '$rname' ---"
|
||||||
|
if _sysbackup_to_current_remote "$stage_dir" "$ts"; then
|
||||||
|
((succeeded++)) || true
|
||||||
|
log_info "System backup to '$rname' completed"
|
||||||
|
else
|
||||||
|
((failed++)) || true
|
||||||
|
log_error "System backup to '$rname' failed"
|
||||||
|
fi
|
||||||
|
done <<< "$remotes"
|
||||||
|
_restore_remote_globals
|
||||||
|
|
||||||
|
cleanup_system_stage
|
||||||
|
|
||||||
|
local end_time; end_time=$(date +%s)
|
||||||
|
local duration=$(( end_time - start_time ))
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "============================================"
|
||||||
|
echo "System Backup Summary"
|
||||||
|
echo "============================================"
|
||||||
|
echo "Timestamp: $ts"
|
||||||
|
echo "Duration: $(human_duration $duration)"
|
||||||
|
echo "Remotes: $(echo "$remotes" | tr '\n' ' ')"
|
||||||
|
echo "Succeeded: ${C_GREEN}${succeeded}${C_RESET}"
|
||||||
|
if (( failed > 0 )); then
|
||||||
|
echo "Failed: ${C_RED}${failed}${C_RESET}"
|
||||||
|
else
|
||||||
|
echo "Failed: 0"
|
||||||
|
fi
|
||||||
|
echo "============================================"
|
||||||
|
|
||||||
|
if (( failed > 0 && succeeded > 0 )); then
|
||||||
|
exit "$EXIT_PARTIAL"
|
||||||
|
elif (( failed > 0 )); then
|
||||||
|
exit "$EXIT_FATAL"
|
||||||
|
fi
|
||||||
|
exit "$EXIT_OK"
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd_sysrestore() {
|
||||||
|
require_root
|
||||||
|
local config_file; config_file=$(get_opt config "$@" 2>/dev/null) || config_file="$DEFAULT_CONFIG_FILE"
|
||||||
|
load_config "$config_file"
|
||||||
|
validate_config || die "Invalid configuration"
|
||||||
|
init_logging
|
||||||
|
|
||||||
|
local remote_flag=""
|
||||||
|
remote_flag=$(get_opt remote "$@" 2>/dev/null) || true
|
||||||
|
[[ -z "$remote_flag" ]] && die "Specify --remote=NAME for sysrestore."$'\n'"Available remotes: $(list_remotes | tr '\n' ' ')"
|
||||||
|
|
||||||
|
_save_remote_globals
|
||||||
|
load_remote "$remote_flag" || die "Failed to load remote: $remote_flag"
|
||||||
|
|
||||||
|
local timestamp=""
|
||||||
|
timestamp=$(get_opt timestamp "$@" 2>/dev/null) || true
|
||||||
|
|
||||||
|
local dry_run=false
|
||||||
|
has_flag dry-run "$@" && dry_run=true
|
||||||
|
|
||||||
|
local phases="1,2,3,4"
|
||||||
|
phases=$(get_opt phase "$@" 2>/dev/null) || phases="1,2,3,4"
|
||||||
|
|
||||||
|
# Test connectivity
|
||||||
|
if _is_rclone_mode; then
|
||||||
|
test_rclone_connection || die "Cannot connect to remote '$remote_flag' (${REMOTE_TYPE})"
|
||||||
|
else
|
||||||
|
test_ssh_connection || die "Cannot connect to remote '$remote_flag' (${REMOTE_USER}@${REMOTE_HOST}:${REMOTE_PORT})"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Resolve timestamp
|
||||||
|
local ts; ts=$(resolve_system_snapshot_timestamp "$timestamp") || die "No system snapshot found on remote '$remote_flag'"
|
||||||
|
[[ -z "$ts" ]] && die "No system snapshot found on remote '$remote_flag'"
|
||||||
|
|
||||||
|
log_info "System restore from remote '$remote_flag', snapshot: $ts"
|
||||||
|
log_info "Phases: $phases, Dry-run: $dry_run"
|
||||||
|
|
||||||
|
local temp_dir="${TEMP_DIR:-$DEFAULT_TEMP_DIR}"
|
||||||
|
local stage_dir="$temp_dir/system-restore/$ts"
|
||||||
|
|
||||||
|
# Download snapshot (needed for both dry-run and real restore)
|
||||||
|
if ! download_system_snapshot "$ts" "$stage_dir"; then
|
||||||
|
die "Failed to download system snapshot: $ts"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Run restore
|
||||||
|
local rc=0
|
||||||
|
run_system_restore "$stage_dir" "$phases" "$dry_run" || rc=$?
|
||||||
|
|
||||||
|
# Cleanup staging
|
||||||
|
if [[ -d "$stage_dir" ]]; then
|
||||||
|
rm -rf "$stage_dir"
|
||||||
|
log_debug "Cleaned up restore staging: $stage_dir"
|
||||||
|
fi
|
||||||
|
|
||||||
|
_restore_remote_globals
|
||||||
|
|
||||||
|
if (( rc != 0 )); then
|
||||||
|
echo ""
|
||||||
|
echo "${C_YELLOW}System restore completed with errors. Check log for details.${C_RESET}"
|
||||||
|
exit "$EXIT_PARTIAL"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "${C_GREEN}System restore completed successfully.${C_RESET}"
|
||||||
|
exit "$EXIT_OK"
|
||||||
|
}
|
||||||
|
|
||||||
cmd_usage() {
|
cmd_usage() {
|
||||||
cat <<EOF
|
cat <<EOF
|
||||||
${C_BOLD}gniza v${GNIZA_VERSION}${C_RESET} — cPanel Backup, Restore & Disaster Recovery
|
${C_BOLD}gniza v${GNIZA_VERSION}${C_RESET} — cPanel Backup, Restore & Disaster Recovery
|
||||||
@@ -1255,7 +1477,7 @@ ${C_BOLD}Usage:${C_RESET}
|
|||||||
gniza <command> [options]
|
gniza <command> [options]
|
||||||
|
|
||||||
${C_BOLD}Commands:${C_RESET}
|
${C_BOLD}Commands:${C_RESET}
|
||||||
backup [--account=NAME] [--remote=NAME[,NAME2]] [--dry-run]
|
backup [--account=NAME] [--remote=NAME[,NAME2]] [--dry-run] [--sysbackup]
|
||||||
restore account <name> [--remote=NAME] [--timestamp=TS] [--force]
|
restore account <name> [--remote=NAME] [--timestamp=TS] [--force]
|
||||||
restore files <name> [--remote=NAME] [--path=subpath] [--timestamp=TS]
|
restore files <name> [--remote=NAME] [--path=subpath] [--timestamp=TS]
|
||||||
restore database <name> [<dbname>] [--remote=NAME] [--timestamp=TS]
|
restore database <name> [<dbname>] [--remote=NAME] [--timestamp=TS]
|
||||||
@@ -1269,6 +1491,8 @@ ${C_BOLD}Commands:${C_RESET}
|
|||||||
restore list-mailboxes <name> [--remote=NAME] [--timestamp=TS]
|
restore list-mailboxes <name> [--remote=NAME] [--timestamp=TS]
|
||||||
restore list-files <name> [--remote=NAME] [--timestamp=TS] [--path=subdir]
|
restore list-files <name> [--remote=NAME] [--timestamp=TS] [--path=subdir]
|
||||||
restore server [--remote=NAME] [--timestamp=TS]
|
restore server [--remote=NAME] [--timestamp=TS]
|
||||||
|
sysbackup [--remote=NAME[,NAME2]] [--dry-run] Backup system/WHM config
|
||||||
|
sysrestore --remote=NAME [--timestamp=TS] [--phase=N] [--dry-run]
|
||||||
list [--account=NAME] [--remote=NAME] List remote snapshots
|
list [--account=NAME] [--remote=NAME] List remote snapshots
|
||||||
verify [--account=NAME] [--remote=NAME] Verify backup integrity
|
verify [--account=NAME] [--remote=NAME] Verify backup integrity
|
||||||
status Show configuration and status
|
status Show configuration and status
|
||||||
@@ -1302,6 +1526,10 @@ ${C_BOLD}Examples:${C_RESET}
|
|||||||
gniza schedule install
|
gniza schedule install
|
||||||
gniza remote list
|
gniza remote list
|
||||||
gniza init remote nas
|
gniza init remote nas
|
||||||
|
gniza sysbackup --dry-run
|
||||||
|
gniza sysbackup --remote=nas
|
||||||
|
gniza sysrestore --remote=nas
|
||||||
|
gniza sysrestore --remote=nas --phase=1 --dry-run
|
||||||
EOF
|
EOF
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1315,8 +1543,10 @@ main() {
|
|||||||
shift 2>/dev/null || true
|
shift 2>/dev/null || true
|
||||||
|
|
||||||
case "$command" in
|
case "$command" in
|
||||||
backup) cmd_backup "$@" ;;
|
backup) cmd_backup "$@" ;;
|
||||||
restore) cmd_restore "$@" ;;
|
sysbackup) cmd_sysbackup "$@" ;;
|
||||||
|
sysrestore) cmd_sysrestore "$@" ;;
|
||||||
|
restore) cmd_restore "$@" ;;
|
||||||
list) cmd_list "$@" ;;
|
list) cmd_list "$@" ;;
|
||||||
verify) cmd_verify "$@" ;;
|
verify) cmd_verify "$@" ;;
|
||||||
status) cmd_status "$@" ;;
|
status) cmd_status "$@" ;;
|
||||||
|
|||||||
@@ -15,3 +15,7 @@ SCHEDULE_CRON="" # Full cron expression for SCHEDULE=custom
|
|||||||
# ── Target Remotes ────────────────────────────────────────────
|
# ── Target Remotes ────────────────────────────────────────────
|
||||||
REMOTES="" # Comma-separated remote names (e.g. "nas,offsite")
|
REMOTES="" # Comma-separated remote names (e.g. "nas,offsite")
|
||||||
# Empty = all configured remotes
|
# Empty = all configured remotes
|
||||||
|
|
||||||
|
# ── System Backup ─────────────────────────────────────────────
|
||||||
|
SYSBACKUP="" # "yes" to run system backup after account backups
|
||||||
|
# Backs up WHM/cPanel config, packages, cron jobs
|
||||||
|
|||||||
@@ -53,6 +53,7 @@ load_schedule() {
|
|||||||
SCHEDULE_DAY=""
|
SCHEDULE_DAY=""
|
||||||
SCHEDULE_CRON=""
|
SCHEDULE_CRON=""
|
||||||
SCHEDULE_REMOTES=""
|
SCHEDULE_REMOTES=""
|
||||||
|
SCHEDULE_SYSBACKUP=""
|
||||||
|
|
||||||
# shellcheck disable=SC1090
|
# shellcheck disable=SC1090
|
||||||
source "$conf" || {
|
source "$conf" || {
|
||||||
@@ -62,6 +63,7 @@ load_schedule() {
|
|||||||
|
|
||||||
# Map REMOTES to SCHEDULE_REMOTES to avoid conflicts
|
# Map REMOTES to SCHEDULE_REMOTES to avoid conflicts
|
||||||
SCHEDULE_REMOTES="${REMOTES:-}"
|
SCHEDULE_REMOTES="${REMOTES:-}"
|
||||||
|
SCHEDULE_SYSBACKUP="${SYSBACKUP:-}"
|
||||||
|
|
||||||
log_debug "Loaded schedule '$name': ${SCHEDULE} at ${SCHEDULE_TIME:-02:00}, remotes=${SCHEDULE_REMOTES:-all}"
|
log_debug "Loaded schedule '$name': ${SCHEDULE} at ${SCHEDULE_TIME:-02:00}, remotes=${SCHEDULE_REMOTES:-all}"
|
||||||
}
|
}
|
||||||
@@ -134,12 +136,15 @@ build_cron_line() {
|
|||||||
local cron_expr
|
local cron_expr
|
||||||
cron_expr=$(schedule_to_cron "$name") || return 1
|
cron_expr=$(schedule_to_cron "$name") || return 1
|
||||||
|
|
||||||
local remote_flag=""
|
local extra_flags=""
|
||||||
if [[ -n "$SCHEDULE_REMOTES" ]]; then
|
if [[ -n "$SCHEDULE_REMOTES" ]]; then
|
||||||
remote_flag=" --remote=$SCHEDULE_REMOTES"
|
extra_flags+=" --remote=$SCHEDULE_REMOTES"
|
||||||
|
fi
|
||||||
|
if [[ "${SCHEDULE_SYSBACKUP:-}" == "yes" ]]; then
|
||||||
|
extra_flags+=" --sysbackup"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo "$cron_expr /usr/local/bin/gniza backup${remote_flag} >> /var/log/gniza/cron-${name}.log 2>&1"
|
echo "$cron_expr /usr/local/bin/gniza backup${extra_flags} >> /var/log/gniza/cron-${name}.log 2>&1"
|
||||||
}
|
}
|
||||||
|
|
||||||
# ── Crontab Management ────────────────────────────────────────
|
# ── Crontab Management ────────────────────────────────────────
|
||||||
|
|||||||
471
lib/sysbackup.sh
Normal file
471
lib/sysbackup.sh
Normal file
@@ -0,0 +1,471 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
# gniza/lib/sysbackup.sh — System-level WHM backup: API exports, file staging, snapshot lifecycle
|
||||||
|
|
||||||
|
[[ -n "${_GNIZA_SYSBACKUP_LOADED:-}" ]] && return 0
|
||||||
|
_GNIZA_SYSBACKUP_LOADED=1
|
||||||
|
|
||||||
|
# ── Path Helpers ─────────────────────────────────────────────
|
||||||
|
|
||||||
|
_system_snap_base() {
|
||||||
|
local hostname; hostname=$(hostname -f)
|
||||||
|
echo "${REMOTE_BASE}/${hostname}/system/snapshots"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Rclone subpath (no REMOTE_BASE prefix — _rclone_remote_path adds it)
|
||||||
|
_system_snap_subpath() {
|
||||||
|
echo "system/snapshots"
|
||||||
|
}
|
||||||
|
|
||||||
|
# ── Snapshot Lifecycle ───────────────────────────────────────
|
||||||
|
|
||||||
|
list_system_snapshots() {
|
||||||
|
if _is_rclone_mode; then
|
||||||
|
local snap_sub; snap_sub=$(_system_snap_subpath)
|
||||||
|
local all_dirs; all_dirs=$(rclone_list_dirs "$snap_sub") || true
|
||||||
|
[[ -z "$all_dirs" ]] && return 0
|
||||||
|
|
||||||
|
local completed=""
|
||||||
|
while IFS= read -r dir; do
|
||||||
|
[[ -z "$dir" ]] && continue
|
||||||
|
if rclone_exists "${snap_sub}/${dir}/.complete"; then
|
||||||
|
completed+="${dir}"$'\n'
|
||||||
|
fi
|
||||||
|
done <<< "$all_dirs"
|
||||||
|
|
||||||
|
[[ -n "$completed" ]] && echo "$completed" | sort -r
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
local snap_dir; snap_dir=$(_system_snap_base)
|
||||||
|
local raw; raw=$(remote_exec "ls -1d '$snap_dir'/[0-9]* 2>/dev/null | grep -v '\\.partial$' | sort -r" 2>/dev/null) || true
|
||||||
|
if [[ -n "$raw" ]]; then
|
||||||
|
echo "$raw" | xargs -I{} basename {} | sort -r
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
get_latest_system_snapshot() {
|
||||||
|
if _is_rclone_mode; then
|
||||||
|
local snap_sub; snap_sub=$(_system_snap_subpath)
|
||||||
|
local latest; latest=$(rclone_cat "${snap_sub}/latest.txt" 2>/dev/null) || true
|
||||||
|
if [[ -n "$latest" ]]; then
|
||||||
|
if rclone_exists "${snap_sub}/${latest}/.complete"; then
|
||||||
|
echo "$latest"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
list_system_snapshots | head -1
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
list_system_snapshots | head -1
|
||||||
|
}
|
||||||
|
|
||||||
|
resolve_system_snapshot_timestamp() {
|
||||||
|
local requested="$1"
|
||||||
|
|
||||||
|
if [[ -z "$requested" || "$requested" == "LATEST" || "$requested" == "latest" ]]; then
|
||||||
|
get_latest_system_snapshot
|
||||||
|
elif _is_rclone_mode; then
|
||||||
|
local snap_sub; snap_sub=$(_system_snap_subpath)
|
||||||
|
if rclone_exists "${snap_sub}/${requested}/.complete"; then
|
||||||
|
echo "$requested"
|
||||||
|
else
|
||||||
|
log_error "System snapshot not found or incomplete: $requested"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
local snap_dir; snap_dir=$(_system_snap_base)
|
||||||
|
if remote_exec "test -d '$snap_dir/$requested'" 2>/dev/null; then
|
||||||
|
echo "$requested"
|
||||||
|
else
|
||||||
|
log_error "System snapshot not found: $requested"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
clean_partial_system_snapshots() {
|
||||||
|
if _is_rclone_mode; then
|
||||||
|
local snap_sub; snap_sub=$(_system_snap_subpath)
|
||||||
|
local all_dirs; all_dirs=$(rclone_list_dirs "$snap_sub") || true
|
||||||
|
[[ -z "$all_dirs" ]] && return 0
|
||||||
|
|
||||||
|
while IFS= read -r dir; do
|
||||||
|
[[ -z "$dir" ]] && continue
|
||||||
|
if ! rclone_exists "${snap_sub}/${dir}/.complete"; then
|
||||||
|
log_info "Purging incomplete system snapshot: $dir"
|
||||||
|
rclone_purge "${snap_sub}/${dir}" || {
|
||||||
|
log_warn "Failed to purge incomplete system snapshot: $dir"
|
||||||
|
}
|
||||||
|
fi
|
||||||
|
done <<< "$all_dirs"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
local snap_dir; snap_dir=$(_system_snap_base)
|
||||||
|
local partials; partials=$(remote_exec "ls -1d '$snap_dir'/*.partial 2>/dev/null" 2>/dev/null) || true
|
||||||
|
if [[ -n "$partials" ]]; then
|
||||||
|
log_info "Cleaning partial system snapshots..."
|
||||||
|
remote_exec "rm -rf '$snap_dir'/*.partial" || {
|
||||||
|
log_warn "Failed to clean partial system snapshots"
|
||||||
|
}
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
finalize_system_snapshot() {
|
||||||
|
local ts="$1"
|
||||||
|
|
||||||
|
if _is_rclone_mode; then
|
||||||
|
local snap_sub; snap_sub=$(_system_snap_subpath)
|
||||||
|
log_info "Finalizing system snapshot: $ts (rclone)"
|
||||||
|
rclone_rcat "${snap_sub}/${ts}/.complete" "$(date -u +%Y-%m-%dT%H:%M:%SZ)" || {
|
||||||
|
log_error "Failed to create .complete marker for system/$ts"
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
rclone_rcat "${snap_sub}/latest.txt" "$ts" || {
|
||||||
|
log_warn "Failed to update system latest.txt"
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
log_debug "Updated system latest.txt -> $ts"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
local snap_dir; snap_dir=$(_system_snap_base)
|
||||||
|
log_info "Finalizing system snapshot: $ts"
|
||||||
|
remote_exec "mv '$snap_dir/${ts}.partial' '$snap_dir/$ts'" || {
|
||||||
|
log_error "Failed to finalize system snapshot: $ts"
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Update latest symlink
|
||||||
|
local hostname; hostname=$(hostname -f)
|
||||||
|
local base="${REMOTE_BASE}/${hostname}/system"
|
||||||
|
remote_exec "ln -sfn '$snap_dir/$ts' '$base/latest'" || {
|
||||||
|
log_warn "Failed to update system latest symlink"
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
log_debug "Updated system latest symlink -> $ts"
|
||||||
|
}
|
||||||
|
|
||||||
|
enforce_system_retention() {
|
||||||
|
local keep="${RETENTION_COUNT:-$DEFAULT_RETENTION_COUNT}"
|
||||||
|
|
||||||
|
log_debug "Enforcing system retention: keeping $keep snapshots"
|
||||||
|
|
||||||
|
local snapshots; snapshots=$(list_system_snapshots)
|
||||||
|
if [[ -z "$snapshots" ]]; then
|
||||||
|
log_debug "No system snapshots found, nothing to prune"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
local count=0
|
||||||
|
local pruned=0
|
||||||
|
while IFS= read -r snap; do
|
||||||
|
((count++)) || true
|
||||||
|
if (( count > keep )); then
|
||||||
|
log_info "Pruning old system snapshot: $snap"
|
||||||
|
if _is_rclone_mode; then
|
||||||
|
local snap_sub; snap_sub=$(_system_snap_subpath)
|
||||||
|
rclone_purge "${snap_sub}/${snap}" || {
|
||||||
|
log_warn "Failed to purge system snapshot: $snap"
|
||||||
|
}
|
||||||
|
else
|
||||||
|
local snap_dir; snap_dir=$(_system_snap_base)
|
||||||
|
remote_exec "rm -rf '$snap_dir/$snap'" || {
|
||||||
|
log_warn "Failed to prune system snapshot: $snap_dir/$snap"
|
||||||
|
}
|
||||||
|
fi
|
||||||
|
((pruned++)) || true
|
||||||
|
fi
|
||||||
|
done <<< "$snapshots"
|
||||||
|
|
||||||
|
if (( pruned > 0 )); then
|
||||||
|
log_info "Pruned $pruned old system snapshot(s)"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# ── Transfer to Remote ───────────────────────────────────────
|
||||||
|
|
||||||
|
transfer_system_backup() {
|
||||||
|
local stage_dir="$1"
|
||||||
|
local ts="$2"
|
||||||
|
local prev_snapshot="${3:-}"
|
||||||
|
|
||||||
|
if _is_rclone_mode; then
|
||||||
|
local snap_sub; snap_sub=$(_system_snap_subpath)
|
||||||
|
log_info "Transferring system backup (rclone)..."
|
||||||
|
rclone_to_remote "$stage_dir" "${snap_sub}/${ts}"
|
||||||
|
return
|
||||||
|
fi
|
||||||
|
|
||||||
|
local snap_dir; snap_dir=$(_system_snap_base)
|
||||||
|
local dest="$snap_dir/${ts}.partial/"
|
||||||
|
local link_dest=""
|
||||||
|
|
||||||
|
if [[ -n "$prev_snapshot" ]]; then
|
||||||
|
link_dest="$snap_dir/$prev_snapshot"
|
||||||
|
fi
|
||||||
|
|
||||||
|
ensure_remote_dir "$dest" || return 1
|
||||||
|
|
||||||
|
log_info "Transferring system backup..."
|
||||||
|
rsync_to_remote "$stage_dir" "$dest" "$link_dest"
|
||||||
|
}
|
||||||
|
|
||||||
|
# ── API Export Functions ─────────────────────────────────────
|
||||||
|
|
||||||
|
_export_packages() {
|
||||||
|
local api_dir="$1"
|
||||||
|
log_info "Exporting hosting packages..."
|
||||||
|
if ! whmapi1 listpkgs --output=json > "$api_dir/packages.json" 2>/dev/null; then
|
||||||
|
log_error "Failed to export packages"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
_export_tweaksettings() {
|
||||||
|
local api_dir="$1"
|
||||||
|
log_info "Exporting tweak settings..."
|
||||||
|
if ! whmapi1 get_tweaksettings --output=json > "$api_dir/tweaksettings.json" 2>/dev/null; then
|
||||||
|
log_error "Failed to export tweak settings"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
_export_dns_zones() {
|
||||||
|
local api_dir="$1"
|
||||||
|
log_info "Exporting DNS zones..."
|
||||||
|
|
||||||
|
if ! whmapi1 listzones --output=json > "$api_dir/zones.json" 2>/dev/null; then
|
||||||
|
log_error "Failed to export zone list"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
mkdir -p "$api_dir/zones"
|
||||||
|
local zones; zones=$(python3 -c "
|
||||||
|
import sys, json
|
||||||
|
data = json.load(sys.stdin)
|
||||||
|
for z in data.get('data', {}).get('zone', []):
|
||||||
|
print(z['domain'])
|
||||||
|
" < "$api_dir/zones.json" 2>/dev/null) || true
|
||||||
|
|
||||||
|
if [[ -z "$zones" ]]; then
|
||||||
|
log_warn "No DNS zones found to export"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
local count=0
|
||||||
|
while IFS= read -r domain; do
|
||||||
|
[[ -z "$domain" ]] && continue
|
||||||
|
if whmapi1 dumpzone domain="$domain" --output=json > "$api_dir/zones/${domain}.zone" 2>/dev/null; then
|
||||||
|
((count++)) || true
|
||||||
|
else
|
||||||
|
log_warn "Failed to dump zone for: $domain"
|
||||||
|
fi
|
||||||
|
done <<< "$zones"
|
||||||
|
|
||||||
|
log_info "Exported $count DNS zone(s)"
|
||||||
|
}
|
||||||
|
|
||||||
|
_export_ips() {
|
||||||
|
local api_dir="$1"
|
||||||
|
log_info "Exporting IP configuration..."
|
||||||
|
if ! whmapi1 listips --output=json > "$api_dir/ips.json" 2>/dev/null; then
|
||||||
|
log_error "Failed to export IP list"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
_export_php() {
|
||||||
|
local api_dir="$1"
|
||||||
|
log_info "Exporting PHP configuration..."
|
||||||
|
|
||||||
|
# Combine multiple PHP API calls into one JSON file
|
||||||
|
local tmpfile; tmpfile=$(mktemp)
|
||||||
|
{
|
||||||
|
echo '{'
|
||||||
|
echo '"system_default":'
|
||||||
|
whmapi1 php_get_system_default_version --output=json 2>/dev/null || echo '{}'
|
||||||
|
echo ','
|
||||||
|
echo '"installed_versions":'
|
||||||
|
whmapi1 php_get_installed_versions --output=json 2>/dev/null || echo '{}'
|
||||||
|
echo ','
|
||||||
|
echo '"vhost_versions":'
|
||||||
|
whmapi1 php_get_vhost_versions --output=json 2>/dev/null || echo '{}'
|
||||||
|
echo '}'
|
||||||
|
} > "$tmpfile"
|
||||||
|
|
||||||
|
if [[ -s "$tmpfile" ]]; then
|
||||||
|
mv "$tmpfile" "$api_dir/php.json"
|
||||||
|
else
|
||||||
|
rm -f "$tmpfile"
|
||||||
|
log_error "Failed to export PHP configuration"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# ── File Staging ─────────────────────────────────────────────
|
||||||
|
|
||||||
|
# Known system paths to back up
|
||||||
|
readonly _SYSBACKUP_PATHS=(
|
||||||
|
# cPanel core config
|
||||||
|
/var/cpanel/packages
|
||||||
|
/var/cpanel/features
|
||||||
|
/var/cpanel/cpanel.config
|
||||||
|
/etc/wwwacct.conf
|
||||||
|
/etc/cpanel
|
||||||
|
/var/cpanel/ssl
|
||||||
|
/var/cpanel/MultiPHP
|
||||||
|
# DNS
|
||||||
|
/var/named
|
||||||
|
/etc/named.conf
|
||||||
|
# Exim
|
||||||
|
/etc/exim.conf
|
||||||
|
/etc/exim.conf.local
|
||||||
|
/etc/exim.conf.localopts
|
||||||
|
# Mail routing
|
||||||
|
/etc/localdomains
|
||||||
|
/etc/remotedomains
|
||||||
|
/etc/secondarymx
|
||||||
|
/etc/valiases
|
||||||
|
/etc/vdomainaliases
|
||||||
|
/etc/vfilters
|
||||||
|
# Apache / EasyApache
|
||||||
|
/etc/httpd/conf
|
||||||
|
/usr/local/apache/conf
|
||||||
|
/etc/httpd/conf.d
|
||||||
|
/etc/cpanel/ea4
|
||||||
|
# MySQL
|
||||||
|
/etc/my.cnf
|
||||||
|
/root/.my.cnf
|
||||||
|
# CSF firewall
|
||||||
|
/etc/csf
|
||||||
|
# Root cron & SSH
|
||||||
|
/var/spool/cron/root
|
||||||
|
/root/.ssh
|
||||||
|
# Network
|
||||||
|
/etc/ips
|
||||||
|
/etc/reservedips
|
||||||
|
/etc/reservedipreasons
|
||||||
|
/etc/sysconfig/network
|
||||||
|
/etc/resolv.conf
|
||||||
|
# gniza's own config
|
||||||
|
/etc/gniza
|
||||||
|
)
|
||||||
|
|
||||||
|
_stage_files() {
|
||||||
|
local stage_dir="$1"
|
||||||
|
local files_dir="$stage_dir/files"
|
||||||
|
local count=0
|
||||||
|
local failed=0
|
||||||
|
|
||||||
|
for src_path in "${_SYSBACKUP_PATHS[@]}"; do
|
||||||
|
if [[ ! -e "$src_path" ]]; then
|
||||||
|
log_debug "Skipping (not found): $src_path"
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Mirror the source path under files/ (strip leading /)
|
||||||
|
local rel_path="${src_path#/}"
|
||||||
|
local dest="$files_dir/$rel_path"
|
||||||
|
|
||||||
|
# Create parent directory
|
||||||
|
mkdir -p "$(dirname "$dest")"
|
||||||
|
|
||||||
|
if cp -a "$src_path" "$dest" 2>/dev/null; then
|
||||||
|
((count++)) || true
|
||||||
|
log_debug "Staged: $src_path"
|
||||||
|
else
|
||||||
|
((failed++)) || true
|
||||||
|
log_warn "Failed to stage: $src_path"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# Also stage ea-php configs if they exist
|
||||||
|
local ea_php_dirs; ea_php_dirs=$(ls -d /opt/cpanel/ea-php*/root/etc/ 2>/dev/null) || true
|
||||||
|
if [[ -n "$ea_php_dirs" ]]; then
|
||||||
|
while IFS= read -r ea_dir; do
|
||||||
|
[[ -z "$ea_dir" ]] && continue
|
||||||
|
local rel="${ea_dir#/}"
|
||||||
|
local dest="$files_dir/$rel"
|
||||||
|
mkdir -p "$(dirname "$dest")"
|
||||||
|
if cp -a "$ea_dir" "$dest" 2>/dev/null; then
|
||||||
|
((count++)) || true
|
||||||
|
log_debug "Staged: $ea_dir"
|
||||||
|
else
|
||||||
|
((failed++)) || true
|
||||||
|
log_warn "Failed to stage: $ea_dir"
|
||||||
|
fi
|
||||||
|
done <<< "$ea_php_dirs"
|
||||||
|
fi
|
||||||
|
|
||||||
|
log_info "Staged $count system path(s) ($failed failed)"
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
# ── Backup Orchestrator ──────────────────────────────────────
|
||||||
|
|
||||||
|
run_system_backup() {
|
||||||
|
local stage_dir="$1"
|
||||||
|
local api_dir="$stage_dir/api"
|
||||||
|
local api_failed=0
|
||||||
|
local api_succeeded=0
|
||||||
|
|
||||||
|
mkdir -p "$api_dir"
|
||||||
|
mkdir -p "$stage_dir/files"
|
||||||
|
|
||||||
|
log_info "=== System Backup: API exports ==="
|
||||||
|
|
||||||
|
# Export each API category — continue on failure
|
||||||
|
if _export_packages "$api_dir"; then
|
||||||
|
((api_succeeded++)) || true
|
||||||
|
else
|
||||||
|
((api_failed++)) || true
|
||||||
|
fi
|
||||||
|
|
||||||
|
if _export_tweaksettings "$api_dir"; then
|
||||||
|
((api_succeeded++)) || true
|
||||||
|
else
|
||||||
|
((api_failed++)) || true
|
||||||
|
fi
|
||||||
|
|
||||||
|
if _export_dns_zones "$api_dir"; then
|
||||||
|
((api_succeeded++)) || true
|
||||||
|
else
|
||||||
|
((api_failed++)) || true
|
||||||
|
fi
|
||||||
|
|
||||||
|
if _export_ips "$api_dir"; then
|
||||||
|
((api_succeeded++)) || true
|
||||||
|
else
|
||||||
|
((api_failed++)) || true
|
||||||
|
fi
|
||||||
|
|
||||||
|
if _export_php "$api_dir"; then
|
||||||
|
((api_succeeded++)) || true
|
||||||
|
else
|
||||||
|
((api_failed++)) || true
|
||||||
|
fi
|
||||||
|
|
||||||
|
log_info "API exports: $api_succeeded succeeded, $api_failed failed"
|
||||||
|
|
||||||
|
log_info "=== System Backup: File staging ==="
|
||||||
|
_stage_files "$stage_dir"
|
||||||
|
|
||||||
|
if (( api_failed > 0 )); then
|
||||||
|
log_warn "System backup completed with $api_failed API export failure(s)"
|
||||||
|
return 0 # Don't fail the whole backup for partial API failures
|
||||||
|
fi
|
||||||
|
|
||||||
|
log_info "System backup staging complete"
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
cleanup_system_stage() {
|
||||||
|
local temp_dir="${TEMP_DIR:-$DEFAULT_TEMP_DIR}"
|
||||||
|
local sys_dir="$temp_dir/system"
|
||||||
|
if [[ -d "$sys_dir" ]]; then
|
||||||
|
rm -rf "$sys_dir"
|
||||||
|
log_debug "Cleaned up system staging directory"
|
||||||
|
fi
|
||||||
|
}
|
||||||
481
lib/sysrestore.sh
Normal file
481
lib/sysrestore.sh
Normal file
@@ -0,0 +1,481 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
# gniza/lib/sysrestore.sh — System-level WHM restore: phased restore of configs, services, DNS
|
||||||
|
|
||||||
|
[[ -n "${_GNIZA_SYSRESTORE_LOADED:-}" ]] && return 0
|
||||||
|
_GNIZA_SYSRESTORE_LOADED=1
|
||||||
|
|
||||||
|
# ── Download ─────────────────────────────────────────────────
|
||||||
|
|
||||||
|
# Fetch one system snapshot from the configured remote into a local staging dir.
#   $1  snapshot timestamp (remote subdirectory name)
#   $2  local destination directory (created if missing)
# Transport is chosen by _is_rclone_mode: rclone when true, rsync otherwise.
# Returns non-zero when the staging dir cannot be created or the transfer fails.
download_system_snapshot() {
    local snapshot_ts="$1"
    local dest_dir="$2"

    if ! mkdir -p "$dest_dir"; then
        log_error "Failed to create staging directory: $dest_dir"
        return 1
    fi

    if _is_rclone_mode; then
        local sub
        sub=$(_system_snap_subpath)
        log_info "Downloading system snapshot $snapshot_ts (rclone)..."
        rclone_from_remote "${sub}/${snapshot_ts}" "$dest_dir"
    else
        local base
        base=$(_system_snap_base)
        log_info "Downloading system snapshot $snapshot_ts (rsync)..."
        _rsync_download "$base/$snapshot_ts/" "$dest_dir/"
    fi
}
|
||||||
|
|
||||||
|
# ── Restore Helpers ──────────────────────────────────────────
|
||||||
|
|
||||||
|
# Copy a single file from the staged snapshot back to its live location.
#   $1  staging root (snapshot content lives under $1/files/)
#   $2  path relative to / (no leading slash)
# A file absent from the backup is skipped quietly (returns 0). A failed
# copy logs an error and returns 1. cp -a preserves ownership/mode/times.
_restore_file() {
    local staged="$1/files/$2"
    local target="/$2"

    if [[ ! -e "$staged" ]]; then
        log_debug "Not in backup (skipping): $target"
        return 0
    fi

    # Make sure the destination's parent directory exists before copying.
    mkdir -p "$(dirname "$target")"
    if cp -a "$staged" "$target"; then
        log_info "Restored: $target"
    else
        log_error "Failed to restore: $target"
        return 1
    fi
}
|
||||||
|
|
||||||
|
# Copy a directory tree from the staged snapshot back into place.
#   $1  staging root (snapshot content lives under $1/files/)
#   $2  directory path relative to / (no leading slash)
# A directory absent from the backup is skipped quietly (returns 0).
# Contents are merged into the existing target ("src/." semantics),
# preserving attributes via cp -a; returns 1 if the copy fails.
_restore_dir() {
    local staged="$1/files/$2"
    local target="/$2"

    if [[ ! -d "$staged" ]]; then
        log_debug "Not in backup (skipping): $target"
        return 0
    fi

    mkdir -p "$target"
    if cp -a "$staged/." "$target/"; then
        log_info "Restored directory: $target"
    else
        log_error "Failed to restore directory: $target"
        return 1
    fi
}
|
||||||
|
|
||||||
|
# Restart a service, preferring cPanel's /scripts/restartsrv_* wrapper and
# falling back to systemctl when the wrapper is not executable.
#   $1  service name (e.g. exim, httpd, mysql, named)
# Returns 0 on success; 1 when the chosen method fails or when neither
# restart mechanism is available on this host.
_restart_service() {
    local svc="$1"
    local wrapper="/scripts/restartsrv_${svc}"

    if [[ -x "$wrapper" ]]; then
        log_info "Restarting $svc via $wrapper..."
        if ! "$wrapper" >/dev/null 2>&1; then
            log_warn "Failed to restart $svc via $wrapper"
            return 1
        fi
        log_info "$svc restarted successfully"
        return 0
    fi

    if command -v systemctl &>/dev/null; then
        log_info "Restarting $svc via systemctl..."
        if ! systemctl restart "$svc" 2>/dev/null; then
            log_warn "Failed to restart $svc via systemctl"
            return 1
        fi
        log_info "$svc restarted successfully"
        return 0
    fi

    log_warn "No restart method available for $svc"
    return 1
}
|
||||||
|
|
||||||
|
# ── API Apply Helpers ────────────────────────────────────────
|
||||||
|
|
||||||
|
# Re-apply exported WHM tweak settings to the live server.
#   $1  staging root; reads $1/api/tweaksettings.json
# Parses the JSON export with python3 into "key=value" lines, then applies
# each one via `whmapi1 set_tweaksetting`. Missing or unparseable exports
# are treated as a soft skip (returns 0), since a system restore should not
# abort on an absent optional export. Individual API failures are counted
# and logged at debug level, never fatal.
_apply_tweaksettings() {
    local stage_dir="$1"
    local json_file="$stage_dir/api/tweaksettings.json"

    if [[ ! -f "$json_file" ]]; then
        log_warn "No tweaksettings.json found, skipping"
        return 0
    fi

    log_info "Applying tweak settings via whmapi1..."
    local count=0
    local failed=0

    # Extract key=value pairs from the JSON
    # NOTE(review): assumes the export shape is data.tweaksetting as either a
    # dict or a list of dicts — both forms are handled below; confirm against
    # the matching export in sysbackup.sh.
    local settings; settings=$(python3 -c "
import sys, json
data = json.load(sys.stdin)
tw = data.get('data', {}).get('tweaksetting', {})
if isinstance(tw, dict):
    for k, v in tw.items():
        print(f'{k}={v}')
elif isinstance(tw, list):
    for item in tw:
        if isinstance(item, dict):
            for k, v in item.items():
                print(f'{k}={v}')
" < "$json_file" 2>/dev/null) || true

    if [[ -z "$settings" ]]; then
        log_warn "No tweak settings parsed from export"
        return 0
    fi

    # Split each line on the FIRST '=' only; any further '=' characters are
    # preserved inside $value (bash keeps trailing delimiters in the last var).
    while IFS='=' read -r key value; do
        [[ -z "$key" ]] && continue
        if whmapi1 set_tweaksetting key="$key" value="$value" >/dev/null 2>&1; then
            ((count++)) || true
        else
            # "|| true" guards against set -e: ((x++)) returns 1 when x was 0.
            ((failed++)) || true
            log_debug "Failed to set tweak setting: $key"
        fi
    done <<< "$settings"

    log_info "Applied $count tweak setting(s) ($failed failed)"
}
|
||||||
|
|
||||||
|
# Recreate exported hosting packages on the live server.
#   $1  staging root; reads $1/api/packages.json
# python3 flattens each package in data.pkg into one line of the form
# "name|key1=val1 key2=val2 ...", which is then replayed through
# `whmapi1 addpkg`. A missing or unparseable export is a soft skip
# (returns 0); per-package failures (including already-existing packages)
# are counted but never fatal.
_apply_packages() {
    local stage_dir="$1"
    local json_file="$stage_dir/api/packages.json"

    if [[ ! -f "$json_file" ]]; then
        log_warn "No packages.json found, skipping"
        return 0
    fi

    log_info "Recreating hosting packages via whmapi1..."
    local count=0
    local failed=0

    # Extract package names and their attributes
    local packages; packages=$(python3 -c "
import sys, json
data = json.load(sys.stdin)
pkgs = data.get('data', {}).get('pkg', [])
for p in pkgs:
    name = p.get('name', '')
    if not name:
        continue
    args = []
    for k, v in p.items():
        if k == 'name':
            continue
        args.append(f'{k}={v}')
    print(name + '|' + ' '.join(args))
" < "$json_file" 2>/dev/null) || true

    if [[ -z "$packages" ]]; then
        log_warn "No packages parsed from export"
        return 0
    fi

    # Split on the first '|': $pkg_name, then the space-separated key=value
    # attribute string. The unquoted $pkg_args below is deliberate — each
    # key=value token must become its own whmapi1 argument.
    # NOTE(review): attribute VALUES containing spaces would be split apart
    # here — confirm the export never produces such values.
    while IFS='|' read -r pkg_name pkg_args; do
        [[ -z "$pkg_name" ]] && continue
        # shellcheck disable=SC2086
        if whmapi1 addpkg name="$pkg_name" $pkg_args >/dev/null 2>&1; then
            ((count++)) || true
            log_debug "Created package: $pkg_name"
        else
            ((failed++)) || true
            log_debug "Failed to create package: $pkg_name (may already exist)"
        fi
    done <<< "$packages"

    log_info "Created $count package(s) ($failed failed/existing)"
}
|
||||||
|
|
||||||
|
# Recreate DNS zones from exported zone JSON files via the WHM API.
#   $1  staging root; reads $1/api/zones/*.zone (JSON per domain)
# For each zone: ensure it exists (`whmapi1 adddns`), then replay every
# supported record type (`whmapi1 addzonerecord`). A missing zones dir is
# a soft skip (returns 0).
#
# Fix: $failed was declared and reported in the summary line but never
# incremented, so the log always claimed "0 failed". A zone now counts as
# failed when its export yielded records but none could be applied.
_apply_dns_zones() {
    local stage_dir="$1"
    local zones_dir="$stage_dir/api/zones"

    if [[ ! -d "$zones_dir" ]]; then
        log_warn "No zones directory found, skipping DNS restore"
        return 0
    fi

    log_info "Restoring DNS zones via whmapi1..."
    local count=0
    local failed=0

    for zone_file in "$zones_dir"/*.zone; do
        [[ -f "$zone_file" ]] || continue
        local domain; domain=$(basename "$zone_file" .zone)
        [[ -z "$domain" ]] && continue

        # First ensure the zone exists (failure is expected when it already does)
        if ! whmapi1 adddns domain="$domain" >/dev/null 2>&1; then
            log_debug "Zone may already exist: $domain"
        fi

        # Parse zone records into "type|name|address|ttl" lines.
        # NOTE(review): 'line' is read from the export but unused by the
        # printer — kept as-is; confirm whether Line-based ordering matters.
        local records; records=$(python3 -c "
import sys, json
data = json.load(sys.stdin)
zone_data = data.get('data', {}).get('zone', [])
if isinstance(zone_data, list):
    for record in zone_data:
        rtype = record.get('type', '')
        name = record.get('name', '')
        if rtype in ('A', 'AAAA', 'CNAME', 'MX', 'TXT', 'SRV', 'CAA'):
            address = record.get('address', record.get('cname', record.get('txtdata', '')))
            ttl = record.get('ttl', '14400')
            line = record.get('Line', '')
            print(f'{rtype}|{name}|{address}|{ttl}')
" < "$zone_file" 2>/dev/null) || true

        if [[ -z "$records" ]]; then
            log_debug "No parseable records for zone: $domain"
            continue
        fi

        local zone_count=0
        local rec_failed=0
        while IFS='|' read -r rtype rname address ttl; do
            [[ -z "$rtype" || -z "$rname" ]] && continue
            if whmapi1 addzonerecord domain="$domain" name="$rname" type="$rtype" address="$address" ttl="$ttl" >/dev/null 2>&1; then
                ((zone_count++)) || true
            else
                ((rec_failed++)) || true
            fi
        done <<< "$records"

        if (( zone_count == 0 && rec_failed > 0 )); then
            # Records existed in the export but none could be applied.
            ((failed++)) || true
            log_warn "Failed to restore any records for zone: $domain"
        else
            ((count++)) || true
            log_debug "Restored zone $domain ($zone_count records)"
        fi
    done

    log_info "Restored $count DNS zone(s) ($failed failed)"
}
|
||||||
|
|
||||||
|
# ── Phase Functions ──────────────────────────────────────────
|
||||||
|
|
||||||
|
# Phase 1 of the system restore: core cPanel/WHM configuration files,
# package/feature definitions, then API-level tweak-setting and package
# re-application.
#   $1  staging root
#   $2  "true" for dry-run (log intended actions only), default "false"
# Returns 1 when any file/dir restore step errored; API apply steps are
# best-effort and never counted as phase errors.
_restore_phase1_foundation() {
    local stage_dir="$1"
    local dry_run="${2:-false}"
    local errors=0

    log_info "=== Phase 1: Foundation ==="

    if [[ "$dry_run" == "true" ]]; then
        log_info "[DRY RUN] Would restore: /etc/wwwacct.conf"
        log_info "[DRY RUN] Would restore: /var/cpanel/cpanel.config"
        log_info "[DRY RUN] Would restore: /var/cpanel/packages/"
        log_info "[DRY RUN] Would restore: /var/cpanel/features/"
        log_info "[DRY RUN] Would restore: /etc/cpanel/"
        log_info "[DRY RUN] Would apply tweak settings via whmapi1"
        log_info "[DRY RUN] Would recreate packages via whmapi1"
        return 0
    fi

    local path
    for path in etc/wwwacct.conf var/cpanel/cpanel.config; do
        _restore_file "$stage_dir" "$path" || ((errors++)) || true
    done
    for path in var/cpanel/packages var/cpanel/features etc/cpanel; do
        _restore_dir "$stage_dir" "$path" || ((errors++)) || true
    done

    _apply_tweaksettings "$stage_dir"
    _apply_packages "$stage_dir"

    if (( errors > 0 )); then
        log_warn "Phase 1 completed with $errors error(s)"
        return 1
    fi
    log_info "Phase 1 completed successfully"
}
|
||||||
|
|
||||||
|
# Phase 2 of the system restore: per-service configuration, restarting each
# service right after its files land so failures are attributable.
# Order: Exim -> Apache (with rebuildhttpdconf) -> PHP/EasyApache -> MySQL
# -> BIND. The sequence is deliberate; do not reorder without checking
# inter-service dependencies.
#   $1  staging root
#   $2  "true" for dry-run (log intended actions only), default "false"
# Returns 1 when any restore or restart step errored; errors never abort
# the remaining steps.
_restore_phase2_services() {
    local stage_dir="$1"
    local dry_run="${2:-false}"
    local errors=0

    log_info "=== Phase 2: Services ==="

    if [[ "$dry_run" == "true" ]]; then
        log_info "[DRY RUN] Would restore Exim config files + restart exim"
        log_info "[DRY RUN] Would restore Apache config files + rebuildhttpdconf + restart httpd"
        log_info "[DRY RUN] Would restore PHP/EasyApache config + restart httpd"
        log_info "[DRY RUN] Would restore MySQL config + restart mysql"
        log_info "[DRY RUN] Would restore BIND config + zone files + restart named"
        return 0
    fi

    # Exim
    log_info "--- Restoring Exim configuration ---"
    _restore_file "$stage_dir" "etc/exim.conf" || ((errors++)) || true
    _restore_file "$stage_dir" "etc/exim.conf.local" || ((errors++)) || true
    _restore_file "$stage_dir" "etc/exim.conf.localopts" || ((errors++)) || true
    _restore_file "$stage_dir" "etc/localdomains" || ((errors++)) || true
    _restore_file "$stage_dir" "etc/remotedomains" || ((errors++)) || true
    _restore_file "$stage_dir" "etc/secondarymx" || ((errors++)) || true
    _restore_dir "$stage_dir" "etc/valiases" || ((errors++)) || true
    _restore_dir "$stage_dir" "etc/vdomainaliases" || ((errors++)) || true
    _restore_dir "$stage_dir" "etc/vfilters" || ((errors++)) || true
    _restart_service "exim" || ((errors++)) || true

    # Apache
    log_info "--- Restoring Apache configuration ---"
    _restore_dir "$stage_dir" "etc/httpd/conf" || ((errors++)) || true
    _restore_dir "$stage_dir" "usr/local/apache/conf" || ((errors++)) || true
    _restore_dir "$stage_dir" "etc/httpd/conf.d" || ((errors++)) || true
    _restore_dir "$stage_dir" "etc/cpanel/ea4" || ((errors++)) || true
    # Rebuild Apache config and restart (regenerates httpd.conf from the
    # restored ea4 profile; failure is a warning, not a phase error)
    if [[ -x /usr/local/cpanel/scripts/rebuildhttpdconf ]]; then
        log_info "Rebuilding Apache configuration..."
        /usr/local/cpanel/scripts/rebuildhttpdconf >/dev/null 2>&1 || log_warn "rebuildhttpdconf failed"
    fi
    _restart_service "httpd" || ((errors++)) || true

    # PHP / EasyApache
    log_info "--- Restoring PHP configuration ---"
    _restore_dir "$stage_dir" "var/cpanel/MultiPHP" || ((errors++)) || true
    # Restore ea-php configs if present: each staged
    # opt/cpanel/ea-phpNN/root/etc/ tree is restored individually.
    if [[ -d "$stage_dir/files/opt/cpanel" ]]; then
        local ea_dirs; ea_dirs=$(ls -d "$stage_dir/files/opt/cpanel/ea-php"*/root/etc/ 2>/dev/null) || true
        if [[ -n "$ea_dirs" ]]; then
            while IFS= read -r ea_src; do
                [[ -z "$ea_src" ]] && continue
                # Get the relative path under files/
                local rel="${ea_src#$stage_dir/files/}"
                _restore_dir "$stage_dir" "$rel" || ((errors++)) || true
            done <<< "$ea_dirs"
        fi
    fi

    # MySQL
    log_info "--- Restoring MySQL configuration ---"
    _restore_file "$stage_dir" "etc/my.cnf" || ((errors++)) || true
    _restore_file "$stage_dir" "root/.my.cnf" || ((errors++)) || true
    _restart_service "mysql" || ((errors++)) || true

    # BIND / Named
    log_info "--- Restoring BIND configuration ---"
    _restore_file "$stage_dir" "etc/named.conf" || ((errors++)) || true
    _restore_dir "$stage_dir" "var/named" || ((errors++)) || true
    _restart_service "named" || ((errors++)) || true

    if (( errors > 0 )); then
        log_warn "Phase 2 completed with $errors error(s)"
        return 1
    fi
    log_info "Phase 2 completed successfully"
}
|
||||||
|
|
||||||
|
# Phase 3 of the system restore: network and security state — IP config,
# CSF firewall (with reload), root SSH keys (with permission repair),
# root crontab, and gniza's own configuration.
#   $1  staging root
#   $2  "true" for dry-run (log intended actions only), default "false"
# Returns 1 when any step errored; errors never abort the remaining steps.
_restore_phase3_security() {
    local stage_dir="$1"
    local dry_run="${2:-false}"
    local errors=0

    log_info "=== Phase 3: Network & Security ==="

    if [[ "$dry_run" == "true" ]]; then
        log_info "[DRY RUN] Would restore IP configuration"
        log_info "[DRY RUN] Would restore CSF firewall config + csf -r"
        log_info "[DRY RUN] Would restore /root/.ssh/"
        log_info "[DRY RUN] Would restore root crontab"
        log_info "[DRY RUN] Would restore /etc/gniza/"
        return 0
    fi

    # IP configuration
    log_info "--- Restoring IP configuration ---"
    _restore_file "$stage_dir" "etc/ips" || ((errors++)) || true
    _restore_file "$stage_dir" "etc/reservedips" || ((errors++)) || true
    _restore_file "$stage_dir" "etc/reservedipreasons" || ((errors++)) || true
    _restore_file "$stage_dir" "etc/sysconfig/network" || ((errors++)) || true
    _restore_file "$stage_dir" "etc/resolv.conf" || ((errors++)) || true

    # CSF firewall: restore config, then reload rules only if csf is installed
    log_info "--- Restoring CSF firewall ---"
    _restore_dir "$stage_dir" "etc/csf" || ((errors++)) || true
    if command -v csf &>/dev/null; then
        log_info "Restarting CSF firewall..."
        csf -r >/dev/null 2>&1 || {
            log_warn "CSF restart failed"
            ((errors++)) || true
        }
    fi

    # Root SSH keys
    log_info "--- Restoring root SSH keys ---"
    _restore_dir "$stage_dir" "root/.ssh" || ((errors++)) || true
    # Fix permissions: sshd refuses keys that are group/world readable.
    # Private keys 600, dir 700, public keys relaxed back to 644.
    if [[ -d /root/.ssh ]]; then
        chmod 700 /root/.ssh 2>/dev/null || true
        chmod 600 /root/.ssh/* 2>/dev/null || true
        chmod 644 /root/.ssh/*.pub 2>/dev/null || true
    fi

    # Root crontab (restored as the spool file, not via crontab(1))
    log_info "--- Restoring root crontab ---"
    _restore_file "$stage_dir" "var/spool/cron/root" || ((errors++)) || true

    # gniza config
    log_info "--- Restoring gniza configuration ---"
    _restore_dir "$stage_dir" "etc/gniza" || ((errors++)) || true

    if (( errors > 0 )); then
        log_warn "Phase 3 completed with $errors error(s)"
        return 1
    fi
    log_info "Phase 3 completed successfully"
}
|
||||||
|
|
||||||
|
# Phase 4 of the system restore: recreate DNS zones through the WHM API.
#   $1  staging root
#   $2  "true" for dry-run (log intended action only), default "false"
# Delegates all real work to _apply_dns_zones; always best-effort.
_restore_phase4_dns() {
    local stage_dir="$1"
    local dry_run="${2:-false}"

    log_info "=== Phase 4: DNS via API ==="

    case "$dry_run" in
        true)
            log_info "[DRY RUN] Would recreate DNS zones via whmapi1 adddns/addzonerecord"
            ;;
        *)
            _apply_dns_zones "$stage_dir"
            log_info "Phase 4 completed"
            ;;
    esac
}
|
||||||
|
|
||||||
|
# ── Restore Orchestrator ─────────────────────────────────────
|
||||||
|
|
||||||
|
# Orchestrate a full system restore from an already-downloaded snapshot.
#   $1  staging directory (must contain files/ and/or api/)
#   $2  phase selector string, default "1,2,3,4" — a phase runs when its
#       digit appears anywhere in the string
#   $3  "true" for dry-run, default "false"
# Each selected phase runs even if an earlier one failed; returns 1 when
# any phase reported errors, 0 otherwise.
run_system_restore() {
    local stage_dir="$1"
    local phases="${2:-1,2,3,4}"
    local dry_run="${3:-false}"
    local total_errors=0

    log_info "Starting system restore (phases: $phases, dry-run: $dry_run)"

    # Verify staging directory has content before touching the live system.
    if [[ ! -d "$stage_dir/files" && ! -d "$stage_dir/api" ]]; then
        log_error "System snapshot staging directory is empty or invalid: $stage_dir"
        return 1
    fi

    local phase
    for phase in 1 2 3 4; do
        [[ "$phases" == *"$phase"* ]] || continue
        case "$phase" in
            1) _restore_phase1_foundation "$stage_dir" "$dry_run" || ((total_errors++)) || true ;;
            2) _restore_phase2_services   "$stage_dir" "$dry_run" || ((total_errors++)) || true ;;
            3) _restore_phase3_security   "$stage_dir" "$dry_run" || ((total_errors++)) || true ;;
            4) _restore_phase4_dns        "$stage_dir" "$dry_run" || ((total_errors++)) || true ;;
        esac
    done

    if (( total_errors > 0 )); then
        log_warn "System restore completed with $total_errors phase error(s)"
        return 1
    fi

    log_info "System restore completed successfully"
    return 0
}
|
||||||
@@ -21,7 +21,7 @@ our @REMOTE_KEYS = qw(
|
|||||||
);
|
);
|
||||||
|
|
||||||
our @SCHEDULE_KEYS = qw(
|
our @SCHEDULE_KEYS = qw(
|
||||||
SCHEDULE SCHEDULE_TIME SCHEDULE_DAY SCHEDULE_CRON REMOTES
|
SCHEDULE SCHEDULE_TIME SCHEDULE_DAY SCHEDULE_CRON REMOTES SYSBACKUP
|
||||||
);
|
);
|
||||||
|
|
||||||
my %MAIN_KEY_SET = map { $_ => 1 } @MAIN_KEYS;
|
my %MAIN_KEY_SET = map { $_ => 1 } @MAIN_KEYS;
|
||||||
|
|||||||
@@ -295,32 +295,13 @@ sub handle_add {
|
|||||||
}
|
}
|
||||||
GnizaWHM::UI::init_remote_dir(%init_args);
|
GnizaWHM::UI::init_remote_dir(%init_args);
|
||||||
|
|
||||||
# Fork sysbackup if toggle is on
|
|
||||||
my $sysbackup_msg = '';
|
|
||||||
if ($form->{'sysbackup_toggle'}) {
|
|
||||||
my $log_file = "/var/log/gniza/sysbackup-$name.log";
|
|
||||||
my $pid = fork();
|
|
||||||
if (defined $pid && $pid == 0) {
|
|
||||||
# Child: detach and exec sysbackup
|
|
||||||
close STDIN;
|
|
||||||
close STDOUT;
|
|
||||||
close STDERR;
|
|
||||||
open STDIN, '<', '/dev/null';
|
|
||||||
open STDOUT, '>', $log_file;
|
|
||||||
open STDERR, '>&', \*STDOUT;
|
|
||||||
exec '/usr/local/bin/gniza', 'sysbackup', "--remote=$name";
|
|
||||||
exit 1;
|
|
||||||
}
|
|
||||||
$sysbackup_msg = ' System backup started in background.' if defined $pid;
|
|
||||||
}
|
|
||||||
|
|
||||||
if ($form->{'wizard'}) {
|
if ($form->{'wizard'}) {
|
||||||
GnizaWHM::UI::set_flash('success', "Remote '$name' created. Now set up a schedule." . $sysbackup_msg);
|
GnizaWHM::UI::set_flash('success', "Remote '$name' created. Now set up a schedule.");
|
||||||
print "Status: 302 Found\r\n";
|
print "Status: 302 Found\r\n";
|
||||||
print "Location: schedules.cgi?action=add&wizard=1&remote_name=" . _uri_escape($name) . "\r\n\r\n";
|
print "Location: schedules.cgi?action=add&wizard=1&remote_name=" . _uri_escape($name) . "\r\n\r\n";
|
||||||
exit;
|
exit;
|
||||||
}
|
}
|
||||||
GnizaWHM::UI::set_flash('success', "Remote '$name' created successfully." . $sysbackup_msg);
|
GnizaWHM::UI::set_flash('success', "Remote '$name' created successfully.");
|
||||||
print "Status: 302 Found\r\n";
|
print "Status: 302 Found\r\n";
|
||||||
print "Location: remotes.cgi\r\n\r\n";
|
print "Location: remotes.cgi\r\n\r\n";
|
||||||
exit;
|
exit;
|
||||||
@@ -430,25 +411,7 @@ sub handle_edit {
|
|||||||
if (!@errors) {
|
if (!@errors) {
|
||||||
my ($ok, $err) = GnizaWHM::Config::write($conf_path, \%data, \@GnizaWHM::Config::REMOTE_KEYS);
|
my ($ok, $err) = GnizaWHM::Config::write($conf_path, \%data, \@GnizaWHM::Config::REMOTE_KEYS);
|
||||||
if ($ok) {
|
if ($ok) {
|
||||||
# Fork sysbackup if toggle is on
|
GnizaWHM::UI::set_flash('success', "Remote '$name' updated successfully.");
|
||||||
my $sysbackup_msg = '';
|
|
||||||
if ($form->{'sysbackup_toggle'}) {
|
|
||||||
my $log_file = "/var/log/gniza/sysbackup-$name.log";
|
|
||||||
my $pid = fork();
|
|
||||||
if (defined $pid && $pid == 0) {
|
|
||||||
close STDIN;
|
|
||||||
close STDOUT;
|
|
||||||
close STDERR;
|
|
||||||
open STDIN, '<', '/dev/null';
|
|
||||||
open STDOUT, '>', $log_file;
|
|
||||||
open STDERR, '>&', \*STDOUT;
|
|
||||||
exec '/usr/local/bin/gniza', 'sysbackup', "--remote=$name";
|
|
||||||
exit 1;
|
|
||||||
}
|
|
||||||
$sysbackup_msg = ' System backup started in background.' if defined $pid;
|
|
||||||
}
|
|
||||||
|
|
||||||
GnizaWHM::UI::set_flash('success', "Remote '$name' updated successfully." . $sysbackup_msg);
|
|
||||||
print "Status: 302 Found\r\n";
|
print "Status: 302 Found\r\n";
|
||||||
print "Location: remotes.cgi\r\n\r\n";
|
print "Location: remotes.cgi\r\n\r\n";
|
||||||
exit;
|
exit;
|
||||||
@@ -669,17 +632,6 @@ sub render_remote_form {
|
|||||||
_field($conf, 'RETENTION_COUNT', 'Snapshots to Keep', 'Default: 30');
|
_field($conf, 'RETENTION_COUNT', 'Snapshots to Keep', 'Default: 30');
|
||||||
print qq{</div>\n</div>\n};
|
print qq{</div>\n</div>\n};
|
||||||
|
|
||||||
# System Backup toggle
|
|
||||||
my $sysbackup_checked = $form->{'sysbackup_toggle'} ? ' checked' : '';
|
|
||||||
print qq{<div class="card bg-white shadow-sm border border-base-300 mb-6">\n<div class="card-body">\n};
|
|
||||||
print qq{<h2 class="card-title text-sm">System Backup</h2>\n};
|
|
||||||
print qq{<div class="flex items-center gap-3 mb-2.5">\n};
|
|
||||||
print qq{ <label class="w-44 font-medium text-sm" for="sysbackup_toggle">Run system backup</label>\n};
|
|
||||||
print qq{ <input type="checkbox" class="toggle toggle-sm toggle-success" id="sysbackup_toggle" name="sysbackup_toggle" value="1"$sysbackup_checked>\n};
|
|
||||||
print qq{</div>\n};
|
|
||||||
print qq{<p class="text-xs text-base-content/60 mt-2">Backs up WHM/cPanel config, installed packages, and cron jobs to this remote. Runs in the background.</p>\n};
|
|
||||||
print qq{</div>\n</div>\n};
|
|
||||||
|
|
||||||
# Submit
|
# Submit
|
||||||
print qq{<div class="flex gap-2 mt-4">\n};
|
print qq{<div class="flex gap-2 mt-4">\n};
|
||||||
my $btn_label = $is_edit ? 'Save Changes' : 'Create Remote';
|
my $btn_label = $is_edit ? 'Save Changes' : 'Create Remote';
|
||||||
|
|||||||
@@ -553,6 +553,18 @@ sub render_schedule_form {
|
|||||||
|
|
||||||
print qq{</div>\n</div>\n};
|
print qq{</div>\n</div>\n};
|
||||||
|
|
||||||
|
# System Backup toggle
|
||||||
|
my $sysbackup_val = $conf->{SYSBACKUP} // '';
|
||||||
|
my $sysbackup_checked = ($sysbackup_val eq 'yes') ? ' checked' : '';
|
||||||
|
print qq{<div class="card bg-white shadow-sm border border-base-300 mb-6">\n<div class="card-body">\n};
|
||||||
|
print qq{<h2 class="card-title text-sm">System Backup</h2>\n};
|
||||||
|
print qq{<div class="flex items-center gap-3 mb-2.5">\n};
|
||||||
|
print qq{ <label class="w-44 font-medium text-sm" for="SYSBACKUP">Include system backup</label>\n};
|
||||||
|
print qq{ <input type="checkbox" class="toggle toggle-sm toggle-success" id="SYSBACKUP" name="SYSBACKUP" value="yes"$sysbackup_checked>\n};
|
||||||
|
print qq{</div>\n};
|
||||||
|
print qq{<p class="text-xs text-base-content/60 mt-2">After all account backups complete, also back up WHM/cPanel config, installed packages, and cron jobs.</p>\n};
|
||||||
|
print qq{</div>\n</div>\n};
|
||||||
|
|
||||||
# Submit
|
# Submit
|
||||||
print qq{<div class="flex gap-2 mt-4">\n};
|
print qq{<div class="flex gap-2 mt-4">\n};
|
||||||
my $btn_label = $is_edit ? 'Save Changes' : 'Create Schedule';
|
my $btn_label = $is_edit ? 'Save Changes' : 'Create Schedule';
|
||||||
|
|||||||
Reference in New Issue
Block a user