Add source targets, docs panel, tail-style log viewer, and various improvements

- Add source.sh for remote source backup support
- Add responsive DocsPanel with layout adaptations for narrow screens
- Running tasks log viewer now shows last 100 lines (tail -f style)
- Add incremental backup explanation to README
- Update backup, transfer, schedule, and snaplog modules
- Add MCP config and logo asset

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
shuki
2026-03-07 03:06:39 +02:00
parent eca08483e4
commit fec13135ce
23 changed files with 784 additions and 125 deletions

14
.mcp.json Normal file
View File

@@ -0,0 +1,14 @@
{
"$schema": "https://raw.githubusercontent.com/anthropics/claude-code/main/schemas/mcp.schema.json",
"mcpServers": {
"textual-mcp": {
"command": "/home/shuki/projects/textual-mcp/.venv/bin/python",
"args": [
"-m",
"textual_mcp.server"
],
"cwd": "/home/shuki/projects/textual-mcp",
"description": "Textual TUI framework MCP: CSS validation, widget generation, style analysis, doc search"
}
}
}

View File

@@ -165,6 +165,23 @@ REMOTE_TYPE="local"
REMOTE_BASE="/mnt/backup-drive"
```
## How Incremental Backups Work
GNIZA uses rsync's `--link-dest` option to create space-efficient incremental backups using **hardlinks**.
**The first backup** copies every file from source to destination. This takes the most time and disk space, since every file must be transferred in full. Depending on the size of your data and network speed, this initial backup may take a long time — this is normal.
**Every backup after the first** is significantly faster. Rsync compares each file against the previous snapshot. Files that haven't changed are not transferred again — instead, rsync creates a **hardlink** to the same data block on disk from the previous snapshot. Only new or modified files are actually copied.
This means:
- Each snapshot appears as a full, complete directory tree — you can browse or restore any snapshot independently.
- Unchanged files share disk space between snapshots through hardlinks, so 10 snapshots of 50 GB with only minor changes might use 55 GB total instead of 500 GB.
- Deleting an old snapshot only frees space for files that are not referenced by any other snapshot.
- Subsequent backups typically finish in seconds or minutes rather than hours, since only the differences are transferred.
> **Example**: A first backup of 20 GB takes 45 minutes over SSH. The next day, only 200 MB of files changed — the second backup takes under 2 minutes and uses only 200 MB of additional disk space, while still appearing as a complete 20 GB snapshot.
## Snapshot Structure
```

View File

@@ -25,6 +25,7 @@ source "$GNIZA_DIR/lib/rclone.sh"
source "$GNIZA_DIR/lib/snapshot.sh"
source "$GNIZA_DIR/lib/transfer.sh"
source "$GNIZA_DIR/lib/snaplog.sh"
source "$GNIZA_DIR/lib/source.sh"
# ── Help ─────────────────────────────────────────────────────
show_help() {

View File

@@ -10,6 +10,24 @@ TARGET_RETENTION=""
TARGET_PRE_HOOK=""
TARGET_POST_HOOK=""
TARGET_ENABLED="yes"
# Remote Source (pull files from a remote before backing up)
#TARGET_SOURCE_TYPE="local" # local | ssh | s3 | gdrive
# SSH source
#TARGET_SOURCE_HOST=""
#TARGET_SOURCE_PORT="22"
#TARGET_SOURCE_USER="root"
#TARGET_SOURCE_AUTH_METHOD="key" # key | password
#TARGET_SOURCE_KEY=""
#TARGET_SOURCE_PASSWORD=""
# S3 source
#TARGET_SOURCE_S3_BUCKET=""
#TARGET_SOURCE_S3_REGION="us-east-1"
#TARGET_SOURCE_S3_ENDPOINT=""
#TARGET_SOURCE_S3_ACCESS_KEY_ID=""
#TARGET_SOURCE_S3_SECRET_ACCESS_KEY=""
# Google Drive source
#TARGET_SOURCE_GDRIVE_SERVICE_ACCOUNT_FILE=""
#TARGET_SOURCE_GDRIVE_ROOT_FOLDER_ID=""
# MySQL Backup
#TARGET_MYSQL_ENABLED="no"
#TARGET_MYSQL_MODE="all"

View File

@@ -132,22 +132,56 @@ _backup_target_impl() {
# 9. Transfer each folder
local folder
local transfer_failed=false
local folder_index=0
local staging_dir=""
while IFS= read -r folder; do
[[ -z "$folder" ]] && continue
if (( folder_index > 0 )) && [[ "$threshold" -gt 0 ]]; then
check_remote_disk_space "$threshold" || {
log_error "Disk space threshold exceeded — aborting after $folder_index folder(s)"
transfer_failed=true
break
}
fi
((folder_index++)) || true
if [[ "${TARGET_SOURCE_TYPE:-local}" != "local" ]]; then
staging_dir=$(mktemp -d "${WORK_DIR:-/tmp}/gniza-source-XXXXXX")
log_info "Pulling from ${TARGET_SOURCE_TYPE} source: $folder"
if ! pull_from_source "$folder" "$staging_dir/${folder#/}"; then
log_error "Source pull failed for: $folder"
rm -rf "$staging_dir"
transfer_failed=true
continue
fi
if ! transfer_folder "$target_name" "$staging_dir/${folder#/}" "$ts" "$prev" "${folder#/}"; then
log_error "Transfer failed for folder: $folder"
transfer_failed=true
fi
rm -rf "$staging_dir"
else
if ! transfer_folder "$target_name" "$folder" "$ts" "$prev"; then
log_error "Transfer failed for folder: $folder"
transfer_failed=true
fi
fi
done < <(get_target_folders)
# 9.5. Transfer MySQL dumps
if [[ -n "$mysql_dump_dir" && -d "$mysql_dump_dir/_mysql" ]]; then
if [[ "$transfer_failed" != "true" ]] && [[ "$threshold" -gt 0 ]]; then
check_remote_disk_space "$threshold" || {
log_error "Disk space threshold exceeded — aborting before MySQL dump transfer"
transfer_failed=true
}
fi
if [[ "$transfer_failed" != "true" ]]; then
log_info "Transferring MySQL dumps for $target_name..."
if ! transfer_folder "$target_name" "$mysql_dump_dir/_mysql" "$ts" "$prev" "_mysql"; then
log_error "Transfer failed for MySQL dumps"
transfer_failed=true
fi
fi
fi
# Cleanup MySQL temp dir
mysql_cleanup_dump

View File

@@ -17,6 +17,26 @@ _GNIZA4LINUX_SCHEDULE_LOADED=1
readonly GNIZA4LINUX_CRON_TAG="# gniza4linux:"
SCHEDULES_DIR="$CONFIG_DIR/schedules.d"
# Check whether a cron daemon is running.
# Globals:  CRON_WARNING (written) - human-readable warning when not running
# Returns:  0 if a cron daemon appears to be running, 1 otherwise
_cron_is_running() {
  CRON_WARNING=""
  local have_systemctl=false
  # Try systemctl first (systemd-based systems); check both common unit names.
  if command -v systemctl &>/dev/null; then
    have_systemctl=true
    if systemctl is-active cron &>/dev/null || systemctl is-active crond &>/dev/null; then
      return 0
    fi
  fi
  # Even on systemd hosts, cron may run outside unit management
  # (containers, sysvinit chroots) — fall back to a process check.
  if pgrep -x cron &>/dev/null || pgrep -x crond &>/dev/null; then
    return 0
  fi
  if [[ "$have_systemctl" == true ]]; then
    CRON_WARNING="Cron daemon is not running. Start it with: sudo systemctl start cron"
  else
    CRON_WARNING="Cron daemon does not appear to be running. Scheduled backups will not execute."
  fi
  return 1
}
# ── Discovery ─────────────────────────────────────────────────
# List schedule names (filenames without .conf) sorted alphabetically.
@@ -248,6 +268,12 @@ install_schedules() {
local cron_line; cron_line=$(build_cron_line "$sname" 2>/dev/null) || continue
echo " [$sname] $cron_line"
done <<< "$schedules"
# Warn if cron daemon is not running
if ! _cron_is_running; then
echo ""
log_warn "$CRON_WARNING"
fi
}
# Display current gniza4linux cron entries.

View File

@@ -6,10 +6,14 @@ _GNIZA4LINUX_SNAPLOG_LOADED=1
# Tee helper: copies stdin to the transfer log, app log, and stderr (TUI).
# Used as process substitution target: cmd > >(_snaplog_tee) 2>&1
# Uses tee(1) to preserve \r from rsync --info=progress2 in real-time.
# The raw transfer log gets everything; LOG_FILE only gets structured
# log lines — skips rsync progress percentages and verbose file listings
# to keep the app log small and readable.
_snaplog_tee() {
if [[ -n "${LOG_FILE:-}" ]]; then
tee -a "${_TRANSFER_LOG}" "${LOG_FILE}" >&2
tee -a "${_TRANSFER_LOG}" >(
grep --line-buffered -E '^\[|^(sent |total size |Total |rsync |Number of |===)' >> "${LOG_FILE}"
) >&2
else
tee -a "${_TRANSFER_LOG}" >&2
fi

173
lib/source.sh Executable file
View File

@@ -0,0 +1,173 @@
#!/usr/bin/env bash
# gniza4linux/lib/source.sh — Pull files from remote sources
[[ -n "${_GNIZA4LINUX_SOURCE_LOADED:-}" ]] && return 0
_GNIZA4LINUX_SOURCE_LOADED=1
# Pull files from a configured remote source into a local staging directory.
# Usage:    pull_from_source <remote_path> <local_dir>
# Globals:  TARGET_SOURCE_TYPE (read) — ssh | s3 | gdrive
# Returns:  0 on success, 1 on failure or unknown source type
pull_from_source() {
  local src_path="$1"
  local staging="$2"
  if ! mkdir -p "$staging"; then
    log_error "Failed to create staging directory: $staging"
    return 1
  fi
  local kind="${TARGET_SOURCE_TYPE:-local}"
  case "$kind" in
    ssh)
      _rsync_from_source_ssh "$src_path" "$staging"
      ;;
    s3|gdrive)
      # Both rclone-backed sources share the same pull path; the config
      # builder selects the backend from the type name.
      _build_source_rclone_config "$kind"
      _rclone_from_source "$src_path" "$staging"
      ;;
    *)
      log_error "Unknown source type: ${TARGET_SOURCE_TYPE}"
      return 1
      ;;
  esac
}
# Pull from an SSH source using rsync, with retries and backoff.
# Usage:    _rsync_from_source_ssh <remote_path> <local_dir>
# Globals:  TARGET_SOURCE_HOST/PORT/USER/AUTH_METHOD/KEY/PASSWORD (read),
#           SSH_RETRIES, SSH_TIMEOUT, _TRANSFER_LOG (read, optional)
# Returns:  0 on success (incl. rsync partial-transfer exits 23/24), 1 otherwise
_rsync_from_source_ssh() {
  local remote_path="$1"
  local local_dir="$2"
  local attempt=0
  local max_retries="${SSH_RETRIES:-${DEFAULT_SSH_RETRIES:-3}}"
  local auth="${TARGET_SOURCE_AUTH_METHOD:-key}"

  # Build SSH options for the source connection.
  local ssh_opts=(-o StrictHostKeyChecking=accept-new)
  ssh_opts+=(-o ConnectTimeout="${SSH_TIMEOUT:-${DEFAULT_SSH_TIMEOUT:-30}}")
  ssh_opts+=(-p "${TARGET_SOURCE_PORT:-22}")
  if [[ "$auth" == "key" ]]; then
    # BatchMode disables all interactive prompting — right for key auth,
    # but it would also prevent sshpass from supplying a password, so it
    # must NOT be set on the password-auth path.
    ssh_opts+=(-o BatchMode=yes)
    [[ -n "${TARGET_SOURCE_KEY:-}" ]] && ssh_opts+=(-i "$TARGET_SOURCE_KEY")
  fi
  local rsync_ssh="ssh ${ssh_opts[*]}"

  # --fake-super lets a non-root remote account report ownership via xattrs.
  local rsync_opts=(-aHAX --numeric-ids --sparse --rsync-path="rsync --fake-super")
  rsync_opts+=(--info=progress2 --no-inc-recursive)
  if [[ -n "${_TRANSFER_LOG:-}" ]]; then
    rsync_opts+=(--verbose --stats)
  fi

  # Normalize trailing slashes so rsync copies directory *contents*.
  [[ "$remote_path" != */ ]] && remote_path="$remote_path/"
  [[ "$local_dir" != */ ]] && local_dir="$local_dir/"

  local source_spec="${TARGET_SOURCE_USER:-root}@${TARGET_SOURCE_HOST}:${remote_path}"

  while (( attempt < max_retries )); do
    ((attempt++)) || true
    log_debug "rsync (source pull) attempt $attempt/$max_retries: $source_spec -> $local_dir"
    local rsync_cmd=(rsync "${rsync_opts[@]}" -e "$rsync_ssh" "$source_spec" "$local_dir")
    if [[ "$auth" == "password" && -n "${TARGET_SOURCE_PASSWORD:-}" ]]; then
      # sshpass -e reads the password from SSHPASS, keeping it off argv/ps.
      export SSHPASS="$TARGET_SOURCE_PASSWORD"
      rsync_cmd=(sshpass -e "${rsync_cmd[@]}")
    fi
    local rc=0
    if [[ -n "${_TRANSFER_LOG:-}" ]]; then
      echo "=== rsync (source pull): $source_spec -> $local_dir ===" >> "$_TRANSFER_LOG"
      "${rsync_cmd[@]}" > >(_snaplog_tee) 2>&1 || rc=$?
    else
      "${rsync_cmd[@]}" || rc=$?
    fi
    if (( rc == 0 )); then
      log_debug "rsync (source pull) succeeded on attempt $attempt"
      return 0
    fi
    # Exit 23/24 = partial transfer / files vanished mid-run: warn, not fail.
    if (( rc == 23 || rc == 24 )); then
      log_warn "rsync (source pull) completed with warnings (exit $rc): some files could not be transferred"
      return 0
    fi
    log_warn "rsync (source pull) failed (exit $rc), attempt $attempt/$max_retries"
    if (( attempt < max_retries )); then
      local backoff=$(( attempt * 10 ))
      log_info "Retrying in ${backoff}s..."
      sleep "$backoff"
    fi
  done
  log_error "rsync (source pull) failed after $max_retries attempts"
  return 1
}
# Build a temporary rclone config for source pulling.
# Usage:    _build_source_rclone_config <type>   (type: s3 | gdrive)
# Globals:  _SOURCE_RCLONE_CONF (written) — path of the generated config,
#           TARGET_SOURCE_S3_* / TARGET_SOURCE_GDRIVE_* (read)
# Returns:  0 on success, 1 on unknown type or mktemp failure
_build_source_rclone_config() {
  local src_type="$1"
  _SOURCE_RCLONE_CONF=$(mktemp /tmp/gniza-source-rclone-XXXXXX.conf) || {
    log_error "Failed to create temporary rclone config"
    return 1
  }
  case "$src_type" in
    s3)
      cat > "$_SOURCE_RCLONE_CONF" <<EOF
[gniza-source]
type = s3
provider = Other
access_key_id = ${TARGET_SOURCE_S3_ACCESS_KEY_ID:-}
secret_access_key = ${TARGET_SOURCE_S3_SECRET_ACCESS_KEY:-}
region = ${TARGET_SOURCE_S3_REGION:-us-east-1}
endpoint = ${TARGET_SOURCE_S3_ENDPOINT:-}
EOF
      ;;
    gdrive)
      cat > "$_SOURCE_RCLONE_CONF" <<EOF
[gniza-source]
type = drive
service_account_file = ${TARGET_SOURCE_GDRIVE_SERVICE_ACCOUNT_FILE:-}
root_folder_id = ${TARGET_SOURCE_GDRIVE_ROOT_FOLDER_ID:-}
EOF
      ;;
    *)
      # Previously an unknown type silently produced an empty config;
      # fail loudly instead so the caller aborts before invoking rclone.
      log_error "Unknown rclone source type: $src_type"
      rm -f "$_SOURCE_RCLONE_CONF"
      _SOURCE_RCLONE_CONF=""
      return 1
      ;;
  esac
  # mktemp already creates the file 0600, but be explicit: it holds secrets.
  chmod 600 "$_SOURCE_RCLONE_CONF"
}
# Pull from an S3/GDrive source using rclone.
# Usage:    _rclone_from_source <remote_path> <local_dir>
# Globals:  _SOURCE_RCLONE_CONF (read, deleted on exit), TARGET_SOURCE_TYPE,
#           TARGET_SOURCE_S3_BUCKET, _TRANSFER_LOG (read, optional)
# Returns:  0 on success, 1 on failure
_rclone_from_source() {
  local remote_path="$1"
  local local_dir="$2"
  if [[ -z "${_SOURCE_RCLONE_CONF:-}" || ! -f "${_SOURCE_RCLONE_CONF:-}" ]]; then
    log_error "Source rclone config not found"
    return 1
  fi
  local rclone_src="gniza-source:${remote_path}"
  if [[ "${TARGET_SOURCE_TYPE}" == "s3" && -n "${TARGET_SOURCE_S3_BUCKET:-}" ]]; then
    # S3 remotes address objects as bucket/key — strip the leading slash.
    rclone_src="gniza-source:${TARGET_SOURCE_S3_BUCKET}/${remote_path#/}"
  fi
  log_debug "rclone (source pull): $rclone_src -> $local_dir"
  local rc=0
  if [[ -n "${_TRANSFER_LOG:-}" ]]; then
    echo "=== rclone (source pull): $rclone_src -> $local_dir ===" >> "$_TRANSFER_LOG"
    # NOTE: a plain `pipeline || rc=$?` would capture tee's status, not
    # rclone's, silently swallowing failures. Read PIPESTATUS[0] on both
    # branches of &&/|| so this is correct with or without pipefail and
    # does not trip set -e.
    rclone copy --config "$_SOURCE_RCLONE_CONF" "$rclone_src" "$local_dir" \
      --progress 2>&1 | tee -a "$_TRANSFER_LOG" \
      && rc=${PIPESTATUS[0]} || rc=${PIPESTATUS[0]}
  else
    rclone copy --config "$_SOURCE_RCLONE_CONF" "$rclone_src" "$local_dir" || rc=$?
  fi
  # Always remove the temp config — it contains credentials.
  rm -f "$_SOURCE_RCLONE_CONF"
  _SOURCE_RCLONE_CONF=""
  if (( rc != 0 )); then
    log_error "rclone (source pull) failed (exit $rc)"
    return 1
  fi
  return 0
}

View File

@@ -63,6 +63,20 @@ load_target() {
TARGET_MYSQL_HOST="${TARGET_MYSQL_HOST:-localhost}"
TARGET_MYSQL_PORT="${TARGET_MYSQL_PORT:-3306}"
TARGET_MYSQL_EXTRA_OPTS="${TARGET_MYSQL_EXTRA_OPTS:---single-transaction --routines --triggers}"
TARGET_SOURCE_TYPE="${TARGET_SOURCE_TYPE:-local}"
TARGET_SOURCE_HOST="${TARGET_SOURCE_HOST:-}"
TARGET_SOURCE_PORT="${TARGET_SOURCE_PORT:-22}"
TARGET_SOURCE_USER="${TARGET_SOURCE_USER:-root}"
TARGET_SOURCE_AUTH_METHOD="${TARGET_SOURCE_AUTH_METHOD:-key}"
TARGET_SOURCE_KEY="${TARGET_SOURCE_KEY:-}"
TARGET_SOURCE_PASSWORD="${TARGET_SOURCE_PASSWORD:-}"
TARGET_SOURCE_S3_BUCKET="${TARGET_SOURCE_S3_BUCKET:-}"
TARGET_SOURCE_S3_REGION="${TARGET_SOURCE_S3_REGION:-us-east-1}"
TARGET_SOURCE_S3_ENDPOINT="${TARGET_SOURCE_S3_ENDPOINT:-}"
TARGET_SOURCE_S3_ACCESS_KEY_ID="${TARGET_SOURCE_S3_ACCESS_KEY_ID:-}"
TARGET_SOURCE_S3_SECRET_ACCESS_KEY="${TARGET_SOURCE_S3_SECRET_ACCESS_KEY:-}"
TARGET_SOURCE_GDRIVE_SERVICE_ACCOUNT_FILE="${TARGET_SOURCE_GDRIVE_SERVICE_ACCOUNT_FILE:-}"
TARGET_SOURCE_GDRIVE_ROOT_FOLDER_ID="${TARGET_SOURCE_GDRIVE_ROOT_FOLDER_ID:-}"
log_debug "Loaded target '$name': folders=${TARGET_FOLDERS} enabled=${TARGET_ENABLED}"
}
@@ -86,12 +100,12 @@ validate_target() {
log_error "Target '$name': TARGET_FOLDERS is required (or enable MySQL backup)"
((errors++)) || true
elif [[ -n "$TARGET_FOLDERS" ]]; then
# Validate each folder exists
if [[ "${TARGET_SOURCE_TYPE:-local}" == "local" ]]; then
# Validate each folder exists locally
local -a folders
IFS=',' read -ra folders <<< "$TARGET_FOLDERS"
local folder
for folder in "${folders[@]}"; do
# Trim whitespace
folder="${folder#"${folder%%[![:space:]]*}"}"
folder="${folder%"${folder##*[![:space:]]}"}"
[[ -z "$folder" ]] && continue
@@ -103,6 +117,46 @@ validate_target() {
((errors++)) || true
fi
done
else
# Remote source: validate connection fields
case "${TARGET_SOURCE_TYPE}" in
ssh)
if [[ -z "${TARGET_SOURCE_HOST}" ]]; then
log_error "Target '$name': TARGET_SOURCE_HOST is required for SSH source"
((errors++)) || true
fi
;;
s3)
if [[ -z "${TARGET_SOURCE_S3_BUCKET}" ]]; then
log_error "Target '$name': TARGET_SOURCE_S3_BUCKET is required for S3 source"
((errors++)) || true
fi
if [[ -z "${TARGET_SOURCE_S3_ACCESS_KEY_ID}" || -z "${TARGET_SOURCE_S3_SECRET_ACCESS_KEY}" ]]; then
log_error "Target '$name': S3 credentials are required for S3 source"
((errors++)) || true
fi
;;
gdrive)
if [[ -z "${TARGET_SOURCE_GDRIVE_SERVICE_ACCOUNT_FILE}" ]]; then
log_error "Target '$name': service account file is required for Google Drive source"
((errors++)) || true
fi
;;
esac
# Validate paths are absolute (even on remote)
local -a folders
IFS=',' read -ra folders <<< "$TARGET_FOLDERS"
local folder
for folder in "${folders[@]}"; do
folder="${folder#"${folder%%[![:space:]]*}"}"
folder="${folder%"${folder##*[![:space:]]}"}"
[[ -z "$folder" ]] && continue
if [[ "$folder" != /* ]]; then
log_error "Target '$name': folder path must be absolute: $folder"
((errors++)) || true
fi
done
fi
fi
if [[ -n "$TARGET_ENABLED" && "$TARGET_ENABLED" != "yes" && "$TARGET_ENABLED" != "no" ]]; then

View File

@@ -4,6 +4,17 @@
[[ -n "${_GNIZA4LINUX_TRANSFER_LOADED:-}" ]] && return 0
_GNIZA4LINUX_TRANSFER_LOADED=1
# Verify remote disk usage is below the configured threshold.
# A threshold of 0 (or less) disables the check.
# Returns: 0 when usage is acceptable or checking is disabled, 1 otherwise.
_check_disk_space_or_abort() {
  local limit="${DISK_USAGE_THRESHOLD:-${DEFAULT_DISK_USAGE_THRESHOLD:-95}}"
  if (( limit > 0 )); then
    if ! check_remote_disk_space "$limit"; then
      log_error "Disk space threshold exceeded during transfer — aborting backup"
      return 1
    fi
  fi
  return 0
}
rsync_to_remote() {
local source_dir="$1"
local remote_dest="$2"
@@ -78,6 +89,7 @@ rsync_to_remote() {
fi
log_warn "rsync failed (exit $rc), attempt $attempt/$max_retries"
_check_disk_space_or_abort || return 1
if (( attempt < max_retries )); then
local backoff=$(( attempt * 10 ))
@@ -154,6 +166,7 @@ rsync_local() {
fi
log_warn "rsync (local) failed (exit $rc), attempt $attempt/$max_retries"
_check_disk_space_or_abort || return 1
if (( attempt < max_retries )); then
local backoff=$(( attempt * 10 ))

17
logo.txt Normal file
View File

@@ -0,0 +1,17 @@
▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
▓▓▓▓▓▓▓▓▓▓▓▓ ▓▓▓▓▓▓▓▓▓▓▓
▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
▓▓▓▓▓▓▓▓▓▓▓▓ ▓▓▓▓▓▓▓▓▓▓▓
▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
▓▓▓▓▓▓▓▓▓▓▓▓ ▓▓▓▓▓▓▓▓▓▓▓
▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
▓▓▓▓▓▓▓▓▓▓
▓▓▓▓▓▓
▓▓

View File

@@ -63,18 +63,29 @@ class GnizaApp(App):
else:
self.notify(f"{job.label} failed (exit code {message.return_code})", severity="error")
# Width thresholds for docs panel
DOCS_VERTICAL_WIDTH = 80 # Below: panel moves to bottom
DOCS_HIDE_WIDTH = 45 # Below: panel hidden entirely
# Below this width: hide inline panel, F1 opens modal instead
DOCS_MODAL_WIDTH = 80
def action_toggle_docs(self) -> None:
if self.size.width < self.DOCS_MODAL_WIDTH:
self._open_help_modal()
else:
try:
panel = self.screen.query_one("#docs-panel")
panel.display = not panel.display
panel._user_toggled = True
except NoMatches:
pass
def _open_help_modal(self) -> None:
from tui.widgets import HelpModal
from tui.docs import SCREEN_DOCS
try:
panel = self.screen.query_one("#docs-panel")
content = panel._content
except NoMatches:
content = "No documentation available for this screen."
self.push_screen(HelpModal(content))
def on_resize(self, event: Resize) -> None:
self._update_docs_layout(event.size.width)
@@ -85,23 +96,12 @@ class GnizaApp(App):
def _update_docs_layout(self, width: int) -> None:
try:
panel = self.screen.query_one("#docs-panel")
container = self.screen.query_one(".screen-with-docs")
except NoMatches:
return
# Auto-hide only on very narrow screens (unless user toggled)
if not getattr(panel, "_user_toggled", False):
panel.display = width >= self.DOCS_HIDE_WIDTH
# Switch layout direction
if width < self.DOCS_VERTICAL_WIDTH:
container.styles.layout = "vertical"
panel.styles.width = "100%"
panel.styles.min_width = None
panel.styles.max_height = "40%"
if width < self.DOCS_MODAL_WIDTH:
panel.display = False
else:
container.styles.layout = "horizontal"
panel.styles.width = "30%"
panel.styles.min_width = 30
panel.styles.max_height = None
panel.display = True
async def action_quit(self) -> None:
if job_manager.running_count() > 0:

View File

@@ -65,6 +65,14 @@ SCREEN_DOCS = {
" [bold]Retention override[/bold] - Custom snapshot count.\n"
" [bold]Pre/Post hooks[/bold] - Shell commands to run before/after the backup.\n"
"\n"
"[bold]Source section:[/bold]\n"
" Set Source Type to pull files from a remote server instead of backing up local folders.\n"
" [bold]Local[/bold] - Default. Back up folders on this machine.\n"
" [bold]SSH[/bold] - Pull files from a remote server via SSH/rsync before backing up.\n"
" [bold]S3[/bold] - Pull files from an S3-compatible bucket.\n"
" [bold]Google Drive[/bold] - Pull files from Google Drive via service account.\n"
" When using a remote source, specify remote paths in the Folders field instead of local paths.\n"
"\n"
"[bold]MySQL section:[/bold]\n"
" Enable MySQL to dump databases alongside files. Choose 'All databases' or select specific ones. Leave user/password empty for socket auth."
),
@@ -204,7 +212,6 @@ SCREEN_DOCS = {
"\n"
"[bold]Buttons:[/bold]\n"
" [bold]View[/bold] - Display the selected log file contents in the viewer below.\n"
" [bold]Status[/bold] - Show a summary of recent backup activity.\n"
"\n"
"[bold]Status detection:[/bold]\n"
" - [green]Success[/green] - 'Backup completed' found, no errors.\n"

View File

@@ -73,6 +73,19 @@ Select {
margin: 0 0 1 0;
}
#settings-screen Input,
#settings-screen Select {
margin: 0;
}
#settings-screen Static {
margin: 1 0 0 0;
}
#settings-screen #screen-title {
margin: 0;
}
SelectionList {
height: auto;
max-height: 8;
@@ -120,6 +133,7 @@ SelectionList {
#targets-buttons,
#remotes-buttons,
#rt-buttons,
#log-pager-buttons,
#logs-buttons,
#snapshots-buttons,
#sched-buttons,
@@ -138,6 +152,7 @@ SelectionList {
#targets-buttons Button,
#remotes-buttons Button,
#rt-buttons Button,
#log-pager-buttons Button,
#logs-buttons Button,
#snapshots-buttons Button,
#sched-buttons Button,
@@ -148,6 +163,12 @@ SelectionList {
margin: 0 1 0 0;
}
#log-page-info {
width: auto;
padding: 0 1;
content-align: center middle;
}
/* Dialogs */
#confirm-dialog {
width: 80%;
@@ -177,6 +198,25 @@ SelectionList {
margin: 0 1 0 0;
}
/* Help modal */
HelpModal {
align: center middle;
}
#help-modal {
width: 90%;
max-width: 70;
height: 80%;
padding: 1 2;
background: $panel;
border: thick $accent;
}
#help-modal #help-close {
margin: 1 0 0 0;
width: auto;
}
/* Folder picker */
#folder-picker {
width: 90%;

View File

@@ -323,12 +323,13 @@ class JobManager:
text = Path(log_file).read_text()
if not text.strip():
return None
for marker in ("FATAL:", "ERROR:", "failed", "Failed"):
if marker in text:
return 1
# Look for success indicators
if "completed" in text.lower() or "Backup Summary" in text:
# Check success markers first — these are definitive
if "Backup completed" in text or "Backup Summary" in text:
return 0
# Only match structured log lines for errors, not rsync file listings
for line in text.splitlines():
if "[FATAL]" in line or "[ERROR]" in line:
return 1
except OSError:
return None
return None

View File

@@ -12,6 +12,20 @@ class Target:
pre_hook: str = ""
post_hook: str = ""
enabled: str = "yes"
source_type: str = "local"
source_host: str = ""
source_port: str = "22"
source_user: str = "root"
source_auth_method: str = "key"
source_key: str = ""
source_password: str = ""
source_s3_bucket: str = ""
source_s3_region: str = "us-east-1"
source_s3_endpoint: str = ""
source_s3_access_key_id: str = ""
source_s3_secret_access_key: str = ""
source_gdrive_sa_file: str = ""
source_gdrive_root_folder_id: str = ""
mysql_enabled: str = "no"
mysql_mode: str = "all"
mysql_databases: str = ""
@@ -23,7 +37,7 @@ class Target:
mysql_extra_opts: str = "--single-transaction --routines --triggers"
def to_conf(self) -> dict[str, str]:
return {
data = {
"TARGET_NAME": self.name,
"TARGET_FOLDERS": self.folders,
"TARGET_EXCLUDE": self.exclude,
@@ -33,6 +47,31 @@ class Target:
"TARGET_PRE_HOOK": self.pre_hook,
"TARGET_POST_HOOK": self.post_hook,
"TARGET_ENABLED": self.enabled,
"TARGET_SOURCE_TYPE": self.source_type,
}
if self.source_type == "ssh":
data.update({
"TARGET_SOURCE_HOST": self.source_host,
"TARGET_SOURCE_PORT": self.source_port,
"TARGET_SOURCE_USER": self.source_user,
"TARGET_SOURCE_AUTH_METHOD": self.source_auth_method,
"TARGET_SOURCE_KEY": self.source_key,
"TARGET_SOURCE_PASSWORD": self.source_password,
})
elif self.source_type == "s3":
data.update({
"TARGET_SOURCE_S3_BUCKET": self.source_s3_bucket,
"TARGET_SOURCE_S3_REGION": self.source_s3_region,
"TARGET_SOURCE_S3_ENDPOINT": self.source_s3_endpoint,
"TARGET_SOURCE_S3_ACCESS_KEY_ID": self.source_s3_access_key_id,
"TARGET_SOURCE_S3_SECRET_ACCESS_KEY": self.source_s3_secret_access_key,
})
elif self.source_type == "gdrive":
data.update({
"TARGET_SOURCE_GDRIVE_SERVICE_ACCOUNT_FILE": self.source_gdrive_sa_file,
"TARGET_SOURCE_GDRIVE_ROOT_FOLDER_ID": self.source_gdrive_root_folder_id,
})
data.update({
"TARGET_MYSQL_ENABLED": self.mysql_enabled,
"TARGET_MYSQL_MODE": self.mysql_mode,
"TARGET_MYSQL_DATABASES": self.mysql_databases,
@@ -42,7 +81,8 @@ class Target:
"TARGET_MYSQL_HOST": self.mysql_host,
"TARGET_MYSQL_PORT": self.mysql_port,
"TARGET_MYSQL_EXTRA_OPTS": self.mysql_extra_opts,
}
})
return data
@classmethod
def from_conf(cls, name: str, data: dict[str, str]) -> "Target":
@@ -56,6 +96,20 @@ class Target:
pre_hook=data.get("TARGET_PRE_HOOK", ""),
post_hook=data.get("TARGET_POST_HOOK", ""),
enabled=data.get("TARGET_ENABLED", "yes"),
source_type=data.get("TARGET_SOURCE_TYPE", "local"),
source_host=data.get("TARGET_SOURCE_HOST", ""),
source_port=data.get("TARGET_SOURCE_PORT", "22"),
source_user=data.get("TARGET_SOURCE_USER", "root"),
source_auth_method=data.get("TARGET_SOURCE_AUTH_METHOD", "key"),
source_key=data.get("TARGET_SOURCE_KEY", ""),
source_password=data.get("TARGET_SOURCE_PASSWORD", ""),
source_s3_bucket=data.get("TARGET_SOURCE_S3_BUCKET", ""),
source_s3_region=data.get("TARGET_SOURCE_S3_REGION", "us-east-1"),
source_s3_endpoint=data.get("TARGET_SOURCE_S3_ENDPOINT", ""),
source_s3_access_key_id=data.get("TARGET_SOURCE_S3_ACCESS_KEY_ID", ""),
source_s3_secret_access_key=data.get("TARGET_SOURCE_S3_SECRET_ACCESS_KEY", ""),
source_gdrive_sa_file=data.get("TARGET_SOURCE_GDRIVE_SERVICE_ACCOUNT_FILE", ""),
source_gdrive_root_folder_id=data.get("TARGET_SOURCE_GDRIVE_ROOT_FOLDER_ID", ""),
mysql_enabled=data.get("TARGET_MYSQL_ENABLED", "no"),
mysql_mode=data.get("TARGET_MYSQL_MODE", "all"),
mysql_databases=data.get("TARGET_MYSQL_DATABASES", ""),

View File

@@ -1,5 +1,4 @@
import re
from datetime import datetime
from pathlib import Path
from textual.app import ComposeResult
@@ -23,16 +22,26 @@ def _format_log_name(name: str) -> tuple[str, str]:
def _detect_log_status(filepath: Path) -> str:
"""Determine backup status from log file content."""
"""Determine backup status from log file content.
Only reads last 100 KB for efficiency on large files.
"""
try:
text = filepath.read_text()
size = filepath.stat().st_size
if size == 0:
return "Empty"
with open(filepath, "r") as f:
if size > 102400:
f.seek(size - 102400)
f.readline()
tail = f.read()
except OSError:
return "?"
if not text.strip():
if not tail.strip():
return "Empty"
has_error = "[ERROR]" in text or "[FATAL]" in text
has_completed = "Backup completed" in text or "Restore completed" in text
has_lock_released = "Lock released" in text
has_error = "[ERROR]" in tail or "[FATAL]" in tail
has_completed = "Backup completed" in tail or "Restore completed" in tail
has_lock_released = "Lock released" in tail
if has_completed and not has_error:
return "Success"
if has_error:
@@ -42,9 +51,29 @@ def _detect_log_status(filepath: Path) -> str:
return "Interrupted"
def _build_line_index(filepath: Path) -> list[int]:
"""Build an index of byte offsets for each line start. Fast even for large files."""
offsets = [0]
with open(filepath, "rb") as f:
while True:
chunk = f.read(1024 * 1024)
if not chunk:
break
base = offsets[-1] if not offsets else f.tell() - len(chunk)
start = 0
while True:
pos = chunk.find(b"\n", start)
if pos == -1:
break
offsets.append(base + pos + 1)
start = pos + 1
return offsets
class LogsScreen(Screen):
BINDINGS = [("escape", "go_back", "Back")]
LINES_PER_PAGE = 200
def compose(self) -> ComposeResult:
yield Header(show_clock=True)
@@ -54,15 +83,30 @@ class LogsScreen(Screen):
yield DataTable(id="logs-table")
with Horizontal(id="logs-buttons"):
yield Button("View", variant="primary", id="btn-view")
yield Button("Status", id="btn-status")
yield Button("Back", id="btn-back")
with Horizontal(id="log-pager-buttons"):
yield Button("◀ Prev", id="btn-prev-page")
yield Static("", id="log-page-info")
yield Button("Next ▶", id="btn-next-page")
yield RichLog(id="log-viewer", wrap=True, highlight=True)
yield DocsPanel.for_screen("logs-screen")
yield Footer()
def on_mount(self) -> None:
self._log_filepath: Path | None = None
self._line_offsets: list[int] = []
self._total_lines: int = 0
self._current_page: int = 0
self._total_pages: int = 0
self._hide_pager()
self._refresh_table()
def _hide_pager(self) -> None:
self.query_one("#log-pager-buttons").display = False
def _show_pager(self) -> None:
self.query_one("#log-pager-buttons").display = self._total_pages > 1
def _refresh_table(self) -> None:
table = self.query_one("#logs-table", DataTable)
table.clear(columns=True)
@@ -95,56 +139,59 @@ class LogsScreen(Screen):
elif event.button.id == "btn-view":
name = self._selected_log()
if name:
self._view_log(name)
self._open_log(name)
else:
self.notify("Select a log file first", severity="warning")
elif event.button.id == "btn-status":
self._show_status()
elif event.button.id == "btn-prev-page":
if self._current_page > 0:
self._current_page -= 1
self._render_page()
elif event.button.id == "btn-next-page":
if self._current_page < self._total_pages - 1:
self._current_page += 1
self._render_page()
def _view_log(self, name: str) -> None:
def _open_log(self, name: str) -> None:
filepath = (Path(LOG_DIR) / name).resolve()
if not filepath.is_relative_to(Path(LOG_DIR).resolve()):
self.notify("Invalid log path", severity="error")
return
if not filepath.is_file():
viewer = self.query_one("#log-viewer", RichLog)
viewer.clear()
if filepath.is_file():
content = filepath.read_text()
viewer.write(content)
else:
viewer.write(f"File not found: {filepath}")
self._hide_pager()
return
self._log_filepath = filepath
self._line_offsets = _build_line_index(filepath)
self._total_lines = max(len(self._line_offsets) - 1, 1)
self._total_pages = max(1, (self._total_lines + self.LINES_PER_PAGE - 1) // self.LINES_PER_PAGE)
# Start at last page (most recent output)
self._current_page = self._total_pages - 1
self._show_pager()
self._render_page()
def _show_status(self) -> None:
def _render_page(self) -> None:
viewer = self.query_one("#log-viewer", RichLog)
viewer.clear()
log_dir = Path(LOG_DIR)
viewer.write("Backup Status Overview")
viewer.write("=" * 40)
if not log_dir.is_dir():
viewer.write("Log directory does not exist.")
if not self._log_filepath:
return
logs = sorted(log_dir.glob("gniza-*.log"), key=lambda p: p.stat().st_mtime, reverse=True)
if logs:
latest = logs[0]
from datetime import datetime
mtime = datetime.fromtimestamp(latest.stat().st_mtime)
viewer.write(f"Last log: {mtime.strftime('%Y-%m-%d %H:%M:%S')}")
last_line = ""
with open(latest) as f:
for line in f:
last_line = line.rstrip()
if last_line:
viewer.write(f"Last entry: {last_line}")
else:
viewer.write("No backup logs found.")
viewer.write(f"Log files: {len(logs)}")
total = sum(f.stat().st_size for f in logs)
if total >= 1048576:
viewer.write(f"Total size: {total / 1048576:.1f} MB")
elif total >= 1024:
viewer.write(f"Total size: {total / 1024:.1f} KB")
else:
viewer.write(f"Total size: {total} B")
start_line = self._current_page * self.LINES_PER_PAGE
end_line = min(start_line + self.LINES_PER_PAGE, self._total_lines)
# Seek to the right byte offset and read the lines
start_byte = self._line_offsets[start_line]
end_byte = self._line_offsets[end_line] if end_line < len(self._line_offsets) else self._log_filepath.stat().st_size
with open(self._log_filepath, "r", errors="replace") as f:
f.seek(start_byte)
chunk = f.read(end_byte - start_byte)
for line in chunk.splitlines():
viewer.write(line)
# Update page info
page_info = self.query_one("#log-page-info", Static)
page_info.update(f" Page {self._current_page + 1}/{self._total_pages} ")
# Update button states
self.query_one("#btn-prev-page", Button).disabled = self._current_page == 0
self.query_one("#btn-next-page", Button).disabled = self._current_page >= self._total_pages - 1
def action_go_back(self) -> None:
self.app.pop_screen()

View File

@@ -4,6 +4,8 @@ from textual.widgets import Header, Footer, Static, OptionList
from textual.widgets.option_list import Option
from textual.containers import Horizontal, Vertical
from tui.jobs import job_manager
LOGO = """\
[green]▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
▓▓▓▓▓▓▓▓▓▓▓▓ ▓▓▓▓▓▓▓▓▓▓▓
@@ -39,6 +41,8 @@ MENU_ITEMS = [
("quit", "Quit"),
]
SPINNER_FRAMES = ["", "", "", "", "", "", "", "", "", ""]
class MainMenuScreen(Screen):
@@ -59,6 +63,9 @@ class MainMenuScreen(Screen):
def on_mount(self) -> None:
self._update_layout()
self.query_one("#menu-list", OptionList).focus()
self._spinner_idx = 0
self._update_running_label()
self._spinner_timer = self.set_interval(1, self._tick_spinner)
def on_resize(self) -> None:
self._update_layout()
@@ -77,6 +84,25 @@ class MainMenuScreen(Screen):
layout.styles.align = ("center", "middle")
layout.styles.overflow_y = "hidden"
def _tick_spinner(self) -> None:
    """Step the spinner animation one frame and redraw the menu label."""
    advanced = self._spinner_idx + 1
    self._spinner_idx = advanced % len(SPINNER_FRAMES)
    self._update_running_label()
def _update_running_label(self) -> None:
    """Rewrite the "Running Tasks" menu entry with the live job count.

    When jobs are active the label gains a spinner frame and the count;
    otherwise it reverts to the plain text.
    """
    active = job_manager.running_count()
    option_list = self.query_one("#menu-list", OptionList)
    # Scan for the running_tasks option and replace its prompt in place.
    for position in range(option_list.option_count):
        option = option_list.get_option_at_index(position)
        if option.id != "running_tasks":
            continue
        if active > 0:
            frame = SPINNER_FRAMES[self._spinner_idx]
            prompt = f"{frame} Running Tasks ({active})"
        else:
            prompt = "Running Tasks"
        option_list.replace_option_prompt(option.id, prompt)
        break
def on_option_list_option_selected(self, event: OptionList.OptionSelected) -> None:
option_id = event.option.id
if option_id == "quit":

View File

@@ -12,6 +12,7 @@ from tui.jobs import job_manager
from tui.widgets import ConfirmDialog, DocsPanel
_PROGRESS_RE = re.compile(r"(\d+)%")
_SPINNER = ["", "", "", "", "", "", "", "", "", ""]
class RunningTasksScreen(Screen):
@@ -39,8 +40,9 @@ class RunningTasksScreen(Screen):
table = self.query_one("#rt-table", DataTable)
table.add_columns("Status", "Job", "Started", "Duration")
table.cursor_type = "row"
self._spinner_idx = 0
self._refresh_table()
self._timer = self.set_interval(1, self._refresh_table)
self._timer = self.set_interval(1, self._tick)
self._log_timer: Timer | None = None
self._viewing_job_id: str | None = None
self._log_file_pos: int = 0
@@ -60,14 +62,19 @@ class RunningTasksScreen(Screen):
hours, m = divmod(mins, 60)
return f"{hours}h {m}m"
def _tick(self) -> None:
    """Advance the spinner one frame, then refresh the jobs table."""
    next_frame = self._spinner_idx + 1
    self._spinner_idx = next_frame % len(_SPINNER)
    self._refresh_table()
def _refresh_table(self) -> None:
job_manager.check_reconnected()
table = self.query_one("#rt-table", DataTable)
old_row = table.cursor_coordinate.row if table.row_count > 0 else 0
table.clear()
spinner = _SPINNER[self._spinner_idx]
for job in job_manager.list_jobs():
if job.status == "running":
icon = "... "
icon = f" {spinner} "
elif job.status == "success":
icon = " ok "
elif job.status == "unknown":
@@ -109,17 +116,25 @@ class RunningTasksScreen(Screen):
progress = self.query_one("#rt-progress", ProgressBar)
label = self.query_one("#rt-progress-label", Static)
progress.update(progress=0)
# Load existing content from log file
# Load only the tail of the log file (like tail -f)
if job._log_file and Path(job._log_file).is_file():
try:
raw = Path(job._log_file).read_bytes()
self._log_file_pos = len(raw)
content = raw.decode(errors="replace")
# Show only last 100 lines
lines = content.splitlines()
if len(lines) > 100:
log_viewer.write(f"--- showing last 100 of {len(lines)} lines ---")
content = "\n".join(lines[-100:])
self._process_log_content(content, log_viewer)
except OSError:
pass
elif job.output:
for line in job.output:
tail = job.output[-100:] if len(job.output) > 100 else job.output
if len(job.output) > 100:
log_viewer.write(f"--- showing last 100 of {len(job.output)} lines ---")
for line in tail:
log_viewer.write(line)
# Show/hide progress bar based on job status
is_running = job.status == "running"

View File

@@ -34,6 +34,44 @@ class TargetEditScreen(Screen):
if self._is_new:
yield Static("Name:")
yield Input(value="", placeholder="Target name", id="te-name")
yield Static("--- Source ---", classes="section-label")
yield Static("Source Type:")
yield Select(
[("Local", "local"), ("SSH", "ssh"), ("S3", "s3"), ("Google Drive", "gdrive")],
value=target.source_type,
id="te-source-type",
)
yield Static("Source Host:", classes="source-field source-ssh-field")
yield Input(value=target.source_host, placeholder="hostname or IP", id="te-source-host", classes="source-field source-ssh-field")
yield Static("Source Port:", classes="source-field source-ssh-field")
yield Input(value=target.source_port, placeholder="22", id="te-source-port", classes="source-field source-ssh-field")
yield Static("Source User:", classes="source-field source-ssh-field")
yield Input(value=target.source_user, placeholder="root", id="te-source-user", classes="source-field source-ssh-field")
yield Static("Auth Method:", classes="source-field source-ssh-field")
yield Select(
[("SSH Key", "key"), ("Password", "password")],
value=target.source_auth_method,
id="te-source-auth-method",
classes="source-field source-ssh-field",
)
yield Static("SSH Key Path:", classes="source-field source-ssh-field source-key-field")
yield Input(value=target.source_key, placeholder="/root/.ssh/id_rsa", id="te-source-key", classes="source-field source-ssh-field source-key-field")
yield Static("Password:", classes="source-field source-ssh-field source-password-field")
yield Input(value=target.source_password, placeholder="SSH password", password=True, id="te-source-password", classes="source-field source-ssh-field source-password-field")
yield Static("S3 Bucket:", classes="source-field source-s3-field")
yield Input(value=target.source_s3_bucket, placeholder="my-bucket", id="te-source-s3-bucket", classes="source-field source-s3-field")
yield Static("S3 Region:", classes="source-field source-s3-field")
yield Input(value=target.source_s3_region, placeholder="us-east-1", id="te-source-s3-region", classes="source-field source-s3-field")
yield Static("S3 Endpoint:", classes="source-field source-s3-field")
yield Input(value=target.source_s3_endpoint, placeholder="https://s3.amazonaws.com", id="te-source-s3-endpoint", classes="source-field source-s3-field")
yield Static("S3 Access Key:", classes="source-field source-s3-field")
yield Input(value=target.source_s3_access_key_id, placeholder="AKIA...", id="te-source-s3-access-key", classes="source-field source-s3-field")
yield Static("S3 Secret Key:", classes="source-field source-s3-field")
yield Input(value=target.source_s3_secret_access_key, placeholder="secret", password=True, id="te-source-s3-secret-key", classes="source-field source-s3-field")
yield Static("Service Account File:", classes="source-field source-gdrive-field")
yield Input(value=target.source_gdrive_sa_file, placeholder="/path/to/sa.json", id="te-source-gdrive-sa-file", classes="source-field source-gdrive-field")
yield Static("Root Folder ID:", classes="source-field source-gdrive-field")
yield Input(value=target.source_gdrive_root_folder_id, placeholder="folder ID", id="te-source-gdrive-root-folder-id", classes="source-field source-gdrive-field")
yield Static("Folders (comma-separated):")
yield Input(value=target.folders, placeholder="/path1,/path2", id="te-folders")
yield Button("Browse...", id="btn-browse")
@@ -91,10 +129,13 @@ class TargetEditScreen(Screen):
def on_mount(self) -> None:
    """Sync conditional field visibility with the initial select values."""
    self._update_mysql_visibility()
    self._update_source_visibility()
def on_select_changed(self, event: Select.Changed) -> None:
    """Refresh dependent-field visibility when a relevant dropdown changes."""
    changed = event.select.id
    if changed in {"te-mysql-enabled", "te-mysql-mode"}:
        self._update_mysql_visibility()
    elif changed in {"te-source-type", "te-source-auth-method"}:
        self._update_source_visibility()
def _update_mysql_visibility(self) -> None:
enabled = str(self.query_one("#te-mysql-enabled", Select).value)
@@ -108,6 +149,31 @@ class TargetEditScreen(Screen):
for w in self.query(".mysql-all-field"):
w.display = mode == "all"
def _update_source_visibility(self) -> None:
    """Show only the source-config fields relevant to the chosen source type.

    Hides every ``.source-field`` widget, then reveals the group matching
    the selected type; for SSH sources the key/password inputs additionally
    follow the selected auth method. The local "Browse..." button is hidden
    for any remote source.
    """
    selected = str(self.query_one("#te-source-type", Select).value)
    # Start from a clean slate: all source-specific fields hidden.
    for widget in self.query(".source-field"):
        widget.display = False
    group_for_type = {
        "ssh": ".source-ssh-field",
        "s3": ".source-s3-field",
        "gdrive": ".source-gdrive-field",
    }
    group = group_for_type.get(selected)
    if group is not None:
        for widget in self.query(group):
            widget.display = True
    if selected == "ssh":
        # Key vs. password fields depend on the chosen auth method.
        auth = str(self.query_one("#te-source-auth-method", Select).value)
        for widget in self.query(".source-key-field"):
            widget.display = auth == "key"
        for widget in self.query(".source-password-field"):
            widget.display = auth == "password"
    # Browsing local paths only makes sense for a local source.
    self.query_one("#btn-browse", Button).display = selected == "local"
def on_button_pressed(self, event: Button.Pressed) -> None:
if event.button.id == "btn-cancel":
self.dismiss(None)
@@ -148,9 +214,13 @@ class TargetEditScreen(Screen):
folders = self.query_one("#te-folders", Input).value.strip()
mysql_enabled = str(self.query_one("#te-mysql-enabled", Select).value)
if not folders and mysql_enabled != "yes":
source_type = str(self.query_one("#te-source-type", Select).value)
if not folders and mysql_enabled != "yes" and source_type == "local":
self.notify("At least one folder or MySQL backup is required", severity="error")
return
if source_type != "local" and not folders:
self.notify("At least one remote path is required", severity="error")
return
target = Target(
name=name,
@@ -162,6 +232,20 @@ class TargetEditScreen(Screen):
pre_hook=self.query_one("#te-prehook", Input).value.strip(),
post_hook=self.query_one("#te-posthook", Input).value.strip(),
enabled=str(self.query_one("#te-enabled", Select).value),
source_type=source_type,
source_host=self.query_one("#te-source-host", Input).value.strip(),
source_port=self.query_one("#te-source-port", Input).value.strip(),
source_user=self.query_one("#te-source-user", Input).value.strip(),
source_auth_method=str(self.query_one("#te-source-auth-method", Select).value),
source_key=self.query_one("#te-source-key", Input).value.strip(),
source_password=self.query_one("#te-source-password", Input).value.strip(),
source_s3_bucket=self.query_one("#te-source-s3-bucket", Input).value.strip(),
source_s3_region=self.query_one("#te-source-s3-region", Input).value.strip(),
source_s3_endpoint=self.query_one("#te-source-s3-endpoint", Input).value.strip(),
source_s3_access_key_id=self.query_one("#te-source-s3-access-key", Input).value.strip(),
source_s3_secret_access_key=self.query_one("#te-source-s3-secret-key", Input).value.strip(),
source_gdrive_sa_file=self.query_one("#te-source-gdrive-sa-file", Input).value.strip(),
source_gdrive_root_folder_id=self.query_one("#te-source-gdrive-root-folder-id", Input).value.strip(),
mysql_enabled=mysql_enabled,
mysql_mode=str(self.query_one("#te-mysql-mode", Select).value),
mysql_databases=self.query_one("#te-mysql-databases", Input).value.strip(),

View File

@@ -134,6 +134,7 @@
#terminal .xterm-screen {
width: 100% !important;
}
</style>
<script>
function getStartUrl() {

View File

@@ -3,4 +3,4 @@ from tui.widgets.file_picker import FilePicker
from tui.widgets.confirm_dialog import ConfirmDialog
from tui.widgets.operation_log import OperationLog
from tui.widgets.snapshot_browser import SnapshotBrowser
from tui.widgets.docs_panel import DocsPanel
from tui.widgets.docs_panel import DocsPanel, HelpModal

View File

@@ -1,11 +1,35 @@
from textual.containers import VerticalScroll
from textual.widgets import Static
from textual.screen import ModalScreen
from textual.widgets import Static, Button
from textual.containers import Vertical
from tui.docs import SCREEN_DOCS
class HelpModal(ModalScreen[None]):
    """Modal overlay that displays a screen's help text with a close button.

    Dismissed (with ``None``) by Escape, F1, or pressing the Close button.
    """

    BINDINGS = [("escape", "close", "Close"), ("f1", "close", "Close")]

    def __init__(self, content: str):
        """Store the markup/text to render in the modal body."""
        super().__init__()
        self._content = content

    def compose(self):
        """Build the scrollable help layout: title, body, close button."""
        with VerticalScroll(id="help-modal"):
            yield Static("[bold underline]Help[/]", id="docs-title")
            yield Static(self._content, id="docs-body")
            yield Button("Close", id="help-close")

    def on_button_pressed(self, event: Button.Pressed) -> None:
        # Only the Close button exists, so any press dismisses the modal.
        self.dismiss(None)

    def action_close(self) -> None:
        """Key-binding target: dismiss the modal."""
        self.dismiss(None)
class DocsPanel(VerticalScroll):
DEFAULT_CSS = """
DocsPanel {
display: none;
width: 30%;
min-width: 30;
border-left: solid $accent;
@@ -23,27 +47,16 @@ class DocsPanel(VerticalScroll):
yield Static(self._content, id="docs-body")
def on_mount(self) -> None:
    """Apply the responsive layout and schedule the visibility check.

    NOTE(review): the block previously registered the visibility check
    twice (a stale 0.3s timer left next to the newer 0.8s one — likely
    merge/diff residue); only the 0.8s timer is kept.
    """
    self.app.call_later(self._apply_layout)
    # Delay check — at mount time the terminal may not have reported
    # its real size yet (especially in web/mobile via textual-serve).
    self.set_timer(0.8, self._check_show)
def on_resize(self) -> None:
    """Re-evaluate layout and panel visibility on terminal resize."""
    self._apply_layout()
    self._check_show()
def _apply_layout(self) -> None:
    """Switch between side-panel and stacked layouts based on width.

    Below 80 columns the docs panel stacks under the content at full
    width (capped at 40% height); otherwise it sits as a 30%-wide
    right-hand column.
    """
    terminal_width = self.app.size.width
    try:
        host = self.screen.query_one(".screen-with-docs")
    except Exception:
        # Screen doesn't use the docs layout container; nothing to adapt.
        return
    if terminal_width < 80:
        # Narrow: stack vertically, docs take the full width.
        host.styles.layout = "vertical"
        self.styles.width = "100%"
        self.styles.min_width = None
        self.styles.max_height = "40%"
    else:
        # Wide: side-by-side, docs as a right-hand column.
        host.styles.layout = "horizontal"
        self.styles.width = "30%"
        self.styles.min_width = 30
        self.styles.max_height = None
def _check_show(self) -> None:
    # Only show the docs panel on terminals at least 80 columns wide.
    self.display = self.app.size.width >= 80
@classmethod
def for_screen(cls, screen_id: str) -> "DocsPanel":