Add adaptive FPS and honest device reachability during streaming

DDP uses fire-and-forget UDP, so when a WiFi device becomes overwhelmed
by sustained traffic, sends appear successful while the device is
actually unreachable. This adds:

- HTTP liveness probe (GET /json/info, 2s timeout) every 10s during
  streaming, exposed as device_streaming_reachable in target state
  (probe sketched after this list)
- Adaptive FPS (opt-in): exponential backoff while the device is
  unreachable, gradual recovery once it stabilizes, to find a
  sustainable send rate (see the controller sketch below)
- Honest health checks: removed the lie that forced device_online=true
  during streaming; now runs actual health checks regardless
- UI: target editor toggle for adaptive FPS, FPS display shows the
  effective rate when throttled, health dot reflects streaming
  reachability, red highlight when unreachable
- Auto-backup scheduling support in settings modal
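
For illustration, the streaming liveness probe could look roughly like
the following. This is a minimal sketch, not the shipped code: it
assumes aiohttp, and probe_device plus the two constants are
illustrative names.

import asyncio

import aiohttp

PROBE_INTERVAL_S = 10  # probe cadence while streaming (illustrative)
PROBE_TIMEOUT_S = 2    # GET /json/info must answer within this window

async def probe_device(session: aiohttp.ClientSession, host: str) -> bool:
    """Return True if the WLED device answers /json/info in time.

    DDP frames go out over fire-and-forget UDP, so only an HTTP probe
    like this reveals whether the device is actually reachable.
    """
    try:
        async with session.get(
            f"http://{host}/json/info",
            timeout=aiohttp.ClientTimeout(total=PROBE_TIMEOUT_S),
        ) as resp:
            return resp.status == 200
    except (aiohttp.ClientError, asyncio.TimeoutError):
        return False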
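
And a sketch of the adaptive-FPS controller, again illustrative: the
class and parameter names are assumptions, and the real backoff and
recovery constants may differ.

class AdaptiveFps:
    """Halve the send rate while the device is unreachable; creep back
    toward the configured target once probes succeed again."""

    def __init__(self, target_fps: float, min_fps: float = 1.0, recovery_step: float = 1.0):
        self.target_fps = target_fps
        self.min_fps = min_fps
        self.recovery_step = recovery_step
        self.effective_fps = target_fps  # the rate the UI shows when throttled

    def on_probe(self, reachable: bool) -> float:
        if not reachable:
            # Exponential backoff: each failed probe halves the rate.
            self.effective_fps = max(self.min_fps, self.effective_fps / 2)
        elif self.effective_fps < self.target_fps:
            # Gradual recovery: one small step per healthy probe.
            self.effective_fps = min(self.target_fps, self.effective_fps + self.recovery_step)
        return self.effective_fps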

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-26 20:22:58 +03:00
parent f8656b72a6
commit cadef971e7
23 changed files with 873 additions and 21 deletions


@@ -0,0 +1,220 @@
"""Auto-backup engine — periodic background backups of all configuration stores."""
import asyncio
import json
import os
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional
from wled_controller import __version__
from wled_controller.utils import atomic_write_json, get_logger
logger = get_logger(__name__)
DEFAULT_SETTINGS = {
"enabled": False,
"interval_hours": 24,
"max_backups": 10,
}
class AutoBackupEngine:
    """Creates periodic backups of all configuration stores."""

    def __init__(
        self,
        settings_path: Path,
        backup_dir: Path,
        store_map: Dict[str, str],  # backup key -> attribute name on storage_config
        storage_config: Any,
    ):
        self._settings_path = Path(settings_path)
        self._backup_dir = Path(backup_dir)
        self._store_map = store_map
        self._storage_config = storage_config
        self._task: Optional[asyncio.Task] = None
        self._last_backup_time: Optional[datetime] = None
        self._settings = self._load_settings()
        self._backup_dir.mkdir(parents=True, exist_ok=True)
    # ─── Settings persistence ──────────────────────────────────

    def _load_settings(self) -> dict:
        if self._settings_path.exists():
            try:
                with open(self._settings_path, "r", encoding="utf-8") as f:
                    data = json.load(f)
                return {**DEFAULT_SETTINGS, **data}
            except Exception as e:
                logger.warning(f"Failed to load auto-backup settings: {e}")
        return dict(DEFAULT_SETTINGS)

    def _save_settings(self) -> None:
        atomic_write_json(self._settings_path, {
            "enabled": self._settings["enabled"],
            "interval_hours": self._settings["interval_hours"],
            "max_backups": self._settings["max_backups"],
        })
    # ─── Lifecycle ─────────────────────────────────────────────

    async def start(self) -> None:
        if self._settings["enabled"]:
            self._start_loop()
            logger.info(
                f"Auto-backup engine started (every {self._settings['interval_hours']}h, "
                f"max {self._settings['max_backups']})"
            )
        else:
            logger.info("Auto-backup engine initialized (disabled)")

    async def stop(self) -> None:
        self._cancel_loop()
        logger.info("Auto-backup engine stopped")

    def _start_loop(self) -> None:
        self._cancel_loop()
        self._task = asyncio.create_task(self._backup_loop())

    def _cancel_loop(self) -> None:
        if self._task is not None:
            self._task.cancel()
            self._task = None
    async def _backup_loop(self) -> None:
        try:
            # Perform first backup immediately on start
            await self._perform_backup()
            self._prune_old_backups()
            interval_secs = self._settings["interval_hours"] * 3600
            while True:
                await asyncio.sleep(interval_secs)
                try:
                    await self._perform_backup()
                    self._prune_old_backups()
                except Exception as e:
                    logger.error(f"Auto-backup failed: {e}", exc_info=True)
        except asyncio.CancelledError:
            pass
    # ─── Backup operations ─────────────────────────────────────

    async def _perform_backup(self) -> None:
        # Run the blocking file I/O in a thread so the event loop stays responsive
        loop = asyncio.get_running_loop()
        await loop.run_in_executor(None, self._perform_backup_sync)

    def _perform_backup_sync(self) -> None:
        # Snapshot every configured store file into a single backup document
        stores = {}
        for store_key, config_attr in self._store_map.items():
            file_path = Path(getattr(self._storage_config, config_attr))
            if file_path.exists():
                with open(file_path, "r", encoding="utf-8") as f:
                    stores[store_key] = json.load(f)
            else:
                stores[store_key] = {}
        now = datetime.now(timezone.utc)
        backup = {
            "meta": {
                "format": "ledgrab-backup",
                "format_version": 1,
                "app_version": __version__,
                "created_at": now.isoformat(),
                "store_count": len(stores),
                "auto_backup": True,
            },
            "stores": stores,
        }
        timestamp = now.strftime("%Y-%m-%dT%H%M%S")
        filename = f"ledgrab-autobackup-{timestamp}.json"
        backup_path = self._backup_dir / filename
        content = json.dumps(backup, indent=2, ensure_ascii=False)
        backup_path.write_text(content, encoding="utf-8")
        self._last_backup_time = now
        logger.info(f"Auto-backup created: {filename}")
    def _prune_old_backups(self) -> None:
        max_backups = self._settings["max_backups"]
        files = sorted(self._backup_dir.glob("*.json"), key=lambda p: p.stat().st_mtime)
        excess = len(files) - max_backups
        if excess > 0:
            for f in files[:excess]:
                try:
                    f.unlink()
                    logger.info(f"Pruned old backup: {f.name}")
                except Exception as e:
                    logger.warning(f"Failed to prune {f.name}: {e}")
    # ─── Public API ────────────────────────────────────────────

    def get_settings(self) -> dict:
        next_backup = None
        if self._settings["enabled"] and self._last_backup_time:
            next_backup = (
                self._last_backup_time + timedelta(hours=self._settings["interval_hours"])
            ).isoformat()
        return {
            "enabled": self._settings["enabled"],
            "interval_hours": self._settings["interval_hours"],
            "max_backups": self._settings["max_backups"],
            "last_backup_time": self._last_backup_time.isoformat() if self._last_backup_time else None,
            "next_backup_time": next_backup,
        }

    async def update_settings(self, enabled: bool, interval_hours: float, max_backups: int) -> dict:
        self._settings["enabled"] = enabled
        self._settings["interval_hours"] = interval_hours
        self._settings["max_backups"] = max_backups
        self._save_settings()
        # Restart or stop the loop
        if enabled:
            self._start_loop()
            logger.info(
                f"Auto-backup enabled (every {interval_hours}h, max {max_backups})"
            )
        else:
            self._cancel_loop()
            logger.info("Auto-backup disabled")
        # Prune if max_backups was reduced
        self._prune_old_backups()
        return self.get_settings()
    def list_backups(self) -> List[dict]:
        backups = []
        for f in sorted(self._backup_dir.glob("*.json"), key=lambda p: p.stat().st_mtime, reverse=True):
            stat = f.stat()
            backups.append({
                "filename": f.name,
                "size_bytes": stat.st_size,
                "created_at": datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc).isoformat(),
            })
        return backups

    def delete_backup(self, filename: str) -> None:
        target = self.get_backup_path(filename)
        target.unlink()
        logger.info(f"Deleted backup: {filename}")

    def get_backup_path(self, filename: str) -> Path:
        # Validate filename to prevent path traversal
        if os.sep in filename or "/" in filename or ".." in filename:
            raise ValueError("Invalid filename")
        target = self._backup_dir / filename
        if not target.exists():
            raise FileNotFoundError(f"Backup not found: {filename}")
        return target
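
For context, wiring the engine into an application lifecycle might look
like this. A sketch only: the store keys, file paths, and
storage_config attributes are placeholders, not the project's real
configuration.

import asyncio
from pathlib import Path
from types import SimpleNamespace

async def main() -> None:
    storage_config = SimpleNamespace(           # stand-in for the real storage config
        targets_path="data/targets.json",
        presets_path="data/presets.json",
    )
    engine = AutoBackupEngine(
        settings_path=Path("data/auto_backup_settings.json"),
        backup_dir=Path("data/backups"),
        store_map={"targets": "targets_path", "presets": "presets_path"},
        storage_config=storage_config,
    )
    await engine.start()    # starts the loop only if auto-backup is enabled in settings
    try:
        ...                 # application runs
    finally:
        await engine.stop()

asyncio.run(main())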