diff --git a/server/src/wled_controller/api/routes/system.py b/server/src/wled_controller/api/routes/system.py
index b4ed93a..c098aff 100644
--- a/server/src/wled_controller/api/routes/system.py
+++ b/server/src/wled_controller/api/routes/system.py
@@ -6,11 +6,12 @@ from datetime import datetime
 from typing import Optional
 
 import psutil
-from fastapi import APIRouter, HTTPException, Query
+from fastapi import APIRouter, Depends, HTTPException, Query
 from pydantic import BaseModel
 
 from wled_controller import __version__
 from wled_controller.api.auth import AuthRequired
+from wled_controller.api.dependencies import get_processor_manager
 from wled_controller.api.schemas.system import (
     DisplayInfo,
     DisplayListResponse,
@@ -192,6 +193,19 @@ def get_system_performance(_: AuthRequired):
     )
 
 
+@router.get("/api/v1/system/metrics-history", tags=["Config"])
+async def get_metrics_history(
+    _: AuthRequired,
+    manager=Depends(get_processor_manager),
+):
+    """Return the last ~2 minutes of system and per-target metrics.
+
+    Used by the dashboard to seed charts on page load so history
+    survives browser refreshes.
+    """
+    return manager.metrics_history.get_history()
+
+
 # ---------------------------------------------------------------------------
 # ADB helpers (for Android / scrcpy engine)
 # ---------------------------------------------------------------------------
diff --git a/server/src/wled_controller/core/processing/metrics_history.py b/server/src/wled_controller/core/processing/metrics_history.py
new file mode 100644
index 0000000..3870e73
--- /dev/null
+++ b/server/src/wled_controller/core/processing/metrics_history.py
@@ -0,0 +1,123 @@
+"""Server-side ring buffer for system and per-target metrics."""
+
+import asyncio
+from collections import deque
+from datetime import datetime, timezone
+from typing import Dict, Optional
+
+from wled_controller.utils import get_logger
+
+logger = get_logger(__name__)
+
+MAX_SAMPLES = 120  # ~2 minutes at 1-second interval
+SAMPLE_INTERVAL = 1.0  # seconds
+
+
+def _collect_system_snapshot() -> dict:
+    """Collect CPU/RAM/GPU metrics (blocking — run in thread pool).
+
+    Returns a dict suitable for direct JSON serialization.
+    """
+    import psutil
+
+    mem = psutil.virtual_memory()
+    snapshot = {
+        "t": datetime.now(timezone.utc).isoformat(),  # tz-aware; utcnow() is deprecated
+        "cpu": psutil.cpu_percent(interval=None),
+        "ram_pct": mem.percent,
+        "ram_used": round(mem.used / 1024 / 1024, 1),
+        "ram_total": round(mem.total / 1024 / 1024, 1),
+        "gpu_util": None,
+        "gpu_temp": None,
+    }
+
+    try:
+        from wled_controller.api.routes.system import _nvml_available, _nvml, _nvml_handle
+
+        if _nvml_available:
+            util = _nvml.nvmlDeviceGetUtilizationRates(_nvml_handle)
+            temp = _nvml.nvmlDeviceGetTemperature(_nvml_handle, _nvml.NVML_TEMPERATURE_GPU)
+            snapshot["gpu_util"] = float(util.gpu)
+            snapshot["gpu_temp"] = float(temp)
+    except Exception:
+        pass
+
+    return snapshot
+
+
+class MetricsHistory:
+    """In-memory ring buffer collecting system and per-target metrics."""
+
+    def __init__(self, processor_manager):
+        self._manager = processor_manager
+        self._system: deque = deque(maxlen=MAX_SAMPLES)
+        self._targets: Dict[str, deque] = {}
+        self._task: Optional[asyncio.Task] = None
+
+    async def start(self):
+        """Start the background sampling loop."""
+        if self._task and not self._task.done():
+            return
+        self._task = asyncio.create_task(self._sample_loop())
+        logger.info("Metrics history sampling started")
+
+    async def stop(self):
+        """Stop the background sampling loop."""
+        if self._task:
+            self._task.cancel()
+            try:
+                await self._task
+            except asyncio.CancelledError:
+                pass
+            self._task = None
+            logger.info("Metrics history sampling stopped")
+
+    async def _sample_loop(self):
+        """Sample system + target metrics every SAMPLE_INTERVAL seconds."""
+        while True:
+            try:
+                await self._sample()
+            except asyncio.CancelledError:
+                raise
+            except Exception as e:
+                logger.warning(f"Metrics sampling error: {e}")
+            await asyncio.sleep(SAMPLE_INTERVAL)
+
+    async def _sample(self):
+        """Collect one snapshot of system and target metrics."""
+        # System metrics (blocking psutil/nvml calls in thread pool)
+        sys_snap = await asyncio.to_thread(_collect_system_snapshot)
+        self._system.append(sys_snap)
+
+        # Per-target metrics from processor states
+        try:
+            all_states = self._manager.get_all_target_states()
+        except Exception:
+            all_states = {}
+
+        now = datetime.now(timezone.utc).isoformat()  # tz-aware; utcnow() is deprecated
+        active_ids = set()
+        for target_id, state in all_states.items():
+            active_ids.add(target_id)
+            if target_id not in self._targets:
+                self._targets[target_id] = deque(maxlen=MAX_SAMPLES)
+            if state.get("processing"):
+                self._targets[target_id].append({
+                    "t": now,
+                    "fps": state.get("fps_actual"),
+                    "fps_target": state.get("fps_target"),
+                    "timing": state.get("timing_total_ms"),
+                    "errors": state.get("errors_count", 0),
+                })
+
+        # Prune deques for targets no longer registered
+        for tid in list(self._targets.keys()):
+            if tid not in active_ids:
+                del self._targets[tid]
+
+    def get_history(self) -> dict:
+        """Return all history for the API response."""
+        return {
+            "system": list(self._system),
+            "targets": {tid: list(dq) for tid, dq in self._targets.items()},
+        }
diff --git a/server/src/wled_controller/core/processing/processor_manager.py b/server/src/wled_controller/core/processing/processor_manager.py
index 1384060..94b8f0d 100644
--- a/server/src/wled_controller/core/processing/processor_manager.py
+++ b/server/src/wled_controller/core/processing/processor_manager.py
@@ -16,6 +16,7 @@ from wled_controller.core.devices.led_client import (
 from wled_controller.core.audio.audio_capture import AudioCaptureManager
 from wled_controller.core.processing.live_stream_manager import LiveStreamManager
 from wled_controller.core.processing.color_strip_stream_manager import ColorStripStreamManager
+from wled_controller.core.processing.metrics_history import MetricsHistory
 from wled_controller.core.processing.value_stream import ValueStreamManager
 from wled_controller.core.capture.screen_overlay import OverlayManager
 from wled_controller.core.processing.target_processor import (
@@ -97,8 +98,13 @@ class ProcessorManager:
         ) if value_source_store else None
         self._overlay_manager = OverlayManager()
         self._event_queues: List[asyncio.Queue] = []
+        self._metrics_history = MetricsHistory(self)
         logger.info("Processor manager initialized")
 
+    @property
+    def metrics_history(self) -> MetricsHistory:
+        return self._metrics_history
+
     # ===== SHARED CONTEXT (passed to target processors) =====
 
     def _build_context(self) -> TargetContext:
@@ -718,6 +724,7 @@
 
     async def stop_all(self):
         """Stop processing and health monitoring for all targets and devices."""
+        await self._metrics_history.stop()
         await self.stop_health_monitoring()
 
         # Stop all processors
@@ -761,6 +768,7 @@
         self._health_monitoring_active = True
         for device_id in self._devices:
             self._start_device_health_check(device_id)
+        await self._metrics_history.start()
         logger.info("Started health monitoring for all devices")
 
     async def stop_health_monitoring(self):
diff --git a/server/src/wled_controller/static/js/features/dashboard.js b/server/src/wled_controller/static/js/features/dashboard.js
index 5ce1409..286f1a1 100644
--- a/server/src/wled_controller/static/js/features/dashboard.js
+++ b/server/src/wled_controller/static/js/features/dashboard.js
@@ -10,10 +10,9 @@ import { renderPerfSection, initPerfCharts, startPerfPolling, stopPerfPolling }
 import { startAutoRefresh } from './tabs.js';
 
 const DASHBOARD_COLLAPSED_KEY = 'dashboard_collapsed';
-const FPS_HISTORY_KEY = 'dashboard_fps_history';
-const MAX_FPS_SAMPLES = 30;
+const MAX_FPS_SAMPLES = 120;
 
-let _fpsHistory = _loadFpsHistory(); // { targetId: number[] }
+let _fpsHistory = {}; // { targetId: number[] }
 let _fpsCharts = {}; // { targetId: Chart }
 let _lastRunningIds = []; // sorted target IDs from previous render
 let _uptimeBase = {}; // { targetId: { seconds, timestamp } }
@@ -21,19 +20,6 @@ let _uptimeTimer = null;
 let _uptimeElements = {}; // { targetId: HTMLElement } — cached DOM refs
 let _metricsElements = new Map();
 
-function _loadFpsHistory() {
-  try {
-    const raw = sessionStorage.getItem(FPS_HISTORY_KEY);
-    if (raw) return JSON.parse(raw);
-  } catch {}
-  return {};
-}
-
-function _saveFpsHistory() {
-  try { sessionStorage.setItem(FPS_HISTORY_KEY, JSON.stringify(_fpsHistory)); }
-  catch {}
-}
-
 function _pushFps(targetId, value) {
   if (!_fpsHistory[targetId]) _fpsHistory[targetId] = [];
   _fpsHistory[targetId].push(value);
@@ -120,8 +106,26 @@ function _createFpsChart(canvasId, history, fpsTarget) {
   });
 }
 
-function _initFpsCharts(runningTargetIds) {
+async function _initFpsCharts(runningTargetIds) {
   _destroyFpsCharts();
+
+  // Seed FPS history from server ring buffer on first load
+  if (Object.keys(_fpsHistory).length === 0 && runningTargetIds.length > 0) {
+    try {
+      const resp = await fetch(`${API_BASE}/system/metrics-history`, { headers: getHeaders() });
+      if (resp.ok) {
+        const data = await resp.json();
+        const serverTargets = data.targets || {};
+        for (const id of runningTargetIds) {
+          const samples = serverTargets[id] || [];
+          _fpsHistory[id] = samples.map(s => s.fps).filter(v => v != null);
+        }
+      }
+    } catch {
+      // Silently ignore — charts will fill from polling
+    }
+  }
+
   // Clean up history for targets that are no longer running
   for (const id of Object.keys(_fpsHistory)) {
     if (!runningTargetIds.includes(id)) delete _fpsHistory[id];
@@ -133,7 +137,7 @@ function _initFpsCharts(runningTargetIds) {
     const fpsTarget = parseFloat(canvas.dataset.fpsTarget) || 30;
     _fpsCharts[id] = _createFpsChart(`dashboard-fps-${id}`, history, fpsTarget);
   }
-  _saveFpsHistory();
+
   _cacheMetricsElements(runningTargetIds);
 }
 
@@ -194,7 +198,7 @@ function _updateRunningMetrics(enrichedRunning) {
       }
     }
   }
-  _saveFpsHistory();
+
 }
 
 function _updateProfilesInPlace(profiles) {
@@ -412,7 +416,7 @@ export async function loadDashboard(forceFullRender = false) {
     ${_sectionContent('perf', renderPerfSection())}