Fix event loop blocking from perf endpoint and profile detection
- Change /api/v1/system/performance from `async def` to `def` so FastAPI runs the blocking psutil + NVML GPU queries in a thread pool instead of freezing the event loop (the endpoint is polled every 2s by the dashboard).
- Batch the profile engine's 3 separate `run_in_executor` detection calls into a single `_detect_all_sync()` call, reducing event-loop wake-ups.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -140,8 +140,13 @@ async def get_running_processes(_: AuthRequired):
|
||||
response_model=PerformanceResponse,
|
||||
tags=["Config"],
|
||||
)
|
||||
async def get_system_performance(_: AuthRequired):
|
||||
"""Get current system performance metrics (CPU, RAM, GPU)."""
|
||||
def get_system_performance(_: AuthRequired):
|
||||
"""Get current system performance metrics (CPU, RAM, GPU).
|
||||
|
||||
Uses sync ``def`` so FastAPI runs it in a thread pool — the psutil
|
||||
and NVML calls are blocking and would stall the event loop if run
|
||||
in an ``async def`` handler.
|
||||
"""
|
||||
mem = psutil.virtual_memory()
|
||||
|
||||
gpu = None
|
||||
|
||||
@@ -68,6 +68,23 @@ class ProfileEngine:
|
||||
async with self._eval_lock:
|
||||
await self._evaluate_all_locked()
|
||||
|
||||
def _detect_all_sync(
|
||||
self, needs_running: bool, needs_topmost: bool, needs_fullscreen: bool,
|
||||
) -> tuple:
|
||||
"""Run all platform detection in a single thread call.
|
||||
|
||||
Batching the three detection calls into one executor submission reduces
|
||||
event-loop wake-ups from 3 to 1, minimising asyncio.sleep() jitter in
|
||||
latency-sensitive processing loops.
|
||||
"""
|
||||
running_procs = self._detector._get_running_processes_sync() if needs_running else set()
|
||||
if needs_topmost:
|
||||
topmost_proc, topmost_fullscreen = self._detector._get_topmost_process_sync()
|
||||
else:
|
||||
topmost_proc, topmost_fullscreen = None, False
|
||||
fullscreen_procs = self._detector._get_fullscreen_processes_sync() if needs_fullscreen else set()
|
||||
return running_procs, topmost_proc, topmost_fullscreen, fullscreen_procs
|
||||
|
||||
async def _evaluate_all_locked(self) -> None:
|
||||
profiles = self._store.get_all_profiles()
|
||||
if not profiles:
|
||||
@@ -84,26 +101,18 @@ class ProfileEngine:
|
||||
mt = getattr(c, "match_type", "running")
|
||||
match_types_used.add(mt)
|
||||
|
||||
# WMI process enumeration (~3s) — only needed for "running" match type
|
||||
needs_running = "running" in match_types_used
|
||||
running_procs = (
|
||||
await self._detector.get_running_processes()
|
||||
if needs_running else set()
|
||||
)
|
||||
|
||||
# Foreground window check (<1ms) — needed for "topmost" and "topmost_fullscreen"
|
||||
needs_topmost = bool(match_types_used & {"topmost", "topmost_fullscreen"})
|
||||
if needs_topmost:
|
||||
topmost_proc, topmost_fullscreen = await self._detector.get_topmost_process()
|
||||
else:
|
||||
topmost_proc = None
|
||||
topmost_fullscreen = False
|
||||
|
||||
# Fullscreen window enumeration (<1ms) — only needed for "fullscreen"
|
||||
needs_fullscreen = "fullscreen" in match_types_used
|
||||
fullscreen_procs = (
|
||||
await self._detector.get_fullscreen_processes()
|
||||
if needs_fullscreen else set()
|
||||
|
||||
# Single executor call for all platform detection (avoids 3 separate
|
||||
# event-loop roundtrips that can jitter processing-loop timing)
|
||||
loop = asyncio.get_event_loop()
|
||||
running_procs, topmost_proc, topmost_fullscreen, fullscreen_procs = (
|
||||
await loop.run_in_executor(
|
||||
None, self._detect_all_sync,
|
||||
needs_running, needs_topmost, needs_fullscreen,
|
||||
)
|
||||
)
|
||||
|
||||
active_profile_ids = set()
|
||||
|
||||
Reference in New Issue
Block a user