WGC capture fixes + high-resolution timer pacing for all loops

- Fix WGC capture_frame() returning stale frames (80k "frames" in 2s)
  by tracking new-frame events; return None when no new frame has arrived
- Add draw_border config passthrough with Win11 22H2+ platform check
- Add high_resolution_timer() utility (timeBeginPeriod/EndPeriod)
- Switch all processing loops from time.time() to time.perf_counter()
- Wrap all loops with high_resolution_timer() for ~1ms sleep precision
- Add animation speed badges to static/gradient color strip cards

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-21 01:23:56 +03:00
parent 5004992f26
commit 84f063eee9
8 changed files with 514 additions and 444 deletions

View File

@@ -43,14 +43,22 @@ class WGCCaptureStream(CaptureStream):
try: try:
capture_cursor = self.config.get("capture_cursor", False) capture_cursor = self.config.get("capture_cursor", False)
draw_border = self.config.get("draw_border", None)
# WGC uses 1-based monitor indexing # WGC uses 1-based monitor indexing
wgc_monitor_index = self.display_index + 1 wgc_monitor_index = self.display_index + 1
self._capture_instance = self._wgc.WindowsCapture( # draw_border toggling requires Windows 11 22H2+ (build 22621+).
# On older builds, passing any value crashes the capture session,
# so we only pass it when the platform supports it.
wgc_kwargs = dict(
cursor_capture=capture_cursor, cursor_capture=capture_cursor,
monitor_index=wgc_monitor_index, monitor_index=wgc_monitor_index,
) )
if draw_border is not None and self._supports_border_toggle():
wgc_kwargs["draw_border"] = draw_border
self._capture_instance = self._wgc.WindowsCapture(**wgc_kwargs)
def on_frame_arrived(frame, capture_control): def on_frame_arrived(frame, capture_control):
try: try:
@@ -101,6 +109,16 @@ class WGCCaptureStream(CaptureStream):
logger.error(f"Failed to initialize WGC for display {self.display_index}: {e}", exc_info=True) logger.error(f"Failed to initialize WGC for display {self.display_index}: {e}", exc_info=True)
raise RuntimeError(f"Failed to initialize WGC for display {self.display_index}: {e}") raise RuntimeError(f"Failed to initialize WGC for display {self.display_index}: {e}")
@staticmethod
def _supports_border_toggle() -> bool:
"""Check if the platform supports WGC border toggle (Windows 11 22H2+, build 22621+)."""
try:
import platform
build = int(platform.version().split(".")[2])
return build >= 22621
except Exception:
return False
def _cleanup_internal(self) -> None: def _cleanup_internal(self) -> None:
"""Internal cleanup helper.""" """Internal cleanup helper."""
if self._capture_control: if self._capture_control:
@@ -137,12 +155,15 @@ class WGCCaptureStream(CaptureStream):
self.initialize() self.initialize()
try: try:
# Only return a frame when the callback has delivered a new one
if not self._frame_event.is_set():
return None
with self._frame_lock: with self._frame_lock:
if self._latest_frame is None: if self._latest_frame is None:
raise RuntimeError( return None
f"No frame available yet for display {self.display_index}."
)
frame = self._latest_frame frame = self._latest_frame
self._frame_event.clear()
logger.debug( logger.debug(
f"WGC captured display {self.display_index}: " f"WGC captured display {self.display_index}: "
@@ -208,7 +229,7 @@ class WGCEngine(CaptureEngine):
def get_default_config(cls) -> Dict[str, Any]: def get_default_config(cls) -> Dict[str, Any]:
return { return {
"capture_cursor": False, "capture_cursor": False,
"draw_border": False, "draw_border": None, # None = OS default; False = hide border (Win 11 22H2+ only)
} }
@classmethod @classmethod

View File

@@ -22,6 +22,7 @@ from wled_controller.core.capture.calibration import CalibrationConfig, PixelMap
from wled_controller.core.capture.screen_capture import extract_border_pixels from wled_controller.core.capture.screen_capture import extract_border_pixels
from wled_controller.core.processing.live_stream import LiveStream from wled_controller.core.processing.live_stream import LiveStream
from wled_controller.utils import get_logger from wled_controller.utils import get_logger
from wled_controller.utils.timer import high_resolution_timer
logger = get_logger(__name__) logger = get_logger(__name__)
@@ -259,131 +260,132 @@ class PictureColorStripStream(ColorStripStream):
"""Background thread: poll source, process, cache colors.""" """Background thread: poll source, process, cache colors."""
cached_frame = None cached_frame = None
while self._running: with high_resolution_timer():
loop_start = time.perf_counter() while self._running:
fps = self._fps loop_start = time.perf_counter()
frame_time = 1.0 / fps if fps > 0 else 1.0 fps = self._fps
frame_time = 1.0 / fps if fps > 0 else 1.0
try: try:
frame = self._live_stream.get_latest_frame() frame = self._live_stream.get_latest_frame()
if frame is None or frame is cached_frame: if frame is None or frame is cached_frame:
if (
frame is not None
and self._frame_interpolation
and self._interp_from is not None
and self._interp_to is not None
):
t = min(1.0, (loop_start - self._interp_start) / self._interp_duration)
alpha = int(t * 256)
led_colors = (
(256 - alpha) * self._interp_from.astype(np.uint16)
+ alpha * self._interp_to.astype(np.uint16)
) >> 8
led_colors = led_colors.astype(np.uint8)
if self._saturation != 1.0:
led_colors = _apply_saturation(led_colors, self._saturation)
if self._gamma != 1.0:
led_colors = self._gamma_lut[led_colors]
if self._brightness != 1.0:
led_colors = np.clip(
led_colors.astype(np.float32) * self._brightness, 0, 255
).astype(np.uint8)
with self._colors_lock:
self._latest_colors = led_colors
elapsed = time.perf_counter() - loop_start
time.sleep(max(frame_time - elapsed, 0.001))
continue
interval = (
loop_start - self._last_capture_time
if self._last_capture_time > 0
else frame_time
)
self._last_capture_time = loop_start
cached_frame = frame
t0 = time.perf_counter()
calibration = self._calibration
border_pixels = extract_border_pixels(frame, calibration.border_width)
t1 = time.perf_counter()
led_colors = self._pixel_mapper.map_border_to_leds(border_pixels)
t2 = time.perf_counter()
# Pad or truncate to match the declared led_count
target_count = self._led_count
if target_count > 0 and len(led_colors) != target_count:
if len(led_colors) < target_count:
pad = np.zeros((target_count - len(led_colors), 3), dtype=np.uint8)
led_colors = np.concatenate([led_colors, pad])
else:
led_colors = led_colors[:target_count]
# Update interpolation buffers (raw colors, before corrections)
if self._frame_interpolation:
self._interp_from = self._interp_to
self._interp_to = led_colors.copy()
self._interp_start = loop_start
self._interp_duration = max(interval, 0.001)
# Temporal smoothing
smoothing = self._smoothing
if ( if (
frame is not None self._previous_colors is not None
and self._frame_interpolation and smoothing > 0
and self._interp_from is not None and len(self._previous_colors) == len(led_colors)
and self._interp_to is not None
): ):
t = min(1.0, (loop_start - self._interp_start) / self._interp_duration) alpha = int(smoothing * 256)
alpha = int(t * 256)
led_colors = ( led_colors = (
(256 - alpha) * self._interp_from.astype(np.uint16) (256 - alpha) * led_colors.astype(np.uint16)
+ alpha * self._interp_to.astype(np.uint16) + alpha * self._previous_colors.astype(np.uint16)
) >> 8 ) >> 8
led_colors = led_colors.astype(np.uint8) led_colors = led_colors.astype(np.uint8)
if self._saturation != 1.0: t3 = time.perf_counter()
led_colors = _apply_saturation(led_colors, self._saturation)
if self._gamma != 1.0:
led_colors = self._gamma_lut[led_colors]
if self._brightness != 1.0:
led_colors = np.clip(
led_colors.astype(np.float32) * self._brightness, 0, 255
).astype(np.uint8)
with self._colors_lock:
self._latest_colors = led_colors
elapsed = time.perf_counter() - loop_start
time.sleep(max(frame_time - elapsed, 0.001))
continue
interval = ( # Saturation
loop_start - self._last_capture_time saturation = self._saturation
if self._last_capture_time > 0 if saturation != 1.0:
else frame_time led_colors = _apply_saturation(led_colors, saturation)
) t4 = time.perf_counter()
self._last_capture_time = loop_start
cached_frame = frame
t0 = time.perf_counter() # Gamma (LUT lookup — O(1) per pixel)
if self._gamma != 1.0:
led_colors = self._gamma_lut[led_colors]
t5 = time.perf_counter()
calibration = self._calibration # Brightness
border_pixels = extract_border_pixels(frame, calibration.border_width) brightness = self._brightness
t1 = time.perf_counter() if brightness != 1.0:
led_colors = np.clip(
led_colors.astype(np.float32) * brightness, 0, 255
).astype(np.uint8)
t6 = time.perf_counter()
led_colors = self._pixel_mapper.map_border_to_leds(border_pixels) self._previous_colors = led_colors
t2 = time.perf_counter()
# Pad or truncate to match the declared led_count with self._colors_lock:
target_count = self._led_count self._latest_colors = led_colors
if target_count > 0 and len(led_colors) != target_count:
if len(led_colors) < target_count:
pad = np.zeros((target_count - len(led_colors), 3), dtype=np.uint8)
led_colors = np.concatenate([led_colors, pad])
else:
led_colors = led_colors[:target_count]
# Update interpolation buffers (raw colors, before corrections) self._last_timing = {
if self._frame_interpolation: "extract_ms": (t1 - t0) * 1000,
self._interp_from = self._interp_to "map_leds_ms": (t2 - t1) * 1000,
self._interp_to = led_colors.copy() "smooth_ms": (t3 - t2) * 1000,
self._interp_start = loop_start "saturation_ms": (t4 - t3) * 1000,
self._interp_duration = max(interval, 0.001) "gamma_ms": (t5 - t4) * 1000,
"brightness_ms": (t6 - t5) * 1000,
"total_ms": (t6 - t0) * 1000,
}
# Temporal smoothing except Exception as e:
smoothing = self._smoothing logger.error(f"PictureColorStripStream processing error: {e}", exc_info=True)
if (
self._previous_colors is not None
and smoothing > 0
and len(self._previous_colors) == len(led_colors)
):
alpha = int(smoothing * 256)
led_colors = (
(256 - alpha) * led_colors.astype(np.uint16)
+ alpha * self._previous_colors.astype(np.uint16)
) >> 8
led_colors = led_colors.astype(np.uint8)
t3 = time.perf_counter()
# Saturation elapsed = time.perf_counter() - loop_start
saturation = self._saturation remaining = frame_time - elapsed
if saturation != 1.0: if remaining > 0:
led_colors = _apply_saturation(led_colors, saturation) time.sleep(remaining)
t4 = time.perf_counter()
# Gamma (LUT lookup — O(1) per pixel)
if self._gamma != 1.0:
led_colors = self._gamma_lut[led_colors]
t5 = time.perf_counter()
# Brightness
brightness = self._brightness
if brightness != 1.0:
led_colors = np.clip(
led_colors.astype(np.float32) * brightness, 0, 255
).astype(np.uint8)
t6 = time.perf_counter()
self._previous_colors = led_colors
with self._colors_lock:
self._latest_colors = led_colors
self._last_timing = {
"extract_ms": (t1 - t0) * 1000,
"map_leds_ms": (t2 - t1) * 1000,
"smooth_ms": (t3 - t2) * 1000,
"saturation_ms": (t4 - t3) * 1000,
"gamma_ms": (t5 - t4) * 1000,
"brightness_ms": (t6 - t5) * 1000,
"total_ms": (t6 - t0) * 1000,
}
except Exception as e:
logger.error(f"PictureColorStripStream processing error: {e}", exc_info=True)
elapsed = time.perf_counter() - loop_start
remaining = frame_time - elapsed
if remaining > 0:
time.sleep(remaining)
def _compute_gradient_colors(stops: list, led_count: int) -> np.ndarray: def _compute_gradient_colors(stops: list, led_count: int) -> np.ndarray:
@@ -537,28 +539,29 @@ class StaticColorStripStream(ColorStripStream):
def _animate_loop(self) -> None: def _animate_loop(self) -> None:
"""Background thread: compute animated colors at ~30 fps when animation is active.""" """Background thread: compute animated colors at ~30 fps when animation is active."""
frame_time = 1.0 / 30 frame_time = 1.0 / 30
while self._running: with high_resolution_timer():
loop_start = time.time() while self._running:
anim = self._animation loop_start = time.perf_counter()
if anim and anim.get("enabled"): anim = self._animation
speed = float(anim.get("speed", 1.0)) if anim and anim.get("enabled"):
atype = anim.get("type", "breathing") speed = float(anim.get("speed", 1.0))
t = loop_start atype = anim.get("type", "breathing")
n = self._led_count t = loop_start
colors = None n = self._led_count
colors = None
if atype == "breathing": if atype == "breathing":
factor = 0.5 * (1 + math.sin(2 * math.pi * speed * t * 0.5)) factor = 0.5 * (1 + math.sin(2 * math.pi * speed * t * 0.5))
base = np.array(self._source_color, dtype=np.float32) base = np.array(self._source_color, dtype=np.float32)
pixel = np.clip(base * factor, 0, 255).astype(np.uint8) pixel = np.clip(base * factor, 0, 255).astype(np.uint8)
colors = np.tile(pixel, (n, 1)) colors = np.tile(pixel, (n, 1))
if colors is not None: if colors is not None:
with self._colors_lock: with self._colors_lock:
self._colors = colors self._colors = colors
elapsed = time.time() - loop_start elapsed = time.perf_counter() - loop_start
time.sleep(max(frame_time - elapsed, 0.001)) time.sleep(max(frame_time - elapsed, 0.001))
class ColorCycleColorStripStream(ColorStripStream): class ColorCycleColorStripStream(ColorStripStream):
@@ -651,26 +654,27 @@ class ColorCycleColorStripStream(ColorStripStream):
def _animate_loop(self) -> None: def _animate_loop(self) -> None:
"""Background thread: interpolate between colors at ~30 fps.""" """Background thread: interpolate between colors at ~30 fps."""
frame_time = 1.0 / 30 frame_time = 1.0 / 30
while self._running: with high_resolution_timer():
loop_start = time.time() while self._running:
color_list = self._color_list loop_start = time.perf_counter()
speed = self._cycle_speed color_list = self._color_list
n = self._led_count speed = self._cycle_speed
num = len(color_list) n = self._led_count
if num >= 2: num = len(color_list)
# 0.05 factor → one full cycle every 20s at speed=1.0 if num >= 2:
cycle_pos = (speed * loop_start * 0.05) % 1.0 # 0.05 factor → one full cycle every 20s at speed=1.0
seg = cycle_pos * num cycle_pos = (speed * loop_start * 0.05) % 1.0
idx = int(seg) % num seg = cycle_pos * num
t_interp = seg - int(seg) idx = int(seg) % num
c1 = np.array(color_list[idx], dtype=np.float32) t_interp = seg - int(seg)
c2 = np.array(color_list[(idx + 1) % num], dtype=np.float32) c1 = np.array(color_list[idx], dtype=np.float32)
pixel = np.clip(c1 * (1 - t_interp) + c2 * t_interp, 0, 255).astype(np.uint8) c2 = np.array(color_list[(idx + 1) % num], dtype=np.float32)
led_colors = np.tile(pixel, (n, 1)) pixel = np.clip(c1 * (1 - t_interp) + c2 * t_interp, 0, 255).astype(np.uint8)
with self._colors_lock: led_colors = np.tile(pixel, (n, 1))
self._colors = led_colors with self._colors_lock:
elapsed = time.time() - loop_start self._colors = led_colors
time.sleep(max(frame_time - elapsed, 0.001)) elapsed = time.perf_counter() - loop_start
time.sleep(max(frame_time - elapsed, 0.001))
class GradientColorStripStream(ColorStripStream): class GradientColorStripStream(ColorStripStream):
@@ -765,47 +769,48 @@ class GradientColorStripStream(ColorStripStream):
_cached_base: Optional[np.ndarray] = None _cached_base: Optional[np.ndarray] = None
_cached_n: int = 0 _cached_n: int = 0
_cached_stops: Optional[list] = None _cached_stops: Optional[list] = None
while self._running: with high_resolution_timer():
loop_start = time.time() while self._running:
anim = self._animation loop_start = time.perf_counter()
if anim and anim.get("enabled"): anim = self._animation
speed = float(anim.get("speed", 1.0)) if anim and anim.get("enabled"):
atype = anim.get("type", "breathing") speed = float(anim.get("speed", 1.0))
t = loop_start atype = anim.get("type", "breathing")
n = self._led_count t = loop_start
stops = self._stops n = self._led_count
colors = None stops = self._stops
colors = None
# Recompute base gradient only when stops or led_count change # Recompute base gradient only when stops or led_count change
if _cached_base is None or _cached_n != n or _cached_stops is not stops: if _cached_base is None or _cached_n != n or _cached_stops is not stops:
_cached_base = _compute_gradient_colors(stops, n) _cached_base = _compute_gradient_colors(stops, n)
_cached_n = n _cached_n = n
_cached_stops = stops _cached_stops = stops
base = _cached_base base = _cached_base
if atype == "breathing": if atype == "breathing":
factor = 0.5 * (1 + math.sin(2 * math.pi * speed * t * 0.5)) factor = 0.5 * (1 + math.sin(2 * math.pi * speed * t * 0.5))
colors = np.clip(base.astype(np.float32) * factor, 0, 255).astype(np.uint8) colors = np.clip(base.astype(np.float32) * factor, 0, 255).astype(np.uint8)
elif atype == "gradient_shift": elif atype == "gradient_shift":
shift = int(speed * t * 10) % max(n, 1) shift = int(speed * t * 10) % max(n, 1)
colors = np.roll(base, shift, axis=0) colors = np.roll(base, shift, axis=0)
elif atype == "wave": elif atype == "wave":
if n > 1: if n > 1:
i_arr = np.arange(n, dtype=np.float32) i_arr = np.arange(n, dtype=np.float32)
factor = 0.5 * (1 + np.sin( factor = 0.5 * (1 + np.sin(
2 * math.pi * i_arr / n - 2 * math.pi * speed * t 2 * math.pi * i_arr / n - 2 * math.pi * speed * t
)) ))
colors = np.clip( colors = np.clip(
base.astype(np.float32) * factor[:, None], 0, 255 base.astype(np.float32) * factor[:, None], 0, 255
).astype(np.uint8) ).astype(np.uint8)
else: else:
colors = base colors = base
if colors is not None: if colors is not None:
with self._colors_lock: with self._colors_lock:
self._colors = colors self._colors = colors
elapsed = time.time() - loop_start elapsed = time.perf_counter() - loop_start
time.sleep(max(frame_time - elapsed, 0.001)) time.sleep(max(frame_time - elapsed, 0.001))

View File

@@ -24,6 +24,7 @@ from wled_controller.core.processing.target_processor import (
TargetProcessor, TargetProcessor,
) )
from wled_controller.utils import get_logger from wled_controller.utils import get_logger
from wled_controller.utils.timer import high_resolution_timer
logger = get_logger(__name__) logger = get_logger(__name__)
@@ -270,7 +271,7 @@ class KCTargetProcessor(TargetProcessor):
frame_time = 1.0 / target_fps frame_time = 1.0 / target_fps
fps_samples: collections.deque = collections.deque(maxlen=10) fps_samples: collections.deque = collections.deque(maxlen=10)
timing_samples: collections.deque = collections.deque(maxlen=10) timing_samples: collections.deque = collections.deque(maxlen=10)
prev_frame_time_stamp = time.time() prev_frame_time_stamp = time.perf_counter()
prev_capture = None prev_capture = None
last_broadcast_time = 0.0 last_broadcast_time = 0.0
send_timestamps: collections.deque = collections.deque() send_timestamps: collections.deque = collections.deque()
@@ -299,94 +300,95 @@ class KCTargetProcessor(TargetProcessor):
) )
try: try:
while self._is_running: with high_resolution_timer():
loop_start = time.time() while self._is_running:
loop_start = time.perf_counter()
try: try:
capture = self._live_stream.get_latest_frame() capture = self._live_stream.get_latest_frame()
if capture is None: if capture is None:
await asyncio.sleep(frame_time) await asyncio.sleep(frame_time)
continue continue
# Skip processing if the frame hasn't changed # Skip processing if the frame hasn't changed
if capture is prev_capture: if capture is prev_capture:
# Keepalive: re-broadcast last colors # Keepalive: re-broadcast last colors
if self._latest_colors and (loop_start - last_broadcast_time) >= 1.0: if self._latest_colors and (loop_start - last_broadcast_time) >= 1.0:
await self._broadcast_colors(self._latest_colors) await self._broadcast_colors(self._latest_colors)
last_broadcast_time = time.time() last_broadcast_time = time.perf_counter()
send_timestamps.append(last_broadcast_time) send_timestamps.append(last_broadcast_time)
self._metrics.frames_keepalive += 1 self._metrics.frames_keepalive += 1
self._metrics.frames_skipped += 1 self._metrics.frames_skipped += 1
now_ts = time.time() now_ts = time.perf_counter()
while send_timestamps and send_timestamps[0] < now_ts - 1.0: while send_timestamps and send_timestamps[0] < now_ts - 1.0:
send_timestamps.popleft()
self._metrics.fps_current = len(send_timestamps)
await asyncio.sleep(frame_time)
continue
prev_capture = capture
# Read settings fresh each frame so hot updates (brightness,
# smoothing, interpolation_mode) take effect without restart.
s = self._settings
calc_fn = calc_fns.get(s.interpolation_mode, calculate_average_color)
# CPU-bound work in thread pool
colors, colors_arr, frame_timing = await asyncio.to_thread(
_process_kc_frame,
capture, rect_names, rect_bounds, calc_fn,
prev_colors_arr, s.smoothing, s.brightness,
)
prev_colors_arr = colors_arr
self._latest_colors = dict(colors)
# Broadcast to WebSocket clients
t_broadcast_start = time.perf_counter()
await self._broadcast_colors(colors)
broadcast_ms = (time.perf_counter() - t_broadcast_start) * 1000
last_broadcast_time = time.perf_counter()
send_timestamps.append(last_broadcast_time)
# Per-stage timing (rolling average over last 10 frames)
frame_timing["broadcast"] = broadcast_ms
timing_samples.append(frame_timing)
n = len(timing_samples)
self._metrics.timing_calc_colors_ms = sum(s["calc_colors"] for s in timing_samples) / n
self._metrics.timing_smooth_ms = sum(s["smooth"] for s in timing_samples) / n
self._metrics.timing_broadcast_ms = sum(s["broadcast"] for s in timing_samples) / n
self._metrics.timing_total_ms = sum(s["total"] for s in timing_samples) / n + broadcast_ms
# Update metrics
self._metrics.frames_processed += 1
self._metrics.last_update = datetime.utcnow()
# Calculate actual FPS
now = time.perf_counter()
interval = now - prev_frame_time_stamp
prev_frame_time_stamp = now
fps_samples.append(1.0 / interval if interval > 0 else 0)
self._metrics.fps_actual = sum(fps_samples) / len(fps_samples)
# Potential FPS
processing_time = now - loop_start
self._metrics.fps_potential = 1.0 / processing_time if processing_time > 0 else 0
# fps_current
while send_timestamps and send_timestamps[0] < now - 1.0:
send_timestamps.popleft() send_timestamps.popleft()
self._metrics.fps_current = len(send_timestamps) self._metrics.fps_current = len(send_timestamps)
await asyncio.sleep(frame_time)
continue
prev_capture = capture
# Read settings fresh each frame so hot updates (brightness, except Exception as e:
# smoothing, interpolation_mode) take effect without restart. self._metrics.errors_count += 1
s = self._settings self._metrics.last_error = str(e)
calc_fn = calc_fns.get(s.interpolation_mode, calculate_average_color) logger.error(f"KC processing error for {self._target_id}: {e}", exc_info=True)
# CPU-bound work in thread pool # Throttle to target FPS
colors, colors_arr, frame_timing = await asyncio.to_thread( elapsed = time.perf_counter() - loop_start
_process_kc_frame, remaining = frame_time - elapsed
capture, rect_names, rect_bounds, calc_fn, if remaining > 0:
prev_colors_arr, s.smoothing, s.brightness, await asyncio.sleep(remaining)
)
prev_colors_arr = colors_arr
self._latest_colors = dict(colors)
# Broadcast to WebSocket clients
t_broadcast_start = time.perf_counter()
await self._broadcast_colors(colors)
broadcast_ms = (time.perf_counter() - t_broadcast_start) * 1000
last_broadcast_time = time.time()
send_timestamps.append(last_broadcast_time)
# Per-stage timing (rolling average over last 10 frames)
frame_timing["broadcast"] = broadcast_ms
timing_samples.append(frame_timing)
n = len(timing_samples)
self._metrics.timing_calc_colors_ms = sum(s["calc_colors"] for s in timing_samples) / n
self._metrics.timing_smooth_ms = sum(s["smooth"] for s in timing_samples) / n
self._metrics.timing_broadcast_ms = sum(s["broadcast"] for s in timing_samples) / n
self._metrics.timing_total_ms = sum(s["total"] for s in timing_samples) / n + broadcast_ms
# Update metrics
self._metrics.frames_processed += 1
self._metrics.last_update = datetime.utcnow()
# Calculate actual FPS
now = time.time()
interval = now - prev_frame_time_stamp
prev_frame_time_stamp = now
fps_samples.append(1.0 / interval if interval > 0 else 0)
self._metrics.fps_actual = sum(fps_samples) / len(fps_samples)
# Potential FPS
processing_time = now - loop_start
self._metrics.fps_potential = 1.0 / processing_time if processing_time > 0 else 0
# fps_current
while send_timestamps and send_timestamps[0] < now - 1.0:
send_timestamps.popleft()
self._metrics.fps_current = len(send_timestamps)
except Exception as e:
self._metrics.errors_count += 1
self._metrics.last_error = str(e)
logger.error(f"KC processing error for {self._target_id}: {e}", exc_info=True)
# Throttle to target FPS
elapsed = time.time() - loop_start
remaining = frame_time - elapsed
if remaining > 0:
await asyncio.sleep(remaining)
except asyncio.CancelledError: except asyncio.CancelledError:
logger.info(f"KC processing loop cancelled for target {self._target_id}") logger.info(f"KC processing loop cancelled for target {self._target_id}")

View File

@@ -22,6 +22,7 @@ import numpy as np
from wled_controller.core.capture_engines.base import CaptureStream, ScreenCapture from wled_controller.core.capture_engines.base import CaptureStream, ScreenCapture
from wled_controller.core.filters import ImagePool, PostprocessingFilter from wled_controller.core.filters import ImagePool, PostprocessingFilter
from wled_controller.utils import get_logger from wled_controller.utils import get_logger
from wled_controller.utils.timer import high_resolution_timer
logger = get_logger(__name__) logger = get_logger(__name__)
@@ -128,24 +129,25 @@ class ScreenCaptureLiveStream(LiveStream):
def _capture_loop(self) -> None: def _capture_loop(self) -> None:
frame_time = 1.0 / self._fps if self._fps > 0 else 1.0 frame_time = 1.0 / self._fps if self._fps > 0 else 1.0
while self._running: with high_resolution_timer():
loop_start = time.time() while self._running:
try: loop_start = time.perf_counter()
frame = self._capture_stream.capture_frame() try:
if frame is not None: frame = self._capture_stream.capture_frame()
with self._frame_lock: if frame is not None:
self._latest_frame = frame with self._frame_lock:
else: self._latest_frame = frame
# Small sleep when no frame available to avoid CPU spinning else:
time.sleep(0.001) # Small sleep when no frame available to avoid CPU spinning
except Exception as e: time.sleep(0.001)
logger.error(f"Capture error (display={self._capture_stream.display_index}): {e}") except Exception as e:
logger.error(f"Capture error (display={self._capture_stream.display_index}): {e}")
# Throttle to target FPS # Throttle to target FPS
elapsed = time.time() - loop_start elapsed = time.perf_counter() - loop_start
remaining = frame_time - elapsed remaining = frame_time - elapsed
if remaining > 0: if remaining > 0:
time.sleep(remaining) time.sleep(remaining)
class ProcessedLiveStream(LiveStream): class ProcessedLiveStream(LiveStream):
@@ -224,78 +226,79 @@ class ProcessedLiveStream(LiveStream):
fps = self.target_fps fps = self.target_fps
frame_time = 1.0 / fps if fps > 0 else 1.0 frame_time = 1.0 / fps if fps > 0 else 1.0
while self._running: with high_resolution_timer():
loop_start = time.time() while self._running:
loop_start = time.perf_counter()
source_frame = self._source.get_latest_frame() source_frame = self._source.get_latest_frame()
if source_frame is None or source_frame is cached_source_frame: if source_frame is None or source_frame is cached_source_frame:
# Idle tick — run filter chain when any filter requests idle processing # Idle tick — run filter chain when any filter requests idle processing
if self._has_idle_filters and cached_source_frame is not None: if self._has_idle_filters and cached_source_frame is not None:
src = cached_source_frame.image src = cached_source_frame.image
h, w, c = src.shape h, w, c = src.shape
if _idle_src_buf is None or _idle_src_buf.shape != (h, w, c): if _idle_src_buf is None or _idle_src_buf.shape != (h, w, c):
_idle_src_buf = np.empty((h, w, c), dtype=np.uint8) _idle_src_buf = np.empty((h, w, c), dtype=np.uint8)
np.copyto(_idle_src_buf, src) np.copyto(_idle_src_buf, src)
idle_image = _idle_src_buf idle_image = _idle_src_buf
for f in self._filters: for f in self._filters:
result = f.process_image(idle_image, self._image_pool) result = f.process_image(idle_image, self._image_pool)
if result is not None: if result is not None:
if idle_image is not _idle_src_buf: if idle_image is not _idle_src_buf:
self._image_pool.release(idle_image) self._image_pool.release(idle_image)
idle_image = result idle_image = result
# Only publish a new frame when the filter chain produced actual # Only publish a new frame when the filter chain produced actual
# interpolated output (idle_image advanced past the input buffer). # interpolated output (idle_image advanced past the input buffer).
# If every filter passed through, idle_image is still _idle_src_buf — # If every filter passed through, idle_image is still _idle_src_buf —
# leave _latest_frame unchanged so consumers that rely on object # leave _latest_frame unchanged so consumers that rely on object
# identity for deduplication correctly detect no new content. # identity for deduplication correctly detect no new content.
if idle_image is not _idle_src_buf: if idle_image is not _idle_src_buf:
processed = ScreenCapture( processed = ScreenCapture(
image=idle_image, image=idle_image,
width=cached_source_frame.width, width=cached_source_frame.width,
height=cached_source_frame.height, height=cached_source_frame.height,
display_index=cached_source_frame.display_index, display_index=cached_source_frame.display_index,
) )
with self._frame_lock: with self._frame_lock:
self._latest_frame = processed self._latest_frame = processed
elapsed = time.time() - loop_start elapsed = time.perf_counter() - loop_start
remaining = frame_time - elapsed remaining = frame_time - elapsed
time.sleep(max(remaining, 0.001)) time.sleep(max(remaining, 0.001))
continue continue
cached_source_frame = source_frame cached_source_frame = source_frame
# Reuse ring buffer slot instead of allocating a new copy each frame # Reuse ring buffer slot instead of allocating a new copy each frame
src = source_frame.image src = source_frame.image
h, w, c = src.shape h, w, c = src.shape
buf = _ring[_ring_idx] buf = _ring[_ring_idx]
if buf is None or buf.shape != (h, w, c): if buf is None or buf.shape != (h, w, c):
buf = np.empty((h, w, c), dtype=np.uint8) buf = np.empty((h, w, c), dtype=np.uint8)
_ring[_ring_idx] = buf _ring[_ring_idx] = buf
_ring_idx = (_ring_idx + 1) % 3 _ring_idx = (_ring_idx + 1) % 3
np.copyto(buf, src) np.copyto(buf, src)
image = buf image = buf
for f in self._filters: for f in self._filters:
result = f.process_image(image, self._image_pool) result = f.process_image(image, self._image_pool)
if result is not None: if result is not None:
# Release intermediate filter output back to pool # Release intermediate filter output back to pool
# (don't release the ring buffer itself) # (don't release the ring buffer itself)
if image is not buf: if image is not buf:
self._image_pool.release(image) self._image_pool.release(image)
image = result image = result
processed = ScreenCapture( processed = ScreenCapture(
image=image, image=image,
width=source_frame.width, width=source_frame.width,
height=source_frame.height, height=source_frame.height,
display_index=source_frame.display_index, display_index=source_frame.display_index,
) )
with self._frame_lock: with self._frame_lock:
self._latest_frame = processed self._latest_frame = processed
class StaticImageLiveStream(LiveStream): class StaticImageLiveStream(LiveStream):

View File

@@ -19,6 +19,7 @@ from wled_controller.core.processing.target_processor import (
TargetProcessor, TargetProcessor,
) )
from wled_controller.utils import get_logger from wled_controller.utils import get_logger
from wled_controller.utils.timer import high_resolution_timer
logger = get_logger(__name__) logger = get_logger(__name__)
@@ -367,7 +368,7 @@ class WledTargetProcessor(TargetProcessor):
send_timestamps: collections.deque = collections.deque() send_timestamps: collections.deque = collections.deque()
prev_colors = None prev_colors = None
last_send_time = 0.0 last_send_time = 0.0
prev_frame_time_stamp = time.time() prev_frame_time_stamp = time.perf_counter()
loop = asyncio.get_running_loop() loop = asyncio.get_running_loop()
logger.info( logger.info(
@@ -376,102 +377,103 @@ class WledTargetProcessor(TargetProcessor):
) )
try: try:
while self._is_running: with high_resolution_timer():
loop_start = now = time.time() while self._is_running:
# Re-read target_fps each tick so hot-updates to the CSS source take effect loop_start = now = time.perf_counter()
target_fps = stream.target_fps if stream.target_fps > 0 else 30 # Re-read target_fps each tick so hot-updates to the CSS source take effect
frame_time = 1.0 / target_fps target_fps = stream.target_fps if stream.target_fps > 0 else 30
frame_time = 1.0 / target_fps
# Re-fetch device info for runtime changes (test mode, brightness) # Re-fetch device info for runtime changes (test mode, brightness)
device_info = self._ctx.get_device_info(self._device_id) device_info = self._ctx.get_device_info(self._device_id)
# Skip send while in calibration test mode # Skip send while in calibration test mode
if device_info and device_info.test_mode_active: if device_info and device_info.test_mode_active:
await asyncio.sleep(frame_time)
continue
try:
colors = stream.get_latest_colors()
if colors is None:
if self._metrics.frames_processed == 0:
logger.info(f"Stream returned None for target {self._target_id} (no data yet)")
await asyncio.sleep(frame_time) await asyncio.sleep(frame_time)
continue continue
if colors is prev_colors: try:
# Same frame — send keepalive if interval elapsed colors = stream.get_latest_colors()
if prev_colors is not None and (loop_start - last_send_time) >= standby_interval:
if not self._is_running or self._led_client is None: if colors is None:
break if self._metrics.frames_processed == 0:
send_colors = self._apply_brightness(prev_colors, device_info) logger.info(f"Stream returned None for target {self._target_id} (no data yet)")
if self._led_client.supports_fast_send: await asyncio.sleep(frame_time)
self._led_client.send_pixels_fast(send_colors) continue
else:
await self._led_client.send_pixels(send_colors) if colors is prev_colors:
now = time.time() # Same frame — send keepalive if interval elapsed
last_send_time = now if prev_colors is not None and (loop_start - last_send_time) >= standby_interval:
send_timestamps.append(now) if not self._is_running or self._led_client is None:
self._metrics.frames_keepalive += 1 break
self._metrics.frames_skipped += 1 send_colors = self._apply_brightness(prev_colors, device_info)
if self._led_client.supports_fast_send:
self._led_client.send_pixels_fast(send_colors)
else:
await self._led_client.send_pixels(send_colors)
now = time.perf_counter()
last_send_time = now
send_timestamps.append(now)
self._metrics.frames_keepalive += 1
self._metrics.frames_skipped += 1
while send_timestamps and send_timestamps[0] < now - 1.0:
send_timestamps.popleft()
self._metrics.fps_current = len(send_timestamps)
await asyncio.sleep(frame_time)
continue
prev_colors = colors
# Apply device software brightness
send_colors = self._apply_brightness(colors, device_info)
# Send to LED device
if not self._is_running or self._led_client is None:
break
t_send_start = time.perf_counter()
if self._led_client.supports_fast_send:
self._led_client.send_pixels_fast(send_colors)
else:
await self._led_client.send_pixels(send_colors)
send_ms = (time.perf_counter() - t_send_start) * 1000
now = time.perf_counter()
last_send_time = now
send_timestamps.append(now)
self._metrics.timing_send_ms = send_ms
self._metrics.frames_processed += 1
self._metrics.last_update = datetime.utcnow()
if self._metrics.frames_processed <= 3 or self._metrics.frames_processed % 100 == 0:
logger.info(
f"Frame {self._metrics.frames_processed} for {self._target_id} "
f"({len(send_colors)} LEDs) — send={send_ms:.1f}ms"
)
# FPS tracking
interval = now - prev_frame_time_stamp
prev_frame_time_stamp = now
fps_samples.append(1.0 / interval if interval > 0 else 0)
self._metrics.fps_actual = sum(fps_samples) / len(fps_samples)
processing_time = now - loop_start
self._metrics.fps_potential = 1.0 / processing_time if processing_time > 0 else 0
while send_timestamps and send_timestamps[0] < now - 1.0: while send_timestamps and send_timestamps[0] < now - 1.0:
send_timestamps.popleft() send_timestamps.popleft()
self._metrics.fps_current = len(send_timestamps) self._metrics.fps_current = len(send_timestamps)
await asyncio.sleep(frame_time)
continue
prev_colors = colors except Exception as e:
self._metrics.errors_count += 1
self._metrics.last_error = str(e)
logger.error(f"Processing error for target {self._target_id}: {e}", exc_info=True)
# Apply device software brightness # Throttle to target FPS
send_colors = self._apply_brightness(colors, device_info) elapsed = now - loop_start
remaining = frame_time - elapsed
# Send to LED device if remaining > 0:
if not self._is_running or self._led_client is None: await asyncio.sleep(remaining)
break
t_send_start = time.perf_counter()
if self._led_client.supports_fast_send:
self._led_client.send_pixels_fast(send_colors)
else:
await self._led_client.send_pixels(send_colors)
send_ms = (time.perf_counter() - t_send_start) * 1000
now = time.time()
last_send_time = now
send_timestamps.append(now)
self._metrics.timing_send_ms = send_ms
self._metrics.frames_processed += 1
self._metrics.last_update = datetime.utcnow()
if self._metrics.frames_processed <= 3 or self._metrics.frames_processed % 100 == 0:
logger.info(
f"Frame {self._metrics.frames_processed} for {self._target_id} "
f"({len(send_colors)} LEDs) — send={send_ms:.1f}ms"
)
# FPS tracking
interval = now - prev_frame_time_stamp
prev_frame_time_stamp = now
fps_samples.append(1.0 / interval if interval > 0 else 0)
self._metrics.fps_actual = sum(fps_samples) / len(fps_samples)
processing_time = now - loop_start
self._metrics.fps_potential = 1.0 / processing_time if processing_time > 0 else 0
while send_timestamps and send_timestamps[0] < now - 1.0:
send_timestamps.popleft()
self._metrics.fps_current = len(send_timestamps)
except Exception as e:
self._metrics.errors_count += 1
self._metrics.last_error = str(e)
logger.error(f"Processing error for target {self._target_id}: {e}", exc_info=True)
# Throttle to target FPS
elapsed = now - loop_start
remaining = frame_time - elapsed
if remaining > 0:
await asyncio.sleep(remaining)
except asyncio.CancelledError: except asyncio.CancelledError:
logger.info(f"Processing loop cancelled for target {self._target_id}") logger.info(f"Processing loop cancelled for target {self._target_id}")

View File

@@ -172,8 +172,10 @@ export function createColorStripCard(source, pictureSourceMap) {
const isGradient = source.source_type === 'gradient'; const isGradient = source.source_type === 'gradient';
const isColorCycle = source.source_type === 'color_cycle'; const isColorCycle = source.source_type === 'color_cycle';
const animBadge = ((isStatic || isGradient) && source.animation && source.animation.enabled) const anim = (isStatic || isGradient) && source.animation && source.animation.enabled ? source.animation : null;
? `<span class="stream-card-prop" title="${t('color_strip.animation')}">✨ ${t('color_strip.animation.type.' + source.animation.type) || source.animation.type}</span>` const animBadge = anim
? `<span class="stream-card-prop" title="${t('color_strip.animation')}">✨ ${t('color_strip.animation.type.' + anim.type) || anim.type}</span>`
+ `<span class="stream-card-prop" title="${t('color_strip.animation.speed')}">⏩ ${(anim.speed || 1.0).toFixed(1)}×</span>`
: ''; : '';
let propsHtml; let propsHtml;

View File

@@ -2,5 +2,6 @@
from .logger import setup_logging, get_logger from .logger import setup_logging, get_logger
from .monitor_names import get_monitor_names, get_monitor_name, get_monitor_refresh_rates from .monitor_names import get_monitor_names, get_monitor_name, get_monitor_refresh_rates
from .timer import high_resolution_timer
__all__ = ["setup_logging", "get_logger", "get_monitor_names", "get_monitor_name", "get_monitor_refresh_rates"] __all__ = ["setup_logging", "get_logger", "get_monitor_names", "get_monitor_name", "get_monitor_refresh_rates", "high_resolution_timer"]

View File

@@ -0,0 +1,34 @@
"""High-resolution timer utilities for precise sleep on Windows.
Windows default timer resolution is ~15.6ms, making time.sleep() very
imprecise for real-time loops (e.g. 60fps needs 16.67ms per frame).
Calling timeBeginPeriod(1) increases system timer resolution to 1ms,
making time.sleep() accurate to ~1ms. The calls are reference-counted
by Windows — each timeBeginPeriod must be paired with timeEndPeriod.
"""
import sys
from contextlib import contextmanager
@contextmanager
def high_resolution_timer():
    """Context manager that enables 1ms system timer resolution on Windows.

    Windows' default timer granularity (~15.6ms) makes ``time.sleep()``
    far too coarse for real-time frame pacing (e.g. 60fps needs 16.67ms
    per frame). ``timeBeginPeriod(1)`` raises the system-wide timer
    resolution to 1ms for the duration of the block, making sleeps
    accurate to roughly 1ms. The calls are reference-counted by Windows,
    so every ``timeBeginPeriod`` must be balanced by a ``timeEndPeriod``
    — guaranteed here by the ``try``/``finally``.

    On non-Windows platforms this is a no-op.

    Usage::

        with high_resolution_timer():
            while running:
                ...
                time.sleep(remaining)  # now accurate to ~1ms

    Yields:
        None. Raises nothing itself; exceptions from the managed block
        propagate unchanged after the timer resolution is restored.
    """
    if sys.platform != "win32":
        # POSIX sleep resolution is already sub-millisecond; nothing to do.
        yield
        return

    # Local import: the winmm DLL (and ctypes.windll) only exists on Windows.
    import ctypes

    # Bind the module handle once so the begin/end pair always targets the
    # same object, instead of re-importing and re-resolving it on exit.
    winmm = ctypes.windll.winmm
    winmm.timeBeginPeriod(1)
    try:
        yield
    finally:
        # Must balance timeBeginPeriod or the elevated resolution leaks
        # until process exit (Windows reference-counts these calls).
        winmm.timeEndPeriod(1)