Add audio capture timing metrics to target pipeline

Instrument AudioCaptureStream with read/FFT timing and
AudioColorStripStream with render timing. Display audio-specific
timing segments (read/fft/render/send) in the target card
breakdown bar when an audio source is active.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Committed 2026-02-24 20:41:29 +03:00 as 34d9495eb3 (parent a39dc1b06a).
6 changed files with 86 additions and 7 deletions

View File

@@ -45,6 +45,9 @@ class AudioColorStripStream(ColorStripStream):
self._thread: Optional[threading.Thread] = None
self._fps = 30
# Per-frame timing (read by WledTargetProcessor via get_last_timing())
self._last_timing: dict = {}
# Beat pulse persistent state
self._pulse_brightness = 0.0
@@ -73,7 +76,7 @@ class AudioColorStripStream(ColorStripStream):
self._audio_source_id = audio_source_id
if audio_source_id and self._audio_source_store:
try:
device_index, is_loopback, channel = self._audio_source_store.resolve_mono_source(audio_source_id)
device_index, is_loopback, channel = self._audio_source_store.resolve_audio_source(audio_source_id)
self._audio_device_index = device_index
self._audio_loopback = is_loopback
self._audio_channel = channel
@@ -147,6 +150,9 @@ class AudioColorStripStream(ColorStripStream):
with self._colors_lock:
return self._colors
def get_last_timing(self) -> dict:
    """Return a snapshot of the most recent per-frame timing values.

    A shallow copy is handed out so the caller (WledTargetProcessor)
    can read the metrics without racing the render loop's next update.
    """
    return {**self._last_timing}
def update_source(self, source) -> None:
from wled_controller.storage.color_strip_source import AudioColorStripSource
if isinstance(source, AudioColorStripSource):
@@ -204,11 +210,24 @@ class AudioColorStripStream(ColorStripStream):
analysis = self._audio_stream.get_latest_analysis()
render_fn = renderers.get(self._visualization_mode, self._render_spectrum)
t_render = time.perf_counter()
render_fn(buf, n, analysis)
render_ms = (time.perf_counter() - t_render) * 1000
with self._colors_lock:
self._colors = buf
# Pull capture-side timing and combine with render timing
capture_timing = self._audio_stream.get_last_timing() if self._audio_stream else {}
read_ms = capture_timing.get("read_ms", 0)
fft_ms = capture_timing.get("fft_ms", 0)
self._last_timing = {
"audio_read_ms": read_ms,
"audio_fft_ms": fft_ms,
"audio_render_ms": render_ms,
"total_ms": read_ms + fft_ms + render_ms,
}
elapsed = time.perf_counter() - loop_start
time.sleep(max(frame_time - elapsed, 0.001))

View File

@@ -300,9 +300,19 @@ class WledTargetProcessor(TargetProcessor):
css_timing = self._css_stream.get_last_timing()
send_ms = round(metrics.timing_send_ms, 1) if self._is_running else None
# Picture source timing
extract_ms = round(css_timing.get("extract_ms", 0), 1) if css_timing else None
map_ms = round(css_timing.get("map_leds_ms", 0), 1) if css_timing else None
smooth_ms = round(css_timing.get("smooth_ms", 0), 1) if css_timing else None
# Audio source timing (keyed on audio_render_ms presence)
is_audio_source = css_timing and "audio_render_ms" in css_timing
audio_read_ms = round(css_timing.get("audio_read_ms", 0), 1) if is_audio_source else None
audio_fft_ms = round(css_timing.get("audio_fft_ms", 0), 1) if is_audio_source else None
audio_render_ms = round(css_timing.get("audio_render_ms", 0), 1) if is_audio_source else None
# Suppress picture timing when audio source is active
if is_audio_source:
extract_ms = map_ms = smooth_ms = None
if css_timing:
total_ms = round(css_timing.get("total_ms", 0) + metrics.timing_send_ms, 1)
elif self._is_running and send_ms is not None:
@@ -326,6 +336,9 @@ class WledTargetProcessor(TargetProcessor):
"timing_extract_ms": extract_ms,
"timing_map_leds_ms": map_ms,
"timing_smooth_ms": smooth_ms,
"timing_audio_read_ms": audio_read_ms,
"timing_audio_fft_ms": audio_fft_ms,
"timing_audio_render_ms": audio_render_ms,
"timing_total_ms": total_ms,
"display_index": self._resolved_display_index,
"overlay_active": self._overlay_active,