Add audio capture timing metrics to target pipeline

Instrument AudioCaptureStream with read/FFT timing and
AudioColorStripStream with render timing. Display audio-specific
timing segments (read/fft/render/send) in the target card
breakdown bar when an audio source is active.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-24 20:41:29 +03:00
parent a39dc1b06a
commit 34d9495eb3
6 changed files with 86 additions and 7 deletions

View File

@@ -127,6 +127,9 @@ class TargetProcessingState(BaseModel):
timing_map_leds_ms: Optional[float] = Field(None, description="LED color mapping time (ms)")
timing_smooth_ms: Optional[float] = Field(None, description="Temporal smoothing time (ms)")
timing_total_ms: Optional[float] = Field(None, description="Total processing time per frame (ms)")
timing_audio_read_ms: Optional[float] = Field(None, description="Audio device read time (ms)")
timing_audio_fft_ms: Optional[float] = Field(None, description="Audio FFT analysis time (ms)")
timing_audio_render_ms: Optional[float] = Field(None, description="Audio visualization render time (ms)")
timing_calc_colors_ms: Optional[float] = Field(None, description="Color calculation time (ms, KC targets)")
timing_broadcast_ms: Optional[float] = Field(None, description="WebSocket broadcast time (ms, KC targets)")
display_index: Optional[int] = Field(None, description="Current display index")

View File

@@ -123,6 +123,9 @@ class AudioCaptureStream:
self._smooth_spectrum_left = np.zeros(NUM_BANDS, dtype=np.float32)
self._smooth_spectrum_right = np.zeros(NUM_BANDS, dtype=np.float32)
# Per-iteration timing (written by capture thread, read by consumers)
self._last_timing: dict = {}
def start(self) -> None:
if self._running:
return
@@ -150,6 +153,10 @@ class AudioCaptureStream:
with self._lock:
return self._latest
def get_last_timing(self) -> dict:
"""Return a snapshot copy of per-iteration timing from the capture loop (ms).

Keys (when populated by the capture thread): "read_ms" (device read
duration) and "fft_ms" (spectrum analysis duration).
"""
# NOTE(review): dict() returns a snapshot; _last_timing is rebound to a
# fresh dict by the capture thread, so this read is presumably safe
# without holding self._lock — confirm against the capture loop.
return dict(self._last_timing)
def _capture_loop(self) -> None:
try:
import pyaudiowpatch as pyaudio
@@ -211,6 +218,7 @@ class AudioCaptureStream:
spectrum_buf_right = np.zeros(NUM_BANDS, dtype=np.float32)
while self._running:
t_read_start = time.perf_counter()
try:
raw_data = stream.read(self._chunk_size, exception_on_overflow=False)
data = np.frombuffer(raw_data, dtype=np.float32)
@@ -218,6 +226,7 @@ class AudioCaptureStream:
logger.warning(f"Audio read error: {e}")
time.sleep(0.05)
continue
t_read_end = time.perf_counter()
# Split channels and mix to mono
if channels > 1:
@@ -275,6 +284,12 @@ class AudioCaptureStream:
beat = True
beat_intensity = min(1.0, (ratio - 1.0) / 2.0)
t_fft_end = time.perf_counter()
self._last_timing = {
"read_ms": (t_read_end - t_read_start) * 1000,
"fft_ms": (t_fft_end - t_read_end) * 1000,
}
analysis = AudioAnalysis(
timestamp=time.perf_counter(),
rms=rms,

View File

@@ -45,6 +45,9 @@ class AudioColorStripStream(ColorStripStream):
self._thread: Optional[threading.Thread] = None
self._fps = 30
# Per-frame timing (read by WledTargetProcessor via get_last_timing())
self._last_timing: dict = {}
# Beat pulse persistent state
self._pulse_brightness = 0.0
@@ -73,7 +76,7 @@ class AudioColorStripStream(ColorStripStream):
self._audio_source_id = audio_source_id
if audio_source_id and self._audio_source_store:
try:
device_index, is_loopback, channel = self._audio_source_store.resolve_mono_source(audio_source_id)
device_index, is_loopback, channel = self._audio_source_store.resolve_audio_source(audio_source_id)
self._audio_device_index = device_index
self._audio_loopback = is_loopback
self._audio_channel = channel
@@ -147,6 +150,9 @@ class AudioColorStripStream(ColorStripStream):
with self._colors_lock:
return self._colors
def get_last_timing(self) -> dict:
"""Return a snapshot copy of the most recent per-frame timing dict (ms).

Populated by the render loop with "audio_read_ms", "audio_fft_ms",
"audio_render_ms" and "total_ms"; empty until the first frame renders.
"""
return dict(self._last_timing)
def update_source(self, source) -> None:
from wled_controller.storage.color_strip_source import AudioColorStripSource
if isinstance(source, AudioColorStripSource):
@@ -204,11 +210,24 @@ class AudioColorStripStream(ColorStripStream):
analysis = self._audio_stream.get_latest_analysis()
render_fn = renderers.get(self._visualization_mode, self._render_spectrum)
t_render = time.perf_counter()
render_fn(buf, n, analysis)
render_ms = (time.perf_counter() - t_render) * 1000
with self._colors_lock:
self._colors = buf
# Pull capture-side timing and combine with render timing
capture_timing = self._audio_stream.get_last_timing() if self._audio_stream else {}
read_ms = capture_timing.get("read_ms", 0)
fft_ms = capture_timing.get("fft_ms", 0)
self._last_timing = {
"audio_read_ms": read_ms,
"audio_fft_ms": fft_ms,
"audio_render_ms": render_ms,
"total_ms": read_ms + fft_ms + render_ms,
}
elapsed = time.perf_counter() - loop_start
time.sleep(max(frame_time - elapsed, 0.001))

View File

@@ -300,9 +300,19 @@ class WledTargetProcessor(TargetProcessor):
css_timing = self._css_stream.get_last_timing()
send_ms = round(metrics.timing_send_ms, 1) if self._is_running else None
# Picture source timing
extract_ms = round(css_timing.get("extract_ms", 0), 1) if css_timing else None
map_ms = round(css_timing.get("map_leds_ms", 0), 1) if css_timing else None
smooth_ms = round(css_timing.get("smooth_ms", 0), 1) if css_timing else None
# Audio source timing (keyed on audio_render_ms presence)
is_audio_source = css_timing and "audio_render_ms" in css_timing
audio_read_ms = round(css_timing.get("audio_read_ms", 0), 1) if is_audio_source else None
audio_fft_ms = round(css_timing.get("audio_fft_ms", 0), 1) if is_audio_source else None
audio_render_ms = round(css_timing.get("audio_render_ms", 0), 1) if is_audio_source else None
# Suppress picture timing when audio source is active
if is_audio_source:
extract_ms = map_ms = smooth_ms = None
if css_timing:
total_ms = round(css_timing.get("total_ms", 0) + metrics.timing_send_ms, 1)
elif self._is_running and send_ms is not None:
@@ -326,6 +336,9 @@ class WledTargetProcessor(TargetProcessor):
"timing_extract_ms": extract_ms,
"timing_map_leds_ms": map_ms,
"timing_smooth_ms": smooth_ms,
"timing_audio_read_ms": audio_read_ms,
"timing_audio_fft_ms": audio_fft_ms,
"timing_audio_render_ms": audio_render_ms,
"timing_total_ms": total_ms,
"display_index": self._resolved_display_index,
"overlay_active": self._overlay_active,

View File

@@ -553,6 +553,9 @@ ul.section-tip li {
.timing-map { background: #FF9800; }
.timing-smooth { background: #2196F3; }
.timing-send { background: #E91E63; }
.timing-audio-read { background: #4CAF50; }
.timing-audio-fft { background: #FF9800; }
.timing-audio-render { background: #2196F3; }
.timing-legend {
display: flex;
@@ -580,6 +583,9 @@ ul.section-tip li {
.timing-dot.timing-map { background: #FF9800; }
.timing-dot.timing-smooth { background: #2196F3; }
.timing-dot.timing-send { background: #E91E63; }
.timing-dot.timing-audio-read { background: #4CAF50; }
.timing-dot.timing-audio-fft { background: #FF9800; }
.timing-dot.timing-audio-render { background: #2196F3; }
@media (max-width: 768px) {
.displays-grid,

View File

@@ -387,13 +387,14 @@ export async function loadTargetsTab() {
_loadTargetsLock = true;
try {
// Fetch devices, targets, CSS sources, picture sources, and pattern templates in parallel
const [devicesResp, targetsResp, cssResp, psResp, patResp] = await Promise.all([
// Fetch devices, targets, CSS sources, picture sources, pattern templates, and value sources in parallel
const [devicesResp, targetsResp, cssResp, psResp, patResp, vsResp] = await Promise.all([
fetchWithAuth('/devices'),
fetchWithAuth('/picture-targets'),
fetchWithAuth('/color-strip-sources').catch(() => null),
fetchWithAuth('/picture-sources').catch(() => null),
fetchWithAuth('/pattern-templates').catch(() => null),
fetchWithAuth('/value-sources').catch(() => null),
]);
const devicesData = await devicesResp.json();
@@ -422,6 +423,12 @@ export async function loadTargetsTab() {
patternTemplates.forEach(pt => { patternTemplateMap[pt.id] = pt; });
}
let valueSourceMap = {};
if (vsResp && vsResp.ok) {
const vsData = await vsResp.json();
(vsData.sources || []).forEach(s => { valueSourceMap[s.id] = s; });
}
// Fetch all device states, target states, and target metrics in batch
const [batchDevStatesResp, batchTgtStatesResp, batchTgtMetricsResp] = await Promise.all([
fetchWithAuth('/devices/batch/states'),
@@ -478,8 +485,8 @@ export async function loadTargetsTab() {
// Build items arrays for each section
const deviceItems = ledDevices.map(d => ({ key: d.id, html: createDeviceCard(d) }));
const cssItems = Object.values(colorStripSourceMap).map(s => ({ key: s.id, html: createColorStripCard(s, pictureSourceMap) }));
const ledTargetItems = ledTargets.map(t => ({ key: t.id, html: createTargetCard(t, deviceMap, colorStripSourceMap) }));
const kcTargetItems = kcTargets.map(t => ({ key: t.id, html: createKCTargetCard(t, pictureSourceMap, patternTemplateMap) }));
const ledTargetItems = ledTargets.map(t => ({ key: t.id, html: createTargetCard(t, deviceMap, colorStripSourceMap, valueSourceMap) }));
const kcTargetItems = kcTargets.map(t => ({ key: t.id, html: createKCTargetCard(t, pictureSourceMap, patternTemplateMap, valueSourceMap) }));
const patternItems = patternTemplates.map(pt => ({ key: pt.id, html: createPatternTemplateCard(pt) }));
// Track which target cards were replaced/added (need chart re-init)
@@ -630,7 +637,7 @@ function _cssSourceName(cssId, colorStripSourceMap) {
return css ? escapeHtml(css.name) : escapeHtml(cssId);
}
export function createTargetCard(target, deviceMap, colorStripSourceMap) {
export function createTargetCard(target, deviceMap, colorStripSourceMap, valueSourceMap) {
const state = target.state || {};
const metrics = target.metrics || {};
@@ -642,6 +649,9 @@ export function createTargetCard(target, deviceMap, colorStripSourceMap) {
const cssId = target.color_strip_source_id || '';
const cssSummary = _cssSourceName(cssId, colorStripSourceMap);
const bvsId = target.brightness_value_source_id || '';
const bvs = bvsId && valueSourceMap ? valueSourceMap[bvsId] : null;
// Determine if overlay is available (picture-based CSS)
const css = cssId ? colorStripSourceMap[cssId] : null;
const overlayAvailable = !css || css.source_type === 'picture';
@@ -667,8 +677,9 @@ export function createTargetCard(target, deviceMap, colorStripSourceMap) {
</div>
<div class="stream-card-props">
<span class="stream-card-prop" title="${t('targets.device')}">💡 ${escapeHtml(deviceName)}</span>
<span class="stream-card-prop" title="${t('targets.fps')}">⚡ ${target.fps || 30} fps</span>
<span class="stream-card-prop" title="${t('targets.fps')}">⚡ ${target.fps || 30}</span>
<span class="stream-card-prop stream-card-prop-full" title="${t('targets.color_strip_source')}">🎞️ ${cssSummary}</span>
${bvs ? `<span class="stream-card-prop stream-card-prop-full" title="${t('targets.brightness_vs')}">🔆 ${escapeHtml(bvs.name)}</span>` : ''}
</div>
<div class="card-content">
${isProcessing ? `
@@ -688,15 +699,27 @@ export function createTargetCard(target, deviceMap, colorStripSourceMap) {
<div class="timing-total"><strong>${state.timing_total_ms}ms</strong></div>
</div>
<div class="timing-bar">
${state.timing_audio_read_ms != null ? `
<span class="timing-seg timing-audio-read" style="flex:${state.timing_audio_read_ms}" title="read ${state.timing_audio_read_ms}ms"></span>
<span class="timing-seg timing-audio-fft" style="flex:${state.timing_audio_fft_ms}" title="fft ${state.timing_audio_fft_ms}ms"></span>
<span class="timing-seg timing-audio-render" style="flex:${state.timing_audio_render_ms || 0.1}" title="render ${state.timing_audio_render_ms}ms"></span>
` : `
${state.timing_extract_ms != null ? `<span class="timing-seg timing-extract" style="flex:${state.timing_extract_ms}" title="extract ${state.timing_extract_ms}ms"></span>` : ''}
${state.timing_map_leds_ms != null ? `<span class="timing-seg timing-map" style="flex:${state.timing_map_leds_ms}" title="map ${state.timing_map_leds_ms}ms"></span>` : ''}
${state.timing_smooth_ms != null ? `<span class="timing-seg timing-smooth" style="flex:${state.timing_smooth_ms || 0.1}" title="smooth ${state.timing_smooth_ms}ms"></span>` : ''}
`}
<span class="timing-seg timing-send" style="flex:${state.timing_send_ms}" title="send ${state.timing_send_ms}ms"></span>
</div>
<div class="timing-legend">
${state.timing_audio_read_ms != null ? `
<span class="timing-legend-item"><span class="timing-dot timing-audio-read"></span>read ${state.timing_audio_read_ms}ms</span>
<span class="timing-legend-item"><span class="timing-dot timing-audio-fft"></span>fft ${state.timing_audio_fft_ms}ms</span>
<span class="timing-legend-item"><span class="timing-dot timing-audio-render"></span>render ${state.timing_audio_render_ms}ms</span>
` : `
${state.timing_extract_ms != null ? `<span class="timing-legend-item"><span class="timing-dot timing-extract"></span>extract ${state.timing_extract_ms}ms</span>` : ''}
${state.timing_map_leds_ms != null ? `<span class="timing-legend-item"><span class="timing-dot timing-map"></span>map ${state.timing_map_leds_ms}ms</span>` : ''}
${state.timing_smooth_ms != null ? `<span class="timing-legend-item"><span class="timing-dot timing-smooth"></span>smooth ${state.timing_smooth_ms}ms</span>` : ''}
`}
<span class="timing-legend-item"><span class="timing-dot timing-send"></span>send ${state.timing_send_ms}ms</span>
</div>
</div>