Compare commits

...

5 Commits

Author SHA1 Message Date
468cfa2022 Add brightness source badge to target cards, clean up FPS badge
Show brightness value source name on LED and KC target cards when
configured. Remove redundant 'fps' text from FPS badges.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-24 20:42:23 +03:00
d45e59b0e6 Add min/max value range to audio value sources
Add min_value and max_value fields to AudioValueSource so audio
brightness can be mapped to a configurable range (e.g. silence =
30% brightness floor instead of fully black).

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-24 20:41:49 +03:00
f96cd5f367 Allow multichannel audio sources as direct CSS and value source input
Add resolve_audio_source() that accepts both MultichannelAudioSource
(defaults to mono mix) and MonoAudioSource. Update CSS and brightness
value source dropdowns to show all audio sources with type badges.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-24 20:41:42 +03:00
a5d855f469 Fix provider kwargs leak for mock device fields
Pop send_latency_ms and rgbw from kwargs in WLED, Adalight, and
AmbiLED providers so mock-only fields don't leak through to
non-mock client constructors.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-24 20:41:36 +03:00
34d9495eb3 Add audio capture timing metrics to target pipeline
Instrument AudioCaptureStream with read/FFT timing and
AudioColorStripStream with render timing. Display audio-specific
timing segments (read/fft/render/send) in the target card
breakdown bar when an audio source is active.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-24 20:41:29 +03:00
19 changed files with 202 additions and 40 deletions

View File

@@ -127,6 +127,9 @@ class TargetProcessingState(BaseModel):
timing_map_leds_ms: Optional[float] = Field(None, description="LED color mapping time (ms)")
timing_smooth_ms: Optional[float] = Field(None, description="Temporal smoothing time (ms)")
timing_total_ms: Optional[float] = Field(None, description="Total processing time per frame (ms)")
timing_audio_read_ms: Optional[float] = Field(None, description="Audio device read time (ms)")
timing_audio_fft_ms: Optional[float] = Field(None, description="Audio FFT analysis time (ms)")
timing_audio_render_ms: Optional[float] = Field(None, description="Audio visualization render time (ms)")
timing_calc_colors_ms: Optional[float] = Field(None, description="Color calculation time (ms, KC targets)")
timing_broadcast_ms: Optional[float] = Field(None, description="WebSocket broadcast time (ms, KC targets)")
display_index: Optional[int] = Field(None, description="Current display index")

View File

@@ -123,6 +123,9 @@ class AudioCaptureStream:
self._smooth_spectrum_left = np.zeros(NUM_BANDS, dtype=np.float32)
self._smooth_spectrum_right = np.zeros(NUM_BANDS, dtype=np.float32)
# Per-iteration timing (written by capture thread, read by consumers)
self._last_timing: dict = {}
def start(self) -> None:
if self._running:
return
@@ -150,6 +153,10 @@ class AudioCaptureStream:
with self._lock:
return self._latest
def get_last_timing(self) -> dict:
"""Return per-iteration timing from the capture loop (ms)."""
return dict(self._last_timing)
def _capture_loop(self) -> None:
try:
import pyaudiowpatch as pyaudio
@@ -211,6 +218,7 @@ class AudioCaptureStream:
spectrum_buf_right = np.zeros(NUM_BANDS, dtype=np.float32)
while self._running:
t_read_start = time.perf_counter()
try:
raw_data = stream.read(self._chunk_size, exception_on_overflow=False)
data = np.frombuffer(raw_data, dtype=np.float32)
@@ -218,6 +226,7 @@ class AudioCaptureStream:
logger.warning(f"Audio read error: {e}")
time.sleep(0.05)
continue
t_read_end = time.perf_counter()
# Split channels and mix to mono
if channels > 1:
@@ -275,6 +284,12 @@ class AudioCaptureStream:
beat = True
beat_intensity = min(1.0, (ratio - 1.0) / 2.0)
t_fft_end = time.perf_counter()
self._last_timing = {
"read_ms": (t_read_end - t_read_start) * 1000,
"fft_ms": (t_fft_end - t_read_end) * 1000,
}
analysis = AudioAnalysis(
timestamp=time.perf_counter(),
rms=rms,

View File

@@ -17,4 +17,6 @@ class AdalightDeviceProvider(SerialDeviceProvider):
led_count = kwargs.pop("led_count", 0)
baud_rate = kwargs.pop("baud_rate", None)
kwargs.pop("use_ddp", None) # Not applicable for serial
return AdalightClient(url, led_count=led_count, baud_rate=baud_rate, **kwargs)
kwargs.pop("send_latency_ms", None)
kwargs.pop("rgbw", None)
return AdalightClient(url, led_count=led_count, baud_rate=baud_rate)

View File

@@ -17,4 +17,6 @@ class AmbiLEDDeviceProvider(SerialDeviceProvider):
led_count = kwargs.pop("led_count", 0)
baud_rate = kwargs.pop("baud_rate", None)
kwargs.pop("use_ddp", None)
return AmbiLEDClient(url, led_count=led_count, baud_rate=baud_rate, **kwargs)
kwargs.pop("send_latency_ms", None)
kwargs.pop("rgbw", None)
return AmbiLEDClient(url, led_count=led_count, baud_rate=baud_rate)

View File

@@ -36,6 +36,8 @@ class WLEDDeviceProvider(LEDDeviceProvider):
from wled_controller.core.devices.wled_client import WLEDClient
kwargs.pop("led_count", None)
kwargs.pop("baud_rate", None)
kwargs.pop("send_latency_ms", None)
kwargs.pop("rgbw", None)
return WLEDClient(url, **kwargs)
async def check_health(self, url: str, http_client, prev_health=None) -> DeviceHealth:

View File

@@ -45,6 +45,9 @@ class AudioColorStripStream(ColorStripStream):
self._thread: Optional[threading.Thread] = None
self._fps = 30
# Per-frame timing (read by WledTargetProcessor via get_last_timing())
self._last_timing: dict = {}
# Beat pulse persistent state
self._pulse_brightness = 0.0
@@ -73,7 +76,7 @@ class AudioColorStripStream(ColorStripStream):
self._audio_source_id = audio_source_id
if audio_source_id and self._audio_source_store:
try:
device_index, is_loopback, channel = self._audio_source_store.resolve_mono_source(audio_source_id)
device_index, is_loopback, channel = self._audio_source_store.resolve_audio_source(audio_source_id)
self._audio_device_index = device_index
self._audio_loopback = is_loopback
self._audio_channel = channel
@@ -147,6 +150,9 @@ class AudioColorStripStream(ColorStripStream):
with self._colors_lock:
return self._colors
def get_last_timing(self) -> dict:
return dict(self._last_timing)
def update_source(self, source) -> None:
from wled_controller.storage.color_strip_source import AudioColorStripSource
if isinstance(source, AudioColorStripSource):
@@ -204,11 +210,24 @@ class AudioColorStripStream(ColorStripStream):
analysis = self._audio_stream.get_latest_analysis()
render_fn = renderers.get(self._visualization_mode, self._render_spectrum)
t_render = time.perf_counter()
render_fn(buf, n, analysis)
render_ms = (time.perf_counter() - t_render) * 1000
with self._colors_lock:
self._colors = buf
# Pull capture-side timing and combine with render timing
capture_timing = self._audio_stream.get_last_timing() if self._audio_stream else {}
read_ms = capture_timing.get("read_ms", 0)
fft_ms = capture_timing.get("fft_ms", 0)
self._last_timing = {
"audio_read_ms": read_ms,
"audio_fft_ms": fft_ms,
"audio_render_ms": render_ms,
"total_ms": read_ms + fft_ms + render_ms,
}
elapsed = time.perf_counter() - loop_start
time.sleep(max(frame_time - elapsed, 0.001))

View File

@@ -156,6 +156,8 @@ class AudioValueStream(ValueStream):
mode: str = "rms",
sensitivity: float = 1.0,
smoothing: float = 0.3,
min_value: float = 0.0,
max_value: float = 1.0,
audio_capture_manager: Optional["AudioCaptureManager"] = None,
audio_source_store: Optional["AudioSourceStore"] = None,
):
@@ -163,6 +165,8 @@ class AudioValueStream(ValueStream):
self._mode = mode
self._sensitivity = sensitivity
self._smoothing = smoothing
self._min = min_value
self._max = max_value
self._audio_capture_manager = audio_capture_manager
self._audio_source_store = audio_source_store
@@ -178,11 +182,11 @@ class AudioValueStream(ValueStream):
self._resolve_audio_source()
def _resolve_audio_source(self) -> None:
"""Resolve mono audio source to device index / channel."""
"""Resolve audio source (mono or multichannel) to device index / channel."""
if self._audio_source_id and self._audio_source_store:
try:
device_index, is_loopback, channel = (
self._audio_source_store.resolve_mono_source(self._audio_source_id)
self._audio_source_store.resolve_audio_source(self._audio_source_id)
)
self._audio_device_index = device_index
self._audio_loopback = is_loopback
@@ -210,7 +214,7 @@ class AudioValueStream(ValueStream):
def get_value(self) -> float:
if self._audio_stream is None:
return 0.0
return self._min
analysis = self._audio_stream.get_latest_analysis()
if analysis is None:
@@ -222,7 +226,10 @@ class AudioValueStream(ValueStream):
# Temporal smoothing
smoothed = self._smoothing * self._prev_value + (1.0 - self._smoothing) * raw
self._prev_value = smoothed
return max(0.0, min(1.0, smoothed))
# Map to [min, max]
mapped = self._min + smoothed * (self._max - self._min)
return max(0.0, min(1.0, mapped))
def _extract_raw(self, analysis) -> float:
"""Extract raw scalar from audio analysis based on mode."""
@@ -265,6 +272,8 @@ class AudioValueStream(ValueStream):
self._mode = source.mode
self._sensitivity = source.sensitivity
self._smoothing = source.smoothing
self._min = source.min_value
self._max = source.max_value
# If audio source changed, re-resolve and swap capture stream
if source.audio_source_id != old_source_id:
@@ -598,6 +607,8 @@ class ValueStreamManager:
mode=source.mode,
sensitivity=source.sensitivity,
smoothing=source.smoothing,
min_value=source.min_value,
max_value=source.max_value,
audio_capture_manager=self._audio_capture_manager,
audio_source_store=self._audio_source_store,
)

View File

@@ -300,9 +300,19 @@ class WledTargetProcessor(TargetProcessor):
css_timing = self._css_stream.get_last_timing()
send_ms = round(metrics.timing_send_ms, 1) if self._is_running else None
# Picture source timing
extract_ms = round(css_timing.get("extract_ms", 0), 1) if css_timing else None
map_ms = round(css_timing.get("map_leds_ms", 0), 1) if css_timing else None
smooth_ms = round(css_timing.get("smooth_ms", 0), 1) if css_timing else None
# Audio source timing (keyed on audio_render_ms presence)
is_audio_source = css_timing and "audio_render_ms" in css_timing
audio_read_ms = round(css_timing.get("audio_read_ms", 0), 1) if is_audio_source else None
audio_fft_ms = round(css_timing.get("audio_fft_ms", 0), 1) if is_audio_source else None
audio_render_ms = round(css_timing.get("audio_render_ms", 0), 1) if is_audio_source else None
# Suppress picture timing when audio source is active
if is_audio_source:
extract_ms = map_ms = smooth_ms = None
if css_timing:
total_ms = round(css_timing.get("total_ms", 0) + metrics.timing_send_ms, 1)
elif self._is_running and send_ms is not None:
@@ -326,6 +336,9 @@ class WledTargetProcessor(TargetProcessor):
"timing_extract_ms": extract_ms,
"timing_map_leds_ms": map_ms,
"timing_smooth_ms": smooth_ms,
"timing_audio_read_ms": audio_read_ms,
"timing_audio_fft_ms": audio_fft_ms,
"timing_audio_render_ms": audio_render_ms,
"timing_total_ms": total_ms,
"display_index": self._resolved_display_index,
"overlay_active": self._overlay_active,

View File

@@ -553,6 +553,9 @@ ul.section-tip li {
.timing-map { background: #FF9800; }
.timing-smooth { background: #2196F3; }
.timing-send { background: #E91E63; }
.timing-audio-read { background: #4CAF50; }
.timing-audio-fft { background: #FF9800; }
.timing-audio-render { background: #2196F3; }
.timing-legend {
display: flex;
@@ -580,6 +583,9 @@ ul.section-tip li {
.timing-dot.timing-map { background: #FF9800; }
.timing-dot.timing-smooth { background: #2196F3; }
.timing-dot.timing-send { background: #E91E63; }
.timing-dot.timing-audio-read { background: #4CAF50; }
.timing-dot.timing-audio-fft { background: #FF9800; }
.timing-dot.timing-audio-render { background: #2196F3; }
@media (max-width: 768px) {
.displays-grid,

View File

@@ -517,13 +517,14 @@ async function _loadAudioSources() {
const select = document.getElementById('css-editor-audio-source');
if (!select) return;
try {
const resp = await fetchWithAuth('/audio-sources?source_type=mono');
const resp = await fetchWithAuth('/audio-sources');
if (!resp.ok) throw new Error('fetch failed');
const data = await resp.json();
const sources = data.sources || [];
select.innerHTML = sources.map(s =>
`<option value="${s.id}">${escapeHtml(s.name)}</option>`
).join('');
select.innerHTML = sources.map(s => {
const badge = s.source_type === 'multichannel' ? ' [multichannel]' : ' [mono]';
return `<option value="${s.id}">${escapeHtml(s.name)}${badge}</option>`;
}).join('');
if (sources.length === 0) {
select.innerHTML = '';
}

View File

@@ -35,7 +35,7 @@ class KCEditorModal extends Modal {
const kcEditorModal = new KCEditorModal();
export function createKCTargetCard(target, sourceMap, patternTemplateMap) {
export function createKCTargetCard(target, sourceMap, patternTemplateMap, valueSourceMap) {
const state = target.state || {};
const metrics = target.metrics || {};
const kcSettings = target.key_colors_settings || {};
@@ -50,6 +50,9 @@ export function createKCTargetCard(target, sourceMap, patternTemplateMap) {
const patternName = patTmpl ? patTmpl.name : 'No pattern';
const rectCount = patTmpl ? (patTmpl.rectangles || []).length : 0;
const bvsId = kcSettings.brightness_value_source_id || '';
const bvs = bvsId && valueSourceMap ? valueSourceMap[bvsId] : null;
// Render initial color swatches from pre-fetched REST data
let swatchesHtml = '';
const latestColors = target.latestColors && target.latestColors.colors;
@@ -77,7 +80,8 @@ export function createKCTargetCard(target, sourceMap, patternTemplateMap) {
<span class="stream-card-prop" title="${t('kc.source')}">📺 ${escapeHtml(sourceName)}</span>
<span class="stream-card-prop" title="${t('kc.pattern_template')}">📄 ${escapeHtml(patternName)}</span>
<span class="stream-card-prop">▭ ${rectCount} rect${rectCount !== 1 ? 's' : ''}</span>
<span class="stream-card-prop" title="${t('kc.fps')}">⚡ ${kcSettings.fps ?? 10} fps</span>
<span class="stream-card-prop" title="${t('kc.fps')}">⚡ ${kcSettings.fps ?? 10}</span>
${bvs ? `<span class="stream-card-prop stream-card-prop-full" title="${t('targets.brightness_vs')}">🔆 ${escapeHtml(bvs.name)}</span>` : ''}
</div>
<div class="brightness-control" data-kc-brightness-wrap="${target.id}">
<input type="range" class="brightness-slider" min="0" max="255"

View File

@@ -387,13 +387,14 @@ export async function loadTargetsTab() {
_loadTargetsLock = true;
try {
// Fetch devices, targets, CSS sources, picture sources, and pattern templates in parallel
const [devicesResp, targetsResp, cssResp, psResp, patResp] = await Promise.all([
// Fetch devices, targets, CSS sources, picture sources, pattern templates, and value sources in parallel
const [devicesResp, targetsResp, cssResp, psResp, patResp, vsResp] = await Promise.all([
fetchWithAuth('/devices'),
fetchWithAuth('/picture-targets'),
fetchWithAuth('/color-strip-sources').catch(() => null),
fetchWithAuth('/picture-sources').catch(() => null),
fetchWithAuth('/pattern-templates').catch(() => null),
fetchWithAuth('/value-sources').catch(() => null),
]);
const devicesData = await devicesResp.json();
@@ -422,6 +423,12 @@ export async function loadTargetsTab() {
patternTemplates.forEach(pt => { patternTemplateMap[pt.id] = pt; });
}
let valueSourceMap = {};
if (vsResp && vsResp.ok) {
const vsData = await vsResp.json();
(vsData.sources || []).forEach(s => { valueSourceMap[s.id] = s; });
}
// Fetch all device states, target states, and target metrics in batch
const [batchDevStatesResp, batchTgtStatesResp, batchTgtMetricsResp] = await Promise.all([
fetchWithAuth('/devices/batch/states'),
@@ -478,8 +485,8 @@ export async function loadTargetsTab() {
// Build items arrays for each section
const deviceItems = ledDevices.map(d => ({ key: d.id, html: createDeviceCard(d) }));
const cssItems = Object.values(colorStripSourceMap).map(s => ({ key: s.id, html: createColorStripCard(s, pictureSourceMap) }));
const ledTargetItems = ledTargets.map(t => ({ key: t.id, html: createTargetCard(t, deviceMap, colorStripSourceMap) }));
const kcTargetItems = kcTargets.map(t => ({ key: t.id, html: createKCTargetCard(t, pictureSourceMap, patternTemplateMap) }));
const ledTargetItems = ledTargets.map(t => ({ key: t.id, html: createTargetCard(t, deviceMap, colorStripSourceMap, valueSourceMap) }));
const kcTargetItems = kcTargets.map(t => ({ key: t.id, html: createKCTargetCard(t, pictureSourceMap, patternTemplateMap, valueSourceMap) }));
const patternItems = patternTemplates.map(pt => ({ key: pt.id, html: createPatternTemplateCard(pt) }));
// Track which target cards were replaced/added (need chart re-init)
@@ -630,7 +637,7 @@ function _cssSourceName(cssId, colorStripSourceMap) {
return css ? escapeHtml(css.name) : escapeHtml(cssId);
}
export function createTargetCard(target, deviceMap, colorStripSourceMap) {
export function createTargetCard(target, deviceMap, colorStripSourceMap, valueSourceMap) {
const state = target.state || {};
const metrics = target.metrics || {};
@@ -642,6 +649,9 @@ export function createTargetCard(target, deviceMap, colorStripSourceMap) {
const cssId = target.color_strip_source_id || '';
const cssSummary = _cssSourceName(cssId, colorStripSourceMap);
const bvsId = target.brightness_value_source_id || '';
const bvs = bvsId && valueSourceMap ? valueSourceMap[bvsId] : null;
// Determine if overlay is available (picture-based CSS)
const css = cssId ? colorStripSourceMap[cssId] : null;
const overlayAvailable = !css || css.source_type === 'picture';
@@ -667,8 +677,9 @@ export function createTargetCard(target, deviceMap, colorStripSourceMap) {
</div>
<div class="stream-card-props">
<span class="stream-card-prop" title="${t('targets.device')}">💡 ${escapeHtml(deviceName)}</span>
<span class="stream-card-prop" title="${t('targets.fps')}">⚡ ${target.fps || 30} fps</span>
<span class="stream-card-prop" title="${t('targets.fps')}">⚡ ${target.fps || 30}</span>
<span class="stream-card-prop stream-card-prop-full" title="${t('targets.color_strip_source')}">🎞️ ${cssSummary}</span>
${bvs ? `<span class="stream-card-prop stream-card-prop-full" title="${t('targets.brightness_vs')}">🔆 ${escapeHtml(bvs.name)}</span>` : ''}
</div>
<div class="card-content">
${isProcessing ? `
@@ -688,15 +699,27 @@ export function createTargetCard(target, deviceMap, colorStripSourceMap) {
<div class="timing-total"><strong>${state.timing_total_ms}ms</strong></div>
</div>
<div class="timing-bar">
${state.timing_audio_read_ms != null ? `
<span class="timing-seg timing-audio-read" style="flex:${state.timing_audio_read_ms}" title="read ${state.timing_audio_read_ms}ms"></span>
<span class="timing-seg timing-audio-fft" style="flex:${state.timing_audio_fft_ms}" title="fft ${state.timing_audio_fft_ms}ms"></span>
<span class="timing-seg timing-audio-render" style="flex:${state.timing_audio_render_ms || 0.1}" title="render ${state.timing_audio_render_ms}ms"></span>
` : `
${state.timing_extract_ms != null ? `<span class="timing-seg timing-extract" style="flex:${state.timing_extract_ms}" title="extract ${state.timing_extract_ms}ms"></span>` : ''}
${state.timing_map_leds_ms != null ? `<span class="timing-seg timing-map" style="flex:${state.timing_map_leds_ms}" title="map ${state.timing_map_leds_ms}ms"></span>` : ''}
${state.timing_smooth_ms != null ? `<span class="timing-seg timing-smooth" style="flex:${state.timing_smooth_ms || 0.1}" title="smooth ${state.timing_smooth_ms}ms"></span>` : ''}
`}
<span class="timing-seg timing-send" style="flex:${state.timing_send_ms}" title="send ${state.timing_send_ms}ms"></span>
</div>
<div class="timing-legend">
${state.timing_audio_read_ms != null ? `
<span class="timing-legend-item"><span class="timing-dot timing-audio-read"></span>read ${state.timing_audio_read_ms}ms</span>
<span class="timing-legend-item"><span class="timing-dot timing-audio-fft"></span>fft ${state.timing_audio_fft_ms}ms</span>
<span class="timing-legend-item"><span class="timing-dot timing-audio-render"></span>render ${state.timing_audio_render_ms}ms</span>
` : `
${state.timing_extract_ms != null ? `<span class="timing-legend-item"><span class="timing-dot timing-extract"></span>extract ${state.timing_extract_ms}ms</span>` : ''}
${state.timing_map_leds_ms != null ? `<span class="timing-legend-item"><span class="timing-dot timing-map"></span>map ${state.timing_map_leds_ms}ms</span>` : ''}
${state.timing_smooth_ms != null ? `<span class="timing-legend-item"><span class="timing-dot timing-smooth"></span>smooth ${state.timing_smooth_ms}ms</span>` : ''}
`}
<span class="timing-legend-item"><span class="timing-dot timing-send"></span>send ${state.timing_send_ms}ms</span>
</div>
</div>

View File

@@ -79,6 +79,8 @@ export async function showValueSourceModal(editData) {
document.getElementById('value-source-mode').value = editData.mode || 'rms';
_setSlider('value-source-sensitivity', editData.sensitivity ?? 1.0);
_setSlider('value-source-smoothing', editData.smoothing ?? 0.3);
_setSlider('value-source-audio-min-value', editData.min_value ?? 0);
_setSlider('value-source-audio-max-value', editData.max_value ?? 1);
} else if (editData.source_type === 'adaptive_time') {
_populateScheduleUI(editData.schedule);
_setSlider('value-source-adaptive-min-value', editData.min_value ?? 0);
@@ -105,6 +107,8 @@ export async function showValueSourceModal(editData) {
document.getElementById('value-source-mode').value = 'rms';
_setSlider('value-source-sensitivity', 1.0);
_setSlider('value-source-smoothing', 0.3);
_setSlider('value-source-audio-min-value', 0);
_setSlider('value-source-audio-max-value', 1);
// Adaptive defaults
_populateScheduleUI([]);
_populatePictureSourceDropdown('');
@@ -176,6 +180,8 @@ export async function saveValueSource() {
payload.mode = document.getElementById('value-source-mode').value;
payload.sensitivity = parseFloat(document.getElementById('value-source-sensitivity').value);
payload.smoothing = parseFloat(document.getElementById('value-source-smoothing').value);
payload.min_value = parseFloat(document.getElementById('value-source-audio-min-value').value);
payload.max_value = parseFloat(document.getElementById('value-source-audio-max-value').value);
} else if (sourceType === 'adaptive_time') {
payload.schedule = _getScheduleFromUI();
if (payload.schedule.length < 2) {
@@ -270,6 +276,7 @@ export function createValueSourceCard(src) {
propsHtml = `
<span class="stream-card-prop" title="${escapeHtml(t('value_source.audio_source'))}">${escapeHtml(audioName)}</span>
<span class="stream-card-prop">${modeLabel.toUpperCase()}</span>
<span class="stream-card-prop">${src.min_value ?? 0}–${src.max_value ?? 1}</span>
`;
} else if (src.source_type === 'adaptive_time') {
const pts = (src.schedule || []).length;
@@ -315,10 +322,10 @@ function _setSlider(id, value) {
function _populateAudioSourceDropdown(selectedId) {
const select = document.getElementById('value-source-audio-source');
if (!select) return;
const mono = _cachedAudioSources.filter(s => s.source_type === 'mono');
select.innerHTML = mono.map(s =>
`<option value="${s.id}"${s.id === selectedId ? ' selected' : ''}>${escapeHtml(s.name)}</option>`
).join('');
select.innerHTML = _cachedAudioSources.map(s => {
const badge = s.source_type === 'multichannel' ? ' [multichannel]' : ' [mono]';
return `<option value="${s.id}"${s.id === selectedId ? ' selected' : ''}>${escapeHtml(s.name)}${badge}</option>`;
}).join('');
}
// ── Adaptive helpers ──────────────────────────────────────────

View File

@@ -699,7 +699,7 @@
"color_strip.audio.viz.beat_pulse": "Beat Pulse",
"color_strip.audio.viz.vu_meter": "VU Meter",
"color_strip.audio.source": "Audio Source:",
"color_strip.audio.source.hint": "Mono audio source that provides audio data for this visualization. Create and manage audio sources in the Sources tab.",
"color_strip.audio.source.hint": "Audio source for this visualization. Can be a multichannel (device) or mono (single channel) source. Create and manage audio sources in the Sources tab.",
"color_strip.audio.sensitivity": "Sensitivity:",
"color_strip.audio.sensitivity.hint": "Gain multiplier for audio levels. Higher values make LEDs react to quieter sounds.",
"color_strip.audio.smoothing": "Smoothing:",
@@ -808,7 +808,7 @@
"value_source.max_value": "Max Value:",
"value_source.max_value.hint": "Maximum output of the waveform cycle",
"value_source.audio_source": "Audio Source:",
"value_source.audio_source.hint": "Mono audio source to read audio levels from",
"value_source.audio_source.hint": "Audio source to read audio levels from (multichannel or mono)",
"value_source.mode": "Mode:",
"value_source.mode.hint": "RMS measures average volume. Peak tracks loudest moments. Beat triggers on rhythm.",
"value_source.mode.rms": "RMS (Volume)",
@@ -818,6 +818,10 @@
"value_source.sensitivity.hint": "Gain multiplier for the audio signal (higher = more reactive)",
"value_source.smoothing": "Smoothing:",
"value_source.smoothing.hint": "Temporal smoothing (0 = instant response, 1 = very smooth/slow)",
"value_source.audio_min_value": "Min Value:",
"value_source.audio_min_value.hint": "Output when audio is silent (e.g. 0.3 = 30% brightness floor)",
"value_source.audio_max_value": "Max Value:",
"value_source.audio_max_value.hint": "Output at maximum audio level",
"value_source.schedule": "Schedule:",
"value_source.schedule.hint": "Define at least 2 time points. Brightness interpolates linearly between them, wrapping at midnight.",
"value_source.schedule.add": "+ Add Point",

View File

@@ -699,7 +699,7 @@
"color_strip.audio.viz.beat_pulse": "Пульс бита",
"color_strip.audio.viz.vu_meter": "VU-метр",
"color_strip.audio.source": "Аудиоисточник:",
"color_strip.audio.source.hint": "Моно-аудиоисточник, предоставляющий аудиоданные для визуализации. Создавайте и управляйте аудиоисточниками на вкладке Источники.",
"color_strip.audio.source.hint": "Аудиоисточник для визуализации. Может быть многоканальным (устройство) или моно (один канал). Создавайте и управляйте аудиоисточниками на вкладке Источники.",
"color_strip.audio.sensitivity": "Чувствительность:",
"color_strip.audio.sensitivity.hint": "Множитель усиления аудиосигнала. Более высокие значения делают LED чувствительнее к тихим звукам.",
"color_strip.audio.smoothing": "Сглаживание:",
@@ -808,7 +808,7 @@
"value_source.max_value": "Макс. значение:",
"value_source.max_value.hint": "Максимальный выход цикла волны",
"value_source.audio_source": "Аудиоисточник:",
"value_source.audio_source.hint": "Моно-аудиоисточник для считывания уровня звука",
"value_source.audio_source.hint": "Аудиоисточник для считывания уровня звука (многоканальный или моно)",
"value_source.mode": "Режим:",
"value_source.mode.hint": "RMS измеряет среднюю громкость. Пик отслеживает самые громкие моменты. Бит реагирует на ритм.",
"value_source.mode.rms": "RMS (Громкость)",
@@ -818,6 +818,10 @@
"value_source.sensitivity.hint": "Множитель усиления аудиосигнала (выше = более реактивный)",
"value_source.smoothing": "Сглаживание:",
"value_source.smoothing.hint": "Временное сглаживание (0 = мгновенный отклик, 1 = очень плавный/медленный)",
"value_source.audio_min_value": "Мин. значение:",
"value_source.audio_min_value.hint": "Выход при тишине (напр. 0.3 = минимум 30% яркости)",
"value_source.audio_max_value": "Макс. значение:",
"value_source.audio_max_value.hint": "Выход при максимальном уровне звука",
"value_source.schedule": "Расписание:",
"value_source.schedule.hint": "Определите минимум 2 временные точки. Яркость линейно интерполируется между ними, с переходом через полночь.",
"value_source.schedule.add": "+ Добавить точку",

View File

@@ -210,25 +210,33 @@ class AudioSourceStore:
# ── Resolution ───────────────────────────────────────────────────
def resolve_mono_source(self, mono_id: str) -> Tuple[int, bool, str]:
"""Resolve a mono audio source to (device_index, is_loopback, channel).
def resolve_audio_source(self, source_id: str) -> Tuple[int, bool, str]:
"""Resolve any audio source to (device_index, is_loopback, channel).
Follows the reference chain: mono → multichannel.
Accepts both MultichannelAudioSource (defaults to "mono" channel)
and MonoAudioSource (follows reference chain to parent multichannel).
Raises:
ValueError: If source not found or chain is broken
"""
mono = self.get_source(mono_id)
if not isinstance(mono, MonoAudioSource):
raise ValueError(f"Audio source {mono_id} is not a mono source")
source = self.get_source(source_id)
parent = self.get_source(mono.audio_source_id)
if not isinstance(parent, MultichannelAudioSource):
raise ValueError(
f"Mono source {mono_id} references non-multichannel source {mono.audio_source_id}"
)
if isinstance(source, MultichannelAudioSource):
return source.device_index, source.is_loopback, "mono"
return parent.device_index, parent.is_loopback, mono.channel
if isinstance(source, MonoAudioSource):
parent = self.get_source(source.audio_source_id)
if not isinstance(parent, MultichannelAudioSource):
raise ValueError(
f"Mono source {source_id} references non-multichannel source {source.audio_source_id}"
)
return parent.device_index, parent.is_loopback, source.channel
raise ValueError(f"Audio source {source_id} is not a valid audio source")
def resolve_mono_source(self, mono_id: str) -> Tuple[int, bool, str]:
"""Backward-compatible wrapper for resolve_audio_source()."""
return self.resolve_audio_source(mono_id)
# ── Migration ────────────────────────────────────────────────────

View File

@@ -91,6 +91,8 @@ class ValueSource:
mode=data.get("mode") or "rms",
sensitivity=float(data.get("sensitivity") or 1.0),
smoothing=float(data.get("smoothing") or 0.3),
min_value=float(data.get("min_value") or 0.0),
max_value=float(data["max_value"]) if data.get("max_value") is not None else 1.0,
)
if source_type == "adaptive_time":
@@ -167,10 +169,12 @@ class AudioValueSource(ValueSource):
into a scalar value for brightness modulation.
"""
audio_source_id: str = "" # references a MonoAudioSource
audio_source_id: str = "" # references an audio source (mono or multichannel)
mode: str = "rms" # rms | peak | beat
sensitivity: float = 1.0 # gain multiplier (0.1–5.0)
smoothing: float = 0.3 # temporal smoothing (0.0–1.0)
min_value: float = 0.0 # minimum output (0.0–1.0)
max_value: float = 1.0 # maximum output (0.0–1.0)
def to_dict(self) -> dict:
d = super().to_dict()
@@ -178,6 +182,8 @@ class AudioValueSource(ValueSource):
d["mode"] = self.mode
d["sensitivity"] = self.sensitivity
d["smoothing"] = self.smoothing
d["min_value"] = self.min_value
d["max_value"] = self.max_value
return d

View File

@@ -142,6 +142,8 @@ class ValueSourceStore:
mode=mode or "rms",
sensitivity=sensitivity if sensitivity is not None else 1.0,
smoothing=smoothing if smoothing is not None else 0.3,
min_value=min_value if min_value is not None else 0.0,
max_value=max_value if max_value is not None else 1.0,
)
elif source_type == "adaptive_time":
schedule_data = schedule or []
@@ -225,6 +227,10 @@ class ValueSourceStore:
source.sensitivity = sensitivity
if smoothing is not None:
source.smoothing = smoothing
if min_value is not None:
source.min_value = min_value
if max_value is not None:
source.max_value = max_value
elif isinstance(source, AdaptiveValueSource):
if schedule is not None:
if source.source_type == "adaptive_time" and len(schedule) < 2:

View File

@@ -160,6 +160,32 @@
<span id="value-source-smoothing-display">0.3</span>
</div>
</div>
<div class="form-group">
<div class="label-row">
<label for="value-source-audio-min-value" data-i18n="value_source.audio_min_value">Min Value:</label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?" data-i18n-aria-label="aria.hint">?</button>
</div>
<small class="input-hint" style="display:none" data-i18n="value_source.audio_min_value.hint">Output when audio is silent (e.g. 0.3 = 30% brightness floor)</small>
<div class="range-with-value">
<input type="range" id="value-source-audio-min-value" min="0" max="1" step="0.01" value="0"
oninput="document.getElementById('value-source-audio-min-value-display').textContent = this.value">
<span id="value-source-audio-min-value-display">0</span>
</div>
</div>
<div class="form-group">
<div class="label-row">
<label for="value-source-audio-max-value" data-i18n="value_source.audio_max_value">Max Value:</label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?" data-i18n-aria-label="aria.hint">?</button>
</div>
<small class="input-hint" style="display:none" data-i18n="value_source.audio_max_value.hint">Output at maximum audio level</small>
<div class="range-with-value">
<input type="range" id="value-source-audio-max-value" min="0" max="1" step="0.01" value="1"
oninput="document.getElementById('value-source-audio-max-value-display').textContent = this.value">
<span id="value-source-audio-max-value-display">1</span>
</div>
</div>
</div>
<!-- Adaptive Time of Day fields -->