Allow multichannel audio sources as direct CSS and value source input

Add resolve_audio_source() that accepts both MultichannelAudioSource
(defaults to mono mix) and MonoAudioSource. Update CSS and brightness
value source dropdowns to show all audio sources with type badges.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-24 20:41:42 +03:00
parent a5d855f469
commit f96cd5f367
6 changed files with 63 additions and 28 deletions

View File

@@ -156,6 +156,8 @@ class AudioValueStream(ValueStream):
mode: str = "rms", mode: str = "rms",
sensitivity: float = 1.0, sensitivity: float = 1.0,
smoothing: float = 0.3, smoothing: float = 0.3,
min_value: float = 0.0,
max_value: float = 1.0,
audio_capture_manager: Optional["AudioCaptureManager"] = None, audio_capture_manager: Optional["AudioCaptureManager"] = None,
audio_source_store: Optional["AudioSourceStore"] = None, audio_source_store: Optional["AudioSourceStore"] = None,
): ):
@@ -163,6 +165,8 @@ class AudioValueStream(ValueStream):
self._mode = mode self._mode = mode
self._sensitivity = sensitivity self._sensitivity = sensitivity
self._smoothing = smoothing self._smoothing = smoothing
self._min = min_value
self._max = max_value
self._audio_capture_manager = audio_capture_manager self._audio_capture_manager = audio_capture_manager
self._audio_source_store = audio_source_store self._audio_source_store = audio_source_store
@@ -178,11 +182,11 @@ class AudioValueStream(ValueStream):
self._resolve_audio_source() self._resolve_audio_source()
def _resolve_audio_source(self) -> None: def _resolve_audio_source(self) -> None:
"""Resolve mono audio source to device index / channel.""" """Resolve audio source (mono or multichannel) to device index / channel."""
if self._audio_source_id and self._audio_source_store: if self._audio_source_id and self._audio_source_store:
try: try:
device_index, is_loopback, channel = ( device_index, is_loopback, channel = (
self._audio_source_store.resolve_mono_source(self._audio_source_id) self._audio_source_store.resolve_audio_source(self._audio_source_id)
) )
self._audio_device_index = device_index self._audio_device_index = device_index
self._audio_loopback = is_loopback self._audio_loopback = is_loopback
@@ -210,7 +214,7 @@ class AudioValueStream(ValueStream):
def get_value(self) -> float: def get_value(self) -> float:
if self._audio_stream is None: if self._audio_stream is None:
return 0.0 return self._min
analysis = self._audio_stream.get_latest_analysis() analysis = self._audio_stream.get_latest_analysis()
if analysis is None: if analysis is None:
@@ -222,7 +226,10 @@ class AudioValueStream(ValueStream):
# Temporal smoothing # Temporal smoothing
smoothed = self._smoothing * self._prev_value + (1.0 - self._smoothing) * raw smoothed = self._smoothing * self._prev_value + (1.0 - self._smoothing) * raw
self._prev_value = smoothed self._prev_value = smoothed
return max(0.0, min(1.0, smoothed))
# Map to [min, max]
mapped = self._min + smoothed * (self._max - self._min)
return max(0.0, min(1.0, mapped))
def _extract_raw(self, analysis) -> float: def _extract_raw(self, analysis) -> float:
"""Extract raw scalar from audio analysis based on mode.""" """Extract raw scalar from audio analysis based on mode."""
@@ -265,6 +272,8 @@ class AudioValueStream(ValueStream):
self._mode = source.mode self._mode = source.mode
self._sensitivity = source.sensitivity self._sensitivity = source.sensitivity
self._smoothing = source.smoothing self._smoothing = source.smoothing
self._min = source.min_value
self._max = source.max_value
# If audio source changed, re-resolve and swap capture stream # If audio source changed, re-resolve and swap capture stream
if source.audio_source_id != old_source_id: if source.audio_source_id != old_source_id:
@@ -598,6 +607,8 @@ class ValueStreamManager:
mode=source.mode, mode=source.mode,
sensitivity=source.sensitivity, sensitivity=source.sensitivity,
smoothing=source.smoothing, smoothing=source.smoothing,
min_value=source.min_value,
max_value=source.max_value,
audio_capture_manager=self._audio_capture_manager, audio_capture_manager=self._audio_capture_manager,
audio_source_store=self._audio_source_store, audio_source_store=self._audio_source_store,
) )

View File

@@ -517,13 +517,14 @@ async function _loadAudioSources() {
const select = document.getElementById('css-editor-audio-source'); const select = document.getElementById('css-editor-audio-source');
if (!select) return; if (!select) return;
try { try {
const resp = await fetchWithAuth('/audio-sources?source_type=mono'); const resp = await fetchWithAuth('/audio-sources');
if (!resp.ok) throw new Error('fetch failed'); if (!resp.ok) throw new Error('fetch failed');
const data = await resp.json(); const data = await resp.json();
const sources = data.sources || []; const sources = data.sources || [];
select.innerHTML = sources.map(s => select.innerHTML = sources.map(s => {
`<option value="${s.id}">${escapeHtml(s.name)}</option>` const badge = s.source_type === 'multichannel' ? ' [multichannel]' : ' [mono]';
).join(''); return `<option value="${s.id}">${escapeHtml(s.name)}${badge}</option>`;
}).join('');
if (sources.length === 0) { if (sources.length === 0) {
select.innerHTML = ''; select.innerHTML = '';
} }

View File

@@ -79,6 +79,8 @@ export async function showValueSourceModal(editData) {
document.getElementById('value-source-mode').value = editData.mode || 'rms'; document.getElementById('value-source-mode').value = editData.mode || 'rms';
_setSlider('value-source-sensitivity', editData.sensitivity ?? 1.0); _setSlider('value-source-sensitivity', editData.sensitivity ?? 1.0);
_setSlider('value-source-smoothing', editData.smoothing ?? 0.3); _setSlider('value-source-smoothing', editData.smoothing ?? 0.3);
_setSlider('value-source-audio-min-value', editData.min_value ?? 0);
_setSlider('value-source-audio-max-value', editData.max_value ?? 1);
} else if (editData.source_type === 'adaptive_time') { } else if (editData.source_type === 'adaptive_time') {
_populateScheduleUI(editData.schedule); _populateScheduleUI(editData.schedule);
_setSlider('value-source-adaptive-min-value', editData.min_value ?? 0); _setSlider('value-source-adaptive-min-value', editData.min_value ?? 0);
@@ -105,6 +107,8 @@ export async function showValueSourceModal(editData) {
document.getElementById('value-source-mode').value = 'rms'; document.getElementById('value-source-mode').value = 'rms';
_setSlider('value-source-sensitivity', 1.0); _setSlider('value-source-sensitivity', 1.0);
_setSlider('value-source-smoothing', 0.3); _setSlider('value-source-smoothing', 0.3);
_setSlider('value-source-audio-min-value', 0);
_setSlider('value-source-audio-max-value', 1);
// Adaptive defaults // Adaptive defaults
_populateScheduleUI([]); _populateScheduleUI([]);
_populatePictureSourceDropdown(''); _populatePictureSourceDropdown('');
@@ -176,6 +180,8 @@ export async function saveValueSource() {
payload.mode = document.getElementById('value-source-mode').value; payload.mode = document.getElementById('value-source-mode').value;
payload.sensitivity = parseFloat(document.getElementById('value-source-sensitivity').value); payload.sensitivity = parseFloat(document.getElementById('value-source-sensitivity').value);
payload.smoothing = parseFloat(document.getElementById('value-source-smoothing').value); payload.smoothing = parseFloat(document.getElementById('value-source-smoothing').value);
payload.min_value = parseFloat(document.getElementById('value-source-audio-min-value').value);
payload.max_value = parseFloat(document.getElementById('value-source-audio-max-value').value);
} else if (sourceType === 'adaptive_time') { } else if (sourceType === 'adaptive_time') {
payload.schedule = _getScheduleFromUI(); payload.schedule = _getScheduleFromUI();
if (payload.schedule.length < 2) { if (payload.schedule.length < 2) {
@@ -270,6 +276,7 @@ export function createValueSourceCard(src) {
propsHtml = ` propsHtml = `
<span class="stream-card-prop" title="${escapeHtml(t('value_source.audio_source'))}">${escapeHtml(audioName)}</span> <span class="stream-card-prop" title="${escapeHtml(t('value_source.audio_source'))}">${escapeHtml(audioName)}</span>
<span class="stream-card-prop">${modeLabel.toUpperCase()}</span> <span class="stream-card-prop">${modeLabel.toUpperCase()}</span>
<span class="stream-card-prop">${src.min_value ?? 0}${src.max_value ?? 1}</span>
`; `;
} else if (src.source_type === 'adaptive_time') { } else if (src.source_type === 'adaptive_time') {
const pts = (src.schedule || []).length; const pts = (src.schedule || []).length;
@@ -315,10 +322,10 @@ function _setSlider(id, value) {
// Populate the value-source audio dropdown with every cached audio source
// (multichannel devices and mono channels alike), appending a type badge so
// the two kinds are distinguishable. Pre-selects the option whose id equals
// `selectedId`. No-op when the <select> element is not in the DOM.
function _populateAudioSourceDropdown(selectedId) {
    const select = document.getElementById('value-source-audio-source');
    if (!select) return;
    select.innerHTML = _cachedAudioSources.map(s => {
        const badge = s.source_type === 'multichannel' ? ' [multichannel]' : ' [mono]';
        const selected = s.id === selectedId ? ' selected' : '';
        // Escape the id as well as the name: both land inside HTML markup,
        // and an unescaped attribute value is an injection vector.
        return `<option value="${escapeHtml(String(s.id))}"${selected}>${escapeHtml(s.name)}${badge}</option>`;
    }).join('');
}
// ── Adaptive helpers ────────────────────────────────────────── // ── Adaptive helpers ──────────────────────────────────────────

View File

@@ -699,7 +699,7 @@
"color_strip.audio.viz.beat_pulse": "Beat Pulse", "color_strip.audio.viz.beat_pulse": "Beat Pulse",
"color_strip.audio.viz.vu_meter": "VU Meter", "color_strip.audio.viz.vu_meter": "VU Meter",
"color_strip.audio.source": "Audio Source:", "color_strip.audio.source": "Audio Source:",
"color_strip.audio.source.hint": "Mono audio source that provides audio data for this visualization. Create and manage audio sources in the Sources tab.", "color_strip.audio.source.hint": "Audio source for this visualization. Can be a multichannel (device) or mono (single channel) source. Create and manage audio sources in the Sources tab.",
"color_strip.audio.sensitivity": "Sensitivity:", "color_strip.audio.sensitivity": "Sensitivity:",
"color_strip.audio.sensitivity.hint": "Gain multiplier for audio levels. Higher values make LEDs react to quieter sounds.", "color_strip.audio.sensitivity.hint": "Gain multiplier for audio levels. Higher values make LEDs react to quieter sounds.",
"color_strip.audio.smoothing": "Smoothing:", "color_strip.audio.smoothing": "Smoothing:",
@@ -808,7 +808,7 @@
"value_source.max_value": "Max Value:", "value_source.max_value": "Max Value:",
"value_source.max_value.hint": "Maximum output of the waveform cycle", "value_source.max_value.hint": "Maximum output of the waveform cycle",
"value_source.audio_source": "Audio Source:", "value_source.audio_source": "Audio Source:",
"value_source.audio_source.hint": "Mono audio source to read audio levels from", "value_source.audio_source.hint": "Audio source to read audio levels from (multichannel or mono)",
"value_source.mode": "Mode:", "value_source.mode": "Mode:",
"value_source.mode.hint": "RMS measures average volume. Peak tracks loudest moments. Beat triggers on rhythm.", "value_source.mode.hint": "RMS measures average volume. Peak tracks loudest moments. Beat triggers on rhythm.",
"value_source.mode.rms": "RMS (Volume)", "value_source.mode.rms": "RMS (Volume)",
@@ -818,6 +818,10 @@
"value_source.sensitivity.hint": "Gain multiplier for the audio signal (higher = more reactive)", "value_source.sensitivity.hint": "Gain multiplier for the audio signal (higher = more reactive)",
"value_source.smoothing": "Smoothing:", "value_source.smoothing": "Smoothing:",
"value_source.smoothing.hint": "Temporal smoothing (0 = instant response, 1 = very smooth/slow)", "value_source.smoothing.hint": "Temporal smoothing (0 = instant response, 1 = very smooth/slow)",
"value_source.audio_min_value": "Min Value:",
"value_source.audio_min_value.hint": "Output when audio is silent (e.g. 0.3 = 30% brightness floor)",
"value_source.audio_max_value": "Max Value:",
"value_source.audio_max_value.hint": "Output at maximum audio level",
"value_source.schedule": "Schedule:", "value_source.schedule": "Schedule:",
"value_source.schedule.hint": "Define at least 2 time points. Brightness interpolates linearly between them, wrapping at midnight.", "value_source.schedule.hint": "Define at least 2 time points. Brightness interpolates linearly between them, wrapping at midnight.",
"value_source.schedule.add": "+ Add Point", "value_source.schedule.add": "+ Add Point",

View File

@@ -699,7 +699,7 @@
"color_strip.audio.viz.beat_pulse": "Пульс бита", "color_strip.audio.viz.beat_pulse": "Пульс бита",
"color_strip.audio.viz.vu_meter": "VU-метр", "color_strip.audio.viz.vu_meter": "VU-метр",
"color_strip.audio.source": "Аудиоисточник:", "color_strip.audio.source": "Аудиоисточник:",
"color_strip.audio.source.hint": "Моно-аудиоисточник, предоставляющий аудиоданные для визуализации. Создавайте и управляйте аудиоисточниками на вкладке Источники.", "color_strip.audio.source.hint": "Аудиоисточник для визуализации. Может быть многоканальным (устройство) или моно (один канал). Создавайте и управляйте аудиоисточниками на вкладке Источники.",
"color_strip.audio.sensitivity": "Чувствительность:", "color_strip.audio.sensitivity": "Чувствительность:",
"color_strip.audio.sensitivity.hint": "Множитель усиления аудиосигнала. Более высокие значения делают LED чувствительнее к тихим звукам.", "color_strip.audio.sensitivity.hint": "Множитель усиления аудиосигнала. Более высокие значения делают LED чувствительнее к тихим звукам.",
"color_strip.audio.smoothing": "Сглаживание:", "color_strip.audio.smoothing": "Сглаживание:",
@@ -808,7 +808,7 @@
"value_source.max_value": "Макс. значение:", "value_source.max_value": "Макс. значение:",
"value_source.max_value.hint": "Максимальный выход цикла волны", "value_source.max_value.hint": "Максимальный выход цикла волны",
"value_source.audio_source": "Аудиоисточник:", "value_source.audio_source": "Аудиоисточник:",
"value_source.audio_source.hint": "Моно-аудиоисточник для считывания уровня звука", "value_source.audio_source.hint": "Аудиоисточник для считывания уровня звука (многоканальный или моно)",
"value_source.mode": "Режим:", "value_source.mode": "Режим:",
"value_source.mode.hint": "RMS измеряет среднюю громкость. Пик отслеживает самые громкие моменты. Бит реагирует на ритм.", "value_source.mode.hint": "RMS измеряет среднюю громкость. Пик отслеживает самые громкие моменты. Бит реагирует на ритм.",
"value_source.mode.rms": "RMS (Громкость)", "value_source.mode.rms": "RMS (Громкость)",
@@ -818,6 +818,10 @@
"value_source.sensitivity.hint": "Множитель усиления аудиосигнала (выше = более реактивный)", "value_source.sensitivity.hint": "Множитель усиления аудиосигнала (выше = более реактивный)",
"value_source.smoothing": "Сглаживание:", "value_source.smoothing": "Сглаживание:",
"value_source.smoothing.hint": "Временное сглаживание (0 = мгновенный отклик, 1 = очень плавный/медленный)", "value_source.smoothing.hint": "Временное сглаживание (0 = мгновенный отклик, 1 = очень плавный/медленный)",
"value_source.audio_min_value": "Мин. значение:",
"value_source.audio_min_value.hint": "Выход при тишине (напр. 0.3 = минимум 30% яркости)",
"value_source.audio_max_value": "Макс. значение:",
"value_source.audio_max_value.hint": "Выход при максимальном уровне звука",
"value_source.schedule": "Расписание:", "value_source.schedule": "Расписание:",
"value_source.schedule.hint": "Определите минимум 2 временные точки. Яркость линейно интерполируется между ними, с переходом через полночь.", "value_source.schedule.hint": "Определите минимум 2 временные точки. Яркость линейно интерполируется между ними, с переходом через полночь.",
"value_source.schedule.add": "+ Добавить точку", "value_source.schedule.add": "+ Добавить точку",

View File

@@ -210,25 +210,33 @@ class AudioSourceStore:
# ── Resolution ─────────────────────────────────────────────────── # ── Resolution ───────────────────────────────────────────────────
def resolve_mono_source(self, mono_id: str) -> Tuple[int, bool, str]: def resolve_audio_source(self, source_id: str) -> Tuple[int, bool, str]:
"""Resolve a mono audio source to (device_index, is_loopback, channel). """Resolve any audio source to (device_index, is_loopback, channel).
Follows the reference chain: mono → multichannel. Accepts both MultichannelAudioSource (defaults to "mono" channel)
and MonoAudioSource (follows reference chain to parent multichannel).
Raises: Raises:
ValueError: If source not found or chain is broken ValueError: If source not found or chain is broken
""" """
mono = self.get_source(mono_id) source = self.get_source(source_id)
if not isinstance(mono, MonoAudioSource):
raise ValueError(f"Audio source {mono_id} is not a mono source")
parent = self.get_source(mono.audio_source_id) if isinstance(source, MultichannelAudioSource):
if not isinstance(parent, MultichannelAudioSource): return source.device_index, source.is_loopback, "mono"
raise ValueError(
f"Mono source {mono_id} references non-multichannel source {mono.audio_source_id}"
)
return parent.device_index, parent.is_loopback, mono.channel if isinstance(source, MonoAudioSource):
parent = self.get_source(source.audio_source_id)
if not isinstance(parent, MultichannelAudioSource):
raise ValueError(
f"Mono source {source_id} references non-multichannel source {source.audio_source_id}"
)
return parent.device_index, parent.is_loopback, source.channel
raise ValueError(f"Audio source {source_id} is not a valid audio source")
def resolve_mono_source(self, mono_id: str) -> Tuple[int, bool, str]:
"""Backward-compatible wrapper for resolve_audio_source()."""
return self.resolve_audio_source(mono_id)
# ── Migration ──────────────────────────────────────────────────── # ── Migration ────────────────────────────────────────────────────