Add audio channel selection (mono/left/right), show device LED count in target editor

Audio capture now produces per-channel FFT spectrum and RMS alongside
the existing mono mix. Each audio color strip source can select which
channel to visualize via a new "Channel" dropdown. This enables stereo
setups with separate left/right segments on the same LED strip.

Also show the device LED count under the device selector in the target
editor for quick reference.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-23 15:05:15 +03:00
parent 9d593379b8
commit f15ff8fea0
13 changed files with 129 additions and 31 deletions

View File

@@ -83,6 +83,7 @@ def _css_to_response(source, overlay_active: bool = False) -> ColorStripSourceRe
visualization_mode=getattr(source, "visualization_mode", None), visualization_mode=getattr(source, "visualization_mode", None),
audio_device_index=getattr(source, "audio_device_index", None), audio_device_index=getattr(source, "audio_device_index", None),
audio_loopback=getattr(source, "audio_loopback", None), audio_loopback=getattr(source, "audio_loopback", None),
audio_channel=getattr(source, "audio_channel", None),
sensitivity=getattr(source, "sensitivity", None), sensitivity=getattr(source, "sensitivity", None),
color_peak=getattr(source, "color_peak", None), color_peak=getattr(source, "color_peak", None),
overlay_active=overlay_active, overlay_active=overlay_active,
@@ -164,6 +165,7 @@ async def create_color_strip_source(
visualization_mode=data.visualization_mode, visualization_mode=data.visualization_mode,
audio_device_index=data.audio_device_index, audio_device_index=data.audio_device_index,
audio_loopback=data.audio_loopback, audio_loopback=data.audio_loopback,
audio_channel=data.audio_channel,
sensitivity=data.sensitivity, sensitivity=data.sensitivity,
color_peak=data.color_peak, color_peak=data.color_peak,
) )
@@ -237,6 +239,7 @@ async def update_color_strip_source(
visualization_mode=data.visualization_mode, visualization_mode=data.visualization_mode,
audio_device_index=data.audio_device_index, audio_device_index=data.audio_device_index,
audio_loopback=data.audio_loopback, audio_loopback=data.audio_loopback,
audio_channel=data.audio_channel,
sensitivity=data.sensitivity, sensitivity=data.sensitivity,
color_peak=data.color_peak, color_peak=data.color_peak,
) )

View File

@@ -71,6 +71,7 @@ class ColorStripSourceCreate(BaseModel):
audio_loopback: Optional[bool] = Field(None, description="True for system audio (WASAPI loopback), False for mic/line-in") audio_loopback: Optional[bool] = Field(None, description="True for system audio (WASAPI loopback), False for mic/line-in")
sensitivity: Optional[float] = Field(None, description="Audio sensitivity/gain 0.1-5.0", ge=0.1, le=5.0) sensitivity: Optional[float] = Field(None, description="Audio sensitivity/gain 0.1-5.0", ge=0.1, le=5.0)
color_peak: Optional[List[int]] = Field(None, description="Peak/high RGB color for VU meter [R,G,B]") color_peak: Optional[List[int]] = Field(None, description="Peak/high RGB color for VU meter [R,G,B]")
audio_channel: Optional[str] = Field(None, description="Audio channel: mono|left|right")
# shared # shared
led_count: int = Field(default=0, description="Total LED count (0 = auto from calibration / device)", ge=0) led_count: int = Field(default=0, description="Total LED count (0 = auto from calibration / device)", ge=0)
description: Optional[str] = Field(None, description="Optional description", max_length=500) description: Optional[str] = Field(None, description="Optional description", max_length=500)
@@ -112,6 +113,7 @@ class ColorStripSourceUpdate(BaseModel):
audio_loopback: Optional[bool] = Field(None, description="True for system audio (WASAPI loopback), False for mic/line-in") audio_loopback: Optional[bool] = Field(None, description="True for system audio (WASAPI loopback), False for mic/line-in")
sensitivity: Optional[float] = Field(None, description="Audio sensitivity/gain 0.1-5.0", ge=0.1, le=5.0) sensitivity: Optional[float] = Field(None, description="Audio sensitivity/gain 0.1-5.0", ge=0.1, le=5.0)
color_peak: Optional[List[int]] = Field(None, description="Peak/high RGB color for VU meter [R,G,B]") color_peak: Optional[List[int]] = Field(None, description="Peak/high RGB color for VU meter [R,G,B]")
audio_channel: Optional[str] = Field(None, description="Audio channel: mono|left|right")
# shared # shared
led_count: Optional[int] = Field(None, description="Total LED count (0 = auto from calibration / device)", ge=0) led_count: Optional[int] = Field(None, description="Total LED count (0 = auto from calibration / device)", ge=0)
description: Optional[str] = Field(None, description="Optional description", max_length=500) description: Optional[str] = Field(None, description="Optional description", max_length=500)
@@ -155,6 +157,7 @@ class ColorStripSourceResponse(BaseModel):
audio_loopback: Optional[bool] = Field(None, description="WASAPI loopback mode") audio_loopback: Optional[bool] = Field(None, description="WASAPI loopback mode")
sensitivity: Optional[float] = Field(None, description="Audio sensitivity") sensitivity: Optional[float] = Field(None, description="Audio sensitivity")
color_peak: Optional[List[int]] = Field(None, description="Peak color [R,G,B]") color_peak: Optional[List[int]] = Field(None, description="Peak color [R,G,B]")
audio_channel: Optional[str] = Field(None, description="Audio channel: mono|left|right")
# shared # shared
led_count: int = Field(0, description="Total LED count (0 = auto from calibration / device)") led_count: int = Field(0, description="Total LED count (0 = auto from calibration / device)")
description: Optional[str] = Field(None, description="Description") description: Optional[str] = Field(None, description="Description")

View File

@@ -37,14 +37,23 @@ class AudioAnalysis:
"""Snapshot of audio analysis results. """Snapshot of audio analysis results.
Written by the capture thread, read by visualization streams. Written by the capture thread, read by visualization streams.
Mono fields contain the mixed-down signal (all channels averaged).
Per-channel fields (left/right) are populated when the source is stereo+.
For mono sources, left/right are copies of the mono data.
""" """
timestamp: float = 0.0 timestamp: float = 0.0
# Mono (mixed) — backward-compatible fields
rms: float = 0.0 rms: float = 0.0
peak: float = 0.0 peak: float = 0.0
spectrum: np.ndarray = field(default_factory=lambda: np.zeros(NUM_BANDS, dtype=np.float32)) spectrum: np.ndarray = field(default_factory=lambda: np.zeros(NUM_BANDS, dtype=np.float32))
beat: bool = False beat: bool = False
beat_intensity: float = 0.0 beat_intensity: float = 0.0
# Per-channel
left_rms: float = 0.0
left_spectrum: np.ndarray = field(default_factory=lambda: np.zeros(NUM_BANDS, dtype=np.float32))
right_rms: float = 0.0
right_spectrum: np.ndarray = field(default_factory=lambda: np.zeros(NUM_BANDS, dtype=np.float32))
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
@@ -111,6 +120,8 @@ class AudioCaptureStream:
# Smoothed spectrum (exponential decay between frames) # Smoothed spectrum (exponential decay between frames)
self._smooth_spectrum = np.zeros(NUM_BANDS, dtype=np.float32) self._smooth_spectrum = np.zeros(NUM_BANDS, dtype=np.float32)
self._smooth_spectrum_left = np.zeros(NUM_BANDS, dtype=np.float32)
self._smooth_spectrum_right = np.zeros(NUM_BANDS, dtype=np.float32)
def start(self) -> None: def start(self) -> None:
if self._running: if self._running:
@@ -196,6 +207,8 @@ class AudioCaptureStream:
) )
spectrum_buf = np.zeros(NUM_BANDS, dtype=np.float32) spectrum_buf = np.zeros(NUM_BANDS, dtype=np.float32)
spectrum_buf_left = np.zeros(NUM_BANDS, dtype=np.float32)
spectrum_buf_right = np.zeros(NUM_BANDS, dtype=np.float32)
while self._running: while self._running:
try: try:
@@ -206,45 +219,49 @@ class AudioCaptureStream:
time.sleep(0.05) time.sleep(0.05)
continue continue
# Mix to mono if multi-channel # Split channels and mix to mono
if channels > 1: if channels > 1:
data = data.reshape(-1, channels) data = data.reshape(-1, channels)
left_samples = data[:, 0].copy()
right_samples = data[:, 1].copy() if channels >= 2 else left_samples.copy()
samples = data.mean(axis=1).astype(np.float32) samples = data.mean(axis=1).astype(np.float32)
else: else:
samples = data samples = data
left_samples = samples
right_samples = samples
# RMS and peak # RMS and peak (mono)
rms = float(np.sqrt(np.mean(samples ** 2))) rms = float(np.sqrt(np.mean(samples ** 2)))
peak = float(np.max(np.abs(samples))) peak = float(np.max(np.abs(samples)))
left_rms = float(np.sqrt(np.mean(left_samples ** 2)))
right_rms = float(np.sqrt(np.mean(right_samples ** 2)))
# FFT # FFT helper
chunk = samples[: self._chunk_size] alpha = 0.3 # smoothing factor (lower = smoother)
def _fft_bands(samps, buf, smooth_buf):
chunk = samps[: self._chunk_size]
if len(chunk) < self._chunk_size: if len(chunk) < self._chunk_size:
chunk = np.pad(chunk, (0, self._chunk_size - len(chunk))) chunk = np.pad(chunk, (0, self._chunk_size - len(chunk)))
windowed = chunk * self._window windowed = chunk * self._window
fft_mag = np.abs(np.fft.rfft(windowed)) fft_mag = np.abs(np.fft.rfft(windowed))
# Normalize by chunk size
fft_mag /= self._chunk_size fft_mag /= self._chunk_size
for b, (s, e) in enumerate(self._bands):
# Bin into logarithmic bands if s < len(fft_mag) and e <= len(fft_mag):
for b, (start, end) in enumerate(self._bands): buf[b] = float(np.mean(fft_mag[s:e]))
if start < len(fft_mag) and end <= len(fft_mag):
spectrum_buf[b] = float(np.mean(fft_mag[start:end]))
else: else:
spectrum_buf[b] = 0.0 buf[b] = 0.0
spec_max = float(np.max(buf))
# Normalize spectrum to 0-1 range (adaptive)
spec_max = float(np.max(spectrum_buf))
if spec_max > 1e-6: if spec_max > 1e-6:
spectrum_buf /= spec_max buf /= spec_max
smooth_buf[:] = alpha * buf + (1.0 - alpha) * smooth_buf
# Exponential smoothing # Compute FFT for mono, left, right
alpha = 0.3 # smoothing factor (lower = smoother) _fft_bands(samples, spectrum_buf, self._smooth_spectrum)
self._smooth_spectrum[:] = ( _fft_bands(left_samples, spectrum_buf_left, self._smooth_spectrum_left)
alpha * spectrum_buf + (1.0 - alpha) * self._smooth_spectrum _fft_bands(right_samples, spectrum_buf_right, self._smooth_spectrum_right)
)
# Beat detection — compare current energy to rolling average # Beat detection — compare current energy to rolling average (mono)
energy = float(np.sum(samples ** 2)) energy = float(np.sum(samples ** 2))
self._energy_history[self._energy_idx % len(self._energy_history)] = energy self._energy_history[self._energy_idx % len(self._energy_history)] = energy
self._energy_idx += 1 self._energy_idx += 1
@@ -265,6 +282,10 @@ class AudioCaptureStream:
spectrum=self._smooth_spectrum.copy(), spectrum=self._smooth_spectrum.copy(),
beat=beat, beat=beat,
beat_intensity=beat_intensity, beat_intensity=beat_intensity,
left_rms=left_rms,
left_spectrum=self._smooth_spectrum_left.copy(),
right_rms=right_rms,
right_spectrum=self._smooth_spectrum_right.copy(),
) )
with self._lock: with self._lock:

View File

@@ -68,6 +68,7 @@ class AudioColorStripStream(ColorStripStream):
self._auto_size = not source.led_count self._auto_size = not source.led_count
self._led_count = source.led_count if source.led_count and source.led_count > 0 else 1 self._led_count = source.led_count if source.led_count and source.led_count > 0 else 1
self._mirror = bool(getattr(source, "mirror", False)) self._mirror = bool(getattr(source, "mirror", False))
self._audio_channel = getattr(source, "audio_channel", "mono") # mono | left | right
with self._colors_lock: with self._colors_lock:
self._colors: Optional[np.ndarray] = None self._colors: Optional[np.ndarray] = None
@@ -193,6 +194,16 @@ class AudioColorStripStream(ColorStripStream):
elapsed = time.perf_counter() - loop_start elapsed = time.perf_counter() - loop_start
time.sleep(max(frame_time - elapsed, 0.001)) time.sleep(max(frame_time - elapsed, 0.001))
# ── Channel selection ─────────────────────────────────────────
def _pick_channel(self, analysis):
"""Return (spectrum, rms) for the configured audio channel."""
if self._audio_channel == "left":
return analysis.left_spectrum, analysis.left_rms
elif self._audio_channel == "right":
return analysis.right_spectrum, analysis.right_rms
return analysis.spectrum, analysis.rms
# ── Spectrum Analyzer ────────────────────────────────────────── # ── Spectrum Analyzer ──────────────────────────────────────────
def _render_spectrum(self, buf: np.ndarray, n: int, analysis) -> None: def _render_spectrum(self, buf: np.ndarray, n: int, analysis) -> None:
@@ -200,7 +211,7 @@ class AudioColorStripStream(ColorStripStream):
buf[:] = 0 buf[:] = 0
return return
spectrum = analysis.spectrum spectrum, _ = self._pick_channel(analysis)
sensitivity = self._sensitivity sensitivity = self._sensitivity
smoothing = self._smoothing smoothing = self._smoothing
lut = self._palette_lut lut = self._palette_lut
@@ -249,7 +260,8 @@ class AudioColorStripStream(ColorStripStream):
buf[:] = 0 buf[:] = 0
return return
rms = analysis.rms * self._sensitivity _, ch_rms = self._pick_channel(analysis)
rms = ch_rms * self._sensitivity
# Temporal smoothing on RMS # Temporal smoothing on RMS
rms = self._smoothing * self._prev_rms + (1.0 - self._smoothing) * rms rms = self._smoothing * self._prev_rms + (1.0 - self._smoothing) * rms
self._prev_rms = rms self._prev_rms = rms

View File

@@ -240,6 +240,13 @@
background: var(--card-bg, #1e1e1e); background: var(--card-bg, #1e1e1e);
} }
/* Muted LED-count caption shown beneath the device selector. */
.device-led-info {
    display: block;
    font-size: 0.85em;
    margin-top: 4px;
    color: var(--text-muted, #888);
}
.segment-row-header { .segment-row-header {
display: flex; display: flex;
justify-content: space-between; justify-content: space-between;

View File

@@ -438,6 +438,7 @@ function _loadAudioState(css) {
document.getElementById('css-editor-audio-smoothing').value = smoothing; document.getElementById('css-editor-audio-smoothing').value = smoothing;
document.getElementById('css-editor-audio-smoothing-val').textContent = parseFloat(smoothing).toFixed(2); document.getElementById('css-editor-audio-smoothing-val').textContent = parseFloat(smoothing).toFixed(2);
document.getElementById('css-editor-audio-channel').value = css.audio_channel || 'mono';
document.getElementById('css-editor-audio-palette').value = css.palette || 'rainbow'; document.getElementById('css-editor-audio-palette').value = css.palette || 'rainbow';
document.getElementById('css-editor-audio-color').value = rgbArrayToHex(css.color || [0, 255, 0]); document.getElementById('css-editor-audio-color').value = rgbArrayToHex(css.color || [0, 255, 0]);
document.getElementById('css-editor-audio-color-peak').value = rgbArrayToHex(css.color_peak || [255, 0, 0]); document.getElementById('css-editor-audio-color-peak').value = rgbArrayToHex(css.color_peak || [255, 0, 0]);
@@ -461,6 +462,7 @@ function _resetAudioState() {
document.getElementById('css-editor-audio-sensitivity-val').textContent = '1.0'; document.getElementById('css-editor-audio-sensitivity-val').textContent = '1.0';
document.getElementById('css-editor-audio-smoothing').value = 0.3; document.getElementById('css-editor-audio-smoothing').value = 0.3;
document.getElementById('css-editor-audio-smoothing-val').textContent = '0.30'; document.getElementById('css-editor-audio-smoothing-val').textContent = '0.30';
document.getElementById('css-editor-audio-channel').value = 'mono';
document.getElementById('css-editor-audio-palette').value = 'rainbow'; document.getElementById('css-editor-audio-palette').value = 'rainbow';
document.getElementById('css-editor-audio-color').value = '#00ff00'; document.getElementById('css-editor-audio-color').value = '#00ff00';
document.getElementById('css-editor-audio-color-peak').value = '#ff0000'; document.getElementById('css-editor-audio-color-peak').value = '#ff0000';
@@ -544,9 +546,12 @@ export function createColorStripCard(source, pictureSourceMap) {
} else if (isAudio) { } else if (isAudio) {
const vizLabel = t('color_strip.audio.viz.' + (source.visualization_mode || 'spectrum')) || source.visualization_mode || 'spectrum'; const vizLabel = t('color_strip.audio.viz.' + (source.visualization_mode || 'spectrum')) || source.visualization_mode || 'spectrum';
const sensitivityVal = (source.sensitivity || 1.0).toFixed(1); const sensitivityVal = (source.sensitivity || 1.0).toFixed(1);
const ch = source.audio_channel || 'mono';
const chBadge = ch !== 'mono' ? `<span class="stream-card-prop" title="${t('color_strip.audio.channel')}">${ch === 'left' ? 'L' : 'R'}</span>` : '';
propsHtml = ` propsHtml = `
<span class="stream-card-prop">🎵 ${escapeHtml(vizLabel)}</span> <span class="stream-card-prop">🎵 ${escapeHtml(vizLabel)}</span>
<span class="stream-card-prop" title="${t('color_strip.audio.sensitivity')}">📶 ${sensitivityVal}</span> <span class="stream-card-prop" title="${t('color_strip.audio.sensitivity')}">📶 ${sensitivityVal}</span>
${chBadge}
${source.mirror ? `<span class="stream-card-prop">🪞</span>` : ''} ${source.mirror ? `<span class="stream-card-prop">🪞</span>` : ''}
`; `;
} else { } else {
@@ -808,6 +813,7 @@ export async function saveCSSEditor() {
visualization_mode: document.getElementById('css-editor-audio-viz').value, visualization_mode: document.getElementById('css-editor-audio-viz').value,
audio_device_index: parseInt(devIdx) || -1, audio_device_index: parseInt(devIdx) || -1,
audio_loopback: devLoop !== '0', audio_loopback: devLoop !== '0',
audio_channel: document.getElementById('css-editor-audio-channel').value,
sensitivity: parseFloat(document.getElementById('css-editor-audio-sensitivity').value), sensitivity: parseFloat(document.getElementById('css-editor-audio-sensitivity').value),
smoothing: parseFloat(document.getElementById('css-editor-audio-smoothing').value), smoothing: parseFloat(document.getElementById('css-editor-audio-smoothing').value),
palette: document.getElementById('css-editor-audio-palette').value, palette: document.getElementById('css-editor-audio-palette').value,

View File

@@ -140,6 +140,18 @@ function _updateFpsRecommendation() {
} }
} }
// Show the selected device's LED count beneath the device selector,
// or hide the caption when the device is unknown or reports no count.
function _updateDeviceInfo() {
    const selectedId = document.getElementById('target-editor-device').value;
    const infoEl = document.getElementById('target-editor-device-info');
    const dev = _targetEditorDevices.find(d => d.id === selectedId);
    const ledCount = dev ? dev.led_count : null;
    if (!ledCount) {
        infoEl.style.display = 'none';
        return;
    }
    infoEl.textContent = `${ledCount} LEDs`;
    infoEl.style.display = '';
}
function _updateKeepaliveVisibility() { function _updateKeepaliveVisibility() {
const deviceSelect = document.getElementById('target-editor-device'); const deviceSelect = document.getElementById('target-editor-device');
const keepaliveGroup = document.getElementById('target-editor-keepalive-group'); const keepaliveGroup = document.getElementById('target-editor-keepalive-group');
@@ -267,10 +279,11 @@ export async function showTargetEditor(targetId = null) {
_targetNameManuallyEdited = !!targetId; _targetNameManuallyEdited = !!targetId;
document.getElementById('target-editor-name').oninput = () => { _targetNameManuallyEdited = true; }; document.getElementById('target-editor-name').oninput = () => { _targetNameManuallyEdited = true; };
window._targetAutoName = _autoGenerateTargetName; window._targetAutoName = _autoGenerateTargetName;
deviceSelect.onchange = () => { _updateKeepaliveVisibility(); _updateFpsRecommendation(); _autoGenerateTargetName(); }; deviceSelect.onchange = () => { _updateDeviceInfo(); _updateKeepaliveVisibility(); _updateFpsRecommendation(); _autoGenerateTargetName(); };
if (!targetId) _autoGenerateTargetName(); if (!targetId) _autoGenerateTargetName();
// Show/hide standby interval based on selected device capabilities // Show/hide standby interval based on selected device capabilities
_updateDeviceInfo();
_updateKeepaliveVisibility(); _updateKeepaliveVisibility();
_updateFpsRecommendation(); _updateFpsRecommendation();

View File

@@ -667,6 +667,11 @@
"color_strip.audio.viz.vu_meter": "VU Meter", "color_strip.audio.viz.vu_meter": "VU Meter",
"color_strip.audio.device": "Audio Device:", "color_strip.audio.device": "Audio Device:",
"color_strip.audio.device.hint": "Audio input source. Loopback devices capture system audio output; input devices capture microphone or line-in.", "color_strip.audio.device.hint": "Audio input source. Loopback devices capture system audio output; input devices capture microphone or line-in.",
"color_strip.audio.channel": "Channel:",
"color_strip.audio.channel.hint": "Select which audio channel to visualize. Use Left/Right for stereo setups.",
"color_strip.audio.channel.mono": "Mono (L+R mix)",
"color_strip.audio.channel.left": "Left",
"color_strip.audio.channel.right": "Right",
"color_strip.audio.sensitivity": "Sensitivity:", "color_strip.audio.sensitivity": "Sensitivity:",
"color_strip.audio.sensitivity.hint": "Gain multiplier for audio levels. Higher values make LEDs react to quieter sounds.", "color_strip.audio.sensitivity.hint": "Gain multiplier for audio levels. Higher values make LEDs react to quieter sounds.",
"color_strip.audio.smoothing": "Smoothing:", "color_strip.audio.smoothing": "Smoothing:",

View File

@@ -667,6 +667,11 @@
"color_strip.audio.viz.vu_meter": "VU-метр", "color_strip.audio.viz.vu_meter": "VU-метр",
"color_strip.audio.device": "Аудиоустройство:", "color_strip.audio.device": "Аудиоустройство:",
"color_strip.audio.device.hint": "Источник аудиосигнала. Устройства обратной петли захватывают системный звук; устройства ввода — микрофон или линейный вход.", "color_strip.audio.device.hint": "Источник аудиосигнала. Устройства обратной петли захватывают системный звук; устройства ввода — микрофон или линейный вход.",
"color_strip.audio.channel": "Канал:",
"color_strip.audio.channel.hint": "Какой аудиоканал визуализировать. Используйте Левый/Правый для стерео-режима.",
"color_strip.audio.channel.mono": "Моно (Л+П микс)",
"color_strip.audio.channel.left": "Левый",
"color_strip.audio.channel.right": "Правый",
"color_strip.audio.sensitivity": "Чувствительность:", "color_strip.audio.sensitivity": "Чувствительность:",
"color_strip.audio.sensitivity.hint": "Множитель усиления аудиосигнала. Более высокие значения делают LED чувствительнее к тихим звукам.", "color_strip.audio.sensitivity.hint": "Множитель усиления аудиосигнала. Более высокие значения делают LED чувствительнее к тихим звукам.",
"color_strip.audio.smoothing": "Сглаживание:", "color_strip.audio.smoothing": "Сглаживание:",

View File

@@ -78,6 +78,7 @@ class ColorStripSource:
"audio_loopback": None, "audio_loopback": None,
"sensitivity": None, "sensitivity": None,
"color_peak": None, "color_peak": None,
"audio_channel": None,
} }
@staticmethod @staticmethod
@@ -165,6 +166,7 @@ class ColorStripSource:
visualization_mode=data.get("visualization_mode") or "spectrum", visualization_mode=data.get("visualization_mode") or "spectrum",
audio_device_index=int(data.get("audio_device_index", -1)), audio_device_index=int(data.get("audio_device_index", -1)),
audio_loopback=bool(data.get("audio_loopback", True)), audio_loopback=bool(data.get("audio_loopback", True)),
audio_channel=data.get("audio_channel") or "mono",
sensitivity=float(data.get("sensitivity") or 1.0), sensitivity=float(data.get("sensitivity") or 1.0),
smoothing=float(data.get("smoothing") or 0.3), smoothing=float(data.get("smoothing") or 0.3),
palette=data.get("palette") or "rainbow", palette=data.get("palette") or "rainbow",
@@ -366,6 +368,7 @@ class AudioColorStripSource(ColorStripSource):
visualization_mode: str = "spectrum" # spectrum | beat_pulse | vu_meter visualization_mode: str = "spectrum" # spectrum | beat_pulse | vu_meter
audio_device_index: int = -1 # -1 = default input device audio_device_index: int = -1 # -1 = default input device
audio_loopback: bool = True # True = WASAPI loopback (system audio) audio_loopback: bool = True # True = WASAPI loopback (system audio)
audio_channel: str = "mono" # mono | left | right
sensitivity: float = 1.0 # gain multiplier (0.1–5.0) sensitivity: float = 1.0 # gain multiplier (0.1–5.0)
smoothing: float = 0.3 # temporal smoothing (0.0–1.0) smoothing: float = 0.3 # temporal smoothing (0.0–1.0)
palette: str = "rainbow" # named color palette palette: str = "rainbow" # named color palette
@@ -379,6 +382,7 @@ class AudioColorStripSource(ColorStripSource):
d["visualization_mode"] = self.visualization_mode d["visualization_mode"] = self.visualization_mode
d["audio_device_index"] = self.audio_device_index d["audio_device_index"] = self.audio_device_index
d["audio_loopback"] = self.audio_loopback d["audio_loopback"] = self.audio_loopback
d["audio_channel"] = self.audio_channel
d["sensitivity"] = self.sensitivity d["sensitivity"] = self.sensitivity
d["smoothing"] = self.smoothing d["smoothing"] = self.smoothing
d["palette"] = self.palette d["palette"] = self.palette

View File

@@ -122,6 +122,7 @@ class ColorStripStore:
visualization_mode: str = "spectrum", visualization_mode: str = "spectrum",
audio_device_index: int = -1, audio_device_index: int = -1,
audio_loopback: bool = True, audio_loopback: bool = True,
audio_channel: str = "mono",
sensitivity: float = 1.0, sensitivity: float = 1.0,
color_peak: Optional[list] = None, color_peak: Optional[list] = None,
) -> ColorStripSource: ) -> ColorStripSource:
@@ -215,6 +216,7 @@ class ColorStripStore:
visualization_mode=visualization_mode or "spectrum", visualization_mode=visualization_mode or "spectrum",
audio_device_index=audio_device_index if audio_device_index is not None else -1, audio_device_index=audio_device_index if audio_device_index is not None else -1,
audio_loopback=bool(audio_loopback), audio_loopback=bool(audio_loopback),
audio_channel=audio_channel or "mono",
sensitivity=float(sensitivity) if sensitivity else 1.0, sensitivity=float(sensitivity) if sensitivity else 1.0,
smoothing=float(smoothing) if smoothing else 0.3, smoothing=float(smoothing) if smoothing else 0.3,
palette=palette or "rainbow", palette=palette or "rainbow",
@@ -292,6 +294,7 @@ class ColorStripStore:
visualization_mode: Optional[str] = None, visualization_mode: Optional[str] = None,
audio_device_index: Optional[int] = None, audio_device_index: Optional[int] = None,
audio_loopback: Optional[bool] = None, audio_loopback: Optional[bool] = None,
audio_channel: Optional[str] = None,
sensitivity: Optional[float] = None, sensitivity: Optional[float] = None,
color_peak: Optional[list] = None, color_peak: Optional[list] = None,
) -> ColorStripSource: ) -> ColorStripSource:
@@ -381,6 +384,8 @@ class ColorStripStore:
source.audio_device_index = audio_device_index source.audio_device_index = audio_device_index
if audio_loopback is not None: if audio_loopback is not None:
source.audio_loopback = bool(audio_loopback) source.audio_loopback = bool(audio_loopback)
if audio_channel is not None:
source.audio_channel = audio_channel
if sensitivity is not None: if sensitivity is not None:
source.sensitivity = float(sensitivity) source.sensitivity = float(sensitivity)
if smoothing is not None: if smoothing is not None:

View File

@@ -342,6 +342,19 @@
</select> </select>
</div> </div>
<!-- Audio channel selector: mono (L+R mix), left, or right.
     Option values map 1:1 to the backend `audio_channel` field. -->
<div class="form-group">
<div class="label-row">
<label for="css-editor-audio-channel" data-i18n="color_strip.audio.channel">Channel:</label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?" data-i18n-aria-label="aria.hint">?</button>
</div>
<small class="input-hint" style="display:none" data-i18n="color_strip.audio.channel.hint">Select which audio channel to visualize. Use Left/Right for stereo setups.</small>
<select id="css-editor-audio-channel">
<option value="mono" data-i18n="color_strip.audio.channel.mono">Mono (L+R mix)</option>
<option value="left" data-i18n="color_strip.audio.channel.left">Left</option>
<option value="right" data-i18n="color_strip.audio.channel.right">Right</option>
</select>
</div>
<div class="form-group"> <div class="form-group">
<div class="label-row"> <div class="label-row">
<label for="css-editor-audio-sensitivity"> <label for="css-editor-audio-sensitivity">

View File

@@ -21,6 +21,7 @@
</div> </div>
<small class="input-hint" style="display:none" data-i18n="targets.device.hint">Select the LED device to send data to</small> <small class="input-hint" style="display:none" data-i18n="targets.device.hint">Select the LED device to send data to</small>
<select id="target-editor-device"></select> <select id="target-editor-device"></select>
<small id="target-editor-device-info" class="device-led-info" style="display:none"></small>
</div> </div>
<div class="form-group" id="target-editor-segments-group"> <div class="form-group" id="target-editor-segments-group">