Add audio channel selection (mono/left/right), show device LED count in target editor

Audio capture now produces per-channel FFT spectrum and RMS alongside
the existing mono mix. Each audio color strip source can select which
channel to visualize via a new "Channel" dropdown. This enables stereo
setups with separate left/right segments on the same LED strip.

Also shows the device LED count under the device selector in the target
editor for quick reference.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-23 15:05:15 +03:00
parent 9d593379b8
commit f15ff8fea0
13 changed files with 129 additions and 31 deletions

View File

@@ -37,14 +37,23 @@ class AudioAnalysis:
"""Snapshot of audio analysis results.
Written by the capture thread, read by visualization streams.
Mono fields contain the mixed-down signal (all channels averaged).
Per-channel fields (left/right) are populated when the source is stereo+.
For mono sources, left/right are copies of the mono data.
"""
timestamp: float = 0.0
# Mono (mixed) — backward-compatible fields
rms: float = 0.0
peak: float = 0.0
spectrum: np.ndarray = field(default_factory=lambda: np.zeros(NUM_BANDS, dtype=np.float32))
beat: bool = False
beat_intensity: float = 0.0
# Per-channel
left_rms: float = 0.0
left_spectrum: np.ndarray = field(default_factory=lambda: np.zeros(NUM_BANDS, dtype=np.float32))
right_rms: float = 0.0
right_spectrum: np.ndarray = field(default_factory=lambda: np.zeros(NUM_BANDS, dtype=np.float32))
# ---------------------------------------------------------------------------
@@ -111,6 +120,8 @@ class AudioCaptureStream:
# Smoothed spectrum buffers (exponential decay between frames).
# One per analysis channel: mono mix, left, right — each updated in
# place by the capture loop's FFT helper.
self._smooth_spectrum = np.zeros(NUM_BANDS, dtype=np.float32)
self._smooth_spectrum_left = np.zeros(NUM_BANDS, dtype=np.float32)
self._smooth_spectrum_right = np.zeros(NUM_BANDS, dtype=np.float32)
def start(self) -> None:
if self._running:
@@ -196,6 +207,8 @@ class AudioCaptureStream:
)
spectrum_buf = np.zeros(NUM_BANDS, dtype=np.float32)
spectrum_buf_left = np.zeros(NUM_BANDS, dtype=np.float32)
spectrum_buf_right = np.zeros(NUM_BANDS, dtype=np.float32)
while self._running:
try:
@@ -206,45 +219,49 @@ class AudioCaptureStream:
time.sleep(0.05)
continue
# Split channels and mix to mono.
# `data` arrives as interleaved samples; reshape to (frames, channels)
# so column 0 is the left channel and column 1 the right.
if channels > 1:
data = data.reshape(-1, channels)
left_samples = data[:, 0].copy()
# NOTE(review): `channels >= 2` is always true inside this
# `channels > 1` branch — the fallback looks dead; confirm intent.
right_samples = data[:, 1].copy() if channels >= 2 else left_samples.copy()
samples = data.mean(axis=1).astype(np.float32)
else:
# Mono source: left/right are aliases of the mono signal.
samples = data
left_samples = samples
right_samples = samples
# RMS and peak (mono mix), plus per-channel RMS for the L/R fields.
rms = float(np.sqrt(np.mean(samples ** 2)))
peak = float(np.max(np.abs(samples)))
left_rms = float(np.sqrt(np.mean(left_samples ** 2)))
right_rms = float(np.sqrt(np.mean(right_samples ** 2)))
# FFT
chunk = samples[: self._chunk_size]
if len(chunk) < self._chunk_size:
chunk = np.pad(chunk, (0, self._chunk_size - len(chunk)))
windowed = chunk * self._window
fft_mag = np.abs(np.fft.rfft(windowed))
# Normalize by chunk size
fft_mag /= self._chunk_size
# Bin into logarithmic bands
for b, (start, end) in enumerate(self._bands):
if start < len(fft_mag) and end <= len(fft_mag):
spectrum_buf[b] = float(np.mean(fft_mag[start:end]))
else:
spectrum_buf[b] = 0.0
# Normalize spectrum to 0-1 range (adaptive)
spec_max = float(np.max(spectrum_buf))
if spec_max > 1e-6:
spectrum_buf /= spec_max
# Exponential smoothing
# FFT helper
alpha = 0.3 # smoothing factor (lower = smoother)
self._smooth_spectrum[:] = (
alpha * spectrum_buf + (1.0 - alpha) * self._smooth_spectrum
)
# Beat detection — compare current energy to rolling average
def _fft_bands(samps, buf, smooth_buf):
    """Compute normalized log-band FFT magnitudes of *samps*.

    Writes the instantaneous band values into *buf* and folds them
    into *smooth_buf* with exponential smoothing; both arrays are
    mutated in place. Chunk size, analysis window, and band edges
    come from the enclosing capture stream (`self`); `alpha` is the
    smoothing factor bound in the enclosing scope.
    """
    size = self._chunk_size
    frame = samps[:size]
    # Zero-pad short frames so the FFT length is always `size`.
    shortfall = size - len(frame)
    if shortfall > 0:
        frame = np.pad(frame, (0, shortfall))
    # Windowed magnitude spectrum, normalized by the chunk size.
    magnitudes = np.abs(np.fft.rfft(frame * self._window)) / size
    n_bins = len(magnitudes)
    # Average the FFT bins inside each logarithmic band; out-of-range
    # bands collapse to zero.
    for band_idx, (lo, hi) in enumerate(self._bands):
        in_range = lo < n_bins and hi <= n_bins
        buf[band_idx] = float(np.mean(magnitudes[lo:hi])) if in_range else 0.0
    # Adaptive 0-1 normalization against the loudest band this frame.
    loudest = float(np.max(buf))
    if loudest > 1e-6:
        buf /= loudest
    # Exponential smoothing (lower alpha = smoother response).
    smooth_buf[:] = alpha * buf + (1.0 - alpha) * smooth_buf
# Compute FFT for mono, left, right
_fft_bands(samples, spectrum_buf, self._smooth_spectrum)
_fft_bands(left_samples, spectrum_buf_left, self._smooth_spectrum_left)
_fft_bands(right_samples, spectrum_buf_right, self._smooth_spectrum_right)
# Beat detection — compare current energy to rolling average (mono)
energy = float(np.sum(samples ** 2))
self._energy_history[self._energy_idx % len(self._energy_history)] = energy
self._energy_idx += 1
@@ -265,6 +282,10 @@ class AudioCaptureStream:
spectrum=self._smooth_spectrum.copy(),
beat=beat,
beat_intensity=beat_intensity,
left_rms=left_rms,
left_spectrum=self._smooth_spectrum_left.copy(),
right_rms=right_rms,
right_spectrum=self._smooth_spectrum_right.copy(),
)
with self._lock: