Add audio-reactive color strip sources, improve delete error messages

Add new "audio" color strip source type with three visualization modes
(spectrum analyzer, beat pulse, VU meter) supporting WASAPI loopback and
microphone input via PyAudioWPatch. Includes shared audio capture with
ref counting, real-time FFT spectrum analysis, and beat detection.

Improve all referential integrity 409 error messages across delete
endpoints to include specific names of referencing entities instead of
generic "one or more" messages.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-23 11:56:54 +03:00
parent 2657f46e5d
commit bbd2ac9910
24 changed files with 1247 additions and 86 deletions

View File

@@ -42,6 +42,7 @@ dependencies = [
"pyserial>=3.5",
"psutil>=5.9.0",
"nvidia-ml-py>=12.0.0; sys_platform == 'win32'",
"PyAudioWPatch>=0.2.12; sys_platform == 'win32'",
]
[project.optional-dependencies]

View File

@@ -10,6 +10,7 @@ from .routes.picture_sources import router as picture_sources_router
from .routes.pattern_templates import router as pattern_templates_router
from .routes.picture_targets import router as picture_targets_router
from .routes.color_strip_sources import router as color_strip_sources_router
from .routes.audio import router as audio_router
from .routes.profiles import router as profiles_router
router = APIRouter()
@@ -20,6 +21,7 @@ router.include_router(postprocessing_router)
router.include_router(pattern_templates_router)
router.include_router(picture_sources_router)
router.include_router(color_strip_sources_router)
router.include_router(audio_router)
router.include_router(picture_targets_router)
router.include_router(profiles_router)

View File

@@ -0,0 +1,18 @@
"""Audio device routes: enumerate available audio devices."""
from fastapi import APIRouter
from wled_controller.api.auth import AuthRequired
from wled_controller.core.audio.audio_capture import AudioCaptureManager
router = APIRouter()
@router.get("/api/v1/audio-devices", tags=["Audio"])
async def list_audio_devices(_auth: AuthRequired):
    """List available audio input/output devices for audio-reactive sources."""
    try:
        found = AudioCaptureManager.enumerate_devices()
    except Exception as exc:  # enumeration is best-effort: report, don't 500
        return {"devices": [], "count": 0, "error": str(exc)}
    return {"devices": found, "count": len(found)}

View File

@@ -80,6 +80,11 @@ def _css_to_response(source, overlay_active: bool = False) -> ColorStripSourceRe
frame_interpolation=getattr(source, "frame_interpolation", None),
animation=getattr(source, "animation", None),
layers=getattr(source, "layers", None),
visualization_mode=getattr(source, "visualization_mode", None),
audio_device_index=getattr(source, "audio_device_index", None),
audio_loopback=getattr(source, "audio_loopback", None),
sensitivity=getattr(source, "sensitivity", None),
color_peak=getattr(source, "color_peak", None),
overlay_active=overlay_active,
created_at=source.created_at,
updated_at=source.updated_at,
@@ -156,6 +161,11 @@ async def create_color_strip_source(
scale=data.scale,
mirror=data.mirror,
layers=layers,
visualization_mode=data.visualization_mode,
audio_device_index=data.audio_device_index,
audio_loopback=data.audio_loopback,
sensitivity=data.sensitivity,
color_peak=data.color_peak,
)
return _css_to_response(source)
@@ -224,6 +234,11 @@ async def update_color_strip_source(
scale=data.scale,
mirror=data.mirror,
layers=layers,
visualization_mode=data.visualization_mode,
audio_device_index=data.audio_device_index,
audio_loopback=data.audio_loopback,
sensitivity=data.sensitivity,
color_peak=data.color_peak,
)
# Hot-reload running stream (no restart needed for in-place param changes)
@@ -250,17 +265,21 @@ async def delete_color_strip_source(
):
"""Delete a color strip source. Returns 409 if referenced by any LED target."""
try:
if target_store.is_referenced_by_color_strip_source(source_id):
target_names = target_store.get_targets_referencing_css(source_id)
if target_names:
names = ", ".join(target_names)
raise HTTPException(
status_code=409,
detail="Color strip source is referenced by one or more LED targets. "
"Delete or reassign the targets first.",
detail=f"Color strip source is referenced by target(s): {names}. "
"Delete or reassign the target(s) first.",
)
if store.is_referenced_by_composite(source_id):
composite_names = store.get_composites_referencing(source_id)
if composite_names:
names = ", ".join(composite_names)
raise HTTPException(
status_code=409,
detail="Color strip source is used as a layer in a composite source. "
"Remove it from the composite first.",
detail=f"Color strip source is used as a layer in composite source(s): {names}. "
"Remove it from the composite(s) first.",
)
store.delete_source(source_id)
except HTTPException:

View File

@@ -131,10 +131,12 @@ async def delete_pattern_template(
):
"""Delete a pattern template."""
try:
if store.is_referenced_by(template_id, target_store):
target_names = store.get_targets_referencing(template_id, target_store)
if target_names:
names = ", ".join(target_names)
raise HTTPException(
status_code=409,
detail="Cannot delete pattern template: it is referenced by one or more key colors targets. "
detail=f"Cannot delete pattern template: it is referenced by target(s): {names}. "
"Please reassign those targets before deleting.",
)
store.delete_template(template_id)

View File

@@ -263,10 +263,12 @@ async def delete_picture_source(
"""Delete a picture source."""
try:
# Check if any target references this stream
if store.is_referenced_by_target(stream_id, target_store):
target_names = store.get_targets_referencing(stream_id, target_store)
if target_names:
names = ", ".join(target_names)
raise HTTPException(
status_code=409,
detail="Cannot delete picture source: it is assigned to one or more targets. "
detail=f"Cannot delete picture source: it is assigned to target(s): {names}. "
"Please reassign those targets before deleting.",
)
store.delete_stream(stream_id)

View File

@@ -142,10 +142,12 @@ async def delete_pp_template(
"""Delete a postprocessing template."""
try:
# Check if any picture source references this template
if store.is_referenced_by(template_id, stream_store):
source_names = store.get_sources_referencing(template_id, stream_store)
if source_names:
names = ", ".join(source_names)
raise HTTPException(
status_code=409,
detail="Cannot delete postprocessing template: it is referenced by one or more picture sources. "
detail=f"Cannot delete postprocessing template: it is referenced by picture source(s): {names}. "
"Please reassign those streams before deleting.",
)
store.delete_template(template_id)

View File

@@ -40,7 +40,7 @@ class ColorStripSourceCreate(BaseModel):
"""Request to create a color strip source."""
name: str = Field(description="Source name", min_length=1, max_length=100)
source_type: Literal["picture", "static", "gradient", "color_cycle", "effect", "composite"] = Field(default="picture", description="Source type")
source_type: Literal["picture", "static", "gradient", "color_cycle", "effect", "composite", "audio"] = Field(default="picture", description="Source type")
# picture-type fields
picture_source_id: str = Field(default="", description="Picture source ID (for picture type)")
brightness: float = Field(default=1.0, description="Brightness multiplier (0.0-2.0)", ge=0.0, le=2.0)
@@ -65,6 +65,12 @@ class ColorStripSourceCreate(BaseModel):
mirror: Optional[bool] = Field(None, description="Mirror/bounce mode (meteor)")
# composite-type fields
layers: Optional[List[CompositeLayer]] = Field(None, description="Layers for composite type")
# audio-type fields
visualization_mode: Optional[str] = Field(None, description="Audio visualization: spectrum|beat_pulse|vu_meter")
audio_device_index: Optional[int] = Field(None, description="Audio device index (-1 = default)")
audio_loopback: Optional[bool] = Field(None, description="True for system audio (WASAPI loopback), False for mic/line-in")
sensitivity: Optional[float] = Field(None, description="Audio sensitivity/gain 0.1-5.0", ge=0.1, le=5.0)
color_peak: Optional[List[int]] = Field(None, description="Peak/high RGB color for VU meter [R,G,B]")
# shared
led_count: int = Field(default=0, description="Total LED count (0 = auto from calibration / device)", ge=0)
description: Optional[str] = Field(None, description="Optional description", max_length=500)
@@ -100,6 +106,12 @@ class ColorStripSourceUpdate(BaseModel):
mirror: Optional[bool] = Field(None, description="Mirror/bounce mode")
# composite-type fields
layers: Optional[List[CompositeLayer]] = Field(None, description="Layers for composite type")
# audio-type fields
visualization_mode: Optional[str] = Field(None, description="Audio visualization: spectrum|beat_pulse|vu_meter")
audio_device_index: Optional[int] = Field(None, description="Audio device index (-1 = default)")
audio_loopback: Optional[bool] = Field(None, description="True for system audio (WASAPI loopback), False for mic/line-in")
sensitivity: Optional[float] = Field(None, description="Audio sensitivity/gain 0.1-5.0", ge=0.1, le=5.0)
color_peak: Optional[List[int]] = Field(None, description="Peak/high RGB color for VU meter [R,G,B]")
# shared
led_count: Optional[int] = Field(None, description="Total LED count (0 = auto from calibration / device)", ge=0)
description: Optional[str] = Field(None, description="Optional description", max_length=500)
@@ -137,6 +149,12 @@ class ColorStripSourceResponse(BaseModel):
mirror: Optional[bool] = Field(None, description="Mirror/bounce mode")
# composite-type fields
layers: Optional[List[dict]] = Field(None, description="Layers for composite type")
# audio-type fields
visualization_mode: Optional[str] = Field(None, description="Audio visualization mode")
audio_device_index: Optional[int] = Field(None, description="Audio device index")
audio_loopback: Optional[bool] = Field(None, description="WASAPI loopback mode")
sensitivity: Optional[float] = Field(None, description="Audio sensitivity")
color_peak: Optional[List[int]] = Field(None, description="Peak color [R,G,B]")
# shared
led_count: int = Field(0, description="Total LED count (0 = auto from calibration / device)")
description: Optional[str] = Field(None, description="Description")

View File

@@ -0,0 +1,442 @@
"""Audio capture service — shared audio analysis with ref counting.
Provides real-time FFT spectrum, RMS level, and beat detection from
system audio (WASAPI loopback) or microphone/line-in. Multiple
AudioColorStripStreams sharing the same device reuse a single capture
thread via AudioCaptureManager.
Uses PyAudioWPatch for WASAPI loopback support on Windows.
"""
import math
import threading
import time
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Tuple
import numpy as np
from wled_controller.utils import get_logger
logger = get_logger(__name__)
# Number of logarithmic frequency bands for spectrum analysis
NUM_BANDS = 64
# Audio defaults; the device's own default sample rate replaces
# DEFAULT_SAMPLE_RATE when a specific device is opened.
DEFAULT_SAMPLE_RATE = 44100
DEFAULT_CHUNK_SIZE = 2048  # ~46 ms at 44100 Hz; also the FFT size
# ---------------------------------------------------------------------------
# AudioAnalysis — thread-safe snapshot of latest analysis results
# ---------------------------------------------------------------------------
@dataclass
class AudioAnalysis:
    """Snapshot of audio analysis results.
    Written by the capture thread, read by visualization streams.
    """

    # time.perf_counter() value when this snapshot was produced
    timestamp: float = 0.0
    # root-mean-square level of the latest mono chunk (linear scale)
    rms: float = 0.0
    # absolute peak sample value of the latest chunk
    peak: float = 0.0
    # NUM_BANDS log-spaced band magnitudes, smoothed and scaled to 0..1
    spectrum: np.ndarray = field(default_factory=lambda: np.zeros(NUM_BANDS, dtype=np.float32))
    # True when chunk energy spiked >1.5x the rolling average
    beat: bool = False
    # 0..1 strength of the detected beat (0.0 when beat is False)
    beat_intensity: float = 0.0
# ---------------------------------------------------------------------------
# AudioCaptureStream — one per unique audio device
# ---------------------------------------------------------------------------
def _build_log_bands(num_bands: int, fft_size: int, sample_rate: int) -> List[Tuple[int, int]]:
"""Build logarithmically-spaced frequency band boundaries for FFT bins.
Returns list of (start_bin, end_bin) pairs.
"""
nyquist = sample_rate / 2
# Map bands to log-spaced frequencies from 20 Hz to Nyquist
min_freq = 20.0
max_freq = min(nyquist, 20000.0)
log_min = math.log10(min_freq)
log_max = math.log10(max_freq)
freqs = np.logspace(log_min, log_max, num_bands + 1)
bin_width = sample_rate / fft_size
bands = []
for i in range(num_bands):
start_bin = max(1, int(freqs[i] / bin_width))
end_bin = max(start_bin + 1, int(freqs[i + 1] / bin_width))
# Clamp to FFT range
end_bin = min(end_bin, fft_size // 2)
bands.append((start_bin, end_bin))
return bands
class AudioCaptureStream:
    """Captures audio from a single device and provides real-time analysis.
    Runs a background thread that reads audio chunks, computes FFT, RMS,
    and beat detection. Consumers read the latest analysis via
    ``get_latest_analysis()`` (thread-safe).
    """

    def __init__(
        self,
        device_index: int,
        is_loopback: bool,
        sample_rate: int = DEFAULT_SAMPLE_RATE,
        chunk_size: int = DEFAULT_CHUNK_SIZE,
    ):
        """
        Args:
            device_index: PyAudio device index (-1 = default device).
            is_loopback: Capture system output via WASAPI loopback instead
                of a regular input device.
            sample_rate: Requested sample rate; may be replaced by the
                device's own default rate once the stream is opened.
            chunk_size: Samples per analysis chunk; also the FFT size.
        """
        self._device_index = device_index
        self._is_loopback = is_loopback
        self._sample_rate = sample_rate
        self._chunk_size = chunk_size
        self._running = False
        self._thread: Optional[threading.Thread] = None
        self._lock = threading.Lock()
        # Latest analysis snapshot, replaced wholesale every chunk.
        self._latest: Optional[AudioAnalysis] = None
        # Pre-allocated FFT helpers
        self._window = np.hanning(chunk_size).astype(np.float32)
        self._bands = _build_log_bands(NUM_BANDS, chunk_size, sample_rate)
        # Beat detection state
        self._energy_history: np.ndarray = np.zeros(43, dtype=np.float64)  # ~1s at 44100/2048
        self._energy_idx = 0
        # Smoothed spectrum (exponential decay between frames)
        self._smooth_spectrum = np.zeros(NUM_BANDS, dtype=np.float32)

    def start(self) -> None:
        """Start the background capture thread (no-op if already running)."""
        if self._running:
            return
        self._running = True
        self._thread = threading.Thread(
            target=self._capture_loop, daemon=True,
            name=f"AudioCapture-{self._device_index}-{'lb' if self._is_loopback else 'in'}",
        )
        self._thread.start()
        logger.info(
            f"AudioCaptureStream started: device={self._device_index} "
            f"loopback={self._is_loopback} sr={self._sample_rate} chunk={self._chunk_size}"
        )

    def stop(self) -> None:
        """Signal the capture thread to exit and wait up to 5 s for it."""
        self._running = False
        if self._thread is not None:
            self._thread.join(timeout=5.0)
            self._thread = None
        with self._lock:
            # Clear the snapshot so consumers don't keep reading stale data.
            self._latest = None
        logger.info(f"AudioCaptureStream stopped: device={self._device_index}")

    def get_latest_analysis(self) -> Optional[AudioAnalysis]:
        """Return the most recent AudioAnalysis snapshot, or None if none yet."""
        with self._lock:
            return self._latest

    def _capture_loop(self) -> None:
        """Thread body: open the PyAudio stream and analyze chunks until stopped."""
        try:
            import pyaudiowpatch as pyaudio
        except ImportError:
            logger.error("PyAudioWPatch is not installed — audio capture unavailable")
            self._running = False
            return
        pa = None
        stream = None
        try:
            pa = pyaudio.PyAudio()
            if self._is_loopback:
                # Loopback capture: find the loopback device for the output device
                loopback_device = self._find_loopback_device(pa, self._device_index)
                if loopback_device is None:
                    logger.error(
                        f"No loopback device found for output device {self._device_index}"
                    )
                    self._running = False
                    return
                device_idx = loopback_device["index"]
                # assumes the loopback device reports >= 1 input channel — TODO confirm
                channels = loopback_device["maxInputChannels"]
                sample_rate = int(loopback_device["defaultSampleRate"])
            else:
                # Regular input device; None lets PortAudio pick the default.
                device_idx = self._device_index if self._device_index >= 0 else None
                if device_idx is not None:
                    dev_info = pa.get_device_info_by_index(device_idx)
                    channels = max(1, dev_info["maxInputChannels"])
                    sample_rate = int(dev_info["defaultSampleRate"])
                else:
                    channels = 1
                    sample_rate = self._sample_rate
            # Update FFT helpers if sample rate changed (window is unaffected
            # because chunk_size is unchanged; only band boundaries shift).
            if sample_rate != self._sample_rate:
                self._sample_rate = sample_rate
                self._bands = _build_log_bands(NUM_BANDS, self._chunk_size, sample_rate)
            stream = pa.open(
                format=pyaudio.paFloat32,
                channels=channels,
                rate=sample_rate,
                input=True,
                input_device_index=device_idx,
                frames_per_buffer=self._chunk_size,
            )
            logger.info(
                f"Audio stream opened: device={device_idx} loopback={self._is_loopback} "
                f"channels={channels} sr={sample_rate}"
            )
            spectrum_buf = np.zeros(NUM_BANDS, dtype=np.float32)
            while self._running:
                try:
                    raw_data = stream.read(self._chunk_size, exception_on_overflow=False)
                    data = np.frombuffer(raw_data, dtype=np.float32)
                except Exception as e:
                    # Transient read failure (device glitch): back off briefly.
                    logger.warning(f"Audio read error: {e}")
                    time.sleep(0.05)
                    continue
                # Mix to mono if multi-channel
                if channels > 1:
                    data = data.reshape(-1, channels)
                    samples = data.mean(axis=1).astype(np.float32)
                else:
                    samples = data
                # RMS and peak
                rms = float(np.sqrt(np.mean(samples ** 2)))
                peak = float(np.max(np.abs(samples)))
                # FFT (Hann-windowed; zero-pad if the read came up short)
                chunk = samples[: self._chunk_size]
                if len(chunk) < self._chunk_size:
                    chunk = np.pad(chunk, (0, self._chunk_size - len(chunk)))
                windowed = chunk * self._window
                fft_mag = np.abs(np.fft.rfft(windowed))
                # Normalize by chunk size
                fft_mag /= self._chunk_size
                # Bin into logarithmic bands
                for b, (start, end) in enumerate(self._bands):
                    if start < len(fft_mag) and end <= len(fft_mag):
                        spectrum_buf[b] = float(np.mean(fft_mag[start:end]))
                    else:
                        spectrum_buf[b] = 0.0
                # Normalize spectrum to 0-1 range (adaptive: relative to this
                # chunk's loudest band, so the output reflects spectral shape,
                # not absolute loudness)
                spec_max = float(np.max(spectrum_buf))
                if spec_max > 1e-6:
                    spectrum_buf /= spec_max
                # Exponential smoothing
                alpha = 0.3  # smoothing factor (lower = smoother)
                self._smooth_spectrum[:] = (
                    alpha * spectrum_buf + (1.0 - alpha) * self._smooth_spectrum
                )
                # Beat detection — compare current energy to rolling average
                energy = float(np.sum(samples ** 2))
                self._energy_history[self._energy_idx % len(self._energy_history)] = energy
                self._energy_idx += 1
                # NOTE(review): the history starts zero-filled, so the average
                # is biased low for the first ~1 s and early loud chunks may
                # register spurious beats — confirm this is acceptable.
                avg_energy = float(np.mean(self._energy_history))
                beat = False
                beat_intensity = 0.0
                if avg_energy > 1e-8:
                    ratio = energy / avg_energy
                    if ratio > 1.5:
                        beat = True
                        beat_intensity = min(1.0, (ratio - 1.0) / 2.0)
                analysis = AudioAnalysis(
                    timestamp=time.perf_counter(),
                    rms=rms,
                    peak=peak,
                    spectrum=self._smooth_spectrum.copy(),
                    beat=beat,
                    beat_intensity=beat_intensity,
                )
                with self._lock:
                    self._latest = analysis
        except Exception as e:
            logger.error(f"AudioCaptureStream fatal error: {e}", exc_info=True)
        finally:
            # Best-effort teardown: stream/context may already be broken,
            # so swallow close/terminate errors.
            if stream is not None:
                try:
                    stream.stop_stream()
                    stream.close()
                except Exception:
                    pass
            if pa is not None:
                try:
                    pa.terminate()
                except Exception:
                    pass
            self._running = False
            logger.info(f"AudioCaptureStream loop ended: device={self._device_index}")

    @staticmethod
    def _find_loopback_device(pa, output_device_index: int) -> Optional[dict]:
        """Find the PyAudioWPatch loopback device for a given output device.
        PyAudioWPatch exposes virtual loopback input devices for each WASAPI
        output device. We match by name via ``get_loopback_device_info_generator()``.
        """
        try:
            first_loopback = None
            for loopback in pa.get_loopback_device_info_generator():
                if first_loopback is None:
                    first_loopback = loopback
                # Default (-1): return first loopback device (typically default speakers)
                if output_device_index < 0:
                    return loopback
                # Match by output device name contained in loopback device name
                # (target info re-fetched per candidate; cheap for the handful
                # of WASAPI devices typically present)
                target_info = pa.get_device_info_by_index(output_device_index)
                if target_info["name"] in loopback["name"]:
                    return loopback
            # No exact match — return first available loopback
            return first_loopback
        except Exception as e:
            logger.error(f"Error finding loopback device: {e}")
            return None
# ---------------------------------------------------------------------------
# AudioCaptureManager — ref-counted shared capture streams
# ---------------------------------------------------------------------------
class AudioCaptureManager:
    """Manages shared AudioCaptureStream instances with reference counting.
    Multiple AudioColorStripStreams using the same audio device share a
    single capture thread.
    """

    def __init__(self):
        # (device_index, is_loopback) -> (stream, ref_count)
        self._streams: Dict[Tuple[int, bool], Tuple[AudioCaptureStream, int]] = {}
        self._lock = threading.Lock()

    def acquire(self, device_index: int, is_loopback: bool) -> AudioCaptureStream:
        """Get or create an AudioCaptureStream for the given device.
        Returns:
            Shared AudioCaptureStream instance.
        """
        key = (device_index, is_loopback)
        with self._lock:
            entry = self._streams.get(key)
            if entry is not None:
                shared, refs = entry
                self._streams[key] = (shared, refs + 1)
                logger.info(f"Reusing audio capture {key} (ref_count={refs + 1})")
                return shared
            # First consumer for this device: spin up a capture thread.
            shared = AudioCaptureStream(device_index, is_loopback)
            shared.start()
            self._streams[key] = (shared, 1)
            logger.info(f"Created audio capture {key}")
            return shared

    def release(self, device_index: int, is_loopback: bool) -> None:
        """Release a reference to an AudioCaptureStream."""
        key = (device_index, is_loopback)
        with self._lock:
            entry = self._streams.get(key)
            if entry is None:
                logger.warning(f"Attempted to release unknown audio capture: {key}")
                return
            shared, refs = entry
            refs -= 1
            if refs > 0:
                self._streams[key] = (shared, refs)
                logger.debug(f"Released audio capture {key} (ref_count={refs})")
            else:
                # Last reference gone: stop the capture thread and forget it.
                shared.stop()
                del self._streams[key]
                logger.info(f"Removed audio capture {key}")

    def release_all(self) -> None:
        """Stop and remove all capture streams. Called on shutdown."""
        with self._lock:
            for key, (shared, _refs) in list(self._streams.items()):
                try:
                    shared.stop()
                except Exception as exc:
                    logger.error(f"Error stopping audio capture {key}: {exc}")
            self._streams.clear()
            logger.info("Released all audio capture streams")

    @staticmethod
    def enumerate_devices() -> List[dict]:
        """List available audio devices for the frontend dropdown.
        Returns list of dicts with device info. Output devices with WASAPI
        hostapi are marked as loopback candidates.
        """
        try:
            import pyaudiowpatch as pyaudio
        except ImportError:
            logger.warning("PyAudioWPatch not installed — no audio devices available")
            return []
        pa = None
        try:
            pa = pyaudio.PyAudio()
            wasapi_idx = pa.get_host_api_info_by_type(pyaudio.paWASAPI)["index"]
            devices: List[dict] = []
            for i in range(pa.get_device_count()):
                info = pa.get_device_info_by_index(i)
                # Only WASAPI devices are usable for loopback-capable capture.
                if info["hostApi"] != wasapi_idx:
                    continue
                if info["maxInputChannels"] > 0:
                    devices.append({
                        "index": i,
                        "name": info["name"],
                        "is_input": True,
                        "is_loopback": False,
                        "channels": info["maxInputChannels"],
                        "default_samplerate": info["defaultSampleRate"],
                    })
                if info["maxOutputChannels"] > 0:
                    devices.append({
                        "index": i,
                        "name": f"{info['name']} [Loopback]",
                        "is_input": False,
                        "is_loopback": True,
                        "channels": info["maxOutputChannels"],
                        "default_samplerate": info["defaultSampleRate"],
                    })
            return devices
        except Exception as e:
            logger.error(f"Failed to enumerate audio devices: {e}", exc_info=True)
            return []
        finally:
            if pa is not None:
                try:
                    pa.terminate()
                except Exception:
                    pass

View File

@@ -0,0 +1,300 @@
"""Audio-reactive LED color strip stream.
Implements AudioColorStripStream which produces LED color arrays driven by
real-time audio analysis (spectrum, beat detection, RMS levels).
Three visualization modes:
spectrum — FFT frequency bars mapped across LEDs with palette coloring
beat_pulse — full-strip flash on beat detection with exponential decay
vu_meter — volume level fills LEDs like a progress bar
"""
import threading
import time
from typing import Optional
import numpy as np
from wled_controller.core.audio.audio_capture import AudioCaptureManager
from wled_controller.core.processing.color_strip_stream import ColorStripStream
from wled_controller.core.processing.effect_stream import _build_palette_lut
from wled_controller.utils import get_logger
from wled_controller.utils.timer import high_resolution_timer
logger = get_logger(__name__)
class AudioColorStripStream(ColorStripStream):
    """Color strip stream driven by audio analysis.
    Dispatches to one of three render methods based on visualization_mode:
    spectrum, beat_pulse, vu_meter.
    Polls AudioCaptureStream for latest analysis data and renders to LED array.
    Uses the same lifecycle pattern as EffectColorStripStream: background
    thread, double-buffered output, configure() for auto-sizing.
    """

    def __init__(self, source, audio_capture_manager: AudioCaptureManager):
        """
        Args:
            source: Audio color strip source config (visualization mode,
                device selection, sensitivity, colors, led_count, ...).
            audio_capture_manager: Shared manager used to acquire/release
                the ref-counted audio capture on start()/stop().
        """
        self._audio_capture_manager = audio_capture_manager
        self._audio_stream = None  # acquired on start
        self._colors_lock = threading.Lock()
        self._running = False
        self._thread: Optional[threading.Thread] = None
        self._fps = 30
        # Beat pulse persistent state
        self._pulse_brightness = 0.0
        # Smoothed spectrum for temporal smoothing between frames
        self._prev_spectrum: Optional[np.ndarray] = None
        self._prev_rms = 0.0
        self._update_from_source(source)

    def _update_from_source(self, source) -> None:
        # Pull all tunable parameters from the source config; missing
        # attributes fall back to defaults via getattr.
        self._visualization_mode = getattr(source, "visualization_mode", "spectrum")
        self._audio_device_index = getattr(source, "audio_device_index", -1)
        self._audio_loopback = bool(getattr(source, "audio_loopback", True))
        self._sensitivity = float(getattr(source, "sensitivity", 1.0))
        # NOTE(review): "smoothing" is read here but is not among the audio
        # fields in the API schema — presumably an optional attribute; verify.
        self._smoothing = float(getattr(source, "smoothing", 0.3))
        self._palette_name = getattr(source, "palette", "rainbow")
        self._palette_lut = _build_palette_lut(self._palette_name)
        # Colors must be [R, G, B] lists; anything else falls back to defaults.
        color = getattr(source, "color", None)
        self._color = color if isinstance(color, list) and len(color) == 3 else [0, 255, 0]
        color_peak = getattr(source, "color_peak", None)
        self._color_peak = color_peak if isinstance(color_peak, list) and len(color_peak) == 3 else [255, 0, 0]
        # led_count == 0 means "auto-size from the device via configure()"
        self._auto_size = not source.led_count
        self._led_count = source.led_count if source.led_count and source.led_count > 0 else 1
        self._mirror = bool(getattr(source, "mirror", False))
        with self._colors_lock:
            # Drop the last rendered frame; consumers read None until the
            # animation loop publishes the next buffer.
            self._colors: Optional[np.ndarray] = None

    # ── ColorStripStream interface ──────────────────────────────────
    def configure(self, device_led_count: int) -> None:
        """Grow the strip to the device's LED count when auto-sizing."""
        if self._auto_size and device_led_count > 0:
            # Only ever grows, never shrinks below the current count.
            new_count = max(self._led_count, device_led_count)
            if new_count != self._led_count:
                self._led_count = new_count
                logger.debug(f"AudioColorStripStream auto-sized to {new_count} LEDs")

    @property
    def target_fps(self) -> int:
        # Render rate of the animation loop.
        return self._fps

    @property
    def led_count(self) -> int:
        return self._led_count

    def set_capture_fps(self, fps: int) -> None:
        """Set the render rate, clamped to 1..90 fps."""
        self._fps = max(1, min(90, fps))

    def start(self) -> None:
        """Acquire the shared audio capture and start the render thread."""
        if self._running:
            return
        # Acquire shared audio capture stream
        self._audio_stream = self._audio_capture_manager.acquire(
            self._audio_device_index, self._audio_loopback
        )
        self._running = True
        self._thread = threading.Thread(
            target=self._animate_loop,
            name=f"css-audio-{self._visualization_mode}",
            daemon=True,
        )
        self._thread.start()
        logger.info(
            f"AudioColorStripStream started (viz={self._visualization_mode}, "
            f"device={self._audio_device_index}, loopback={self._audio_loopback})"
        )

    def stop(self) -> None:
        """Stop the render thread, then release the shared audio capture."""
        self._running = False
        if self._thread:
            self._thread.join(timeout=5.0)
            if self._thread.is_alive():
                logger.warning("AudioColorStripStream thread did not terminate within 5s")
            self._thread = None
        # Release shared audio capture
        if self._audio_stream is not None:
            self._audio_capture_manager.release(self._audio_device_index, self._audio_loopback)
            self._audio_stream = None
        self._prev_spectrum = None
        logger.info("AudioColorStripStream stopped")

    def get_latest_colors(self) -> Optional[np.ndarray]:
        """Return the most recently rendered (n, 3) uint8 frame, or None."""
        with self._colors_lock:
            return self._colors

    def update_source(self, source) -> None:
        """Apply updated source config in place; swap audio device if it changed."""
        from wled_controller.storage.color_strip_source import AudioColorStripSource
        if isinstance(source, AudioColorStripSource):
            old_device = self._audio_device_index
            old_loopback = self._audio_loopback
            # Preserve the auto-sized LED count across the reset in
            # _update_from_source (which would drop it back to 1).
            prev_led_count = self._led_count if self._auto_size else None
            self._update_from_source(source)
            if prev_led_count and self._auto_size:
                self._led_count = prev_led_count
            # If audio device changed, swap capture stream
            if self._running and (self._audio_device_index != old_device or self._audio_loopback != old_loopback):
                self._audio_capture_manager.release(old_device, old_loopback)
                self._audio_stream = self._audio_capture_manager.acquire(
                    self._audio_device_index, self._audio_loopback
                )
                logger.info(
                    # NOTE(review): old and new device print back-to-back —
                    # a separator (e.g. " -> ") between {old_loopback} and
                    # {self._audio_device_index} appears to be missing;
                    # confirm the intended message format.
                    f"AudioColorStripStream swapped audio device: "
                    f"{old_device}:{old_loopback}{self._audio_device_index}:{self._audio_loopback}"
                )
            logger.info("AudioColorStripStream params updated in-place")

    # ── Main animation loop ─────────────────────────────────────────
    def _animate_loop(self) -> None:
        """Thread body: render frames at target fps into alternating buffers."""
        _pool_n = 0
        _buf_a = _buf_b = None
        _use_a = True
        # Dispatch table; unknown (or None) modes fall back to spectrum.
        renderers = {
            "spectrum": self._render_spectrum,
            "beat_pulse": self._render_beat_pulse,
            "vu_meter": self._render_vu_meter,
        }
        with high_resolution_timer():
            while self._running:
                loop_start = time.perf_counter()
                frame_time = 1.0 / self._fps
                n = self._led_count
                # Rebuild scratch buffers when LED count changes
                if n != _pool_n:
                    _pool_n = n
                    _buf_a = np.zeros((n, 3), dtype=np.uint8)
                    _buf_b = np.zeros((n, 3), dtype=np.uint8)
                # Double buffering: consumers keep reading the previously
                # published frame while this one is being written.
                buf = _buf_a if _use_a else _buf_b
                _use_a = not _use_a
                # Get latest audio analysis
                analysis = None
                if self._audio_stream is not None:
                    analysis = self._audio_stream.get_latest_analysis()
                render_fn = renderers.get(self._visualization_mode, self._render_spectrum)
                render_fn(buf, n, analysis)
                with self._colors_lock:
                    self._colors = buf
                # Sleep out the remainder of the frame budget (min 1 ms).
                elapsed = time.perf_counter() - loop_start
                time.sleep(max(frame_time - elapsed, 0.001))

    # ── Spectrum Analyzer ──────────────────────────────────────────
    def _render_spectrum(self, buf: np.ndarray, n: int, analysis) -> None:
        """Map the FFT band spectrum across the strip with palette coloring."""
        if analysis is None:
            # No audio data yet: blank frame.
            buf[:] = 0
            return
        spectrum = analysis.spectrum
        sensitivity = self._sensitivity
        smoothing = self._smoothing
        lut = self._palette_lut
        num_bands = len(spectrum)
        band_x = np.arange(num_bands, dtype=np.float32)
        if self._mirror:
            # Render half the strip, then mirror around the center.
            half = (n + 1) // 2
            led_x = np.linspace(0, num_bands - 1, half)
            amplitudes = np.interp(led_x, band_x, spectrum)
            amplitudes *= sensitivity
            np.clip(amplitudes, 0.0, 1.0, out=amplitudes)
            # Temporal smoothing
            if self._prev_spectrum is not None and len(self._prev_spectrum) == half:
                amplitudes[:] = smoothing * self._prev_spectrum + (1.0 - smoothing) * amplitudes
            self._prev_spectrum = amplitudes.copy()
            # Mirror: center = bass, edges = treble
            full_amp = np.empty(n, dtype=np.float32)
            full_amp[:half] = amplitudes[::-1]
            full_amp[half:] = amplitudes[: n - half]
        else:
            led_x = np.linspace(0, num_bands - 1, n)
            amplitudes = np.interp(led_x, band_x, spectrum)
            amplitudes *= sensitivity
            np.clip(amplitudes, 0.0, 1.0, out=amplitudes)
            # Temporal smoothing
            if self._prev_spectrum is not None and len(self._prev_spectrum) == n:
                amplitudes[:] = smoothing * self._prev_spectrum + (1.0 - smoothing) * amplitudes
            self._prev_spectrum = amplitudes.copy()
            full_amp = amplitudes
        # Map to palette: amplitude → palette index → color
        indices = (full_amp * 255).astype(np.int32)
        np.clip(indices, 0, 255, out=indices)
        colors = lut[indices]  # (n, 3) uint8
        # Scale brightness by amplitude
        for ch in range(3):
            buf[:, ch] = (colors[:, ch].astype(np.float32) * full_amp).astype(np.uint8)

    # ── VU Meter ───────────────────────────────────────────────────
    def _render_vu_meter(self, buf: np.ndarray, n: int, analysis) -> None:
        """Fill LEDs like a level bar, gradient from base color to peak color."""
        if analysis is None:
            buf[:] = 0
            return
        rms = analysis.rms * self._sensitivity
        # Temporal smoothing on RMS
        rms = self._smoothing * self._prev_rms + (1.0 - self._smoothing) * rms
        self._prev_rms = rms
        rms = min(1.0, rms)
        fill_count = int(rms * n)
        buf[:] = 0
        if fill_count > 0:
            base = np.array(self._color, dtype=np.float32)
            peak = np.array(self._color_peak, dtype=np.float32)
            # Gradient from base color to peak color; t is computed over the
            # full strip so the gradient position is stable as the fill moves.
            t = np.linspace(0, 1, n, dtype=np.float32)[:fill_count]
            for ch in range(3):
                buf[:fill_count, ch] = np.clip(
                    base[ch] + (peak[ch] - base[ch]) * t, 0, 255
                ).astype(np.uint8)

    # ── Beat Pulse ─────────────────────────────────────────────────
    def _render_beat_pulse(self, buf: np.ndarray, n: int, analysis) -> None:
        """Flash the whole strip on beats; brightness decays between beats."""
        if analysis is None:
            buf[:] = 0
            return
        # On beat: flash to full brightness
        if analysis.beat:
            self._pulse_brightness = 1.0
        else:
            # Exponential decay — sensitivity controls decay speed
            # (NOTE: decay is per frame, so perceived speed scales with fps)
            decay_rate = 0.05 + 0.15 * (1.0 / max(self._sensitivity, 0.1))
            self._pulse_brightness = max(0.0, self._pulse_brightness - decay_rate)
        brightness = self._pulse_brightness
        if brightness < 0.01:
            buf[:] = 0
            return
        # Color from palette based on beat intensity
        palette_idx = int(analysis.beat_intensity * 255)
        palette_idx = max(0, min(255, palette_idx))
        base_color = self._palette_lut[palette_idx]
        # Fill all LEDs with pulsing color
        buf[:, 0] = int(base_color[0] * brightness)
        buf[:, 1] = int(base_color[1] * brightness)
        buf[:, 2] = int(base_color[2] * brightness)

View File

@@ -56,14 +56,16 @@ class ColorStripStreamManager:
keyed by ``{css_id}:{consumer_id}``.
"""
def __init__(self, color_strip_store, live_stream_manager):
def __init__(self, color_strip_store, live_stream_manager, audio_capture_manager=None):
"""
Args:
color_strip_store: ColorStripStore for resolving source configs
live_stream_manager: LiveStreamManager for acquiring picture streams
audio_capture_manager: AudioCaptureManager for audio-reactive sources
"""
self._color_strip_store = color_strip_store
self._live_stream_manager = live_stream_manager
self._audio_capture_manager = audio_capture_manager
self._streams: Dict[str, _ColorStripEntry] = {}
def _resolve_key(self, css_id: str, consumer_id: str) -> str:
@@ -100,7 +102,10 @@ class ColorStripStreamManager:
# Non-sharable: always create a fresh per-consumer instance
if not source.sharable:
if source.source_type == "composite":
if source.source_type == "audio":
from wled_controller.core.processing.audio_stream import AudioColorStripStream
css_stream = AudioColorStripStream(source, self._audio_capture_manager)
elif source.source_type == "composite":
from wled_controller.core.processing.composite_stream import CompositeColorStripStream
css_stream = CompositeColorStripStream(source, self)
else:

View File

@@ -13,6 +13,7 @@ from wled_controller.core.devices.led_client import (
create_led_client,
get_provider,
)
from wled_controller.core.audio.audio_capture import AudioCaptureManager
from wled_controller.core.processing.live_stream_manager import LiveStreamManager
from wled_controller.core.processing.color_strip_stream_manager import ColorStripStreamManager
from wled_controller.core.capture.screen_overlay import OverlayManager
@@ -79,9 +80,11 @@ class ProcessorManager:
self._live_stream_manager = LiveStreamManager(
picture_source_store, capture_template_store, pp_template_store
)
self._audio_capture_manager = AudioCaptureManager()
self._color_strip_stream_manager = ColorStripStreamManager(
color_strip_store=color_strip_store,
live_stream_manager=self._live_stream_manager,
audio_capture_manager=self._audio_capture_manager,
)
self._overlay_manager = OverlayManager()
self._event_queues: List[asyncio.Queue] = []

View File

@@ -92,6 +92,7 @@ import {
onCSSTypeChange, onEffectTypeChange, onAnimationTypeChange, updateEffectPreview,
colorCycleAddColor, colorCycleRemoveColor,
compositeAddLayer, compositeRemoveLayer,
onAudioVizChange,
applyGradientPreset,
} from './features/color-strips.js';
@@ -284,6 +285,7 @@ Object.assign(window, {
colorCycleRemoveColor,
compositeAddLayer,
compositeRemoveLayer,
onAudioVizChange,
applyGradientPreset,
// calibration

View File

@@ -39,6 +39,14 @@ class CSSEditorModal extends Modal {
effect_scale: document.getElementById('css-editor-effect-scale').value,
effect_mirror: document.getElementById('css-editor-effect-mirror').checked,
composite_layers: JSON.stringify(_compositeLayers),
audio_viz: document.getElementById('css-editor-audio-viz').value,
audio_device: document.getElementById('css-editor-audio-device').value,
audio_sensitivity: document.getElementById('css-editor-audio-sensitivity').value,
audio_smoothing: document.getElementById('css-editor-audio-smoothing').value,
audio_palette: document.getElementById('css-editor-audio-palette').value,
audio_color: document.getElementById('css-editor-audio-color').value,
audio_color_peak: document.getElementById('css-editor-audio-color-peak').value,
audio_mirror: document.getElementById('css-editor-audio-mirror').checked,
};
}
}
@@ -55,8 +63,10 @@ export function onCSSTypeChange() {
document.getElementById('css-editor-gradient-section').style.display = type === 'gradient' ? '' : 'none';
document.getElementById('css-editor-effect-section').style.display = type === 'effect' ? '' : 'none';
document.getElementById('css-editor-composite-section').style.display = type === 'composite' ? '' : 'none';
document.getElementById('css-editor-audio-section').style.display = type === 'audio' ? '' : 'none';
if (type === 'effect') onEffectTypeChange();
if (type === 'audio') onAudioVizChange();
// Animation section — shown for static/gradient only
const animSection = document.getElementById('css-editor-animation-section');
@@ -87,10 +97,13 @@ export function onCSSTypeChange() {
}
_syncAnimationSpeedState();
// LED count — not needed for composite (uses device count)
document.getElementById('css-editor-led-count-group').style.display = type === 'composite' ? 'none' : '';
// LED count — not needed for composite/audio (uses device count)
document.getElementById('css-editor-led-count-group').style.display =
(type === 'composite' || type === 'audio') ? 'none' : '';
if (type === 'composite') {
if (type === 'audio') {
_loadAudioDevices();
} else if (type === 'composite') {
_compositeRenderList();
} else if (type === 'gradient') {
requestAnimationFrame(() => gradientRenderAll());
@@ -378,6 +391,82 @@ function _loadCompositeState(css) {
_compositeRenderList();
}
/* ── Audio visualization helpers ──────────────────────────────── */
export function onAudioVizChange() {
    // Toggle per-mode option groups to match the selected visualization.
    const viz = document.getElementById('css-editor-audio-viz').value;
    const show = (id, visible) => {
        document.getElementById(id).style.display = visible ? '' : 'none';
    };
    // Palette applies to spectrum and beat pulse; base/peak colors are
    // VU-meter only; mirroring only makes sense for the spectrum layout.
    show('css-editor-audio-palette-group', viz === 'spectrum' || viz === 'beat_pulse');
    show('css-editor-audio-color-group', viz === 'vu_meter');
    show('css-editor-audio-color-peak-group', viz === 'vu_meter');
    show('css-editor-audio-mirror-group', viz === 'spectrum');
}
/* Fetch available audio devices and populate the device <select>.
   Falls back to a single "Default" entry (index -1, loopback) when the
   endpoint fails or reports no devices. */
async function _loadAudioDevices() {
    const select = document.getElementById('css-editor-audio-device');
    if (!select) return;
    try {
        const resp = await fetchWithAuth('/audio-devices');
        if (!resp.ok) throw new Error('fetch failed');
        const data = await resp.json();
        const devices = data.devices || [];
        // Option value encodes "deviceIndex:loopbackFlag"; the label marks
        // loopback (system audio) vs input (microphone) devices.
        select.innerHTML = devices.map(d => {
            const label = d.is_loopback ? `🔊 ${d.name}` : `🎤 ${d.name}`;
            const val = `${d.index}:${d.is_loopback ? '1' : '0'}`;
            return `<option value="${val}">${escapeHtml(label)}</option>`;
        }).join('');
        if (devices.length === 0) {
            select.innerHTML = '<option value="-1:1">Default</option>';
        }
    } catch {
        // Network/auth failure — keep the editor usable with the default device.
        select.innerHTML = '<option value="-1:1">Default</option>';
    }
}
/* Populate the audio editor controls from a stored color-strip source object. */
function _loadAudioState(css) {
    document.getElementById('css-editor-audio-viz').value = css.visualization_mode || 'spectrum';
    // Re-sync option-group visibility with the visualization we just selected.
    onAudioVizChange();
    const sensitivity = css.sensitivity ?? 1.0;
    document.getElementById('css-editor-audio-sensitivity').value = sensitivity;
    document.getElementById('css-editor-audio-sensitivity-val').textContent = parseFloat(sensitivity).toFixed(1);
    const smoothing = css.smoothing ?? 0.3;
    document.getElementById('css-editor-audio-smoothing').value = smoothing;
    document.getElementById('css-editor-audio-smoothing-val').textContent = parseFloat(smoothing).toFixed(2);
    document.getElementById('css-editor-audio-palette').value = css.palette || 'rainbow';
    document.getElementById('css-editor-audio-color').value = rgbArrayToHex(css.color || [0, 255, 0]);
    document.getElementById('css-editor-audio-color-peak').value = rgbArrayToHex(css.color_peak || [255, 0, 0]);
    document.getElementById('css-editor-audio-mirror').checked = css.mirror || false;
    // Set audio device selector to match stored values
    const deviceIdx = css.audio_device_index ?? -1;
    // Loopback flag encodes as '1'/'0' in the option value "index:loopback".
    const loopback = css.audio_loopback !== false ? '1' : '0';
    const deviceVal = `${deviceIdx}:${loopback}`;
    const select = document.getElementById('css-editor-audio-device');
    if (select) {
        // Try exact match, fall back to first option
        const opt = Array.from(select.options).find(o => o.value === deviceVal);
        if (opt) select.value = deviceVal;
    }
}
/* Restore the audio section controls to their default (new-source) values. */
function _resetAudioState() {
    const byId = (id) => document.getElementById(id);
    byId('css-editor-audio-viz').value = 'spectrum';
    byId('css-editor-audio-sensitivity').value = 1.0;
    byId('css-editor-audio-sensitivity-val').textContent = '1.0';
    byId('css-editor-audio-smoothing').value = 0.3;
    byId('css-editor-audio-smoothing-val').textContent = '0.30';
    byId('css-editor-audio-palette').value = 'rainbow';
    byId('css-editor-audio-color').value = '#00ff00';
    byId('css-editor-audio-color-peak').value = '#ff0000';
    byId('css-editor-audio-mirror').checked = false;
}
/* ── Card ─────────────────────────────────────────────────────── */
export function createColorStripCard(source, pictureSourceMap) {
@@ -386,6 +475,7 @@ export function createColorStripCard(source, pictureSourceMap) {
const isColorCycle = source.source_type === 'color_cycle';
const isEffect = source.source_type === 'effect';
const isComposite = source.source_type === 'composite';
const isAudio = source.source_type === 'audio';
const anim = (isStatic || isGradient) && source.animation && source.animation.enabled ? source.animation : null;
const animBadge = anim
@@ -451,6 +541,14 @@ export function createColorStripCard(source, pictureSourceMap) {
<span class="stream-card-prop">🔗 ${enabledCount}/${layerCount} ${t('color_strip.composite.layers_count')}</span>
${source.led_count ? `<span class="stream-card-prop" title="${t('color_strip.leds')}">💡 ${source.led_count}</span>` : ''}
`;
} else if (isAudio) {
const vizLabel = t('color_strip.audio.viz.' + (source.visualization_mode || 'spectrum')) || source.visualization_mode || 'spectrum';
const sensitivityVal = (source.sensitivity || 1.0).toFixed(1);
propsHtml = `
<span class="stream-card-prop">🎵 ${escapeHtml(vizLabel)}</span>
<span class="stream-card-prop" title="${t('color_strip.audio.sensitivity')}">📶 ${sensitivityVal}</span>
${source.mirror ? `<span class="stream-card-prop">🪞</span>` : ''}
`;
} else {
const srcName = (pictureSourceMap && pictureSourceMap[source.picture_source_id])
? pictureSourceMap[source.picture_source_id].name
@@ -464,8 +562,8 @@ export function createColorStripCard(source, pictureSourceMap) {
`;
}
const icon = isStatic ? '🎨' : isColorCycle ? '🔄' : isGradient ? '🌈' : isEffect ? '⚡' : isComposite ? '🔗' : '🎞️';
const calibrationBtn = (!isStatic && !isGradient && !isColorCycle && !isEffect && !isComposite)
const icon = isStatic ? '🎨' : isColorCycle ? '🔄' : isGradient ? '🌈' : isEffect ? '⚡' : isComposite ? '🔗' : isAudio ? '🎵' : '🎞️';
const calibrationBtn = (!isStatic && !isGradient && !isColorCycle && !isEffect && !isComposite && !isAudio)
? `<button class="btn btn-icon btn-secondary" onclick="showCSSCalibration('${source.id}')" title="${t('calibration.title')}">📐</button>`
: '';
@@ -549,6 +647,9 @@ export async function showCSSEditor(cssId = null) {
document.getElementById('css-editor-effect-scale').value = css.scale ?? 1.0;
document.getElementById('css-editor-effect-scale-val').textContent = parseFloat(css.scale ?? 1.0).toFixed(1);
document.getElementById('css-editor-effect-mirror').checked = css.mirror || false;
} else if (sourceType === 'audio') {
await _loadAudioDevices();
_loadAudioState(css);
} else if (sourceType === 'composite') {
// Exclude self from available sources when editing
_compositeAvailableSources = allCssSources.filter(s =>
@@ -611,6 +712,7 @@ export async function showCSSEditor(cssId = null) {
document.getElementById('css-editor-effect-scale-val').textContent = '1.0';
document.getElementById('css-editor-effect-mirror').checked = false;
_loadCompositeState(null);
_resetAudioState();
document.getElementById('css-editor-title').textContent = t('color_strip.add');
document.getElementById('css-editor-gradient-preset').value = '';
gradientInit([
@@ -698,6 +800,22 @@ export async function saveCSSEditor() {
payload.color = [parseInt(hex.slice(1, 3), 16), parseInt(hex.slice(3, 5), 16), parseInt(hex.slice(5, 7), 16)];
}
if (!cssId) payload.source_type = 'effect';
} else if (sourceType === 'audio') {
const deviceVal = document.getElementById('css-editor-audio-device').value || '-1:1';
const [devIdx, devLoop] = deviceVal.split(':');
payload = {
name,
visualization_mode: document.getElementById('css-editor-audio-viz').value,
audio_device_index: parseInt(devIdx) || -1,
audio_loopback: devLoop !== '0',
sensitivity: parseFloat(document.getElementById('css-editor-audio-sensitivity').value),
smoothing: parseFloat(document.getElementById('css-editor-audio-smoothing').value),
palette: document.getElementById('css-editor-audio-palette').value,
color: hexToRgbArray(document.getElementById('css-editor-audio-color').value),
color_peak: hexToRgbArray(document.getElementById('css-editor-audio-color-peak').value),
mirror: document.getElementById('css-editor-audio-mirror').checked,
};
if (!cssId) payload.source_type = 'audio';
} else if (sourceType === 'composite') {
const layers = _compositeGetLayers();
if (layers.length < 1) {

View File

@@ -578,7 +578,7 @@
"color_strip.delete.referenced": "Cannot delete: this source is in use by a target",
"color_strip.error.name_required": "Please enter a name",
"color_strip.type": "Type:",
"color_strip.type.hint": "Picture Source derives LED colors from a screen capture. Static Color fills all LEDs with a single constant color. Gradient distributes a color gradient across all LEDs. Color Cycle smoothly cycles through a user-defined list of colors. Composite stacks multiple sources as blended layers.",
"color_strip.type.hint": "Picture Source derives LED colors from a screen capture. Static Color fills all LEDs with a single constant color. Gradient distributes a color gradient across all LEDs. Color Cycle smoothly cycles through a user-defined list of colors. Composite stacks multiple sources as blended layers. Audio Reactive drives LEDs from real-time audio input.",
"color_strip.type.picture": "Picture Source",
"color_strip.type.static": "Static Color",
"color_strip.type.gradient": "Gradient",
@@ -642,6 +642,8 @@
"color_strip.type.effect.hint": "Procedural LED effects (fire, meteor, plasma, noise, aurora) generated in real time.",
"color_strip.type.composite": "Composite",
"color_strip.type.composite.hint": "Stack multiple color strip sources as layers with blend modes and opacity.",
"color_strip.type.audio": "Audio Reactive",
"color_strip.type.audio.hint": "LED colors driven by real-time audio input — system audio or microphone.",
"color_strip.composite.layers": "Layers:",
"color_strip.composite.layers.hint": "Stack multiple color strip sources. First layer is the bottom, last is the top. Each layer can have its own blend mode and opacity.",
"color_strip.composite.add_layer": "+ Add Layer",
@@ -656,6 +658,25 @@
"color_strip.composite.error.min_layers": "At least 1 layer is required",
"color_strip.composite.error.no_source": "Each layer must have a source selected",
"color_strip.composite.layers_count": "layers",
"color_strip.audio.visualization": "Visualization:",
"color_strip.audio.visualization.hint": "How audio data is rendered to LEDs.",
"color_strip.audio.viz.spectrum": "Spectrum Analyzer",
"color_strip.audio.viz.beat_pulse": "Beat Pulse",
"color_strip.audio.viz.vu_meter": "VU Meter",
"color_strip.audio.device": "Audio Device:",
"color_strip.audio.device.hint": "Audio input source. Loopback devices capture system audio output; input devices capture microphone or line-in.",
"color_strip.audio.sensitivity": "Sensitivity:",
"color_strip.audio.sensitivity.hint": "Gain multiplier for audio levels. Higher values make LEDs react to quieter sounds.",
"color_strip.audio.smoothing": "Smoothing:",
"color_strip.audio.smoothing.hint": "Temporal smoothing between frames. Higher values produce smoother but slower-reacting visuals.",
"color_strip.audio.palette": "Palette:",
"color_strip.audio.palette.hint": "Color palette used for spectrum bars or beat pulse coloring.",
"color_strip.audio.color": "Base Color:",
"color_strip.audio.color.hint": "Low-level color for VU meter bar.",
"color_strip.audio.color_peak": "Peak Color:",
"color_strip.audio.color_peak.hint": "High-level color at the top of the VU meter bar.",
"color_strip.audio.mirror": "Mirror:",
"color_strip.audio.mirror.hint": "Mirror spectrum from center outward: bass in the middle, treble at the edges.",
"color_strip.effect.type": "Effect Type:",
"color_strip.effect.type.hint": "Choose the procedural algorithm.",
"color_strip.effect.fire": "Fire",

View File

@@ -578,7 +578,7 @@
"color_strip.delete.referenced": "Невозможно удалить: источник используется в цели",
"color_strip.error.name_required": "Введите название",
"color_strip.type": "Тип:",
"color_strip.type.hint": "Источник изображения получает цвета светодиодов из захвата экрана. Статический цвет заполняет все светодиоды одним постоянным цветом. Градиент распределяет цветовой градиент по всем светодиодам. Смена цвета плавно циклически переключается между заданными цветами. Композит накладывает несколько источников как смешанные слои.",
"color_strip.type.hint": "Источник изображения получает цвета светодиодов из захвата экрана. Статический цвет заполняет все светодиоды одним постоянным цветом. Градиент распределяет цветовой градиент по всем светодиодам. Смена цвета плавно циклически переключается между заданными цветами. Композит накладывает несколько источников как смешанные слои. Аудиореактив управляет LED от аудиосигнала в реальном времени.",
"color_strip.type.picture": "Источник изображения",
"color_strip.type.static": "Статический цвет",
"color_strip.type.gradient": "Градиент",
@@ -642,6 +642,8 @@
"color_strip.type.effect.hint": "Процедурные LED-эффекты (огонь, метеор, плазма, шум, аврора), генерируемые в реальном времени.",
"color_strip.type.composite": "Композит",
"color_strip.type.composite.hint": "Наложение нескольких источников цветовой ленты как слоёв с режимами смешивания и прозрачностью.",
"color_strip.type.audio": "Аудиореактив",
"color_strip.type.audio.hint": "Цвета LED управляются аудиосигналом в реальном времени — системный звук или микрофон.",
"color_strip.composite.layers": "Слои:",
"color_strip.composite.layers.hint": "Наложение нескольких источников. Первый слой — нижний, последний — верхний. Каждый слой может иметь свой режим смешивания и прозрачность.",
"color_strip.composite.add_layer": "+ Добавить слой",
@@ -656,6 +658,25 @@
"color_strip.composite.error.min_layers": "Необходим хотя бы 1 слой",
"color_strip.composite.error.no_source": "Для каждого слоя должен быть выбран источник",
"color_strip.composite.layers_count": "слоёв",
"color_strip.audio.visualization": "Визуализация:",
"color_strip.audio.visualization.hint": "Способ отображения аудиоданных на LED.",
"color_strip.audio.viz.spectrum": "Анализатор спектра",
"color_strip.audio.viz.beat_pulse": "Пульс бита",
"color_strip.audio.viz.vu_meter": "VU-метр",
"color_strip.audio.device": "Аудиоустройство:",
"color_strip.audio.device.hint": "Источник аудиосигнала. Устройства обратной петли захватывают системный звук; устройства ввода — микрофон или линейный вход.",
"color_strip.audio.sensitivity": "Чувствительность:",
"color_strip.audio.sensitivity.hint": "Множитель усиления аудиосигнала. Более высокие значения делают LED чувствительнее к тихим звукам.",
"color_strip.audio.smoothing": "Сглаживание:",
"color_strip.audio.smoothing.hint": "Временное сглаживание между кадрами. Более высокие значения дают плавную, но медленнее реагирующую визуализацию.",
"color_strip.audio.palette": "Палитра:",
"color_strip.audio.palette.hint": "Цветовая палитра для полос спектра или пульсации бита.",
"color_strip.audio.color": "Базовый цвет:",
"color_strip.audio.color.hint": "Цвет низкого уровня для полосы VU-метра.",
"color_strip.audio.color_peak": "Пиковый цвет:",
"color_strip.audio.color_peak.hint": "Цвет высокого уровня в верхней части полосы VU-метра.",
"color_strip.audio.mirror": "Зеркало:",
"color_strip.audio.mirror.hint": "Зеркалирование спектра от центра к краям: басы в середине, высокие частоты по краям.",
"color_strip.effect.type": "Тип эффекта:",
"color_strip.effect.type.hint": "Выберите процедурный алгоритм.",
"color_strip.effect.fire": "Огонь",

View File

@@ -9,6 +9,7 @@ Current types:
StaticColorStripSource — constant solid color fills all LEDs
GradientColorStripSource — linear gradient across all LEDs from user-defined color stops
ColorCycleColorStripSource — smoothly cycles through a user-defined list of colors
AudioColorStripSource — audio-reactive visualization (spectrum, beat pulse, VU meter)
"""
from dataclasses import dataclass, field
@@ -72,6 +73,11 @@ class ColorStripSource:
"scale": None,
"mirror": None,
"layers": None,
"visualization_mode": None,
"audio_device_index": None,
"audio_loopback": None,
"sensitivity": None,
"color_peak": None,
}
@staticmethod
@@ -148,6 +154,26 @@ class ColorStripSource:
led_count=data.get("led_count") or 0,
)
if source_type == "audio":
raw_color = data.get("color")
color = raw_color if isinstance(raw_color, list) and len(raw_color) == 3 else [0, 255, 0]
raw_peak = data.get("color_peak")
color_peak = raw_peak if isinstance(raw_peak, list) and len(raw_peak) == 3 else [255, 0, 0]
return AudioColorStripSource(
id=sid, name=name, source_type="audio",
created_at=created_at, updated_at=updated_at, description=description,
visualization_mode=data.get("visualization_mode") or "spectrum",
audio_device_index=int(data.get("audio_device_index", -1)),
audio_loopback=bool(data.get("audio_loopback", True)),
sensitivity=float(data.get("sensitivity") or 1.0),
smoothing=float(data.get("smoothing") or 0.3),
palette=data.get("palette") or "rainbow",
color=color,
color_peak=color_peak,
led_count=data.get("led_count") or 0,
mirror=bool(data.get("mirror", False)),
)
if source_type == "effect":
raw_color = data.get("color")
color = (
@@ -328,6 +354,41 @@ class EffectColorStripSource(ColorStripSource):
return d
@dataclass
class AudioColorStripSource(ColorStripSource):
    """Color strip source driven by audio input (microphone or system audio).

    visualization_mode selects the rendering algorithm:
    spectrum, beat_pulse, vu_meter.
    LED count auto-sizes from the connected device when led_count == 0.
    """

    visualization_mode: str = "spectrum"  # spectrum | beat_pulse | vu_meter
    audio_device_index: int = -1  # -1 = default input device
    audio_loopback: bool = True  # True = WASAPI loopback (system audio)
    sensitivity: float = 1.0  # gain multiplier (presumably 0.1–5.0 — confirm against UI slider)
    smoothing: float = 0.3  # temporal smoothing (0.0–1.0)
    palette: str = "rainbow"  # named color palette
    color: list = field(default_factory=lambda: [0, 255, 0])  # base RGB for VU meter
    color_peak: list = field(default_factory=lambda: [255, 0, 0])  # peak RGB for VU meter
    led_count: int = 0  # 0 = use device LED count
    mirror: bool = False  # mirror spectrum from center outward

    def to_dict(self) -> dict:
        """Serialize to a plain dict, extending the base-class fields."""
        d = super().to_dict()
        d.update({
            "visualization_mode": self.visualization_mode,
            "audio_device_index": self.audio_device_index,
            "audio_loopback": self.audio_loopback,
            "sensitivity": self.sensitivity,
            "smoothing": self.smoothing,
            "palette": self.palette,
            # Copy the lists so callers can't mutate instance state via the dict.
            "color": list(self.color),
            "color_peak": list(self.color_peak),
            "led_count": self.led_count,
            "mirror": self.mirror,
        })
        return d
@dataclass
class CompositeColorStripSource(ColorStripSource):
"""Color strip source that composites (stacks) multiple other sources as layers.

View File

@@ -8,6 +8,7 @@ from typing import Dict, List, Optional
from wled_controller.core.capture.calibration import CalibrationConfig, calibration_to_dict
from wled_controller.storage.color_strip_source import (
AudioColorStripSource,
ColorCycleColorStripSource,
ColorStripSource,
CompositeColorStripSource,
@@ -118,6 +119,11 @@ class ColorStripStore:
scale: float = 1.0,
mirror: bool = False,
layers: Optional[list] = None,
visualization_mode: str = "spectrum",
audio_device_index: int = -1,
audio_loopback: bool = True,
sensitivity: float = 1.0,
color_peak: Optional[list] = None,
) -> ColorStripSource:
"""Create a new color strip source.
@@ -196,6 +202,27 @@ class ColorStripStore:
scale=float(scale) if scale else 1.0,
mirror=bool(mirror),
)
elif source_type == "audio":
rgb = color if isinstance(color, list) and len(color) == 3 else [0, 255, 0]
peak = color_peak if isinstance(color_peak, list) and len(color_peak) == 3 else [255, 0, 0]
source = AudioColorStripSource(
id=source_id,
name=name,
source_type="audio",
created_at=now,
updated_at=now,
description=description,
visualization_mode=visualization_mode or "spectrum",
audio_device_index=audio_device_index if audio_device_index is not None else -1,
audio_loopback=bool(audio_loopback),
sensitivity=float(sensitivity) if sensitivity else 1.0,
smoothing=float(smoothing) if smoothing else 0.3,
palette=palette or "rainbow",
color=rgb,
color_peak=peak,
led_count=led_count,
mirror=bool(mirror),
)
elif source_type == "composite":
source = CompositeColorStripSource(
id=source_id,
@@ -262,6 +289,11 @@ class ColorStripStore:
scale: Optional[float] = None,
mirror: Optional[bool] = None,
layers: Optional[list] = None,
visualization_mode: Optional[str] = None,
audio_device_index: Optional[int] = None,
audio_loopback: Optional[bool] = None,
sensitivity: Optional[float] = None,
color_peak: Optional[list] = None,
) -> ColorStripSource:
"""Update an existing color strip source.
@@ -342,6 +374,27 @@ class ColorStripStore:
source.scale = float(scale)
if mirror is not None:
source.mirror = bool(mirror)
elif isinstance(source, AudioColorStripSource):
if visualization_mode is not None:
source.visualization_mode = visualization_mode
if audio_device_index is not None:
source.audio_device_index = audio_device_index
if audio_loopback is not None:
source.audio_loopback = bool(audio_loopback)
if sensitivity is not None:
source.sensitivity = float(sensitivity)
if smoothing is not None:
source.smoothing = float(smoothing)
if palette is not None:
source.palette = palette
if color is not None and isinstance(color, list) and len(color) == 3:
source.color = color
if color_peak is not None and isinstance(color_peak, list) and len(color_peak) == 3:
source.color_peak = color_peak
if led_count is not None:
source.led_count = led_count
if mirror is not None:
source.mirror = bool(mirror)
elif isinstance(source, CompositeColorStripSource):
if layers is not None and isinstance(layers, list):
source.layers = layers
@@ -368,20 +421,13 @@ class ColorStripStore:
logger.info(f"Deleted color strip source: {source_id}")
def is_referenced_by_composite(self, source_id: str) -> bool:
"""Check if this source is referenced as a layer in any composite source."""
def get_composites_referencing(self, source_id: str) -> List[str]:
"""Return names of composite sources that reference a given source as a layer."""
names = []
for source in self._sources.values():
if isinstance(source, CompositeColorStripSource):
for layer in source.layers:
if layer.get("source_id") == source_id:
return True
return False
def is_referenced_by_target(self, source_id: str, target_store) -> bool:
"""Check if this source is referenced by any picture target."""
from wled_controller.storage.wled_picture_target import WledPictureTarget
for target in target_store.get_all_targets():
if isinstance(target, WledPictureTarget) and target.color_strip_source_id == source_id:
return True
return False
names.append(source.name)
break
return names

View File

@@ -207,19 +207,11 @@ class PatternTemplateStore:
logger.info(f"Deleted pattern template: {template_id}")
def is_referenced_by(self, template_id: str, picture_target_store) -> bool:
"""Check if this template is referenced by any key colors target.
Args:
template_id: Template ID to check
picture_target_store: PictureTargetStore instance
Returns:
True if any KC target references this template
"""
def get_targets_referencing(self, template_id: str, picture_target_store) -> List[str]:
"""Return names of KC targets that reference this template."""
from wled_controller.storage.key_colors_picture_target import KeyColorsPictureTarget
for target in picture_target_store.get_all_targets():
if isinstance(target, KeyColorsPictureTarget) and target.settings.pattern_template_id == template_id:
return True
return False
return [
target.name for target in picture_target_store.get_all_targets()
if isinstance(target, KeyColorsPictureTarget) and target.settings.pattern_template_id == template_id
]

View File

@@ -301,17 +301,9 @@ class PictureSourceStore:
logger.info(f"Deleted picture source: {stream_id}")
def is_referenced_by_target(self, stream_id: str, target_store) -> bool:
"""Check if this stream is referenced by any picture target.
Args:
stream_id: Stream ID to check
target_store: PictureTargetStore instance
Returns:
True if any target references this stream
"""
return target_store.is_referenced_by_source(stream_id)
def get_targets_referencing(self, stream_id: str, target_store) -> List[str]:
"""Return names of targets that reference this stream."""
return target_store.get_targets_referencing_source(stream_id)
def resolve_stream_chain(self, stream_id: str) -> dict:
"""Resolve a stream chain to get the terminal stream and collected postprocessing templates.

View File

@@ -251,19 +251,19 @@ class PictureTargetStore:
if isinstance(t, WledPictureTarget) and t.device_id == device_id
]
def is_referenced_by_source(self, source_id: str) -> bool:
"""Check if any KC target directly references a picture source."""
for target in self._targets.values():
if isinstance(target, KeyColorsPictureTarget) and target.picture_source_id == source_id:
return True
return False
def get_targets_referencing_source(self, source_id: str) -> List[str]:
"""Return names of KC targets that reference a picture source."""
return [
target.name for target in self._targets.values()
if isinstance(target, KeyColorsPictureTarget) and target.picture_source_id == source_id
]
def is_referenced_by_color_strip_source(self, css_id: str) -> bool:
"""Check if any WLED target references a color strip source."""
for target in self._targets.values():
if isinstance(target, WledPictureTarget) and target.color_strip_source_id == css_id:
return True
return False
def get_targets_referencing_css(self, css_id: str) -> List[str]:
"""Return names of LED targets that reference a color strip source."""
return [
target.name for target in self._targets.values()
if isinstance(target, WledPictureTarget) and target.color_strip_source_id == css_id
]
def count(self) -> int:
"""Get number of targets."""

View File

@@ -220,17 +220,9 @@ class PostprocessingTemplateStore:
logger.info(f"Deleted postprocessing template: {template_id}")
def is_referenced_by(self, template_id: str, picture_source_store) -> bool:
"""Check if this template is referenced by any picture source.
Args:
template_id: Template ID to check
picture_source_store: PictureSourceStore instance
Returns:
True if any picture source references this template
"""
for stream in picture_source_store.get_all_streams():
if isinstance(stream, ProcessedPictureSource) and stream.postprocessing_template_id == template_id:
return True
return False
def get_sources_referencing(self, template_id: str, picture_source_store) -> List[str]:
"""Return names of picture sources that reference this template."""
return [
stream.name for stream in picture_source_store.get_all_streams()
if isinstance(stream, ProcessedPictureSource) and stream.postprocessing_template_id == template_id
]

View File

@@ -27,6 +27,7 @@
<option value="color_cycle" data-i18n="color_strip.type.color_cycle">Color Cycle</option>
<option value="effect" data-i18n="color_strip.type.effect">Procedural Effect</option>
<option value="composite" data-i18n="color_strip.type.composite">Composite</option>
<option value="audio" data-i18n="color_strip.type.audio">Audio Reactive</option>
</select>
</div>
@@ -315,6 +316,107 @@
</div>
</div>
<!-- Audio-reactive fields -->
<div id="css-editor-audio-section" style="display:none">
<div class="form-group">
<div class="label-row">
<label for="css-editor-audio-viz" data-i18n="color_strip.audio.visualization">Visualization:</label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?" data-i18n-aria-label="aria.hint">?</button>
</div>
<small class="input-hint" style="display:none" data-i18n="color_strip.audio.visualization.hint">How audio data is rendered to LEDs.</small>
<select id="css-editor-audio-viz" onchange="onAudioVizChange()">
<option value="spectrum" data-i18n="color_strip.audio.viz.spectrum">Spectrum Analyzer</option>
<option value="beat_pulse" data-i18n="color_strip.audio.viz.beat_pulse">Beat Pulse</option>
<option value="vu_meter" data-i18n="color_strip.audio.viz.vu_meter">VU Meter</option>
</select>
</div>
<div class="form-group">
<div class="label-row">
<label for="css-editor-audio-device" data-i18n="color_strip.audio.device">Audio Device:</label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?" data-i18n-aria-label="aria.hint">?</button>
</div>
<small class="input-hint" style="display:none" data-i18n="color_strip.audio.device.hint">Audio input source. Loopback devices capture system audio output; input devices capture microphone or line-in.</small>
<select id="css-editor-audio-device">
<!-- populated dynamically from /api/v1/audio-devices -->
</select>
</div>
<div class="form-group">
<div class="label-row">
<label for="css-editor-audio-sensitivity">
<span data-i18n="color_strip.audio.sensitivity">Sensitivity:</span>
<span id="css-editor-audio-sensitivity-val">1.0</span>
</label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?" data-i18n-aria-label="aria.hint">?</button>
</div>
<small class="input-hint" style="display:none" data-i18n="color_strip.audio.sensitivity.hint">Gain multiplier for audio levels. Higher values make LEDs react to quieter sounds.</small>
<input type="range" id="css-editor-audio-sensitivity" min="0.1" max="5.0" step="0.1" value="1.0"
oninput="document.getElementById('css-editor-audio-sensitivity-val').textContent = parseFloat(this.value).toFixed(1)">
</div>
<div class="form-group">
<div class="label-row">
<label for="css-editor-audio-smoothing">
<span data-i18n="color_strip.audio.smoothing">Smoothing:</span>
<span id="css-editor-audio-smoothing-val">0.30</span>
</label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?" data-i18n-aria-label="aria.hint">?</button>
</div>
<small class="input-hint" style="display:none" data-i18n="color_strip.audio.smoothing.hint">Temporal smoothing between frames. Higher values produce smoother but slower-reacting visuals.</small>
<input type="range" id="css-editor-audio-smoothing" min="0.0" max="1.0" step="0.05" value="0.3"
oninput="document.getElementById('css-editor-audio-smoothing-val').textContent = parseFloat(this.value).toFixed(2)">
</div>
<div id="css-editor-audio-palette-group" class="form-group">
<div class="label-row">
<label for="css-editor-audio-palette" data-i18n="color_strip.audio.palette">Palette:</label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?" data-i18n-aria-label="aria.hint">?</button>
</div>
<small class="input-hint" style="display:none" data-i18n="color_strip.audio.palette.hint">Color palette used for spectrum bars or beat pulse coloring.</small>
<select id="css-editor-audio-palette">
<option value="rainbow" data-i18n="color_strip.palette.rainbow">Rainbow</option>
<option value="fire" data-i18n="color_strip.palette.fire">Fire</option>
<option value="ocean" data-i18n="color_strip.palette.ocean">Ocean</option>
<option value="lava" data-i18n="color_strip.palette.lava">Lava</option>
<option value="forest" data-i18n="color_strip.palette.forest">Forest</option>
<option value="aurora" data-i18n="color_strip.palette.aurora">Aurora</option>
<option value="sunset" data-i18n="color_strip.palette.sunset">Sunset</option>
<option value="ice" data-i18n="color_strip.palette.ice">Ice</option>
</select>
</div>
<div id="css-editor-audio-color-group" class="form-group" style="display:none">
<div class="label-row">
<label for="css-editor-audio-color" data-i18n="color_strip.audio.color">Base Color:</label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?" data-i18n-aria-label="aria.hint">?</button>
</div>
<small class="input-hint" style="display:none" data-i18n="color_strip.audio.color.hint">Low-level color for VU meter bar.</small>
<input type="color" id="css-editor-audio-color" value="#00ff00">
</div>
<div id="css-editor-audio-color-peak-group" class="form-group" style="display:none">
<div class="label-row">
<label for="css-editor-audio-color-peak" data-i18n="color_strip.audio.color_peak">Peak Color:</label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?" data-i18n-aria-label="aria.hint">?</button>
</div>
<small class="input-hint" style="display:none" data-i18n="color_strip.audio.color_peak.hint">High-level color at the top of the VU meter bar.</small>
<input type="color" id="css-editor-audio-color-peak" value="#ff0000">
</div>
<div id="css-editor-audio-mirror-group" class="form-group" style="display:none">
<div class="label-row">
<label for="css-editor-audio-mirror" data-i18n="color_strip.audio.mirror">Mirror:</label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?" data-i18n-aria-label="aria.hint">?</button>
</div>
<small class="input-hint" style="display:none" data-i18n="color_strip.audio.mirror.hint">Mirror spectrum from center outward: bass in the middle, treble at the edges.</small>
<label class="settings-toggle">
<input type="checkbox" id="css-editor-audio-mirror">
<span class="settings-toggle-slider"></span>
</label>
</div>
</div>
<!-- Shared LED count field -->
<div id="css-editor-led-count-group" class="form-group">
<div class="label-row">