Add audio-reactive color strip sources, improve delete error messages

Add new "audio" color strip source type with three visualization modes
(spectrum analyzer, beat pulse, VU meter) supporting WASAPI loopback and
microphone input via PyAudioWPatch. Includes shared audio capture with
ref counting, real-time FFT spectrum analysis, and beat detection.

Improve all referential integrity 409 error messages across delete
endpoints to include specific names of referencing entities instead of
generic "one or more" messages.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-23 11:56:54 +03:00
parent 2657f46e5d
commit bbd2ac9910
24 changed files with 1247 additions and 86 deletions

View File

@@ -0,0 +1,442 @@
"""Audio capture service — shared audio analysis with ref counting.
Provides real-time FFT spectrum, RMS level, and beat detection from
system audio (WASAPI loopback) or microphone/line-in. Multiple
AudioColorStripStreams sharing the same device reuse a single capture
thread via AudioCaptureManager.
Uses PyAudioWPatch for WASAPI loopback support on Windows.
"""
import math
import threading
import time
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Tuple
import numpy as np
from wled_controller.utils import get_logger
logger = get_logger(__name__)
# Number of logarithmic frequency bands for spectrum analysis
NUM_BANDS = 64
# Audio defaults
DEFAULT_SAMPLE_RATE = 44100
DEFAULT_CHUNK_SIZE = 2048 # ~46 ms at 44100 Hz
# ---------------------------------------------------------------------------
# AudioAnalysis — thread-safe snapshot of latest analysis results
# ---------------------------------------------------------------------------
@dataclass
class AudioAnalysis:
    """Snapshot of audio analysis results.

    Written by the capture thread, read by visualization streams. Instances
    are immutable by convention: the capture loop builds a fresh object per
    chunk and publishes it atomically under a lock.
    """

    # Monotonic capture time (time.perf_counter) of this analysis frame
    timestamp: float = 0.0
    # Root-mean-square level of the mono chunk (linear scale, not dB)
    rms: float = 0.0
    # Absolute peak sample value of the chunk
    peak: float = 0.0
    # NUM_BANDS log-spaced band magnitudes, smoothed and normalized to 0..1
    spectrum: np.ndarray = field(default_factory=lambda: np.zeros(NUM_BANDS, dtype=np.float32))
    # True when chunk energy exceeded 1.5x the rolling average (beat onset)
    beat: bool = False
    # Beat strength in 0..1, derived from the energy/average ratio
    beat_intensity: float = 0.0
# ---------------------------------------------------------------------------
# AudioCaptureStream — one per unique audio device
# ---------------------------------------------------------------------------
def _build_log_bands(num_bands: int, fft_size: int, sample_rate: int) -> List[Tuple[int, int]]:
"""Build logarithmically-spaced frequency band boundaries for FFT bins.
Returns list of (start_bin, end_bin) pairs.
"""
nyquist = sample_rate / 2
# Map bands to log-spaced frequencies from 20 Hz to Nyquist
min_freq = 20.0
max_freq = min(nyquist, 20000.0)
log_min = math.log10(min_freq)
log_max = math.log10(max_freq)
freqs = np.logspace(log_min, log_max, num_bands + 1)
bin_width = sample_rate / fft_size
bands = []
for i in range(num_bands):
start_bin = max(1, int(freqs[i] / bin_width))
end_bin = max(start_bin + 1, int(freqs[i + 1] / bin_width))
# Clamp to FFT range
end_bin = min(end_bin, fft_size // 2)
bands.append((start_bin, end_bin))
return bands
class AudioCaptureStream:
    """Captures audio from a single device and provides real-time analysis.

    Runs a background thread that reads audio chunks, computes FFT, RMS,
    and beat detection. Consumers read the latest analysis via
    ``get_latest_analysis()`` (thread-safe).
    """

    def __init__(
        self,
        device_index: int,
        is_loopback: bool,
        sample_rate: int = DEFAULT_SAMPLE_RATE,
        chunk_size: int = DEFAULT_CHUNK_SIZE,
    ):
        """
        Args:
            device_index: PyAudio device index; negative selects the default device.
            is_loopback: True to capture a WASAPI output device via loopback,
                False for a regular input (microphone/line-in) device.
            sample_rate: Requested sample rate; overridden by the device's
                default rate once the stream is opened (see _capture_loop).
            chunk_size: Samples per analysis frame; also the FFT size.
        """
        self._device_index = device_index
        self._is_loopback = is_loopback
        self._sample_rate = sample_rate
        self._chunk_size = chunk_size
        self._running = False
        self._thread: Optional[threading.Thread] = None
        self._lock = threading.Lock()  # guards _latest
        self._latest: Optional[AudioAnalysis] = None
        # Pre-allocated FFT helpers
        self._window = np.hanning(chunk_size).astype(np.float32)
        self._bands = _build_log_bands(NUM_BANDS, chunk_size, sample_rate)
        # Beat detection state
        self._energy_history: np.ndarray = np.zeros(43, dtype=np.float64)  # ~1s at 44100/2048
        self._energy_idx = 0
        # Smoothed spectrum (exponential decay between frames)
        self._smooth_spectrum = np.zeros(NUM_BANDS, dtype=np.float32)

    def start(self) -> None:
        """Start the background capture thread. No-op if already running."""
        if self._running:
            return
        self._running = True
        self._thread = threading.Thread(
            target=self._capture_loop, daemon=True,
            name=f"AudioCapture-{self._device_index}-{'lb' if self._is_loopback else 'in'}",
        )
        self._thread.start()
        logger.info(
            f"AudioCaptureStream started: device={self._device_index} "
            f"loopback={self._is_loopback} sr={self._sample_rate} chunk={self._chunk_size}"
        )

    def stop(self) -> None:
        """Signal the capture thread to exit, join it, and drop the cached analysis."""
        self._running = False
        if self._thread is not None:
            # Loop checks self._running each iteration, so the thread exits promptly
            self._thread.join(timeout=5.0)
            self._thread = None
        with self._lock:
            self._latest = None
        logger.info(f"AudioCaptureStream stopped: device={self._device_index}")

    def get_latest_analysis(self) -> Optional[AudioAnalysis]:
        """Return the most recent AudioAnalysis snapshot, or None if none yet."""
        with self._lock:
            return self._latest

    def _capture_loop(self) -> None:
        """Thread body: open the device, then read and analyze chunks until stopped.

        Any fatal error is logged; cleanup (stream close, PyAudio terminate)
        always runs in the ``finally`` block, and ``_running`` is cleared so
        ``start()`` can be attempted again.
        """
        try:
            # Imported lazily so the module loads on systems without the package
            import pyaudiowpatch as pyaudio
        except ImportError:
            logger.error("PyAudioWPatch is not installed — audio capture unavailable")
            self._running = False
            return
        pa = None
        stream = None
        try:
            pa = pyaudio.PyAudio()
            if self._is_loopback:
                # Loopback capture: find the loopback device for the output device
                loopback_device = self._find_loopback_device(pa, self._device_index)
                if loopback_device is None:
                    logger.error(
                        f"No loopback device found for output device {self._device_index}"
                    )
                    self._running = False
                    return
                device_idx = loopback_device["index"]
                channels = loopback_device["maxInputChannels"]
                sample_rate = int(loopback_device["defaultSampleRate"])
            else:
                # Regular input device
                device_idx = self._device_index if self._device_index >= 0 else None
                if device_idx is not None:
                    dev_info = pa.get_device_info_by_index(device_idx)
                    channels = max(1, dev_info["maxInputChannels"])
                    sample_rate = int(dev_info["defaultSampleRate"])
                else:
                    # Default input device: let PyAudio choose; assume mono
                    channels = 1
                    sample_rate = self._sample_rate
            # Update FFT helpers if sample rate changed
            if sample_rate != self._sample_rate:
                self._sample_rate = sample_rate
                self._bands = _build_log_bands(NUM_BANDS, self._chunk_size, sample_rate)
            stream = pa.open(
                format=pyaudio.paFloat32,
                channels=channels,
                rate=sample_rate,
                input=True,
                input_device_index=device_idx,
                frames_per_buffer=self._chunk_size,
            )
            logger.info(
                f"Audio stream opened: device={device_idx} loopback={self._is_loopback} "
                f"channels={channels} sr={sample_rate}"
            )
            spectrum_buf = np.zeros(NUM_BANDS, dtype=np.float32)
            while self._running:
                try:
                    raw_data = stream.read(self._chunk_size, exception_on_overflow=False)
                    data = np.frombuffer(raw_data, dtype=np.float32)
                except Exception as e:
                    # Transient read failures (e.g. device glitch): back off and retry
                    logger.warning(f"Audio read error: {e}")
                    time.sleep(0.05)
                    continue
                # Mix to mono if multi-channel
                if channels > 1:
                    data = data.reshape(-1, channels)
                    samples = data.mean(axis=1).astype(np.float32)
                else:
                    samples = data
                # RMS and peak
                rms = float(np.sqrt(np.mean(samples ** 2)))
                peak = float(np.max(np.abs(samples)))
                # FFT (Hann-windowed real FFT of exactly one chunk)
                chunk = samples[: self._chunk_size]
                if len(chunk) < self._chunk_size:
                    # Short read: zero-pad so the window/FFT sizes still match
                    chunk = np.pad(chunk, (0, self._chunk_size - len(chunk)))
                windowed = chunk * self._window
                fft_mag = np.abs(np.fft.rfft(windowed))
                # Normalize by chunk size
                fft_mag /= self._chunk_size
                # Bin into logarithmic bands
                for b, (start, end) in enumerate(self._bands):
                    if start < len(fft_mag) and end <= len(fft_mag):
                        spectrum_buf[b] = float(np.mean(fft_mag[start:end]))
                    else:
                        spectrum_buf[b] = 0.0
                # Normalize spectrum to 0-1 range (adaptive: relative to this
                # chunk's loudest band, so quiet audio still fills the display)
                spec_max = float(np.max(spectrum_buf))
                if spec_max > 1e-6:
                    spectrum_buf /= spec_max
                # Exponential smoothing
                alpha = 0.3  # smoothing factor (lower = smoother)
                self._smooth_spectrum[:] = (
                    alpha * spectrum_buf + (1.0 - alpha) * self._smooth_spectrum
                )
                # Beat detection — compare current energy to rolling average
                # over the circular _energy_history buffer (~1 s of chunks)
                energy = float(np.sum(samples ** 2))
                self._energy_history[self._energy_idx % len(self._energy_history)] = energy
                self._energy_idx += 1
                avg_energy = float(np.mean(self._energy_history))
                beat = False
                beat_intensity = 0.0
                if avg_energy > 1e-8:
                    ratio = energy / avg_energy
                    if ratio > 1.5:
                        beat = True
                        beat_intensity = min(1.0, (ratio - 1.0) / 2.0)
                analysis = AudioAnalysis(
                    timestamp=time.perf_counter(),
                    rms=rms,
                    peak=peak,
                    spectrum=self._smooth_spectrum.copy(),
                    beat=beat,
                    beat_intensity=beat_intensity,
                )
                # Publish atomically; readers get either the old or new snapshot
                with self._lock:
                    self._latest = analysis
        except Exception as e:
            logger.error(f"AudioCaptureStream fatal error: {e}", exc_info=True)
        finally:
            if stream is not None:
                try:
                    stream.stop_stream()
                    stream.close()
                except Exception:
                    pass
            if pa is not None:
                try:
                    pa.terminate()
                except Exception:
                    pass
            self._running = False
            logger.info(f"AudioCaptureStream loop ended: device={self._device_index}")

    @staticmethod
    def _find_loopback_device(pa, output_device_index: int) -> Optional[dict]:
        """Find the PyAudioWPatch loopback device for a given output device.

        PyAudioWPatch exposes virtual loopback input devices for each WASAPI
        output device. We match by name via ``get_loopback_device_info_generator()``.

        Returns the loopback device-info dict, or None on error. Falls back
        to the first loopback device when no name match is found.
        """
        try:
            first_loopback = None
            for loopback in pa.get_loopback_device_info_generator():
                if first_loopback is None:
                    first_loopback = loopback
                # Default (-1): return first loopback device (typically default speakers)
                if output_device_index < 0:
                    return loopback
                # Match by output device name contained in loopback device name
                # NOTE(review): this device-info lookup runs on every iteration
                # and could be hoisted before the loop — redundant but harmless.
                target_info = pa.get_device_info_by_index(output_device_index)
                if target_info["name"] in loopback["name"]:
                    return loopback
            # No exact match — return first available loopback
            return first_loopback
        except Exception as e:
            logger.error(f"Error finding loopback device: {e}")
            return None
# ---------------------------------------------------------------------------
# AudioCaptureManager — ref-counted shared capture streams
# ---------------------------------------------------------------------------
class AudioCaptureManager:
    """Manages shared AudioCaptureStream instances with reference counting.

    Multiple AudioColorStripStreams using the same audio device share a
    single capture thread. Streams are keyed by (device_index, is_loopback)
    and stopped once the last reference is released.
    """

    def __init__(self):
        # (device_index, is_loopback) -> (shared stream, reference count)
        self._streams: Dict[Tuple[int, bool], Tuple[AudioCaptureStream, int]] = {}
        self._lock = threading.Lock()  # guards _streams

    def acquire(self, device_index: int, is_loopback: bool) -> AudioCaptureStream:
        """Get or create an AudioCaptureStream for the given device.

        Returns:
            Shared AudioCaptureStream instance (reference count incremented).
        """
        key = (device_index, is_loopback)
        with self._lock:
            entry = self._streams.get(key)
            if entry is not None:
                # Existing capture: bump the refcount and hand out the same stream
                shared, refs = entry
                refs += 1
                self._streams[key] = (shared, refs)
                logger.info(f"Reusing audio capture {key} (ref_count={refs})")
                return shared
            # First consumer for this device: spin up a new capture thread
            created = AudioCaptureStream(device_index, is_loopback)
            created.start()
            self._streams[key] = (created, 1)
            logger.info(f"Created audio capture {key}")
            return created

    def release(self, device_index: int, is_loopback: bool) -> None:
        """Release a reference to an AudioCaptureStream.

        Stops and removes the stream when the last reference is dropped.
        """
        key = (device_index, is_loopback)
        with self._lock:
            entry = self._streams.get(key)
            if entry is None:
                logger.warning(f"Attempted to release unknown audio capture: {key}")
                return
            shared, refs = entry
            remaining = refs - 1
            if remaining > 0:
                # Other consumers still attached — just decrement
                self._streams[key] = (shared, remaining)
                logger.debug(f"Released audio capture {key} (ref_count={remaining})")
            else:
                # Last consumer gone — tear down the capture thread
                shared.stop()
                del self._streams[key]
                logger.info(f"Removed audio capture {key}")

    def release_all(self) -> None:
        """Stop and remove all capture streams. Called on shutdown."""
        with self._lock:
            while self._streams:
                key, (shared, _) = self._streams.popitem()
                try:
                    shared.stop()
                except Exception as e:
                    logger.error(f"Error stopping audio capture {key}: {e}")
            logger.info("Released all audio capture streams")

    @staticmethod
    def enumerate_devices() -> List[dict]:
        """List available audio devices for the frontend dropdown.

        Returns list of dicts with device info. Output devices with WASAPI
        hostapi are marked as loopback candidates. Returns an empty list
        when PyAudioWPatch is missing or enumeration fails.
        """
        try:
            import pyaudiowpatch as pyaudio
        except ImportError:
            logger.warning("PyAudioWPatch not installed — no audio devices available")
            return []
        pa = None
        try:
            pa = pyaudio.PyAudio()
            wasapi_idx = pa.get_host_api_info_by_type(pyaudio.paWASAPI)["index"]
            devices: List[dict] = []
            for i in range(pa.get_device_count()):
                dev = pa.get_device_info_by_index(i)
                # Only WASAPI devices are relevant (loopback support)
                if dev["hostApi"] != wasapi_idx:
                    continue
                if dev["maxInputChannels"] > 0:
                    devices.append({
                        "index": i,
                        "name": dev["name"],
                        "is_input": True,
                        "is_loopback": False,
                        "channels": dev["maxInputChannels"],
                        "default_samplerate": dev["defaultSampleRate"],
                    })
                if dev["maxOutputChannels"] > 0:
                    # Output device: offered as a loopback capture candidate
                    devices.append({
                        "index": i,
                        "name": f"{dev['name']} [Loopback]",
                        "is_input": False,
                        "is_loopback": True,
                        "channels": dev["maxOutputChannels"],
                        "default_samplerate": dev["defaultSampleRate"],
                    })
            return devices
        except Exception as e:
            logger.error(f"Failed to enumerate audio devices: {e}", exc_info=True)
            return []
        finally:
            if pa is not None:
                try:
                    pa.terminate()
                except Exception:
                    pass

View File

@@ -0,0 +1,300 @@
"""Audio-reactive LED color strip stream.
Implements AudioColorStripStream which produces LED color arrays driven by
real-time audio analysis (spectrum, beat detection, RMS levels).
Three visualization modes:
spectrum — FFT frequency bars mapped across LEDs with palette coloring
beat_pulse — full-strip flash on beat detection with exponential decay
vu_meter — volume level fills LEDs like a progress bar
"""
import threading
import time
from typing import Optional
import numpy as np
from wled_controller.core.audio.audio_capture import AudioCaptureManager
from wled_controller.core.processing.color_strip_stream import ColorStripStream
from wled_controller.core.processing.effect_stream import _build_palette_lut
from wled_controller.utils import get_logger
from wled_controller.utils.timer import high_resolution_timer
logger = get_logger(__name__)
class AudioColorStripStream(ColorStripStream):
    """Color strip stream driven by audio analysis.

    Dispatches to one of three render methods based on visualization_mode:
    spectrum, beat_pulse, vu_meter.
    Polls AudioCaptureStream for latest analysis data and renders to LED array.
    Uses the same lifecycle pattern as EffectColorStripStream: background
    thread, double-buffered output, configure() for auto-sizing.
    """

    def __init__(self, source, audio_capture_manager: AudioCaptureManager):
        """
        Args:
            source: Audio color-strip source config; attributes read via
                getattr in ``_update_from_source`` (visualization_mode,
                audio_device_index, sensitivity, palette, colors, led_count, …).
            audio_capture_manager: Shared manager providing ref-counted
                AudioCaptureStream instances.
        """
        self._audio_capture_manager = audio_capture_manager
        self._audio_stream = None  # acquired on start
        self._colors_lock = threading.Lock()  # guards _colors
        self._running = False
        self._thread: Optional[threading.Thread] = None
        self._fps = 30
        # Beat pulse persistent state
        self._pulse_brightness = 0.0
        # Smoothed spectrum for temporal smoothing between frames
        self._prev_spectrum: Optional[np.ndarray] = None
        self._prev_rms = 0.0
        self._update_from_source(source)

    def _update_from_source(self, source) -> None:
        """Copy render parameters from the source config onto this stream.

        Missing attributes fall back to safe defaults; invalid color lists
        fall back to green (base) and red (peak). Resets the published
        color buffer to None until the next rendered frame.
        """
        self._visualization_mode = getattr(source, "visualization_mode", "spectrum")
        self._audio_device_index = getattr(source, "audio_device_index", -1)
        self._audio_loopback = bool(getattr(source, "audio_loopback", True))
        self._sensitivity = float(getattr(source, "sensitivity", 1.0))
        self._smoothing = float(getattr(source, "smoothing", 0.3))
        self._palette_name = getattr(source, "palette", "rainbow")
        self._palette_lut = _build_palette_lut(self._palette_name)
        color = getattr(source, "color", None)
        self._color = color if isinstance(color, list) and len(color) == 3 else [0, 255, 0]
        color_peak = getattr(source, "color_peak", None)
        self._color_peak = color_peak if isinstance(color_peak, list) and len(color_peak) == 3 else [255, 0, 0]
        # led_count of 0/None means "auto-size to the consuming device"
        self._auto_size = not source.led_count
        self._led_count = source.led_count if source.led_count and source.led_count > 0 else 1
        self._mirror = bool(getattr(source, "mirror", False))
        with self._colors_lock:
            self._colors: Optional[np.ndarray] = None

    # ── ColorStripStream interface ──────────────────────────────────
    def configure(self, device_led_count: int) -> None:
        """Grow the LED count to the consuming device's size when auto-sizing.

        Only ever grows — the count is the max across all consumers seen.
        """
        if self._auto_size and device_led_count > 0:
            new_count = max(self._led_count, device_led_count)
            if new_count != self._led_count:
                self._led_count = new_count
                logger.debug(f"AudioColorStripStream auto-sized to {new_count} LEDs")

    @property
    def target_fps(self) -> int:
        # Frame rate the animation loop aims for
        return self._fps

    @property
    def led_count(self) -> int:
        # Current output strip length in LEDs
        return self._led_count

    def set_capture_fps(self, fps: int) -> None:
        """Set the render frame rate, clamped to the 1..90 range."""
        self._fps = max(1, min(90, fps))

    def start(self) -> None:
        """Acquire the shared audio capture and start the render thread.

        No-op if already running.
        """
        if self._running:
            return
        # Acquire shared audio capture stream
        self._audio_stream = self._audio_capture_manager.acquire(
            self._audio_device_index, self._audio_loopback
        )
        self._running = True
        self._thread = threading.Thread(
            target=self._animate_loop,
            name=f"css-audio-{self._visualization_mode}",
            daemon=True,
        )
        self._thread.start()
        logger.info(
            f"AudioColorStripStream started (viz={self._visualization_mode}, "
            f"device={self._audio_device_index}, loopback={self._audio_loopback})"
        )

    def stop(self) -> None:
        """Stop the render thread and release the shared audio capture."""
        self._running = False
        if self._thread:
            self._thread.join(timeout=5.0)
            if self._thread.is_alive():
                logger.warning("AudioColorStripStream thread did not terminate within 5s")
            self._thread = None
        # Release shared audio capture
        if self._audio_stream is not None:
            self._audio_capture_manager.release(self._audio_device_index, self._audio_loopback)
            self._audio_stream = None
        self._prev_spectrum = None
        logger.info("AudioColorStripStream stopped")

    def get_latest_colors(self) -> Optional[np.ndarray]:
        """Return the most recently rendered (led_count, 3) uint8 array, or None."""
        with self._colors_lock:
            return self._colors

    def update_source(self, source) -> None:
        """Apply an updated source config in place, swapping the audio device if it changed."""
        from wled_controller.storage.color_strip_source import AudioColorStripSource
        if isinstance(source, AudioColorStripSource):
            old_device = self._audio_device_index
            old_loopback = self._audio_loopback
            # Preserve the auto-sized LED count across reconfiguration
            prev_led_count = self._led_count if self._auto_size else None
            self._update_from_source(source)
            if prev_led_count and self._auto_size:
                self._led_count = prev_led_count
            # If audio device changed, swap capture stream
            if self._running and (self._audio_device_index != old_device or self._audio_loopback != old_loopback):
                self._audio_capture_manager.release(old_device, old_loopback)
                self._audio_stream = self._audio_capture_manager.acquire(
                    self._audio_device_index, self._audio_loopback
                )
                # NOTE(review): the old/new device pairs appear to run together in
                # this log message — a separator (e.g. " -> ") may have been lost.
                logger.info(
                    f"AudioColorStripStream swapped audio device: "
                    f"{old_device}:{old_loopback}{self._audio_device_index}:{self._audio_loopback}"
                )
            logger.info("AudioColorStripStream params updated in-place")

    # ── Main animation loop ─────────────────────────────────────────
    def _animate_loop(self) -> None:
        """Thread body: render frames at target_fps into alternating buffers.

        Double-buffering lets a consumer keep reading the previously
        published array while the next frame is rendered into the other one.
        """
        _pool_n = 0
        _buf_a = _buf_b = None
        _use_a = True
        # Dispatch table: visualization mode -> render method
        renderers = {
            "spectrum": self._render_spectrum,
            "beat_pulse": self._render_beat_pulse,
            "vu_meter": self._render_vu_meter,
        }
        with high_resolution_timer():
            while self._running:
                loop_start = time.perf_counter()
                frame_time = 1.0 / self._fps
                n = self._led_count
                # Rebuild scratch buffers when LED count changes
                if n != _pool_n:
                    _pool_n = n
                    _buf_a = np.zeros((n, 3), dtype=np.uint8)
                    _buf_b = np.zeros((n, 3), dtype=np.uint8)
                buf = _buf_a if _use_a else _buf_b
                _use_a = not _use_a
                # Get latest audio analysis
                analysis = None
                if self._audio_stream is not None:
                    analysis = self._audio_stream.get_latest_analysis()
                # Unknown modes fall back to the spectrum renderer
                render_fn = renderers.get(self._visualization_mode, self._render_spectrum)
                render_fn(buf, n, analysis)
                with self._colors_lock:
                    self._colors = buf
                # Sleep off the remainder of the frame budget (min 1 ms)
                elapsed = time.perf_counter() - loop_start
                time.sleep(max(frame_time - elapsed, 0.001))

    # ── Spectrum Analyzer ──────────────────────────────────────────
    def _render_spectrum(self, buf: np.ndarray, n: int, analysis) -> None:
        """Render FFT band amplitudes across the strip, colored via the palette LUT.

        Bands are interpolated onto LED positions; amplitude picks both the
        palette index and the brightness. Mirror mode renders half the strip
        and reflects it so bass sits at the center.
        """
        if analysis is None:
            buf[:] = 0
            return
        spectrum = analysis.spectrum
        sensitivity = self._sensitivity
        smoothing = self._smoothing
        lut = self._palette_lut
        num_bands = len(spectrum)
        band_x = np.arange(num_bands, dtype=np.float32)
        if self._mirror:
            half = (n + 1) // 2
            led_x = np.linspace(0, num_bands - 1, half)
            amplitudes = np.interp(led_x, band_x, spectrum)
            amplitudes *= sensitivity
            np.clip(amplitudes, 0.0, 1.0, out=amplitudes)
            # Temporal smoothing (skipped when the strip length just changed)
            if self._prev_spectrum is not None and len(self._prev_spectrum) == half:
                amplitudes[:] = smoothing * self._prev_spectrum + (1.0 - smoothing) * amplitudes
            self._prev_spectrum = amplitudes.copy()
            # Mirror: center = bass, edges = treble
            full_amp = np.empty(n, dtype=np.float32)
            full_amp[:half] = amplitudes[::-1]
            full_amp[half:] = amplitudes[: n - half]
        else:
            led_x = np.linspace(0, num_bands - 1, n)
            amplitudes = np.interp(led_x, band_x, spectrum)
            amplitudes *= sensitivity
            np.clip(amplitudes, 0.0, 1.0, out=amplitudes)
            # Temporal smoothing
            if self._prev_spectrum is not None and len(self._prev_spectrum) == n:
                amplitudes[:] = smoothing * self._prev_spectrum + (1.0 - smoothing) * amplitudes
            self._prev_spectrum = amplitudes.copy()
            full_amp = amplitudes
        # Map to palette: amplitude → palette index → color
        indices = (full_amp * 255).astype(np.int32)
        np.clip(indices, 0, 255, out=indices)
        colors = lut[indices]  # (n, 3) uint8
        # Scale brightness by amplitude
        for ch in range(3):
            buf[:, ch] = (colors[:, ch].astype(np.float32) * full_amp).astype(np.uint8)

    # ── VU Meter ───────────────────────────────────────────────────
    def _render_vu_meter(self, buf: np.ndarray, n: int, analysis) -> None:
        """Fill LEDs from index 0 proportionally to the smoothed RMS level.

        Lit LEDs blend from the base color toward the peak color along the
        full-strip gradient, so the tip color reflects how loud it is.
        """
        if analysis is None:
            buf[:] = 0
            return
        rms = analysis.rms * self._sensitivity
        # Temporal smoothing on RMS
        rms = self._smoothing * self._prev_rms + (1.0 - self._smoothing) * rms
        # The unclamped value is stored so loud input decays naturally next frame
        self._prev_rms = rms
        rms = min(1.0, rms)
        fill_count = int(rms * n)
        buf[:] = 0
        if fill_count > 0:
            base = np.array(self._color, dtype=np.float32)
            peak = np.array(self._color_peak, dtype=np.float32)
            # Gradient from base color to peak color
            t = np.linspace(0, 1, n, dtype=np.float32)[:fill_count]
            for ch in range(3):
                buf[:fill_count, ch] = np.clip(
                    base[ch] + (peak[ch] - base[ch]) * t, 0, 255
                ).astype(np.uint8)

    # ── Beat Pulse ─────────────────────────────────────────────────
    def _render_beat_pulse(self, buf: np.ndarray, n: int, analysis) -> None:
        """Flash the whole strip on a beat, fading out between beats."""
        if analysis is None:
            buf[:] = 0
            return
        # On beat: flash to full brightness
        if analysis.beat:
            self._pulse_brightness = 1.0
        else:
            # Fade between beats — higher sensitivity means a smaller per-frame
            # step (slower fade). Note: the step is linear per frame, despite
            # the module docstring calling it "exponential decay".
            decay_rate = 0.05 + 0.15 * (1.0 / max(self._sensitivity, 0.1))
            self._pulse_brightness = max(0.0, self._pulse_brightness - decay_rate)
        brightness = self._pulse_brightness
        if brightness < 0.01:
            buf[:] = 0
            return
        # Color from palette based on beat intensity
        palette_idx = int(analysis.beat_intensity * 255)
        palette_idx = max(0, min(255, palette_idx))
        base_color = self._palette_lut[palette_idx]
        # Fill all LEDs with pulsing color
        buf[:, 0] = int(base_color[0] * brightness)
        buf[:, 1] = int(base_color[1] * brightness)
        buf[:, 2] = int(base_color[2] * brightness)

View File

@@ -56,14 +56,16 @@ class ColorStripStreamManager:
keyed by ``{css_id}:{consumer_id}``.
"""
def __init__(self, color_strip_store, live_stream_manager):
    def __init__(self, color_strip_store, live_stream_manager, audio_capture_manager=None):
        """Initialize the manager with its backing stores and helpers.

        Args:
            color_strip_store: ColorStripStore for resolving source configs
            live_stream_manager: LiveStreamManager for acquiring picture streams
            audio_capture_manager: AudioCaptureManager for audio-reactive sources
                (optional; passed to audio streams when they are created)
        """
        self._color_strip_store = color_strip_store
        self._live_stream_manager = live_stream_manager
        self._audio_capture_manager = audio_capture_manager
        # Active stream entries keyed by "{css_id}:{consumer_id}"
        self._streams: Dict[str, _ColorStripEntry] = {}
def _resolve_key(self, css_id: str, consumer_id: str) -> str:
@@ -100,7 +102,10 @@ class ColorStripStreamManager:
# Non-sharable: always create a fresh per-consumer instance
if not source.sharable:
if source.source_type == "composite":
if source.source_type == "audio":
from wled_controller.core.processing.audio_stream import AudioColorStripStream
css_stream = AudioColorStripStream(source, self._audio_capture_manager)
elif source.source_type == "composite":
from wled_controller.core.processing.composite_stream import CompositeColorStripStream
css_stream = CompositeColorStripStream(source, self)
else:

View File

@@ -13,6 +13,7 @@ from wled_controller.core.devices.led_client import (
create_led_client,
get_provider,
)
from wled_controller.core.audio.audio_capture import AudioCaptureManager
from wled_controller.core.processing.live_stream_manager import LiveStreamManager
from wled_controller.core.processing.color_strip_stream_manager import ColorStripStreamManager
from wled_controller.core.capture.screen_overlay import OverlayManager
@@ -79,9 +80,11 @@ class ProcessorManager:
self._live_stream_manager = LiveStreamManager(
picture_source_store, capture_template_store, pp_template_store
)
self._audio_capture_manager = AudioCaptureManager()
self._color_strip_stream_manager = ColorStripStreamManager(
color_strip_store=color_strip_store,
live_stream_manager=self._live_stream_manager,
audio_capture_manager=self._audio_capture_manager,
)
self._overlay_manager = OverlayManager()
self._event_queues: List[asyncio.Queue] = []