Add audio visualizer with spectrogram, beat-reactive art, and device selection

- New audio_analyzer service: loopback capture via soundcard + numpy FFT
- Real-time spectrogram bars below album art with accent color gradient
- Album art and vinyl pulse to bass energy beats
- WebSocket subscriber pattern for opt-in audio data streaming
- Audio device selection in Settings tab with auto-detect fallback
- Optimized FFT pipeline: vectorized cumsum bin grouping, pre-serialized JSON broadcast
- Visualizer config: enabled/fps/bins/device in config.yaml
- Optional deps: soundcard + numpy (graceful degradation if missing)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-27 21:42:19 +03:00
parent 8a8f00ff31
commit 0691e3d338
11 changed files with 919 additions and 2 deletions

View File

@@ -112,6 +112,28 @@ class Settings(BaseSettings):
description="Quick links displayed as icons in the header", description="Quick links displayed as icons in the header",
) )
# Audio visualizer
# These four settings gate the optional audio spectrum visualizer. Capture
# only activates when the optional deps (soundcard + numpy) are importable;
# otherwise the feature degrades gracefully (see services.audio_analyzer).
visualizer_enabled: bool = Field(
    default=True,
    description="Enable audio spectrum visualizer (requires soundcard + numpy)",
)
# Server-side analysis/broadcast rate; clamped to a sane 10-60 range.
visualizer_fps: int = Field(
    default=25,
    description="Visualizer update rate in frames per second",
    ge=10,
    le=60,
)
# Number of logarithmic frequency bars sent to clients per frame.
visualizer_bins: int = Field(
    default=32,
    description="Number of frequency bins for the visualizer",
    ge=8,
    le=128,
)
# Substring-matched against loopback device names; None = auto-detect.
visualizer_device: Optional[str] = Field(
    default=None,
    description="Loopback audio device name for visualizer (None = auto-detect)",
)
@classmethod @classmethod
def load_from_yaml(cls, path: Optional[Path] = None) -> "Settings": def load_from_yaml(cls, path: Optional[Path] = None) -> "Settings":
"""Load settings from a YAML configuration file.""" """Load settings from a YAML configuration file."""

View File

@@ -59,8 +59,32 @@ async def lifespan(app: FastAPI):
await ws_manager.start_status_monitor(controller.get_status) await ws_manager.start_status_monitor(controller.get_status)
logger.info("WebSocket status monitor started") logger.info("WebSocket status monitor started")
# Start audio visualizer (if enabled and dependencies available)
analyzer = None
if settings.visualizer_enabled:
from .services.audio_analyzer import get_audio_analyzer
analyzer = get_audio_analyzer(
num_bins=settings.visualizer_bins,
target_fps=settings.visualizer_fps,
device_name=settings.visualizer_device,
)
if analyzer.available:
if analyzer.start():
await ws_manager.start_audio_monitor(analyzer)
logger.info("Audio visualizer started")
else:
logger.warning("Audio visualizer failed to start (no loopback device?)")
else:
logger.info("Audio visualizer unavailable (install soundcard + numpy)")
yield yield
# Stop audio visualizer
await ws_manager.stop_audio_monitor()
if analyzer and analyzer.running:
analyzer.stop()
# Stop WebSocket status monitor # Stop WebSocket status monitor
await ws_manager.stop_status_monitor() await ws_manager.stop_status_monitor()
logger.info("Media Server shutting down") logger.info("Media Server shutting down")

View File

@@ -268,6 +268,59 @@ async def get_artwork(_: str = Depends(verify_token_or_query)) -> Response:
return Response(content=art_bytes, media_type=content_type) return Response(content=art_bytes, media_type=content_type)
@router.get("/visualizer/status")
async def visualizer_status(_: str = Depends(verify_token)) -> dict:
    """Check if audio visualizer is available and running."""
    # Local import keeps the optional-deps module out of the import path
    # when the visualizer is never queried.
    from ..services.audio_analyzer import get_audio_analyzer

    analyzer = get_audio_analyzer()
    status = {
        "available": analyzer.available,
        "running": analyzer.running,
        "current_device": analyzer.current_device,
    }
    return status
@router.get("/visualizer/devices")
async def visualizer_devices(_: str = Depends(verify_token)) -> list[dict[str, str]]:
    """List available loopback audio devices for the visualizer.

    Device enumeration touches the sound subsystem and can block, so it is
    offloaded to a worker thread rather than run on the event loop.
    """
    from ..services.audio_analyzer import AudioAnalyzer

    # asyncio.get_event_loop() is deprecated inside coroutines (3.10+);
    # asyncio.to_thread() targets the running loop's default executor.
    return await asyncio.to_thread(AudioAnalyzer.list_loopback_devices)
@router.post("/visualizer/device")
async def set_visualizer_device(
    request: dict,
    _: str = Depends(verify_token),
) -> dict:
    """Set the loopback audio device for the visualizer.

    Body: {"device_name": "Device Name" | null}

    Passing null resets to auto-detect.
    """
    from ..services.audio_analyzer import get_audio_analyzer

    device_name = request.get("device_name")
    analyzer = get_audio_analyzer()
    was_running = analyzer.running
    # set_device() stops and joins the capture thread (up to a few seconds);
    # run it off the event loop so other requests aren't stalled.
    success = await asyncio.to_thread(analyzer.set_device, device_name)
    # Restart the broadcast task so subscribers pick up the new device.
    if was_running and success and analyzer.running:
        await ws_manager.stop_audio_monitor()
        await ws_manager.start_audio_monitor(analyzer)
    return {
        "success": success,
        "current_device": analyzer.current_device,
        "running": analyzer.running,
    }
@router.websocket("/ws") @router.websocket("/ws")
async def websocket_endpoint( async def websocket_endpoint(
websocket: WebSocket, websocket: WebSocket,
@@ -321,6 +374,10 @@ async def websocket_endpoint(
if volume is not None: if volume is not None:
controller = get_media_controller() controller = get_media_controller()
await controller.set_volume(int(volume)) await controller.set_volume(int(volume))
elif data.get("type") == "enable_visualizer":
await ws_manager.subscribe_visualizer(websocket)
elif data.get("type") == "disable_visualizer":
await ws_manager.unsubscribe_visualizer(websocket)
except WebSocketDisconnect: except WebSocketDisconnect:
await ws_manager.disconnect(websocket) await ws_manager.disconnect(websocket)

View File

@@ -0,0 +1,315 @@
"""Audio spectrum analyzer service using system loopback capture."""
import logging
import platform
import threading
import time
logger = logging.getLogger(__name__)
_np = None
_sc = None
def _load_numpy():
global _np
if _np is None:
try:
import numpy as np
_np = np
except ImportError:
logger.info("numpy not installed - audio visualizer unavailable")
return _np
def _load_soundcard():
global _sc
if _sc is None:
try:
import soundcard as sc
_sc = sc
except ImportError:
logger.info("soundcard not installed - audio visualizer unavailable")
return _sc
class AudioAnalyzer:
    """Captures system audio loopback and performs real-time FFT analysis.

    A daemon thread (`_capture_loop`) records fixed-size chunks from a
    loopback device, windows them, computes an FFT, groups magnitudes into
    logarithmic bins, and publishes the newest frame under `_lock`.
    Consumers poll `get_frequency_data()` for the latest frame.
    """

    def __init__(
        self,
        num_bins: int = 32,
        sample_rate: int = 44100,
        chunk_size: int = 2048,
        target_fps: int = 25,
        device_name: str | None = None,
    ):
        """Configure the analyzer (no capture starts until `start()`).

        Args:
            num_bins: Number of logarithmic frequency bins produced per frame.
            sample_rate: Capture sample rate in Hz.
            chunk_size: Samples per recorded block / FFT window.
            target_fps: Upper bound on analysis frames per second.
            device_name: Preferred loopback device (case-insensitive
                substring match); None means auto-detect.
        """
        self.num_bins = num_bins
        self.sample_rate = sample_rate
        self.chunk_size = chunk_size
        self.target_fps = target_fps
        self.device_name = device_name
        # Capture-thread state.
        self._running = False
        self._thread: threading.Thread | None = None
        # Guards `_data`; the capture thread swaps in a fresh dict per frame.
        self._lock = threading.Lock()
        self._data: dict | None = None
        self._current_device_name: str | None = None
        # Pre-compute logarithmic bin edges
        self._bin_edges = self._compute_bin_edges()

    def _compute_bin_edges(self) -> list[int]:
        """Compute logarithmic frequency bin boundaries for perceptual grouping."""
        np = _load_numpy()
        if np is None:
            # Dependencies missing; `available` will be False anyway.
            return []
        # rfft of a real chunk yields chunk_size//2 + 1 magnitude values.
        fft_size = self.chunk_size // 2 + 1
        min_freq = 20.0
        max_freq = min(16000.0, self.sample_rate / 2)
        edges = []
        for i in range(self.num_bins + 1):
            # Geometric spacing: constant frequency ratio between edges.
            freq = min_freq * (max_freq / min_freq) ** (i / self.num_bins)
            # Map the frequency to the corresponding FFT bin index.
            bin_idx = int(freq * self.chunk_size / self.sample_rate)
            edges.append(min(bin_idx, fft_size - 1))
        return edges

    @property
    def available(self) -> bool:
        """Whether audio capture dependencies are available."""
        return _load_numpy() is not None and _load_soundcard() is not None

    @property
    def running(self) -> bool:
        """Whether capture is currently active."""
        # NOTE(review): set True before the thread confirms a device exists;
        # the thread flips it back to False if startup fails.
        return self._running

    def start(self) -> bool:
        """Start audio capture in a background thread. Returns False if unavailable."""
        if self._running:
            return True
        if not self.available:
            return False
        self._running = True
        # Daemon thread so a hung capture never blocks interpreter exit.
        self._thread = threading.Thread(target=self._capture_loop, daemon=True)
        self._thread.start()
        return True

    def stop(self) -> None:
        """Stop audio capture and cleanup."""
        self._running = False
        if self._thread:
            # Bounded join: a blocked recorder read should release quickly,
            # but never hang the caller indefinitely.
            self._thread.join(timeout=3.0)
            self._thread = None
        with self._lock:
            self._data = None

    def get_frequency_data(self) -> dict | None:
        """Return latest frequency data (thread-safe). None if not running."""
        with self._lock:
            return self._data

    @staticmethod
    def list_loopback_devices() -> list[dict[str, str]]:
        """List all available loopback audio devices."""
        sc = _load_soundcard()
        if sc is None:
            return []
        devices = []
        try:
            # COM may be needed on Windows for WASAPI
            if platform.system() == "Windows":
                try:
                    import comtypes
                    comtypes.CoInitializeEx(comtypes.COINIT_MULTITHREADED)
                except Exception:
                    # Best effort: COM may already be initialized on this thread.
                    pass
            loopback_mics = sc.all_microphones(include_loopback=True)
            for mic in loopback_mics:
                if mic.isloopback:
                    devices.append({"id": mic.id, "name": mic.name})
        except Exception as e:
            logger.warning("Failed to list loopback devices: %s", e)
        return devices

    def _find_loopback_device(self):
        """Find a loopback device for system audio capture.

        Resolution order: requested name (substring match) -> first loopback
        device -> loopback matching the default speaker. Returns the
        soundcard microphone object, or None when nothing usable is found.
        """
        sc = _load_soundcard()
        if sc is None:
            return None
        try:
            loopback_mics = sc.all_microphones(include_loopback=True)
            # If a specific device is requested, find it by name (partial match)
            if self.device_name:
                target = self.device_name.lower()
                for mic in loopback_mics:
                    if mic.isloopback and target in mic.name.lower():
                        logger.info("Found requested loopback device: %s", mic.name)
                        self._current_device_name = mic.name
                        return mic
                logger.warning("Requested device '%s' not found, falling back to default", self.device_name)
            # Default: first loopback device
            for mic in loopback_mics:
                if mic.isloopback:
                    logger.info("Found loopback device: %s", mic.name)
                    self._current_device_name = mic.name
                    return mic
            # Fallback: try to get default speaker's loopback
            default_speaker = sc.default_speaker()
            if default_speaker:
                for mic in loopback_mics:
                    if default_speaker.name in mic.name:
                        logger.info("Found speaker loopback: %s", mic.name)
                        self._current_device_name = mic.name
                        return mic
        except Exception as e:
            logger.warning("Failed to find loopback device: %s", e)
        return None

    def set_device(self, device_name: str | None) -> bool:
        """Change the loopback device. Restarts capture if running. Returns True on success."""
        was_running = self._running
        if was_running:
            self.stop()
        self.device_name = device_name
        # Cleared so a failed restart doesn't report a stale device name.
        self._current_device_name = None
        if was_running:
            return self.start()
        return True

    @property
    def current_device(self) -> str | None:
        """Return the name of the currently active loopback device."""
        return self._current_device_name

    def _capture_loop(self) -> None:
        """Background thread: capture audio and compute FFT continuously."""
        # Initialize COM on Windows (required for WASAPI/SoundCard)
        if platform.system() == "Windows":
            try:
                import comtypes
                comtypes.CoInitializeEx(comtypes.COINIT_MULTITHREADED)
            except Exception:
                # Fallback when comtypes is missing: raw ole32 call.
                try:
                    import ctypes
                    ctypes.windll.ole32.CoInitializeEx(0, 0)
                except Exception as e:
                    logger.warning("Failed to initialize COM: %s", e)
        np = _load_numpy()
        sc = _load_soundcard()
        if np is None or sc is None:
            self._running = False
            return
        device = self._find_loopback_device()
        if device is None:
            logger.warning("No loopback audio device found - visualizer disabled")
            self._running = False
            return
        interval = 1.0 / self.target_fps
        # Hann window reduces spectral leakage from chunk boundaries.
        window = np.hanning(self.chunk_size)
        # Pre-compute bin edge pairs for vectorized grouping
        edges = self._bin_edges
        bin_starts = np.array([edges[i] for i in range(self.num_bins)], dtype=np.intp)
        # Each bin spans at least one FFT index, so `counts` below is never 0.
        bin_ends = np.array([max(edges[i + 1], edges[i] + 1) for i in range(self.num_bins)], dtype=np.intp)
        try:
            with device.recorder(
                samplerate=self.sample_rate,
                channels=1,
                blocksize=self.chunk_size,
            ) as recorder:
                logger.info("Audio capture started on: %s", device.name)
                while self._running:
                    t0 = time.monotonic()
                    try:
                        data = recorder.record(numframes=self.chunk_size)
                    except Exception as e:
                        # Transient read errors (device glitches) are retried.
                        logger.debug("Audio capture read error: %s", e)
                        time.sleep(interval)
                        continue
                    # Mono mix if needed
                    if data.ndim > 1:
                        mono = data.mean(axis=1)
                    else:
                        mono = data.ravel()
                    if len(mono) < self.chunk_size:
                        # Short read (e.g. during device switch) - skip frame.
                        time.sleep(interval)
                        continue
                    # Apply window and compute FFT
                    windowed = mono[:self.chunk_size] * window
                    fft_mag = np.abs(np.fft.rfft(windowed))
                    # Group into logarithmic bins (vectorized via cumsum)
                    # sum(fft_mag[s:e]) == cumsum[e] - cumsum[s] per bin.
                    cumsum = np.concatenate(([0.0], np.cumsum(fft_mag)))
                    counts = bin_ends - bin_starts
                    bins = (cumsum[bin_ends] - cumsum[bin_starts]) / counts
                    # Normalize to 0-1
                    # NOTE: per-frame max normalization - bar heights are
                    # relative within a frame, not absolute loudness.
                    max_val = bins.max()
                    if max_val > 0:
                        bins *= (1.0 / max_val)
                    # Bass energy: average of first 4 bins (~20-200Hz)
                    bass = float(bins[:4].mean()) if self.num_bins >= 4 else 0.0
                    # Round for compact JSON
                    frequencies = np.round(bins, 3).tolist()
                    bass = round(bass, 3)
                    # Publish a brand-new dict each frame; readers compare
                    # identity to detect "no new frame".
                    with self._lock:
                        self._data = {"frequencies": frequencies, "bass": bass}
                    # Throttle to target FPS
                    elapsed = time.monotonic() - t0
                    if elapsed < interval:
                        time.sleep(interval - elapsed)
        except Exception as e:
            logger.error("Audio capture loop error: %s", e)
        finally:
            self._running = False
            logger.info("Audio capture stopped")
# Global singleton
_analyzer: AudioAnalyzer | None = None


def get_audio_analyzer(
    num_bins: int = 32,
    sample_rate: int = 44100,
    target_fps: int = 25,
    device_name: str | None = None,
) -> AudioAnalyzer:
    """Get or create the global AudioAnalyzer instance.

    Arguments are honored only on the first call, which constructs the
    singleton; subsequent calls return the existing instance unchanged.
    """
    global _analyzer
    if _analyzer is not None:
        return _analyzer
    _analyzer = AudioAnalyzer(
        num_bins=num_bins,
        sample_rate=sample_rate,
        target_fps=target_fps,
        device_name=device_name,
    )
    return _analyzer

View File

@@ -1,6 +1,7 @@
"""WebSocket connection manager and status broadcaster.""" """WebSocket connection manager and status broadcaster."""
import asyncio import asyncio
import json
import logging import logging
import time import time
from typing import Any, Callable, Coroutine from typing import Any, Callable, Coroutine
@@ -23,6 +24,10 @@ class ConnectionManager:
self._position_broadcast_interval: float = 5.0 # Send position updates every 5s during playback self._position_broadcast_interval: float = 5.0 # Send position updates every 5s during playback
self._last_broadcast_time: float = 0.0 self._last_broadcast_time: float = 0.0
self._running: bool = False self._running: bool = False
# Audio visualizer
self._visualizer_subscribers: set[WebSocket] = set()
self._audio_task: asyncio.Task | None = None
self._audio_analyzer = None
async def connect(self, websocket: WebSocket) -> None: async def connect(self, websocket: WebSocket) -> None:
"""Accept a new WebSocket connection.""" """Accept a new WebSocket connection."""
@@ -44,6 +49,7 @@ class ConnectionManager:
"""Remove a WebSocket connection.""" """Remove a WebSocket connection."""
async with self._lock: async with self._lock:
self._active_connections.discard(websocket) self._active_connections.discard(websocket)
self._visualizer_subscribers.discard(websocket)
logger.info( logger.info(
"WebSocket client disconnected. Total: %d", len(self._active_connections) "WebSocket client disconnected. Total: %d", len(self._active_connections)
) )
@@ -83,6 +89,85 @@ class ConnectionManager:
await self.broadcast(message) await self.broadcast(message)
logger.info("Broadcast sent: links_changed") logger.info("Broadcast sent: links_changed")
async def subscribe_visualizer(self, websocket: WebSocket) -> None:
    """Subscribe a client to audio visualizer data.

    Subscribers receive "audio_data" frames from `_audio_broadcast_loop`.
    """
    async with self._lock:
        self._visualizer_subscribers.add(websocket)
        logger.debug("Visualizer subscriber added. Total: %d", len(self._visualizer_subscribers))
async def unsubscribe_visualizer(self, websocket: WebSocket) -> None:
    """Unsubscribe a client from audio visualizer data.

    Safe to call for a client that never subscribed (set.discard).
    """
    async with self._lock:
        self._visualizer_subscribers.discard(websocket)
        logger.debug("Visualizer subscriber removed. Total: %d", len(self._visualizer_subscribers))
async def start_audio_monitor(self, analyzer) -> None:
    """Start audio frequency broadcasting if analyzer is available.

    The analyzer reference is stored unconditionally; the broadcast task is
    only spawned when capture is actually running, so a stopped analyzer
    results in no background task (silently).
    """
    self._audio_analyzer = analyzer
    if analyzer and analyzer.running:
        self._audio_task = asyncio.create_task(self._audio_broadcast_loop())
        logger.info("Audio visualizer broadcast started")
async def stop_audio_monitor(self) -> None:
    """Stop audio frequency broadcasting."""
    task = self._audio_task
    if task is None:
        # Nothing running - idempotent no-op.
        return
    self._audio_task = None
    task.cancel()
    try:
        # Await so the task is fully torn down before returning.
        await task
    except asyncio.CancelledError:
        pass
async def _audio_broadcast_loop(self) -> None:
    """Background loop: read frequency data from analyzer and broadcast to subscribers.

    Runs until cancelled. Each tick: snapshot subscribers under the lock,
    skip if idle or no new frame, otherwise serialize once and fan out.
    Failed sends drop the client.
    """
    from ..config import settings
    # FPS is read once at task start; changing the setting requires a
    # broadcast restart to take effect.
    interval = 1.0 / settings.visualizer_fps
    _last_data = None
    while True:
        try:
            async with self._lock:
                subscribers = list(self._visualizer_subscribers)
            if not subscribers or not self._audio_analyzer or not self._audio_analyzer.running:
                await asyncio.sleep(interval)
                continue
            data = self._audio_analyzer.get_frequency_data()
            # Identity check: the analyzer publishes a brand-new dict per
            # frame, so `is` cheaply detects "no new frame since last send".
            if data is None or data is _last_data:
                await asyncio.sleep(interval)
                continue
            _last_data = data
            # Pre-serialize once for all subscribers (avoids per-client JSON encoding)
            text = json.dumps({"type": "audio_data", "data": data}, separators=(',', ':'))
            async def _send(ws: WebSocket) -> WebSocket | None:
                # Returns the socket on failure so gather() yields the losers.
                try:
                    await ws.send_text(text)
                    return None
                except Exception:
                    return ws
            results = await asyncio.gather(*(_send(ws) for ws in subscribers))
            failed = [ws for ws in results if ws is not None]
            if failed:
                # Unsubscribe under the lock, then disconnect outside it
                # (disconnect() re-acquires the lock itself).
                async with self._lock:
                    for ws in failed:
                        self._visualizer_subscribers.discard(ws)
                for ws in failed:
                    await self.disconnect(ws)
            await asyncio.sleep(interval)
        except asyncio.CancelledError:
            break
        except Exception as e:
            # Keep the loop alive on unexpected errors; log and retry.
            logger.error("Error in audio broadcast: %s", e)
            await asyncio.sleep(interval)
def status_changed( def status_changed(
self, old: dict[str, Any] | None, new: dict[str, Any] self, old: dict[str, Any] | None, new: dict[str, Any]
) -> bool: ) -> bool:

View File

@@ -628,8 +628,39 @@ h1 {
} }
@keyframes vinylSpin { @keyframes vinylSpin {
from { transform: rotate(var(--vinyl-offset, 0deg)); } from { transform: rotate(var(--vinyl-offset, 0deg)) scale(var(--vinyl-scale, 1)); }
to { transform: rotate(calc(var(--vinyl-offset, 0deg) + 360deg)); } to { transform: rotate(calc(var(--vinyl-offset, 0deg) + 360deg)) scale(var(--vinyl-scale, 1)); }
}
/* Audio Spectrogram Visualization */
.spectrogram-canvas {
position: absolute;
bottom: -4px;
left: 50%;
transform: translateX(-50%);
z-index: 2;
pointer-events: none;
opacity: 0;
transition: opacity 0.3s ease;
border-radius: 0 0 8px 8px;
}
.visualizer-active .spectrogram-canvas {
opacity: 1;
}
.visualizer-active #album-art {
transition: transform 0.08s ease-out;
}
.visualizer-active .album-art-glow {
transition: opacity 0.08s ease-out;
}
/* Adapt spectrogram for vinyl mode */
.album-art-container.vinyl .spectrogram-canvas {
bottom: -10px;
border-radius: 0 0 50% 50%;
} }
.track-info { .track-info {
@@ -1087,6 +1118,74 @@ button:disabled {
margin-bottom: 1rem; margin-bottom: 1rem;
} }
.audio-device-selector {
display: flex;
flex-direction: column;
gap: 0.75rem;
}
.audio-device-selector label {
display: flex;
flex-direction: column;
gap: 0.375rem;
}
.audio-device-selector label span {
font-size: 0.8125rem;
font-weight: 500;
color: var(--text-secondary);
}
.audio-device-selector select {
padding: 0.5rem 0.625rem;
border-radius: 8px;
border: 1px solid var(--border);
background: var(--bg-tertiary);
color: var(--text-primary);
font-size: 0.875rem;
cursor: pointer;
}
.audio-device-status {
font-size: 0.75rem;
display: flex;
align-items: center;
gap: 0.375rem;
}
.audio-device-status::before {
content: '';
display: inline-block;
width: 6px;
height: 6px;
border-radius: 50%;
}
.audio-device-status.active {
color: var(--accent);
}
.audio-device-status.active::before {
background: var(--accent);
}
.audio-device-status.available {
color: var(--text-secondary);
}
.audio-device-status.available::before {
background: var(--text-muted);
}
.audio-device-status.unavailable {
color: var(--text-muted);
}
.audio-device-status.unavailable::before {
background: var(--text-muted);
opacity: 0.5;
}
/* Link card in Quick Access */ /* Link card in Quick Access */
.link-card { .link-card {
text-decoration: none; text-decoration: none;

View File

@@ -131,6 +131,7 @@
<div class="album-art-container"> <div class="album-art-container">
<img id="album-art-glow" class="album-art-glow" src="data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 300 300'%3E%3Crect fill='%23282828' width='300' height='300'/%3E%3C/svg%3E" alt="" aria-hidden="true"> <img id="album-art-glow" class="album-art-glow" src="data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 300 300'%3E%3Crect fill='%23282828' width='300' height='300'/%3E%3C/svg%3E" alt="" aria-hidden="true">
<img id="album-art" src="data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 300 300'%3E%3Crect fill='%23282828' width='300' height='300'/%3E%3Cpath fill='%236a6a6a' d='M150 80c-38.66 0-70 31.34-70 70s31.34 70 70 70 70-31.34 70-70-31.34-70-70-70zm0 20c27.614 0 50 22.386 50 50s-22.386 50-50 50-50-22.386-50-50 22.386-50 50-50zm0 30a20 20 0 100 40 20 20 0 000-40z'/%3E%3C/svg%3E" alt="Album Art"> <img id="album-art" src="data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 300 300'%3E%3Crect fill='%23282828' width='300' height='300'/%3E%3Cpath fill='%236a6a6a' d='M150 80c-38.66 0-70 31.34-70 70s31.34 70 70 70 70-31.34 70-70-31.34-70-70-70zm0 20c27.614 0 50 22.386 50 50s-22.386 50-50 50-50-22.386-50-50 22.386-50 50-50zm0 30a20 20 0 100 40 20 20 0 000-40z'/%3E%3C/svg%3E" alt="Album Art">
<canvas id="spectrogram-canvas" class="spectrogram-canvas" width="300" height="64"></canvas>
</div> </div>
<div class="player-details"> <div class="player-details">
@@ -189,6 +190,9 @@
<button class="vinyl-toggle-btn" onclick="toggleVinylMode()" id="vinylToggle" data-i18n-title="player.vinyl" title="Vinyl mode"> <button class="vinyl-toggle-btn" onclick="toggleVinylMode()" id="vinylToggle" data-i18n-title="player.vinyl" title="Vinyl mode">
<svg viewBox="0 0 24 24" width="16" height="16"><circle cx="12" cy="12" r="10" fill="none" stroke="currentColor" stroke-width="1.5"/><circle cx="12" cy="12" r="3" fill="currentColor"/><circle cx="12" cy="12" r="6.5" fill="none" stroke="currentColor" stroke-width="0.5" opacity="0.5"/></svg> <svg viewBox="0 0 24 24" width="16" height="16"><circle cx="12" cy="12" r="10" fill="none" stroke="currentColor" stroke-width="1.5"/><circle cx="12" cy="12" r="3" fill="currentColor"/><circle cx="12" cy="12" r="6.5" fill="none" stroke="currentColor" stroke-width="0.5" opacity="0.5"/></svg>
</button> </button>
<button class="vinyl-toggle-btn" onclick="toggleVisualizer()" id="visualizerToggle" data-i18n-title="player.visualizer" title="Audio visualizer" style="display:none">
<svg viewBox="0 0 24 24" width="16" height="16"><path fill="currentColor" d="M3 18h2v-8H3v8zm4 0h2V6H7v12zm4 0h2V2h-2v16zm4 0h2v-6h-2v6zm4 0h2V9h-2v9z"/></svg>
</button>
</div> </div>
</div> </div>
</div> </div>
@@ -273,6 +277,24 @@
<!-- Settings Section (Scripts, Callbacks, Links management) --> <!-- Settings Section (Scripts, Callbacks, Links management) -->
<div class="settings-container" data-tab-content="settings" role="tabpanel" id="panel-settings"> <div class="settings-container" data-tab-content="settings" role="tabpanel" id="panel-settings">
<details class="settings-section" open id="audioDeviceSection" style="display: none;">
<summary data-i18n="settings.section.audio">Audio</summary>
<div class="settings-section-content">
<p class="settings-section-description" data-i18n="settings.audio.description">
Select which audio output device to capture for the visualizer.
</p>
<div class="audio-device-selector">
<label>
<span data-i18n="settings.audio.device">Loopback Device</span>
<select id="audioDeviceSelect" onchange="onAudioDeviceChanged()">
<option value="" data-i18n="settings.audio.auto">Auto-detect</option>
</select>
</label>
<div class="audio-device-status" id="audioDeviceStatus"></div>
</div>
</div>
</details>
<details class="settings-section" open> <details class="settings-section" open>
<summary data-i18n="settings.section.scripts">Scripts</summary> <summary data-i18n="settings.section.scripts">Scripts</summary>
<div class="settings-section-content"> <div class="settings-section-content">

View File

@@ -318,6 +318,260 @@
} }
} }
// Audio Visualizer
// Client-side state for the spectrogram overlay. The server only pushes
// "audio_data" frames after this client opts in via "enable_visualizer".
let visualizerEnabled = localStorage.getItem('visualizerEnabled') === 'true'; // user toggle (persisted)
let visualizerAvailable = false;   // server-side capture is up (set by checkVisualizerAvailability)
let visualizerCtx = null;          // 2D context of the spectrogram canvas
let visualizerAnimFrame = null;    // requestAnimationFrame handle for the render loop
let frequencyData = null;          // latest {frequencies, bass} frame from the server
let smoothedFrequencies = null;    // exponentially-smoothed bar heights
const VISUALIZER_SMOOTHING = 0.65; // 0..1; higher = smoother/slower bars
// Ask the backend whether audio capture is up, then show or hide the
// visualizer toggle button to match.
async function checkVisualizerAvailability() {
    try {
        const token = localStorage.getItem('media_server_token');
        const resp = await fetch('/api/media/visualizer/status', {
            headers: { 'Authorization': `Bearer ${token}` }
        });
        if (resp.ok) {
            const status = await resp.json();
            visualizerAvailable = status.available && status.running;
        }
    } catch (err) {
        visualizerAvailable = false;
    }
    const toggleBtn = document.getElementById('visualizerToggle');
    if (toggleBtn) {
        toggleBtn.style.display = visualizerAvailable ? '' : 'none';
    }
}
// Flip the user preference, persist it, and apply the resulting state.
function toggleVisualizer() {
    visualizerEnabled = !visualizerEnabled;
    localStorage.setItem('visualizerEnabled', visualizerEnabled);
    applyVisualizerMode();
}
// Sync DOM classes, the WebSocket subscription, and the render loop with
// the current visualizerEnabled/visualizerAvailable flags.
function applyVisualizerMode() {
    const container = document.querySelector('.album-art-container');
    if (!container) return;
    const btn = document.getElementById('visualizerToggle');
    const active = visualizerEnabled && visualizerAvailable;
    const wsOpen = ws && ws.readyState === WebSocket.OPEN;
    if (active) {
        container.classList.add('visualizer-active');
        if (btn) btn.classList.add('active');
        if (wsOpen) ws.send(JSON.stringify({ type: 'enable_visualizer' }));
        initVisualizerCanvas();
        startVisualizerRender();
    } else {
        container.classList.remove('visualizer-active');
        if (btn) btn.classList.remove('active');
        if (wsOpen) ws.send(JSON.stringify({ type: 'disable_visualizer' }));
        stopVisualizerRender();
    }
}
// Fix the spectrogram canvas backing-store size and cache its 2D context.
function initVisualizerCanvas() {
    const canvas = document.getElementById('spectrogram-canvas');
    if (!canvas) return;
    canvas.width = 300;
    canvas.height = 64;
    visualizerCtx = canvas.getContext('2d');
}
// Kick off the rAF loop. renderVisualizerFrame re-schedules itself and
// stores the handle in visualizerAnimFrame, so a second call is a no-op.
function startVisualizerRender() {
    if (visualizerAnimFrame) return;
    renderVisualizerFrame();
}
// Cancel the render loop, wipe the canvas, and undo any bass-pulse styling
// applied to the album art and glow so they return to their CSS defaults.
function stopVisualizerRender() {
    if (visualizerAnimFrame) {
        cancelAnimationFrame(visualizerAnimFrame);
        visualizerAnimFrame = null;
    }
    const canvas = document.getElementById('spectrogram-canvas');
    if (visualizerCtx && canvas) {
        visualizerCtx.clearRect(0, 0, canvas.width, canvas.height);
    }
    // Reset album art / vinyl pulse
    const art = document.getElementById('album-art');
    if (art) {
        art.style.transform = '';
        art.style.removeProperty('--vinyl-scale');
    }
    const glow = document.getElementById('album-art-glow');
    if (glow) glow.style.opacity = '';
    // Drop stale frames so a restart begins from a clean state.
    frequencyData = null;
    smoothedFrequencies = null;
}
// Per-frame renderer: draws the spectrum bars and pulses the album art.
// Re-schedules itself via requestAnimationFrame; drawing is skipped until
// the first audio_data frame arrives from the server.
function renderVisualizerFrame() {
    visualizerAnimFrame = requestAnimationFrame(renderVisualizerFrame);
    const canvas = document.getElementById('spectrogram-canvas');
    if (!frequencyData || !visualizerCtx || !canvas) return;
    const bins = frequencyData.frequencies;
    const numBins = bins.length;
    const w = canvas.width;
    const h = canvas.height;
    const gap = 2;
    const barWidth = (w / numBins) - gap;
    // NOTE(review): `accent + '30'` below assumes --accent is a 6-digit hex
    // color (so appending '30' yields #rrggbb + alpha). Verify that themes
    // never set --accent as rgb()/hsl().
    const accent = getComputedStyle(document.documentElement)
        .getPropertyValue('--accent').trim();
    // Smooth transitions
    if (!smoothedFrequencies || smoothedFrequencies.length !== numBins) {
        smoothedFrequencies = new Array(numBins).fill(0);
    }
    // Exponential moving average toward the latest frame.
    for (let i = 0; i < numBins; i++) {
        smoothedFrequencies[i] = smoothedFrequencies[i] * VISUALIZER_SMOOTHING
            + bins[i] * (1 - VISUALIZER_SMOOTHING);
    }
    visualizerCtx.clearRect(0, 0, w, h);
    for (let i = 0; i < numBins; i++) {
        const barHeight = Math.max(1, smoothedFrequencies[i] * h);
        const x = i * (barWidth + gap) + gap / 2;
        const y = h - barHeight;
        // Vertical gradient: opaque accent at the bar tip, fading at the base.
        const grad = visualizerCtx.createLinearGradient(x, y, x, h);
        grad.addColorStop(0, accent);
        grad.addColorStop(1, accent + '30');
        visualizerCtx.fillStyle = grad;
        visualizerCtx.beginPath();
        visualizerCtx.roundRect(x, y, barWidth, barHeight, 1.5);
        visualizerCtx.fill();
    }
    // Album art / vinyl pulse based on bass energy
    const bass = frequencyData.bass || 0;
    const scale = 1 + bass * 0.03;
    const art = document.getElementById('album-art');
    if (art) {
        if (vinylMode) {
            // Use CSS custom property so it composes with the rotation animation
            art.style.setProperty('--vinyl-scale', scale);
        } else {
            art.style.transform = `scale(${scale})`;
        }
    }
    const glow = document.getElementById('album-art-glow');
    if (glow) {
        glow.style.opacity = (0.5 + bass * 0.3).toFixed(2);
    }
}
// Audio device selection
// Populate the Settings tab's loopback-device dropdown and status badge.
// Hides the whole section when the visualizer is unavailable and no
// devices exist (e.g. optional deps not installed server-side).
async function loadAudioDevices() {
    const section = document.getElementById('audioDeviceSection');
    const select = document.getElementById('audioDeviceSelect');
    if (!section || !select) return;
    try {
        const token = localStorage.getItem('media_server_token');
        // Fetch devices and current status in parallel
        const [devicesResp, statusResp] = await Promise.all([
            fetch('/api/media/visualizer/devices', {
                headers: { 'Authorization': `Bearer ${token}` }
            }),
            fetch('/api/media/visualizer/status', {
                headers: { 'Authorization': `Bearer ${token}` }
            })
        ]);
        if (!devicesResp.ok || !statusResp.ok) return;
        const devices = await devicesResp.json();
        const status = await statusResp.json();
        if (!status.available && devices.length === 0) {
            section.style.display = 'none';
            return;
        }
        // Show section
        section.style.display = '';
        // Populate dropdown (keep auto-detect as first option)
        while (select.options.length > 1) select.remove(1);
        for (const dev of devices) {
            const opt = document.createElement('option');
            opt.value = dev.name;
            opt.textContent = dev.name;
            select.appendChild(opt);
        }
        // Select current device
        if (status.current_device) {
            for (let i = 0; i < select.options.length; i++) {
                if (select.options[i].value === status.current_device) {
                    select.selectedIndex = i;
                    break;
                }
            }
        }
        // Update status indicator
        updateAudioDeviceStatus(status);
    } catch (e) {
        // Silently hide if unavailable
        section.style.display = 'none';
    }
}
// Reflect the visualizer capture state in the settings status badge
// (CSS class drives the colored dot; text comes from the locale files).
function updateAudioDeviceStatus(status) {
    const el = document.getElementById('audioDeviceStatus');
    if (!el) return;
    let cls, key;
    if (status.running) {
        cls = 'audio-device-status active';
        key = 'settings.audio.status_active';
    } else if (status.available) {
        cls = 'audio-device-status available';
        key = 'settings.audio.status_available';
    } else {
        cls = 'audio-device-status unavailable';
        key = 'settings.audio.status_unavailable';
    }
    el.className = cls;
    el.textContent = t(key);
}
// Handler for the device <select>: POST the chosen loopback device to the
// backend (empty value = auto-detect), then refresh status and re-apply
// the visualizer in case capture restarted on the new device.
async function onAudioDeviceChanged() {
    const select = document.getElementById('audioDeviceSelect');
    if (!select) return;
    // Empty string (the "Auto-detect" option) maps to null for the API.
    const deviceName = select.value || null;
    const token = localStorage.getItem('media_server_token');
    try {
        const resp = await fetch('/api/media/visualizer/device', {
            method: 'POST',
            headers: {
                'Authorization': `Bearer ${token}`,
                'Content-Type': 'application/json'
            },
            body: JSON.stringify({ device_name: deviceName })
        });
        if (resp.ok) {
            const result = await resp.json();
            updateAudioDeviceStatus(result);
            // Re-check visualizer availability since device changed
            await checkVisualizerAvailability();
            if (visualizerEnabled) applyVisualizerMode();
            showToast(t('settings.audio.device_changed'), 'success');
        } else {
            showToast(t('settings.audio.device_change_failed'), 'error');
        }
    } catch (e) {
        showToast(t('settings.audio.device_change_failed'), 'error');
    }
}
// Locale management // Locale management
let currentLocale = 'en'; let currentLocale = 'en';
let translations = {}; let translations = {};
@@ -574,6 +828,13 @@
// Initialize vinyl mode // Initialize vinyl mode
applyVinylMode(); applyVinylMode();
// Initialize audio visualizer
checkVisualizerAvailability().then(() => {
if (visualizerEnabled && visualizerAvailable) {
applyVisualizerMode();
}
});
// Initialize locale (async - loads JSON file) // Initialize locale (async - loads JSON file)
await initLocale(); await initLocale();
@@ -587,6 +848,7 @@
loadScriptsTable(); loadScriptsTable();
loadCallbacksTable(); loadCallbacksTable();
loadLinksTable(); loadLinksTable();
loadAudioDevices();
} else { } else {
showAuthForm(); showAuthForm();
} }
@@ -897,6 +1159,11 @@
loadCallbacksTable(); loadCallbacksTable();
loadLinksTable(); loadLinksTable();
loadHeaderLinks(); loadHeaderLinks();
loadAudioDevices();
// Re-enable visualizer subscription on reconnect
if (visualizerEnabled && visualizerAvailable) {
ws.send(JSON.stringify({ type: 'enable_visualizer' }));
}
}; };
ws.onmessage = (event) => { ws.onmessage = (event) => {
@@ -913,6 +1180,8 @@
loadHeaderLinks(); loadHeaderLinks();
loadLinksTable(); loadLinksTable();
displayQuickAccess(); displayQuickAccess();
} else if (msg.type === 'audio_data') {
frequencyData = msg.data;
} else if (msg.type === 'error') { } else if (msg.type === 'error') {
console.error('WebSocket error:', msg.message); console.error('WebSocket error:', msg.message);
} }

View File

@@ -23,6 +23,7 @@
"player.source": "Source:", "player.source": "Source:",
"player.unknown_source": "Unknown", "player.unknown_source": "Unknown",
"player.vinyl": "Vinyl mode", "player.vinyl": "Vinyl mode",
"player.visualizer": "Audio visualizer",
"state.playing": "Playing", "state.playing": "Playing",
"state.paused": "Paused", "state.paused": "Paused",
"state.stopped": "Stopped", "state.stopped": "Stopped",
@@ -123,6 +124,15 @@
"settings.section.scripts": "Scripts", "settings.section.scripts": "Scripts",
"settings.section.callbacks": "Callbacks", "settings.section.callbacks": "Callbacks",
"settings.section.links": "Links", "settings.section.links": "Links",
"settings.section.audio": "Audio",
"settings.audio.description": "Select which audio output device to capture for the visualizer.",
"settings.audio.device": "Loopback Device",
"settings.audio.auto": "Auto-detect",
"settings.audio.status_active": "Capturing audio",
"settings.audio.status_available": "Available, not capturing",
"settings.audio.status_unavailable": "Unavailable",
"settings.audio.device_changed": "Audio device changed",
"settings.audio.device_change_failed": "Failed to change audio device",
"quick_access.no_items": "No quick actions or links configured", "quick_access.no_items": "No quick actions or links configured",
"display.loading": "Loading monitors...", "display.loading": "Loading monitors...",
"display.error": "Failed to load monitors", "display.error": "Failed to load monitors",

View File

@@ -23,6 +23,7 @@
"player.source": "Источник:", "player.source": "Источник:",
"player.unknown_source": "Неизвестно", "player.unknown_source": "Неизвестно",
"player.vinyl": "Режим винила", "player.vinyl": "Режим винила",
"player.visualizer": "Аудио визуализатор",
"state.playing": "Воспроизведение", "state.playing": "Воспроизведение",
"state.paused": "Пауза", "state.paused": "Пауза",
"state.stopped": "Остановлено", "state.stopped": "Остановлено",
@@ -123,6 +124,15 @@
"settings.section.scripts": "Скрипты", "settings.section.scripts": "Скрипты",
"settings.section.callbacks": "Колбэки", "settings.section.callbacks": "Колбэки",
"settings.section.links": "Ссылки", "settings.section.links": "Ссылки",
"settings.section.audio": "Аудио",
"settings.audio.description": "Выберите аудиоустройство для захвата звука визуализатора.",
"settings.audio.device": "Устройство захвата",
"settings.audio.auto": "Автоопределение",
"settings.audio.status_active": "Захват аудио",
"settings.audio.status_available": "Доступно, не захватывает",
"settings.audio.status_unavailable": "Недоступно",
"settings.audio.device_changed": "Аудиоустройство изменено",
"settings.audio.device_change_failed": "Не удалось изменить аудиоустройство",
"quick_access.no_items": "Быстрые действия и ссылки не настроены", "quick_access.no_items": "Быстрые действия и ссылки не настроены",
"display.loading": "Загрузка мониторов...", "display.loading": "Загрузка мониторов...",
"display.error": "Не удалось загрузить мониторы", "display.error": "Не удалось загрузить мониторы",

View File

@@ -43,6 +43,10 @@ windows = [
"screen-brightness-control>=0.20.0", "screen-brightness-control>=0.20.0",
"monitorcontrol>=3.0.0", "monitorcontrol>=3.0.0",
] ]
visualizer = [
"soundcard>=0.4.0",
"numpy>=1.24.0",
]
dev = [ dev = [
"pytest>=7.0", "pytest>=7.0",
"pytest-asyncio>=0.21", "pytest-asyncio>=0.21",