Add value source test modal, auto-gain, brightness always-show, shared value streams

- Add real-time value source test: WebSocket endpoint streams get_value() at
  ~20Hz, frontend renders scrolling time-series chart with min/max/current stats
- Add auto-gain for audio value sources: rolling peak normalization with slow
  decay, sensitivity range increased to 0.1-20.0
- Always show brightness overlay on LED preview when brightness source is set
- Refactor ValueStreamManager to shared ref-counted streams (value streams
  produce scalars, not LED-count-dependent, so sharing is correct)
- Simplify acquire/release API: remove consumer_id parameter since streams
  are no longer consumer-dependent

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-26 15:48:45 +03:00
parent a164abe774
commit 88b3ecd5e1
18 changed files with 477 additions and 56 deletions

View File

@@ -1,8 +1,10 @@
"""Value source routes: CRUD for value sources.""" """Value source routes: CRUD for value sources."""
import asyncio
import secrets
from typing import Optional from typing import Optional
from fastapi import APIRouter, Depends, HTTPException, Query from fastapi import APIRouter, Depends, HTTPException, Query, WebSocket, WebSocketDisconnect
from wled_controller.api.auth import AuthRequired from wled_controller.api.auth import AuthRequired
from wled_controller.api.dependencies import ( from wled_controller.api.dependencies import (
@@ -10,6 +12,7 @@ from wled_controller.api.dependencies import (
get_processor_manager, get_processor_manager,
get_value_source_store, get_value_source_store,
) )
from wled_controller.config import get_config
from wled_controller.api.schemas.value_sources import ( from wled_controller.api.schemas.value_sources import (
ValueSourceCreate, ValueSourceCreate,
ValueSourceListResponse, ValueSourceListResponse,
@@ -43,6 +46,7 @@ def _to_response(source: ValueSource) -> ValueSourceResponse:
mode=d.get("mode"), mode=d.get("mode"),
sensitivity=d.get("sensitivity"), sensitivity=d.get("sensitivity"),
smoothing=d.get("smoothing"), smoothing=d.get("smoothing"),
auto_gain=d.get("auto_gain"),
schedule=d.get("schedule"), schedule=d.get("schedule"),
picture_source_id=d.get("picture_source_id"), picture_source_id=d.get("picture_source_id"),
scene_behavior=d.get("scene_behavior"), scene_behavior=d.get("scene_behavior"),
@@ -92,6 +96,7 @@ async def create_value_source(
schedule=data.schedule, schedule=data.schedule,
picture_source_id=data.picture_source_id, picture_source_id=data.picture_source_id,
scene_behavior=data.scene_behavior, scene_behavior=data.scene_behavior,
auto_gain=data.auto_gain,
) )
return _to_response(source) return _to_response(source)
except ValueError as e: except ValueError as e:
@@ -138,6 +143,7 @@ async def update_value_source(
schedule=data.schedule, schedule=data.schedule,
picture_source_id=data.picture_source_id, picture_source_id=data.picture_source_id,
scene_behavior=data.scene_behavior, scene_behavior=data.scene_behavior,
auto_gain=data.auto_gain,
) )
# Hot-reload running value streams # Hot-reload running value streams
pm.update_value_source(source_id) pm.update_value_source(source_id)
@@ -168,3 +174,68 @@ async def delete_value_source(
return {"status": "deleted", "id": source_id} return {"status": "deleted", "id": source_id}
except ValueError as e: except ValueError as e:
raise HTTPException(status_code=400, detail=str(e)) raise HTTPException(status_code=400, detail=str(e))
# ===== REAL-TIME VALUE SOURCE TEST WEBSOCKET =====


@router.websocket("/api/v1/value-sources/{source_id}/test/ws")
async def test_value_source_ws(
    websocket: WebSocket,
    source_id: str,
    token: str = Query(""),
):
    """WebSocket for real-time value source output. Auth via ?token=<api_key>.

    Acquires a ValueStream for the given source, polls get_value() at ~20 Hz,
    and streams {value: float} JSON to the client.
    """
    # Authenticate: constant-time compare of the query token against every
    # configured API key (WebSockets cannot send custom auth headers from JS).
    authenticated = False
    cfg = get_config()
    if token and cfg.auth.api_keys:
        for _label, api_key in cfg.auth.api_keys.items():
            if secrets.compare_digest(token, api_key):
                authenticated = True
                break
    if not authenticated:
        await websocket.close(code=4001, reason="Unauthorized")
        return

    # Validate the source exists before acquiring any resources.
    store = get_value_source_store()
    try:
        store.get_source(source_id)
    except ValueError as e:
        await websocket.close(code=4004, reason=str(e))
        return

    manager = get_processor_manager()
    vsm = manager.value_stream_manager
    if vsm is None:
        await websocket.close(code=4003, reason="Value stream manager not available")
        return

    # Acquire a (shared, ref-counted) value stream for this source.
    try:
        stream = vsm.acquire(source_id)
    except Exception as e:
        await websocket.close(code=4003, reason=str(e))
        return

    # From this point we hold a stream ref: every exit path must release it,
    # so accept() happens inside the try/finally (an exception during accept
    # would otherwise leak the ref and keep the shared stream alive forever).
    try:
        await websocket.accept()
        logger.info(f"Value source test WebSocket connected for {source_id}")
        while True:
            value = stream.get_value()
            await websocket.send_json({"value": round(value, 4)})
            await asyncio.sleep(0.05)  # ~20 Hz
    except WebSocketDisconnect:
        pass  # normal client disconnect
    except Exception as e:
        logger.error(f"Value source test WebSocket error for {source_id}: {e}")
    finally:
        vsm.release(source_id)
        logger.info(f"Value source test WebSocket disconnected for {source_id}")

View File

@@ -21,8 +21,9 @@ class ValueSourceCreate(BaseModel):
# audio fields # audio fields
audio_source_id: Optional[str] = Field(None, description="Mono audio source ID") audio_source_id: Optional[str] = Field(None, description="Mono audio source ID")
mode: Optional[str] = Field(None, description="Audio mode: rms|peak|beat") mode: Optional[str] = Field(None, description="Audio mode: rms|peak|beat")
sensitivity: Optional[float] = Field(None, description="Gain multiplier (0.1-5.0)", ge=0.1, le=5.0) sensitivity: Optional[float] = Field(None, description="Gain multiplier (0.1-20.0)", ge=0.1, le=20.0)
smoothing: Optional[float] = Field(None, description="Temporal smoothing (0.0-1.0)", ge=0.0, le=1.0) smoothing: Optional[float] = Field(None, description="Temporal smoothing (0.0-1.0)", ge=0.0, le=1.0)
auto_gain: Optional[bool] = Field(None, description="Auto-normalize audio levels to full range")
# adaptive fields # adaptive fields
schedule: Optional[list] = Field(None, description="Time-of-day schedule: [{time: 'HH:MM', value: 0.0-1.0}]") schedule: Optional[list] = Field(None, description="Time-of-day schedule: [{time: 'HH:MM', value: 0.0-1.0}]")
picture_source_id: Optional[str] = Field(None, description="Picture source ID for scene mode") picture_source_id: Optional[str] = Field(None, description="Picture source ID for scene mode")
@@ -44,8 +45,9 @@ class ValueSourceUpdate(BaseModel):
# audio fields # audio fields
audio_source_id: Optional[str] = Field(None, description="Mono audio source ID") audio_source_id: Optional[str] = Field(None, description="Mono audio source ID")
mode: Optional[str] = Field(None, description="Audio mode: rms|peak|beat") mode: Optional[str] = Field(None, description="Audio mode: rms|peak|beat")
sensitivity: Optional[float] = Field(None, description="Gain multiplier (0.1-5.0)", ge=0.1, le=5.0) sensitivity: Optional[float] = Field(None, description="Gain multiplier (0.1-20.0)", ge=0.1, le=20.0)
smoothing: Optional[float] = Field(None, description="Temporal smoothing (0.0-1.0)", ge=0.0, le=1.0) smoothing: Optional[float] = Field(None, description="Temporal smoothing (0.0-1.0)", ge=0.0, le=1.0)
auto_gain: Optional[bool] = Field(None, description="Auto-normalize audio levels to full range")
# adaptive fields # adaptive fields
schedule: Optional[list] = Field(None, description="Time-of-day schedule") schedule: Optional[list] = Field(None, description="Time-of-day schedule")
picture_source_id: Optional[str] = Field(None, description="Picture source ID for scene mode") picture_source_id: Optional[str] = Field(None, description="Picture source ID for scene mode")
@@ -68,6 +70,7 @@ class ValueSourceResponse(BaseModel):
mode: Optional[str] = Field(None, description="Audio mode") mode: Optional[str] = Field(None, description="Audio mode")
sensitivity: Optional[float] = Field(None, description="Gain multiplier") sensitivity: Optional[float] = Field(None, description="Gain multiplier")
smoothing: Optional[float] = Field(None, description="Temporal smoothing") smoothing: Optional[float] = Field(None, description="Temporal smoothing")
auto_gain: Optional[bool] = Field(None, description="Auto-normalize audio levels")
schedule: Optional[list] = Field(None, description="Time-of-day schedule") schedule: Optional[list] = Field(None, description="Time-of-day schedule")
picture_source_id: Optional[str] = Field(None, description="Picture source ID") picture_source_id: Optional[str] = Field(None, description="Picture source ID")
scene_behavior: Optional[str] = Field(None, description="Scene behavior") scene_behavior: Optional[str] = Field(None, description="Scene behavior")

View File

@@ -162,7 +162,7 @@ class KCTargetProcessor(TargetProcessor):
if self._brightness_vs_id and self._ctx.value_stream_manager: if self._brightness_vs_id and self._ctx.value_stream_manager:
try: try:
self._value_stream = self._ctx.value_stream_manager.acquire( self._value_stream = self._ctx.value_stream_manager.acquire(
self._brightness_vs_id, self._target_id self._brightness_vs_id
) )
except Exception as e: except Exception as e:
logger.warning(f"Failed to acquire value stream {self._brightness_vs_id}: {e}") logger.warning(f"Failed to acquire value stream {self._brightness_vs_id}: {e}")
@@ -207,7 +207,7 @@ class KCTargetProcessor(TargetProcessor):
# Release value stream # Release value stream
if self._value_stream is not None and self._ctx.value_stream_manager: if self._value_stream is not None and self._ctx.value_stream_manager:
try: try:
self._ctx.value_stream_manager.release(self._brightness_vs_id, self._target_id) self._ctx.value_stream_manager.release(self._brightness_vs_id)
except Exception as e: except Exception as e:
logger.warning(f"Error releasing value stream: {e}") logger.warning(f"Error releasing value stream: {e}")
self._value_stream = None self._value_stream = None
@@ -235,7 +235,7 @@ class KCTargetProcessor(TargetProcessor):
# Release old stream # Release old stream
if self._value_stream is not None and old_vs_id: if self._value_stream is not None and old_vs_id:
try: try:
vs_mgr.release(old_vs_id, self._target_id) vs_mgr.release(old_vs_id)
except Exception as e: except Exception as e:
logger.warning(f"Error releasing old value stream {old_vs_id}: {e}") logger.warning(f"Error releasing old value stream {old_vs_id}: {e}")
self._value_stream = None self._value_stream = None
@@ -243,7 +243,7 @@ class KCTargetProcessor(TargetProcessor):
# Acquire new stream # Acquire new stream
if vs_id: if vs_id:
try: try:
self._value_stream = vs_mgr.acquire(vs_id, self._target_id) self._value_stream = vs_mgr.acquire(vs_id)
except Exception as e: except Exception as e:
logger.warning(f"Failed to acquire value stream {vs_id}: {e}") logger.warning(f"Failed to acquire value stream {vs_id}: {e}")
self._value_stream = None self._value_stream = None

View File

@@ -109,6 +109,10 @@ class ProcessorManager:
def audio_capture_manager(self) -> AudioCaptureManager: def audio_capture_manager(self) -> AudioCaptureManager:
return self._audio_capture_manager return self._audio_capture_manager
@property
def value_stream_manager(self) -> Optional[ValueStreamManager]:
return self._value_stream_manager
@property @property
def metrics_history(self) -> MetricsHistory: def metrics_history(self) -> MetricsHistory:
return self._metrics_history return self._metrics_history

View File

@@ -158,6 +158,7 @@ class AudioValueStream(ValueStream):
smoothing: float = 0.3, smoothing: float = 0.3,
min_value: float = 0.0, min_value: float = 0.0,
max_value: float = 1.0, max_value: float = 1.0,
auto_gain: bool = False,
audio_capture_manager: Optional["AudioCaptureManager"] = None, audio_capture_manager: Optional["AudioCaptureManager"] = None,
audio_source_store: Optional["AudioSourceStore"] = None, audio_source_store: Optional["AudioSourceStore"] = None,
audio_template_store=None, audio_template_store=None,
@@ -168,6 +169,9 @@ class AudioValueStream(ValueStream):
self._smoothing = smoothing self._smoothing = smoothing
self._min = min_value self._min = min_value
self._max = max_value self._max = max_value
self._auto_gain = auto_gain
self._rolling_peak = 0.0 # tracks observed max raw audio value
self._rolling_decay = 0.995 # slow decay (~5-10s adaptation)
self._audio_capture_manager = audio_capture_manager self._audio_capture_manager = audio_capture_manager
self._audio_source_store = audio_source_store self._audio_source_store = audio_source_store
self._audio_template_store = audio_template_store self._audio_template_store = audio_template_store
@@ -237,6 +241,13 @@ class AudioValueStream(ValueStream):
return self._prev_value return self._prev_value
raw = self._extract_raw(analysis) raw = self._extract_raw(analysis)
# Auto-gain: normalize raw against rolling observed peak
if self._auto_gain:
self._rolling_peak = max(raw, self._rolling_peak * self._rolling_decay)
if self._rolling_peak > 0.001:
raw = raw / self._rolling_peak
raw = min(1.0, raw * self._sensitivity) raw = min(1.0, raw * self._sensitivity)
# Temporal smoothing # Temporal smoothing
@@ -284,12 +295,18 @@ class AudioValueStream(ValueStream):
return return
old_source_id = self._audio_source_id old_source_id = self._audio_source_id
old_auto_gain = self._auto_gain
self._audio_source_id = source.audio_source_id self._audio_source_id = source.audio_source_id
self._mode = source.mode self._mode = source.mode
self._sensitivity = source.sensitivity self._sensitivity = source.sensitivity
self._smoothing = source.smoothing self._smoothing = source.smoothing
self._min = source.min_value self._min = source.min_value
self._max = source.max_value self._max = source.max_value
self._auto_gain = source.auto_gain
# Reset rolling peak when auto-gain is toggled on
if self._auto_gain and not old_auto_gain:
self._rolling_peak = 0.0
# If audio source changed, re-resolve and swap capture stream # If audio source changed, re-resolve and swap capture stream
if source.audio_source_id != old_source_id: if source.audio_source_id != old_source_id:
@@ -525,15 +542,13 @@ class SceneValueStream(ValueStream):
# Manager # Manager
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
def _make_key(vs_id: str, consumer_id: str) -> str:
return f"{vs_id}:{consumer_id}"
class ValueStreamManager: class ValueStreamManager:
"""Owns running ValueStream instances, keyed by ``vs_id:consumer_id``. """Owns running ValueStream instances, shared and ref-counted by vs_id.
Each consumer (target processor) gets its own stream instance — Value streams produce scalars (not LED-count-dependent), so a single
no sharing or ref-counting needed since streams are cheap. stream instance is shared across all consumers that use the same
ValueSource. Ref-counting ensures the stream is stopped only when
the last consumer releases it.
""" """
def __init__( def __init__(
@@ -549,59 +564,66 @@ class ValueStreamManager:
self._audio_source_store = audio_source_store self._audio_source_store = audio_source_store
self._live_stream_manager = live_stream_manager self._live_stream_manager = live_stream_manager
self._audio_template_store = audio_template_store self._audio_template_store = audio_template_store
self._streams: Dict[str, ValueStream] = {} self._streams: Dict[str, ValueStream] = {} # vs_id → stream
self._ref_counts: Dict[str, int] = {} # vs_id → ref count
def acquire(self, vs_id: str, consumer_id: str) -> ValueStream: def acquire(self, vs_id: str) -> ValueStream:
"""Create and start a ValueStream for the given ValueSource. """Get or create a shared ValueStream for the given ValueSource.
Args: Increments the ref count. The stream is stopped only when all
vs_id: ID of the ValueSource config consumers have called :meth:`release`.
consumer_id: Unique consumer identifier (target_id)
Returns:
Running ValueStream instance
""" """
key = _make_key(vs_id, consumer_id) if vs_id in self._streams:
if key in self._streams: self._ref_counts[vs_id] += 1
return self._streams[key] logger.info(f"Shared value stream {vs_id} (refs={self._ref_counts[vs_id]})")
return self._streams[vs_id]
source = self._value_source_store.get_source(vs_id) source = self._value_source_store.get_source(vs_id)
stream = self._create_stream(source) stream = self._create_stream(source)
stream.start() stream.start()
self._streams[key] = stream self._streams[vs_id] = stream
logger.info(f"Acquired value stream {key} (type={source.source_type})") self._ref_counts[vs_id] = 1
logger.info(f"Acquired value stream {vs_id} (type={source.source_type})")
return stream return stream
def release(self, vs_id: str, consumer_id: str) -> None: def release(self, vs_id: str) -> None:
"""Stop and remove a ValueStream.""" """Decrement ref count; stop the stream when it reaches zero."""
key = _make_key(vs_id, consumer_id) if vs_id not in self._ref_counts:
stream = self._streams.pop(key, None) return
if stream:
stream.stop() self._ref_counts[vs_id] -= 1
logger.info(f"Released value stream {key}") refs = self._ref_counts[vs_id]
if refs <= 0:
stream = self._streams.pop(vs_id, None)
if stream:
stream.stop()
del self._ref_counts[vs_id]
logger.info(f"Released value stream {vs_id} (last ref)")
else:
logger.info(f"Released ref for value stream {vs_id} (refs={refs})")
def update_source(self, vs_id: str) -> None: def update_source(self, vs_id: str) -> None:
"""Hot-update all running streams that use the given ValueSource.""" """Hot-update the shared stream for the given ValueSource."""
try: try:
source = self._value_source_store.get_source(vs_id) source = self._value_source_store.get_source(vs_id)
except ValueError: except ValueError:
return return
prefix = f"{vs_id}:" stream = self._streams.get(vs_id)
for key, stream in self._streams.items(): if stream:
if key.startswith(prefix): stream.update_source(source)
stream.update_source(source) logger.debug(f"Updated value stream {vs_id}")
logger.debug(f"Updated running value streams for source {vs_id}")
def release_all(self) -> None: def release_all(self) -> None:
"""Stop and remove all managed streams. Called on shutdown.""" """Stop and remove all managed streams. Called on shutdown."""
for key, stream in self._streams.items(): for vs_id, stream in self._streams.items():
try: try:
stream.stop() stream.stop()
except Exception as e: except Exception as e:
logger.error(f"Error stopping value stream {key}: {e}") logger.error(f"Error stopping value stream {vs_id}: {e}")
self._streams.clear() self._streams.clear()
self._ref_counts.clear()
logger.info("Released all value streams") logger.info("Released all value streams")
def _create_stream(self, source: "ValueSource") -> ValueStream: def _create_stream(self, source: "ValueSource") -> ValueStream:
@@ -632,6 +654,7 @@ class ValueStreamManager:
smoothing=source.smoothing, smoothing=source.smoothing,
min_value=source.min_value, min_value=source.min_value,
max_value=source.max_value, max_value=source.max_value,
auto_gain=source.auto_gain,
audio_capture_manager=self._audio_capture_manager, audio_capture_manager=self._audio_capture_manager,
audio_source_store=self._audio_source_store, audio_source_store=self._audio_source_store,
audio_template_store=self._audio_template_store, audio_template_store=self._audio_template_store,

View File

@@ -136,7 +136,7 @@ class WledTargetProcessor(TargetProcessor):
if self._brightness_vs_id and self._ctx.value_stream_manager: if self._brightness_vs_id and self._ctx.value_stream_manager:
try: try:
self._value_stream = self._ctx.value_stream_manager.acquire( self._value_stream = self._ctx.value_stream_manager.acquire(
self._brightness_vs_id, self._target_id self._brightness_vs_id
) )
except Exception as e: except Exception as e:
logger.warning(f"Failed to acquire value stream {self._brightness_vs_id}: {e}") logger.warning(f"Failed to acquire value stream {self._brightness_vs_id}: {e}")
@@ -190,7 +190,7 @@ class WledTargetProcessor(TargetProcessor):
# Release value stream # Release value stream
if self._value_stream is not None and self._ctx.value_stream_manager: if self._value_stream is not None and self._ctx.value_stream_manager:
try: try:
self._ctx.value_stream_manager.release(self._brightness_vs_id, self._target_id) self._ctx.value_stream_manager.release(self._brightness_vs_id)
except Exception as e: except Exception as e:
logger.warning(f"Error releasing value stream: {e}") logger.warning(f"Error releasing value stream: {e}")
self._value_stream = None self._value_stream = None
@@ -270,7 +270,7 @@ class WledTargetProcessor(TargetProcessor):
# Release old stream # Release old stream
if self._value_stream is not None and old_vs_id: if self._value_stream is not None and old_vs_id:
try: try:
vs_mgr.release(old_vs_id, self._target_id) vs_mgr.release(old_vs_id)
except Exception as e: except Exception as e:
logger.warning(f"Error releasing old value stream {old_vs_id}: {e}") logger.warning(f"Error releasing old value stream {old_vs_id}: {e}")
self._value_stream = None self._value_stream = None
@@ -278,7 +278,7 @@ class WledTargetProcessor(TargetProcessor):
# Acquire new stream # Acquire new stream
if vs_id: if vs_id:
try: try:
self._value_stream = vs_mgr.acquire(vs_id, self._target_id) self._value_stream = vs_mgr.acquire(vs_id)
except Exception as e: except Exception as e:
logger.warning(f"Failed to acquire value stream {vs_id}: {e}") logger.warning(f"Failed to acquire value stream {vs_id}: {e}")
self._value_stream = None self._value_stream = None

View File

@@ -72,6 +72,51 @@
font-size: 0.9em; font-size: 0.9em;
} }
/* Value source test chart canvas */

/* Full-width scrolling chart surface; fixed 200px height matches the
   canvas sizing done in the JS render code. */
.vs-test-canvas {
    display: block;
    width: 100%;
    height: 200px;
    background: #111;
    border-radius: 6px;
}

/* Row of min/max/current readouts beneath the chart. */
.vs-test-stats {
    display: flex;
    gap: 20px;
    align-items: center;
    padding: 10px 0 0;
    font-family: monospace;
}

/* One label + value pair inside the stats row. */
.vs-test-stat {
    display: flex;
    align-items: center;
    gap: 6px;
}

.vs-test-stat-label {
    color: var(--text-muted, #888);
    font-size: 0.85em;
}

.vs-test-stat-value {
    font-weight: 600;
    min-width: 50px; /* keep the row from jittering as digit counts change */
}

/* Emphasized current-value readout. */
.vs-test-value-large {
    font-size: 1.3em;
    color: #4caf50;
}

/* Status line (e.g. "connecting…"/error) shown before data arrives. */
.vs-test-status {
    text-align: center;
    padding: 8px 0;
    color: var(--text-muted, #888);
    font-size: 0.9em;
}
@keyframes fadeIn { @keyframes fadeIn {
from { opacity: 0; } from { opacity: 0; }
to { opacity: 1; } to { opacity: 1; }

View File

@@ -120,6 +120,7 @@ import {
showValueSourceModal, closeValueSourceModal, saveValueSource, showValueSourceModal, closeValueSourceModal, saveValueSource,
editValueSource, cloneValueSource, deleteValueSource, onValueSourceTypeChange, editValueSource, cloneValueSource, deleteValueSource, onValueSourceTypeChange,
addSchedulePoint, addSchedulePoint,
testValueSource, closeTestValueSourceModal,
} from './features/value-sources.js'; } from './features/value-sources.js';
// Layer 5: calibration // Layer 5: calibration
@@ -360,6 +361,8 @@ Object.assign(window, {
deleteValueSource, deleteValueSource,
onValueSourceTypeChange, onValueSourceTypeChange,
addSchedulePoint, addSchedulePoint,
testValueSource,
closeTestValueSourceModal,
// calibration // calibration
showCalibration, showCalibration,

View File

@@ -868,7 +868,7 @@ export function createTargetCard(target, deviceMap, colorStripSourceMap, valueSo
</div> </div>
<div id="led-preview-panel-${target.id}" class="led-preview-panel" style="display:${ledPreviewWebSockets[target.id] ? '' : 'none'}"> <div id="led-preview-panel-${target.id}" class="led-preview-panel" style="display:${ledPreviewWebSockets[target.id] ? '' : 'none'}">
<canvas id="led-preview-canvas-${target.id}" class="led-preview-canvas"></canvas> <canvas id="led-preview-canvas-${target.id}" class="led-preview-canvas"></canvas>
<span id="led-preview-brightness-${target.id}" class="led-preview-brightness" style="display:none"></span> <span id="led-preview-brightness-${target.id}" class="led-preview-brightness" style="display:none"${bvsId ? ' data-has-bvs="1"' : ''}></span>
</div> </div>
<div class="card-actions"> <div class="card-actions">
${isProcessing ? ` ${isProcessing ? `
@@ -1071,11 +1071,11 @@ function connectLedPreviewWS(targetId) {
_ledPreviewLastFrame[targetId] = frame; _ledPreviewLastFrame[targetId] = frame;
const canvas = document.getElementById(`led-preview-canvas-${targetId}`); const canvas = document.getElementById(`led-preview-canvas-${targetId}`);
if (canvas) _renderLedStrip(canvas, frame); if (canvas) _renderLedStrip(canvas, frame);
// Show brightness label when below 100% // Show brightness label: always when a brightness source is set, otherwise only below 100%
const bLabel = document.getElementById(`led-preview-brightness-${targetId}`); const bLabel = document.getElementById(`led-preview-brightness-${targetId}`);
if (bLabel) { if (bLabel) {
const pct = Math.round(brightness / 255 * 100); const pct = Math.round(brightness / 255 * 100);
if (pct < 100) { if (pct < 100 || bLabel.dataset.hasBvs) {
bLabel.textContent = `${pct}%`; bLabel.textContent = `${pct}%`;
bLabel.style.display = ''; bLabel.style.display = '';
} else { } else {

View File

@@ -10,12 +10,12 @@
* This module manages the editor modal and API operations. * This module manages the editor modal and API operations.
*/ */
import { _cachedValueSources, set_cachedValueSources, _cachedAudioSources, _cachedStreams } from '../core/state.js'; import { _cachedValueSources, set_cachedValueSources, _cachedAudioSources, _cachedStreams, apiKey } from '../core/state.js';
import { fetchWithAuth, escapeHtml } from '../core/api.js'; import { API_BASE, fetchWithAuth, escapeHtml } from '../core/api.js';
import { t } from '../core/i18n.js'; import { t } from '../core/i18n.js';
import { showToast, showConfirm } from '../core/ui.js'; import { showToast, showConfirm } from '../core/ui.js';
import { Modal } from '../core/modal.js'; import { Modal } from '../core/modal.js';
import { getValueSourceIcon, ICON_CLONE, ICON_EDIT } from '../core/icons.js'; import { getValueSourceIcon, ICON_CLONE, ICON_EDIT, ICON_TEST } from '../core/icons.js';
import { loadPictureSources } from './streams.js'; import { loadPictureSources } from './streams.js';
export { getValueSourceIcon }; export { getValueSourceIcon };
@@ -38,6 +38,7 @@ class ValueSourceModal extends Modal {
mode: document.getElementById('value-source-mode').value, mode: document.getElementById('value-source-mode').value,
sensitivity: document.getElementById('value-source-sensitivity').value, sensitivity: document.getElementById('value-source-sensitivity').value,
smoothing: document.getElementById('value-source-smoothing').value, smoothing: document.getElementById('value-source-smoothing').value,
autoGain: document.getElementById('value-source-auto-gain').checked,
adaptiveMin: document.getElementById('value-source-adaptive-min-value').value, adaptiveMin: document.getElementById('value-source-adaptive-min-value').value,
adaptiveMax: document.getElementById('value-source-adaptive-max-value').value, adaptiveMax: document.getElementById('value-source-adaptive-max-value').value,
pictureSource: document.getElementById('value-source-picture-source').value, pictureSource: document.getElementById('value-source-picture-source').value,
@@ -80,6 +81,7 @@ export async function showValueSourceModal(editData) {
} else if (editData.source_type === 'audio') { } else if (editData.source_type === 'audio') {
_populateAudioSourceDropdown(editData.audio_source_id || ''); _populateAudioSourceDropdown(editData.audio_source_id || '');
document.getElementById('value-source-mode').value = editData.mode || 'rms'; document.getElementById('value-source-mode').value = editData.mode || 'rms';
document.getElementById('value-source-auto-gain').checked = !!editData.auto_gain;
_setSlider('value-source-sensitivity', editData.sensitivity ?? 1.0); _setSlider('value-source-sensitivity', editData.sensitivity ?? 1.0);
_setSlider('value-source-smoothing', editData.smoothing ?? 0.3); _setSlider('value-source-smoothing', editData.smoothing ?? 0.3);
_setSlider('value-source-audio-min-value', editData.min_value ?? 0); _setSlider('value-source-audio-min-value', editData.min_value ?? 0);
@@ -108,6 +110,7 @@ export async function showValueSourceModal(editData) {
document.getElementById('value-source-waveform').value = 'sine'; document.getElementById('value-source-waveform').value = 'sine';
_populateAudioSourceDropdown(''); _populateAudioSourceDropdown('');
document.getElementById('value-source-mode').value = 'rms'; document.getElementById('value-source-mode').value = 'rms';
document.getElementById('value-source-auto-gain').checked = false;
_setSlider('value-source-sensitivity', 1.0); _setSlider('value-source-sensitivity', 1.0);
_setSlider('value-source-smoothing', 0.3); _setSlider('value-source-smoothing', 0.3);
_setSlider('value-source-audio-min-value', 0); _setSlider('value-source-audio-min-value', 0);
@@ -181,6 +184,7 @@ export async function saveValueSource() {
} else if (sourceType === 'audio') { } else if (sourceType === 'audio') {
payload.audio_source_id = document.getElementById('value-source-audio-source').value; payload.audio_source_id = document.getElementById('value-source-audio-source').value;
payload.mode = document.getElementById('value-source-mode').value; payload.mode = document.getElementById('value-source-mode').value;
payload.auto_gain = document.getElementById('value-source-auto-gain').checked;
payload.sensitivity = parseFloat(document.getElementById('value-source-sensitivity').value); payload.sensitivity = parseFloat(document.getElementById('value-source-sensitivity').value);
payload.smoothing = parseFloat(document.getElementById('value-source-smoothing').value); payload.smoothing = parseFloat(document.getElementById('value-source-smoothing').value);
payload.min_value = parseFloat(document.getElementById('value-source-audio-min-value').value); payload.min_value = parseFloat(document.getElementById('value-source-audio-min-value').value);
@@ -272,6 +276,194 @@ export async function deleteValueSource(sourceId) {
} }
} }
// ── Value Source Test (real-time output chart) ────────────────

// Samples kept in the scrolling window (~10 s at the backend's ~20 Hz rate).
const VS_HISTORY_SIZE = 200;

// State for the single active test session (one test modal at a time).
let _testVsWs = null;                // open WebSocket, or null when idle
let _testVsAnimFrame = null;         // requestAnimationFrame handle for the render loop
let _testVsLatest = null;            // most recently received value
let _testVsHistory = [];             // rolling window of recent values
let _testVsMinObserved = Infinity;   // minimum observed this session
let _testVsMaxObserved = -Infinity;  // maximum observed this session

const testVsModal = new Modal('test-value-source-modal', { backdrop: true, lock: true });
/**
 * Open the value-source test modal and stream live output over WebSocket.
 *
 * Connects to /value-sources/{id}/test/ws (auth via ?token=), collects values
 * into a rolling history, and drives a requestAnimationFrame chart loop.
 *
 * @param {string} sourceId - ID of the value source to test.
 */
export function testValueSource(sourceId) {
    // Tear down any previous session first: re-invoking while a test is
    // already running would otherwise leak the old WebSocket and rAF loop.
    _cleanupVsTest();

    const statusEl = document.getElementById('vs-test-status');
    if (statusEl) {
        statusEl.textContent = t('value_source.test.connecting');
        statusEl.style.display = '';
    }

    // Reset chart state and stat readouts (guard each element lookup,
    // consistent with the statusEl handling above).
    _testVsLatest = null;
    _testVsHistory = [];
    _testVsMinObserved = Infinity;
    _testVsMaxObserved = -Infinity;
    for (const id of ['vs-test-current', 'vs-test-min', 'vs-test-max']) {
        const el = document.getElementById(id);
        if (el) el.textContent = '---';
    }

    testVsModal.open();

    // Size canvas to its container (after the modal is visible).
    const canvas = document.getElementById('vs-test-canvas');
    _sizeVsCanvas(canvas);

    // Connect WebSocket; auth is passed as ?token=<api_key>.
    const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
    const wsUrl = `${protocol}//${window.location.host}${API_BASE}/value-sources/${sourceId}/test/ws?token=${encodeURIComponent(apiKey)}`;
    try {
        _testVsWs = new WebSocket(wsUrl);
        _testVsWs.onopen = () => {
            if (statusEl) statusEl.style.display = 'none';
        };
        _testVsWs.onmessage = (event) => {
            try {
                const data = JSON.parse(event.data);
                _testVsLatest = data.value;
                _testVsHistory.push(data.value);
                if (_testVsHistory.length > VS_HISTORY_SIZE) {
                    _testVsHistory.shift();
                }
                if (data.value < _testVsMinObserved) _testVsMinObserved = data.value;
                if (data.value > _testVsMaxObserved) _testVsMaxObserved = data.value;
            } catch {
                // Ignore malformed frames; the next message resyncs.
            }
        };
        _testVsWs.onclose = () => {
            _testVsWs = null;
        };
        _testVsWs.onerror = () => {
            showToast(t('value_source.test.error'), 'error');
            _cleanupVsTest();
        };
    } catch {
        showToast(t('value_source.test.error'), 'error');
        _cleanupVsTest();
        return;
    }

    // Start the render loop (it stops itself once the modal closes).
    _testVsAnimFrame = requestAnimationFrame(_renderVsTestLoop);
}
/**
 * Close the value-source test modal and stop the live stream.
 * Safe to call when no test session is active.
 */
export function closeTestValueSourceModal() {
    // Stop streaming and rendering before dismissing the locked modal.
    _cleanupVsTest();
    testVsModal.forceClose();
}
/**
 * Tear down the current test session: cancel the render loop, close the
 * WebSocket without firing its onclose handler, and clear the latest sample.
 * History and min/max are intentionally kept until the next session resets them.
 */
function _cleanupVsTest() {
    // Halt the render loop first so no frame fires against a dead socket.
    if (_testVsAnimFrame) {
        cancelAnimationFrame(_testVsAnimFrame);
        _testVsAnimFrame = null;
    }
    const ws = _testVsWs;
    if (ws) {
        _testVsWs = null;
        // Detach onclose so a deliberate close doesn't run the handler.
        ws.onclose = null;
        ws.close();
    }
    _testVsLatest = null;
}
/**
 * Size the chart canvas to its container at the current device-pixel ratio
 * so the chart renders crisply on high-DPI displays.
 *
 * @param {HTMLCanvasElement} canvas - The chart canvas inside the modal.
 */
function _sizeVsCanvas(canvas) {
    const rect = canvas.parentElement.getBoundingClientRect();
    const dpr = window.devicePixelRatio || 1;
    // Backing store is scaled by dpr; drawing happens in CSS pixels via scale().
    canvas.width = rect.width * dpr;
    canvas.height = 200 * dpr;
    // Pin the CSS size explicitly. Without style.width the canvas lays out at
    // its backing-store width (rect.width * dpr) and overflows the container
    // on dpr > 1 screens — mirror the existing style.height handling.
    canvas.style.width = rect.width + 'px';
    canvas.style.height = '200px';
    // Map user-space coordinates to CSS pixels for any immediate draws.
    // (The render loop re-applies this each frame via setTransform.)
    canvas.getContext('2d').scale(dpr, dpr);
}
/**
 * Animation-frame driver: draw one chart frame, then re-schedule itself
 * for as long as the test modal remains open.
 */
function _renderVsTestLoop() {
    _renderVsChart();
    // Stop rescheduling once the modal has been closed.
    if (!testVsModal.isOpen) return;
    _testVsAnimFrame = requestAnimationFrame(_renderVsTestLoop);
}
/**
 * Draw one frame of the scrolling value chart and refresh the numeric
 * min/max/current readouts. Values are assumed to be in [0, 1] and are
 * mapped linearly onto the canvas height (1.0 at the top).
 */
function _renderVsChart() {
    const canvas = document.getElementById('vs-test-canvas');
    if (!canvas) return;
    const ctx = canvas.getContext('2d');
    const dpr = window.devicePixelRatio || 1;
    const width = canvas.width / dpr;
    const height = canvas.height / dpr;
    // Re-apply the DPR transform every frame so scaling never accumulates.
    ctx.setTransform(dpr, 0, 0, dpr, 0, 0);
    ctx.clearRect(0, 0, width, height);
    // Dashed guide lines at 0%, 50% and 100% of the value range.
    ctx.strokeStyle = 'rgba(255, 255, 255, 0.1)';
    ctx.setLineDash([4, 4]);
    ctx.lineWidth = 1;
    [0, 0.5, 1.0].forEach((frac) => {
        const gy = height - frac * height;
        ctx.beginPath();
        ctx.moveTo(0, gy);
        ctx.lineTo(width, gy);
        ctx.stroke();
    });
    ctx.setLineDash([]);
    // Y-axis labels.
    ctx.fillStyle = 'rgba(255, 255, 255, 0.3)';
    ctx.font = '10px monospace';
    ctx.textAlign = 'left';
    ctx.fillText('1.0', 4, 12);
    ctx.fillText('0.5', 4, height / 2 - 2);
    ctx.fillText('0.0', 4, height - 4);
    const samples = _testVsHistory;
    // Need at least two points to draw a line; skip the trace and the
    // readouts until then (matches the pre-data '---' placeholders).
    if (samples.length < 2) return;
    // Newest sample is pinned to the right edge; while the buffer is still
    // filling, the trace grows in from the right.
    const dx = width / (VS_HISTORY_SIZE - 1);
    const first = VS_HISTORY_SIZE - samples.length;
    const xAt = (i) => (first + i) * dx;
    const yAt = (i) => height - samples[i] * height;
    // Filled area under the trace.
    ctx.beginPath();
    ctx.moveTo(first * dx, height);
    samples.forEach((_, i) => ctx.lineTo(xAt(i), yAt(i)));
    ctx.lineTo(xAt(samples.length - 1), height);
    ctx.closePath();
    ctx.fillStyle = 'rgba(76, 175, 80, 0.15)';
    ctx.fill();
    // The trace itself, drawn on top of the fill.
    ctx.beginPath();
    ctx.moveTo(xAt(0), yAt(0));
    for (let i = 1; i < samples.length; i++) {
        ctx.lineTo(xAt(i), yAt(i));
    }
    ctx.strokeStyle = '#4caf50';
    ctx.lineWidth = 2;
    ctx.stroke();
    // Numeric readouts, shown as percentages.
    if (_testVsLatest !== null) {
        document.getElementById('vs-test-current').textContent = (_testVsLatest * 100).toFixed(1) + '%';
    }
    if (_testVsMinObserved !== Infinity) {
        document.getElementById('vs-test-min').textContent = (_testVsMinObserved * 100).toFixed(1) + '%';
    }
    if (_testVsMaxObserved !== -Infinity) {
        document.getElementById('vs-test-max').textContent = (_testVsMaxObserved * 100).toFixed(1) + '%';
    }
}
// ── Card rendering (used by streams.js) ─────────────────────── // ── Card rendering (used by streams.js) ───────────────────────
export function createValueSourceCard(src) { export function createValueSourceCard(src) {
@@ -320,6 +512,7 @@ export function createValueSourceCard(src) {
<div class="stream-card-props">${propsHtml}</div> <div class="stream-card-props">${propsHtml}</div>
${src.description ? `<div class="template-config" style="opacity:0.7;">${escapeHtml(src.description)}</div>` : ''} ${src.description ? `<div class="template-config" style="opacity:0.7;">${escapeHtml(src.description)}</div>` : ''}
<div class="template-card-actions"> <div class="template-card-actions">
<button class="btn btn-icon btn-secondary" onclick="testValueSource('${src.id}')" title="${t('value_source.test')}">${ICON_TEST}</button>
<button class="btn btn-icon btn-secondary" onclick="cloneValueSource('${src.id}')" title="${t('common.clone')}">${ICON_CLONE}</button> <button class="btn btn-icon btn-secondary" onclick="cloneValueSource('${src.id}')" title="${t('common.clone')}">${ICON_CLONE}</button>
<button class="btn btn-icon btn-secondary" onclick="editValueSource('${src.id}')" title="${t('common.edit')}">${ICON_EDIT}</button> <button class="btn btn-icon btn-secondary" onclick="editValueSource('${src.id}')" title="${t('common.edit')}">${ICON_EDIT}</button>
</div> </div>

View File

@@ -862,6 +862,9 @@
"value_source.mode.rms": "RMS (Volume)", "value_source.mode.rms": "RMS (Volume)",
"value_source.mode.peak": "Peak", "value_source.mode.peak": "Peak",
"value_source.mode.beat": "Beat", "value_source.mode.beat": "Beat",
"value_source.auto_gain": "Auto Gain:",
"value_source.auto_gain.hint": "Automatically normalize audio levels so output uses the full range, regardless of input volume",
"value_source.auto_gain.enable": "Enable auto-gain",
"value_source.sensitivity": "Sensitivity:", "value_source.sensitivity": "Sensitivity:",
"value_source.sensitivity.hint": "Gain multiplier for the audio signal (higher = more reactive)", "value_source.sensitivity.hint": "Gain multiplier for the audio signal (higher = more reactive)",
"value_source.smoothing": "Smoothing:", "value_source.smoothing": "Smoothing:",
@@ -893,6 +896,13 @@
"value_source.deleted": "Value source deleted", "value_source.deleted": "Value source deleted",
"value_source.delete.confirm": "Are you sure you want to delete this value source?", "value_source.delete.confirm": "Are you sure you want to delete this value source?",
"value_source.error.name_required": "Please enter a name", "value_source.error.name_required": "Please enter a name",
"value_source.test": "Test",
"value_source.test.title": "Test Value Source",
"value_source.test.connecting": "Connecting...",
"value_source.test.error": "Failed to connect",
"value_source.test.current": "Current",
"value_source.test.min": "Min",
"value_source.test.max": "Max",
"targets.brightness_vs": "Brightness Source:", "targets.brightness_vs": "Brightness Source:",
"targets.brightness_vs.hint": "Optional value source that dynamically controls brightness each frame (overrides device brightness)", "targets.brightness_vs.hint": "Optional value source that dynamically controls brightness each frame (overrides device brightness)",
"targets.brightness_vs.none": "None (device brightness)", "targets.brightness_vs.none": "None (device brightness)",

View File

@@ -862,6 +862,9 @@
"value_source.mode.rms": "RMS (Громкость)", "value_source.mode.rms": "RMS (Громкость)",
"value_source.mode.peak": "Пик", "value_source.mode.peak": "Пик",
"value_source.mode.beat": "Бит", "value_source.mode.beat": "Бит",
"value_source.auto_gain": "Авто-усиление:",
"value_source.auto_gain.hint": "Автоматически нормализует уровни звука, чтобы выходное значение использовало полный диапазон независимо от громкости входного сигнала",
"value_source.auto_gain.enable": "Включить авто-усиление",
"value_source.sensitivity": "Чувствительность:", "value_source.sensitivity": "Чувствительность:",
"value_source.sensitivity.hint": "Множитель усиления аудиосигнала (выше = более реактивный)", "value_source.sensitivity.hint": "Множитель усиления аудиосигнала (выше = более реактивный)",
"value_source.smoothing": "Сглаживание:", "value_source.smoothing": "Сглаживание:",
@@ -893,6 +896,13 @@
"value_source.deleted": "Источник значений удалён", "value_source.deleted": "Источник значений удалён",
"value_source.delete.confirm": "Удалить этот источник значений?", "value_source.delete.confirm": "Удалить этот источник значений?",
"value_source.error.name_required": "Введите название", "value_source.error.name_required": "Введите название",
"value_source.test": "Тест",
"value_source.test.title": "Тест источника значений",
"value_source.test.connecting": "Подключение...",
"value_source.test.error": "Не удалось подключиться",
"value_source.test.current": "Текущее",
"value_source.test.min": "Мин",
"value_source.test.max": "Макс",
"targets.brightness_vs": "Источник яркости:", "targets.brightness_vs": "Источник яркости:",
"targets.brightness_vs.hint": "Необязательный источник значений для динамического управления яркостью каждый кадр (переопределяет яркость устройства)", "targets.brightness_vs.hint": "Необязательный источник значений для динамического управления яркостью каждый кадр (переопределяет яркость устройства)",
"targets.brightness_vs.none": "Нет (яркость устройства)", "targets.brightness_vs.none": "Нет (яркость устройства)",

View File

@@ -862,6 +862,9 @@
"value_source.mode.rms": "RMS音量", "value_source.mode.rms": "RMS音量",
"value_source.mode.peak": "峰值", "value_source.mode.peak": "峰值",
"value_source.mode.beat": "节拍", "value_source.mode.beat": "节拍",
"value_source.auto_gain": "自动增益:",
"value_source.auto_gain.hint": "自动归一化音频电平,使输出使用完整范围,无论输入音量大小",
"value_source.auto_gain.enable": "启用自动增益",
"value_source.sensitivity": "灵敏度:", "value_source.sensitivity": "灵敏度:",
"value_source.sensitivity.hint": "音频信号的增益倍数(越高反应越灵敏)", "value_source.sensitivity.hint": "音频信号的增益倍数(越高反应越灵敏)",
"value_source.smoothing": "平滑:", "value_source.smoothing": "平滑:",
@@ -893,6 +896,13 @@
"value_source.deleted": "值源已删除", "value_source.deleted": "值源已删除",
"value_source.delete.confirm": "确定要删除此值源吗?", "value_source.delete.confirm": "确定要删除此值源吗?",
"value_source.error.name_required": "请输入名称", "value_source.error.name_required": "请输入名称",
"value_source.test": "测试",
"value_source.test.title": "测试值源",
"value_source.test.connecting": "连接中...",
"value_source.test.error": "连接失败",
"value_source.test.current": "当前",
"value_source.test.min": "最小",
"value_source.test.max": "最大",
"targets.brightness_vs": "亮度源:", "targets.brightness_vs": "亮度源:",
"targets.brightness_vs.hint": "可选的值源,每帧动态控制亮度(覆盖设备亮度)", "targets.brightness_vs.hint": "可选的值源,每帧动态控制亮度(覆盖设备亮度)",
"targets.brightness_vs.none": "无(设备亮度)", "targets.brightness_vs.none": "无(设备亮度)",

View File

@@ -45,6 +45,7 @@ class ValueSource:
"mode": None, "mode": None,
"sensitivity": None, "sensitivity": None,
"smoothing": None, "smoothing": None,
"auto_gain": None,
"schedule": None, "schedule": None,
"picture_source_id": None, "picture_source_id": None,
"scene_behavior": None, "scene_behavior": None,
@@ -93,6 +94,7 @@ class ValueSource:
smoothing=float(data.get("smoothing") or 0.3), smoothing=float(data.get("smoothing") or 0.3),
min_value=float(data.get("min_value") or 0.0), min_value=float(data.get("min_value") or 0.0),
max_value=float(data["max_value"]) if data.get("max_value") is not None else 1.0, max_value=float(data["max_value"]) if data.get("max_value") is not None else 1.0,
auto_gain=bool(data.get("auto_gain", False)),
) )
if source_type == "adaptive_time": if source_type == "adaptive_time":
@@ -171,10 +173,11 @@ class AudioValueSource(ValueSource):
audio_source_id: str = "" # references an audio source (mono or multichannel) audio_source_id: str = "" # references an audio source (mono or multichannel)
mode: str = "rms" # rms | peak | beat mode: str = "rms" # rms | peak | beat
sensitivity: float = 1.0 # gain multiplier (0.15.0) sensitivity: float = 1.0 # gain multiplier (0.120.0)
smoothing: float = 0.3 # temporal smoothing (0.01.0) smoothing: float = 0.3 # temporal smoothing (0.01.0)
min_value: float = 0.0 # minimum output (0.01.0) min_value: float = 0.0 # minimum output (0.01.0)
max_value: float = 1.0 # maximum output (0.01.0) max_value: float = 1.0 # maximum output (0.01.0)
auto_gain: bool = False # auto-normalize audio levels to full range
def to_dict(self) -> dict: def to_dict(self) -> dict:
d = super().to_dict() d = super().to_dict()
@@ -184,6 +187,7 @@ class AudioValueSource(ValueSource):
d["smoothing"] = self.smoothing d["smoothing"] = self.smoothing
d["min_value"] = self.min_value d["min_value"] = self.min_value
d["max_value"] = self.max_value d["max_value"] = self.max_value
d["auto_gain"] = self.auto_gain
return d return d

View File

@@ -105,6 +105,7 @@ class ValueSourceStore:
schedule: Optional[list] = None, schedule: Optional[list] = None,
picture_source_id: Optional[str] = None, picture_source_id: Optional[str] = None,
scene_behavior: Optional[str] = None, scene_behavior: Optional[str] = None,
auto_gain: Optional[bool] = None,
) -> ValueSource: ) -> ValueSource:
if not name or not name.strip(): if not name or not name.strip():
raise ValueError("Name is required") raise ValueError("Name is required")
@@ -144,6 +145,7 @@ class ValueSourceStore:
smoothing=smoothing if smoothing is not None else 0.3, smoothing=smoothing if smoothing is not None else 0.3,
min_value=min_value if min_value is not None else 0.0, min_value=min_value if min_value is not None else 0.0,
max_value=max_value if max_value is not None else 1.0, max_value=max_value if max_value is not None else 1.0,
auto_gain=bool(auto_gain) if auto_gain is not None else False,
) )
elif source_type == "adaptive_time": elif source_type == "adaptive_time":
schedule_data = schedule or [] schedule_data = schedule or []
@@ -191,6 +193,7 @@ class ValueSourceStore:
schedule: Optional[list] = None, schedule: Optional[list] = None,
picture_source_id: Optional[str] = None, picture_source_id: Optional[str] = None,
scene_behavior: Optional[str] = None, scene_behavior: Optional[str] = None,
auto_gain: Optional[bool] = None,
) -> ValueSource: ) -> ValueSource:
if source_id not in self._sources: if source_id not in self._sources:
raise ValueError(f"Value source not found: {source_id}") raise ValueError(f"Value source not found: {source_id}")
@@ -231,6 +234,8 @@ class ValueSourceStore:
source.min_value = min_value source.min_value = min_value
if max_value is not None: if max_value is not None:
source.max_value = max_value source.max_value = max_value
if auto_gain is not None:
source.auto_gain = auto_gain
elif isinstance(source, AdaptiveValueSource): elif isinstance(source, AdaptiveValueSource):
if schedule is not None: if schedule is not None:
if source.source_type == "adaptive_time" and len(schedule) < 2: if source.source_type == "adaptive_time" and len(schedule) < 2:

View File

@@ -127,6 +127,7 @@
{% include 'modals/audio-template.html' %} {% include 'modals/audio-template.html' %}
{% include 'modals/test-audio-template.html' %} {% include 'modals/test-audio-template.html' %}
{% include 'modals/value-source-editor.html' %} {% include 'modals/value-source-editor.html' %}
{% include 'modals/test-value-source.html' %}
{% include 'partials/tutorial-overlay.html' %} {% include 'partials/tutorial-overlay.html' %}
{% include 'partials/image-lightbox.html' %} {% include 'partials/image-lightbox.html' %}

View File

@@ -0,0 +1,27 @@
<!-- Test Value Source Modal -->
<div id="test-value-source-modal" class="modal" role="dialog" aria-modal="true" aria-labelledby="test-value-source-modal-title">
<div class="modal-content">
<div class="modal-header">
<h2 id="test-value-source-modal-title" data-i18n="value_source.test.title">Test Value Source</h2>
<button class="modal-close-btn" onclick="closeTestValueSourceModal()" title="Close" data-i18n-aria-label="aria.close">&#x2715;</button>
</div>
<div class="modal-body">
<canvas id="vs-test-canvas" class="vs-test-canvas"></canvas>
<div class="vs-test-stats">
<span class="vs-test-stat vs-test-stat-current">
<span class="vs-test-stat-label" data-i18n="value_source.test.current">Current</span>
<span class="vs-test-stat-value vs-test-value-large" id="vs-test-current">---</span>
</span>
<span class="vs-test-stat">
<span class="vs-test-stat-label" data-i18n="value_source.test.min">Min</span>
<span class="vs-test-stat-value" id="vs-test-min">---</span>
</span>
<span class="vs-test-stat">
<span class="vs-test-stat-label" data-i18n="value_source.test.max">Max</span>
<span class="vs-test-stat-value" id="vs-test-max">---</span>
</span>
</div>
<div id="vs-test-status" class="vs-test-status" data-i18n="value_source.test.connecting">Connecting...</div>
</div>
</div>
</div>

View File

@@ -123,13 +123,25 @@
</select> </select>
</div> </div>
<div class="form-group">
<div class="label-row">
<label for="value-source-auto-gain" data-i18n="value_source.auto_gain">Auto Gain:</label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?" data-i18n-aria-label="aria.hint">?</button>
</div>
<small class="input-hint" style="display:none" data-i18n="value_source.auto_gain.hint">Automatically normalize audio levels so output uses the full range, regardless of input volume</small>
<label class="toggle-label">
<input type="checkbox" id="value-source-auto-gain">
<span data-i18n="value_source.auto_gain.enable">Enable auto-gain</span>
</label>
</div>
<div class="form-group"> <div class="form-group">
<div class="label-row"> <div class="label-row">
<label for="value-source-sensitivity"><span data-i18n="value_source.sensitivity">Sensitivity:</span> <span id="value-source-sensitivity-display">1.0</span></label> <label for="value-source-sensitivity"><span data-i18n="value_source.sensitivity">Sensitivity:</span> <span id="value-source-sensitivity-display">1.0</span></label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?" data-i18n-aria-label="aria.hint">?</button> <button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?" data-i18n-aria-label="aria.hint">?</button>
</div> </div>
<small class="input-hint" style="display:none" data-i18n="value_source.sensitivity.hint">Gain multiplier for the audio signal (higher = more reactive)</small> <small class="input-hint" style="display:none" data-i18n="value_source.sensitivity.hint">Gain multiplier for the audio signal (higher = more reactive)</small>
<input type="range" id="value-source-sensitivity" min="0.1" max="5" step="0.1" value="1.0" <input type="range" id="value-source-sensitivity" min="0.1" max="20" step="0.1" value="1.0"
oninput="document.getElementById('value-source-sensitivity-display').textContent = this.value"> oninput="document.getElementById('value-source-sensitivity-display').textContent = this.value">
</div> </div>