Add adaptive brightness value source with time-of-day and scene modes

New "adaptive" value source type that automatically adjusts brightness
based on external conditions. Two sub-modes: time-of-day (schedule-based
interpolation with midnight wrap) and scene brightness (frame luminance
analysis via numpy BT.601 subsampling with EMA smoothing).

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-24 15:14:30 +03:00
parent 48651f0a4e
commit d339dd3f90
11 changed files with 643 additions and 19 deletions

View File

@@ -43,6 +43,10 @@ def _to_response(source: ValueSource) -> ValueSourceResponse:
mode=d.get("mode"),
sensitivity=d.get("sensitivity"),
smoothing=d.get("smoothing"),
adaptive_mode=d.get("adaptive_mode"),
schedule=d.get("schedule"),
picture_source_id=d.get("picture_source_id"),
scene_behavior=d.get("scene_behavior"),
description=d.get("description"),
created_at=source.created_at,
updated_at=source.updated_at,
@@ -86,6 +90,10 @@ async def create_value_source(
sensitivity=data.sensitivity,
smoothing=data.smoothing,
description=data.description,
adaptive_mode=data.adaptive_mode,
schedule=data.schedule,
picture_source_id=data.picture_source_id,
scene_behavior=data.scene_behavior,
)
return _to_response(source)
except ValueError as e:
@@ -129,6 +137,10 @@ async def update_value_source(
sensitivity=data.sensitivity,
smoothing=data.smoothing,
description=data.description,
adaptive_mode=data.adaptive_mode,
schedule=data.schedule,
picture_source_id=data.picture_source_id,
scene_behavior=data.scene_behavior,
)
# Hot-reload running value streams
pm.update_value_source(source_id)

View File

@@ -10,7 +10,7 @@ class ValueSourceCreate(BaseModel):
"""Request to create a value source."""
name: str = Field(description="Source name", min_length=1, max_length=100)
source_type: Literal["static", "animated", "audio"] = Field(description="Source type")
source_type: Literal["static", "animated", "audio", "adaptive"] = Field(description="Source type")
# static fields
value: Optional[float] = Field(None, description="Constant value (0.0-1.0)", ge=0.0, le=1.0)
# animated fields
@@ -23,6 +23,11 @@ class ValueSourceCreate(BaseModel):
mode: Optional[str] = Field(None, description="Audio mode: rms|peak|beat")
sensitivity: Optional[float] = Field(None, description="Gain multiplier (0.1-5.0)", ge=0.1, le=5.0)
smoothing: Optional[float] = Field(None, description="Temporal smoothing (0.0-1.0)", ge=0.0, le=1.0)
# adaptive fields
adaptive_mode: Optional[str] = Field(None, description="Adaptive mode: time_of_day|scene")
schedule: Optional[list] = Field(None, description="Time-of-day schedule: [{time: 'HH:MM', value: 0.0-1.0}]")
picture_source_id: Optional[str] = Field(None, description="Picture source ID for scene mode")
scene_behavior: Optional[str] = Field(None, description="Scene behavior: complement|match")
description: Optional[str] = Field(None, description="Optional description", max_length=500)
@@ -42,6 +47,11 @@ class ValueSourceUpdate(BaseModel):
mode: Optional[str] = Field(None, description="Audio mode: rms|peak|beat")
sensitivity: Optional[float] = Field(None, description="Gain multiplier (0.1-5.0)", ge=0.1, le=5.0)
smoothing: Optional[float] = Field(None, description="Temporal smoothing (0.0-1.0)", ge=0.0, le=1.0)
# adaptive fields
adaptive_mode: Optional[str] = Field(None, description="Adaptive mode: time_of_day|scene")
schedule: Optional[list] = Field(None, description="Time-of-day schedule")
picture_source_id: Optional[str] = Field(None, description="Picture source ID for scene mode")
scene_behavior: Optional[str] = Field(None, description="Scene behavior: complement|match")
description: Optional[str] = Field(None, description="Optional description", max_length=500)
@@ -50,7 +60,7 @@ class ValueSourceResponse(BaseModel):
id: str = Field(description="Source ID")
name: str = Field(description="Source name")
source_type: str = Field(description="Source type: static, animated, or audio")
source_type: str = Field(description="Source type: static, animated, audio, or adaptive")
value: Optional[float] = Field(None, description="Static value")
waveform: Optional[str] = Field(None, description="Waveform type")
speed: Optional[float] = Field(None, description="Cycles per minute")
@@ -60,6 +70,10 @@ class ValueSourceResponse(BaseModel):
mode: Optional[str] = Field(None, description="Audio mode")
sensitivity: Optional[float] = Field(None, description="Gain multiplier")
smoothing: Optional[float] = Field(None, description="Temporal smoothing")
adaptive_mode: Optional[str] = Field(None, description="Adaptive mode")
schedule: Optional[list] = Field(None, description="Time-of-day schedule")
picture_source_id: Optional[str] = Field(None, description="Picture source ID")
scene_behavior: Optional[str] = Field(None, description="Scene behavior")
description: Optional[str] = Field(None, description="Description")
created_at: datetime = Field(description="Creation timestamp")
updated_at: datetime = Field(description="Last update timestamp")

View File

@@ -95,6 +95,7 @@ class ProcessorManager:
value_source_store=value_source_store,
audio_capture_manager=self._audio_capture_manager,
audio_source_store=audio_source_store,
live_stream_manager=self._live_stream_manager,
) if value_source_store else None
self._overlay_manager = OverlayManager()
self._event_queues: List[asyncio.Queue] = []

View File

@@ -1,12 +1,14 @@
"""Value stream — runtime scalar signal generators.
A ValueStream wraps a ValueSource config and computes a float (0.0–1.0)
on demand via ``get_value()``. Three concrete types:
on demand via ``get_value()``. Five concrete types:
StaticValueStream — returns a constant
AnimatedValueStream — evaluates a periodic waveform (sine/triangle/square/sawtooth)
AudioValueStream — polls audio analysis for RMS/peak/beat, applies
sensitivity and temporal smoothing
StaticValueStream — returns a constant
AnimatedValueStream — evaluates a periodic waveform (sine/triangle/square/sawtooth)
AudioValueStream — polls audio analysis for RMS/peak/beat, applies
sensitivity and temporal smoothing
TimeOfDayValueStream — interpolates brightness along a 24h schedule
SceneValueStream — derives brightness from a picture source's frame luminance
ValueStreams are cheap (trivial math or single poll), so they compute inline
in the caller's processing loop — no background threads required.
@@ -19,12 +21,16 @@ from __future__ import annotations
import math
import time
from typing import TYPE_CHECKING, Dict, Optional
from datetime import datetime
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
import numpy as np
from wled_controller.utils import get_logger
if TYPE_CHECKING:
from wled_controller.core.audio.audio_capture import AudioCaptureManager
from wled_controller.core.processing.live_stream_manager import LiveStreamManager
from wled_controller.storage.audio_source_store import AudioSourceStore
from wled_controller.storage.value_source import ValueSource
from wled_controller.storage.value_source_store import ValueSourceStore
@@ -277,6 +283,214 @@ class AudioValueStream(ValueStream):
)
# ---------------------------------------------------------------------------
# Time of Day
# ---------------------------------------------------------------------------
_MINUTES_PER_DAY = 1440  # 24 * 60


class TimeOfDayValueStream(ValueStream):
    """Interpolates brightness along a 24-hour schedule.

    Schedule is a list of {"time": "HH:MM", "value": 0.0-1.0} dicts.
    At runtime, finds the two surrounding points and linearly interpolates.
    The schedule wraps around midnight.
    """

    def __init__(
        self,
        schedule: List[dict],
        min_value: float = 0.0,
        max_value: float = 1.0,
    ):
        self._points: List[Tuple[float, float]] = []  # (minutes since midnight, value)
        self._min = min_value
        self._max = max_value
        self._parse_schedule(schedule)

    def _parse_schedule(self, schedule: List[dict]) -> None:
        """Parse schedule into sorted (minutes, value) tuples.

        Malformed entries (non-numeric parts, hour/minute out of range) are
        skipped with a warning instead of raising — one bad point must not
        take down the whole stream.
        """
        points: List[Tuple[float, float]] = []
        for entry in schedule:
            t_str = entry.get("time", "00:00")
            try:
                parts = str(t_str).split(":")
                h = int(parts[0])
                m = int(parts[1]) if len(parts) > 1 else 0
                if not (0 <= h < 24 and 0 <= m < 60):
                    raise ValueError(f"time out of range: {t_str!r}")
                val = max(0.0, min(1.0, float(entry.get("value", 1.0))))
            except (ValueError, TypeError) as e:
                logger.warning(
                    f"TimeOfDayValueStream: skipping bad schedule entry {entry!r}: {e}"
                )
                continue
            points.append((h * 60 + m, val))
        points.sort(key=lambda p: p[0])
        self._points = points

    def get_value(self) -> float:
        """Return the interpolated brightness for the current wall-clock time,
        mapped into [min_value, max_value]."""
        if len(self._points) < 2:
            return self._max  # fallback: full brightness
        now = datetime.now()
        # Current time as fractional minutes since midnight (local time).
        current = now.hour * 60 + now.minute + now.second / 60.0
        points = self._points
        n = len(points)
        # Find the first point whose time is > current
        right_idx = 0
        for i, (t, _) in enumerate(points):
            if t > current:
                right_idx = i
                break
        else:
            right_idx = 0  # all points <= current: wrap to first
        left_idx = (right_idx - 1) % n
        t_left, v_left = points[left_idx]
        t_right, v_right = points[right_idx]
        # Compute interval length (handling midnight wrap)
        if t_right > t_left:
            interval = t_right - t_left
            elapsed = current - t_left
        else:
            # Segment crosses midnight: left point is later in the day than right.
            interval = (_MINUTES_PER_DAY - t_left) + t_right
            elapsed = (current - t_left) if current >= t_left else (_MINUTES_PER_DAY - t_left) + current
        frac = elapsed / interval if interval > 0 else 0.0
        raw = v_left + frac * (v_right - v_left)
        # Map to output range
        return self._min + max(0.0, min(1.0, raw)) * (self._max - self._min)

    def update_source(self, source: "ValueSource") -> None:
        """Hot-reload schedule and output range when the backing source is edited."""
        from wled_controller.storage.value_source import AdaptiveValueSource
        if isinstance(source, AdaptiveValueSource) and source.adaptive_mode == "time_of_day":
            self._parse_schedule(source.schedule)
            self._min = source.min_value
            self._max = source.max_value
# ---------------------------------------------------------------------------
# Scene
# ---------------------------------------------------------------------------
class SceneValueStream(ValueStream):
    """Derives brightness from a picture source's average frame luminance.

    Acquires a LiveStream from LiveStreamManager on start(). On each
    get_value() call, reads the latest frame, subsamples, computes mean
    luminance (BT.601), applies sensitivity/smoothing/mapping.

    Behaviors:
        complement — dark scene → high brightness (ambient backlight visibility)
        match      — bright scene → high brightness
    """

    def __init__(
        self,
        picture_source_id: str,
        scene_behavior: str = "complement",
        sensitivity: float = 1.0,
        smoothing: float = 0.3,
        min_value: float = 0.0,
        max_value: float = 1.0,
        live_stream_manager: Optional["LiveStreamManager"] = None,
    ):
        self._picture_source_id = picture_source_id
        self._behavior = scene_behavior
        self._sensitivity = sensitivity
        self._smoothing = smoothing
        self._min = min_value
        self._max = max_value
        self._live_stream_manager = live_stream_manager
        self._live_stream = None
        self._prev_value = 0.5  # neutral start until the first frame arrives

    def start(self) -> None:
        """Acquire the live stream for the configured picture source."""
        if self._live_stream_manager and self._picture_source_id:
            try:
                self._live_stream = self._live_stream_manager.acquire(
                    self._picture_source_id
                )
                logger.info(
                    f"SceneValueStream acquired live stream for {self._picture_source_id}"
                )
            except Exception as e:
                logger.warning(f"SceneValueStream failed to acquire live stream: {e}")
                self._live_stream = None

    def stop(self) -> None:
        """Release the live stream (if held) and reset smoothing state."""
        if self._live_stream is not None and self._live_stream_manager:
            try:
                self._live_stream_manager.release(self._picture_source_id)
            except Exception as e:
                logger.warning(f"SceneValueStream failed to release live stream: {e}")
        self._live_stream = None
        self._prev_value = 0.5

    def get_value(self) -> float:
        """Return the scene-derived brightness mapped into [min_value, max_value].

        If no stream/frame is available, returns the last smoothed value so the
        output never jumps on transient dropouts.
        """
        if self._live_stream is None:
            return self._prev_value
        frame = self._live_stream.get_latest_frame()
        if frame is None:
            return self._prev_value
        # Fast luminance: subsample to ~64x64 via numpy stride (zero-copy view)
        # NOTE(review): assumes frame.image is an HxWx3 array in RGB channel
        # order — a grayscale (2-D) or BGR frame would crash or weight channels
        # wrong; confirm against the LiveStream frame contract.
        img = frame.image
        h, w = img.shape[:2]
        step_h = max(1, h // 64)
        step_w = max(1, w // 64)
        sampled = img[::step_h, ::step_w].astype(np.float32)
        # BT.601 weighted luminance, normalized to [0, 1]
        luminance = float(
            (0.299 * sampled[:, :, 0] + 0.587 * sampled[:, :, 1] + 0.114 * sampled[:, :, 2]).mean()
        ) / 255.0
        # Apply sensitivity (gain), clipped so the pre-behavior value stays <= 1
        raw = min(1.0, luminance * self._sensitivity)
        # Apply behavior: complement inverts (dark scene → bright LEDs)
        if self._behavior == "complement":
            raw = 1.0 - raw
        # Temporal smoothing (EMA): higher smoothing = slower response
        smoothed = self._smoothing * self._prev_value + (1.0 - self._smoothing) * raw
        self._prev_value = smoothed
        # Map to output range
        clamped = max(0.0, min(1.0, smoothed))
        return self._min + clamped * (self._max - self._min)

    def update_source(self, source: "ValueSource") -> None:
        """Hot-reload config; swap live streams if the picture source changed.

        Fixes two defects in the original swap path:
        - the new stream is now acquired even when the previous acquire had
          failed (previously ``self._live_stream is None`` skipped the swap
          entirely, leaving the stream permanently unacquired);
        - a failing release() no longer aborts the acquire of the new stream.
        """
        from wled_controller.storage.value_source import AdaptiveValueSource
        if not isinstance(source, AdaptiveValueSource) or source.adaptive_mode != "scene":
            return
        self._behavior = source.scene_behavior
        self._sensitivity = source.sensitivity
        self._smoothing = source.smoothing
        self._min = source.min_value
        self._max = source.max_value
        # If picture source changed, swap live streams
        if source.picture_source_id == self._picture_source_id:
            return
        old_id = self._picture_source_id
        self._picture_source_id = source.picture_source_id
        if self._live_stream_manager is None:
            return
        if self._live_stream is not None:
            try:
                self._live_stream_manager.release(old_id)
            except Exception as e:
                logger.warning(f"SceneValueStream failed to release live stream: {e}")
            self._live_stream = None
        try:
            self._live_stream = self._live_stream_manager.acquire(
                self._picture_source_id
            )
            logger.info(
                f"SceneValueStream swapped live stream: {old_id}"
                f" -> {self._picture_source_id}"
            )
        except Exception as e:
            logger.warning(f"SceneValueStream failed to swap live stream: {e}")
            self._live_stream = None
# ---------------------------------------------------------------------------
# Manager
# ---------------------------------------------------------------------------
@@ -297,10 +511,12 @@ class ValueStreamManager:
value_source_store: "ValueSourceStore",
audio_capture_manager: Optional["AudioCaptureManager"] = None,
audio_source_store: Optional["AudioSourceStore"] = None,
live_stream_manager: Optional["LiveStreamManager"] = None,
):
self._value_source_store = value_source_store
self._audio_capture_manager = audio_capture_manager
self._audio_source_store = audio_source_store
self._live_stream_manager = live_stream_manager
self._streams: Dict[str, ValueStream] = {}
def acquire(self, vs_id: str, consumer_id: str) -> ValueStream:
@@ -359,6 +575,7 @@ class ValueStreamManager:
def _create_stream(self, source: "ValueSource") -> ValueStream:
"""Factory: create the appropriate ValueStream for a ValueSource."""
from wled_controller.storage.value_source import (
AdaptiveValueSource,
AnimatedValueSource,
AudioValueSource,
StaticValueSource,
@@ -385,5 +602,23 @@ class ValueStreamManager:
audio_source_store=self._audio_source_store,
)
if isinstance(source, AdaptiveValueSource):
if source.adaptive_mode == "scene":
return SceneValueStream(
picture_source_id=source.picture_source_id,
scene_behavior=source.scene_behavior,
sensitivity=source.sensitivity,
smoothing=source.smoothing,
min_value=source.min_value,
max_value=source.max_value,
live_stream_manager=self._live_stream_manager,
)
# Default: time_of_day
return TimeOfDayValueStream(
schedule=source.schedule,
min_value=source.min_value,
max_value=source.max_value,
)
# Fallback
return StaticValueStream(value=1.0)

View File

@@ -112,6 +112,7 @@ import {
import {
showValueSourceModal, closeValueSourceModal, saveValueSource,
editValueSource, deleteValueSource, onValueSourceTypeChange,
onAdaptiveModeChange, addSchedulePoint,
} from './features/value-sources.js';
// Layer 5: calibration
@@ -330,6 +331,8 @@ Object.assign(window, {
editValueSource,
deleteValueSource,
onValueSourceTypeChange,
onAdaptiveModeChange,
addSchedulePoint,
// calibration
showCalibration,

View File

@@ -1,15 +1,15 @@
/**
* Value Sources — CRUD for scalar value sources (static, animated, audio).
* Value Sources — CRUD for scalar value sources (static, animated, audio, adaptive).
*
* Value sources produce a float 0.0-1.0 used for dynamic brightness control
* on LED targets. Three subtypes: static (constant), animated (waveform),
* audio (audio-reactive).
* on LED targets. Four subtypes: static (constant), animated (waveform),
* audio (audio-reactive), adaptive (time-of-day schedule or scene brightness).
*
* Card rendering is handled by streams.js (Value tab).
* This module manages the editor modal and API operations.
*/
import { _cachedValueSources, set_cachedValueSources, _cachedAudioSources } from '../core/state.js';
import { _cachedValueSources, set_cachedValueSources, _cachedAudioSources, _cachedStreams } from '../core/state.js';
import { fetchWithAuth, escapeHtml } from '../core/api.js';
import { t } from '../core/i18n.js';
import { showToast, showConfirm } from '../core/ui.js';
@@ -49,6 +49,16 @@ export async function showValueSourceModal(editData) {
document.getElementById('value-source-mode').value = editData.mode || 'rms';
_setSlider('value-source-sensitivity', editData.sensitivity ?? 1.0);
_setSlider('value-source-smoothing', editData.smoothing ?? 0.3);
} else if (editData.source_type === 'adaptive') {
document.getElementById('value-source-adaptive-mode').value = editData.adaptive_mode || 'time_of_day';
onAdaptiveModeChange();
_populateScheduleUI(editData.schedule);
_populatePictureSourceDropdown(editData.picture_source_id || '');
document.getElementById('value-source-scene-behavior').value = editData.scene_behavior || 'complement';
_setSlider('value-source-scene-sensitivity', editData.sensitivity ?? 1.0);
_setSlider('value-source-scene-smoothing', editData.smoothing ?? 0.3);
_setSlider('value-source-adaptive-min-value', editData.min_value ?? 0);
_setSlider('value-source-adaptive-max-value', editData.max_value ?? 1);
}
} else {
document.getElementById('value-source-name').value = '';
@@ -64,6 +74,15 @@ export async function showValueSourceModal(editData) {
document.getElementById('value-source-mode').value = 'rms';
_setSlider('value-source-sensitivity', 1.0);
_setSlider('value-source-smoothing', 0.3);
// Adaptive defaults
document.getElementById('value-source-adaptive-mode').value = 'time_of_day';
_populateScheduleUI([]);
_populatePictureSourceDropdown('');
document.getElementById('value-source-scene-behavior').value = 'complement';
_setSlider('value-source-scene-sensitivity', 1.0);
_setSlider('value-source-scene-smoothing', 0.3);
_setSlider('value-source-adaptive-min-value', 0);
_setSlider('value-source-adaptive-max-value', 1);
}
valueSourceModal.open();
@@ -78,6 +97,7 @@ export function onValueSourceTypeChange() {
document.getElementById('value-source-static-section').style.display = type === 'static' ? '' : 'none';
document.getElementById('value-source-animated-section').style.display = type === 'animated' ? '' : 'none';
document.getElementById('value-source-audio-section').style.display = type === 'audio' ? '' : 'none';
document.getElementById('value-source-adaptive-section').style.display = type === 'adaptive' ? '' : 'none';
// Populate audio dropdown when switching to audio type
if (type === 'audio') {
@@ -86,6 +106,18 @@ export function onValueSourceTypeChange() {
_populateAudioSourceDropdown('');
}
}
// Initialize adaptive sub-sections
if (type === 'adaptive') {
onAdaptiveModeChange();
_populatePictureSourceDropdown('');
}
}
// Toggle the time-of-day / scene sub-sections to match the selected adaptive mode.
export function onAdaptiveModeChange() {
  const mode = document.getElementById('value-source-adaptive-mode').value;
  const show = (id, visible) => {
    document.getElementById(id).style.display = visible ? '' : 'none';
  };
  show('value-source-tod-section', mode === 'time_of_day');
  show('value-source-scene-section', mode === 'scene');
}
// ── Save ──────────────────────────────────────────────────────
@@ -117,6 +149,23 @@ export async function saveValueSource() {
payload.mode = document.getElementById('value-source-mode').value;
payload.sensitivity = parseFloat(document.getElementById('value-source-sensitivity').value);
payload.smoothing = parseFloat(document.getElementById('value-source-smoothing').value);
} else if (sourceType === 'adaptive') {
payload.adaptive_mode = document.getElementById('value-source-adaptive-mode').value;
payload.min_value = parseFloat(document.getElementById('value-source-adaptive-min-value').value);
payload.max_value = parseFloat(document.getElementById('value-source-adaptive-max-value').value);
if (payload.adaptive_mode === 'time_of_day') {
payload.schedule = _getScheduleFromUI();
if (payload.schedule.length < 2) {
errorEl.textContent = t('value_source.error.schedule_min');
errorEl.style.display = '';
return;
}
} else if (payload.adaptive_mode === 'scene') {
payload.picture_source_id = document.getElementById('value-source-picture-source').value;
payload.scene_behavior = document.getElementById('value-source-scene-behavior').value;
payload.sensitivity = parseFloat(document.getElementById('value-source-scene-sensitivity').value);
payload.smoothing = parseFloat(document.getElementById('value-source-scene-smoothing').value);
}
}
try {
@@ -175,7 +224,7 @@ export async function deleteValueSource(sourceId) {
// ── Card rendering (used by streams.js) ───────────────────────
export function createValueSourceCard(src) {
const typeIcons = { static: '📊', animated: '🔄', audio: '🎵' };
const typeIcons = { static: '📊', animated: '🔄', audio: '🎵', adaptive: '🌤️' };
const icon = typeIcons[src.source_type] || '🎚️';
let propsHtml = '';
@@ -196,6 +245,23 @@ export function createValueSourceCard(src) {
<span class="stream-card-prop" title="${escapeHtml(t('value_source.audio_source'))}">${escapeHtml(audioName)}</span>
<span class="stream-card-prop">${modeLabel.toUpperCase()}</span>
`;
} else if (src.source_type === 'adaptive') {
if (src.adaptive_mode === 'scene') {
const ps = _cachedStreams.find(s => s.id === src.picture_source_id);
const psName = ps ? ps.name : (src.picture_source_id || '-');
propsHtml = `
<span class="stream-card-prop">${t('value_source.adaptive_mode.scene')}</span>
<span class="stream-card-prop">${escapeHtml(psName)}</span>
<span class="stream-card-prop">${src.scene_behavior || 'complement'}</span>
`;
} else {
const pts = (src.schedule || []).length;
propsHtml = `
<span class="stream-card-prop">${t('value_source.adaptive_mode.time_of_day')}</span>
<span class="stream-card-prop">${pts} ${t('value_source.schedule.points')}</span>
<span class="stream-card-prop">${src.min_value ?? 0}${src.max_value ?? 1}</span>
`;
}
}
return `
@@ -232,3 +298,52 @@ function _populateAudioSourceDropdown(selectedId) {
`<option value="${s.id}"${s.id === selectedId ? ' selected' : ''}>${escapeHtml(s.name)}</option>`
).join('');
}
// ── Adaptive helpers ──────────────────────────────────────────
// Fill the picture-source <select> from the cached streams, preselecting selectedId.
function _populatePictureSourceDropdown(selectedId) {
  const select = document.getElementById('value-source-picture-source');
  if (!select) return;
  const options = [];
  for (const s of _cachedStreams) {
    const selectedAttr = s.id === selectedId ? ' selected' : '';
    options.push(`<option value="${s.id}"${selectedAttr}>${escapeHtml(s.name)}</option>`);
  }
  select.innerHTML = options.join('');
}
// Append one schedule row (time picker + value slider + remove button) to the list.
export function addSchedulePoint(time = '', value = 1.0) {
  const list = document.getElementById('value-source-schedule-list');
  if (!list) return;
  const row = document.createElement('div');
  row.className = 'schedule-row';
  // Empty time falls back to midday so the row is always valid.
  row.innerHTML = `
        <input type="time" class="schedule-time" value="${time || '12:00'}">
        <input type="range" class="schedule-value" min="0" max="1" step="0.01" value="${value}"
               oninput="this.nextElementSibling.textContent = this.value">
        <span class="schedule-value-display">${value}</span>
        <button type="button" class="btn btn-icon btn-danger btn-sm" onclick="this.parentElement.remove()">&#x2715;</button>
    `;
  list.appendChild(row);
}
// Read the schedule rows back out of the DOM as [{time, value}], skipping rows
// whose time input is empty.
function _getScheduleFromUI() {
  const schedule = [];
  const rows = document.querySelectorAll('#value-source-schedule-list .schedule-row');
  for (const row of rows) {
    const time = row.querySelector('.schedule-time').value;
    const value = parseFloat(row.querySelector('.schedule-value').value);
    if (time) schedule.push({ time, value });
  }
  return schedule;
}
// Rebuild the schedule editor from a saved schedule; seed sensible defaults
// when there is nothing saved yet.
function _populateScheduleUI(schedule) {
  const list = document.getElementById('value-source-schedule-list');
  if (!list) return;
  list.innerHTML = '';
  const hasPoints = Array.isArray(schedule) && schedule.length > 0;
  if (!hasPoints) {
    // Default: morning bright, night dim
    addSchedulePoint('08:00', 1.0);
    addSchedulePoint('22:00', 0.3);
    return;
  }
  for (const p of schedule) addSchedulePoint(p.time, p.value);
}

View File

@@ -774,10 +774,11 @@
"value_source.name.placeholder": "Brightness Pulse",
"value_source.name.hint": "A descriptive name for this value source",
"value_source.type": "Type:",
"value_source.type.hint": "Static outputs a constant value. Animated cycles through a waveform. Audio reacts to sound input.",
"value_source.type.hint": "Static outputs a constant value. Animated cycles through a waveform. Audio reacts to sound input. Adaptive adjusts based on time of day or scene brightness.",
"value_source.type.static": "Static",
"value_source.type.animated": "Animated",
"value_source.type.audio": "Audio",
"value_source.type.adaptive": "Adaptive",
"value_source.value": "Value:",
"value_source.value.hint": "Constant output value (0.0 = off, 1.0 = full brightness)",
"value_source.waveform": "Waveform:",
@@ -803,6 +804,25 @@
"value_source.sensitivity.hint": "Gain multiplier for the audio signal (higher = more reactive)",
"value_source.smoothing": "Smoothing:",
"value_source.smoothing.hint": "Temporal smoothing (0 = instant response, 1 = very smooth/slow)",
"value_source.adaptive_mode": "Adaptive Mode:",
"value_source.adaptive_mode.hint": "Time of Day adjusts brightness on a daily schedule. Scene analyzes picture brightness in real time.",
"value_source.adaptive_mode.time_of_day": "Time of Day",
"value_source.adaptive_mode.scene": "Scene Brightness",
"value_source.schedule": "Schedule:",
"value_source.schedule.hint": "Define at least 2 time points. Brightness interpolates linearly between them, wrapping at midnight.",
"value_source.schedule.add": "+ Add Point",
"value_source.schedule.points": "points",
"value_source.picture_source": "Picture Source:",
"value_source.picture_source.hint": "The picture source whose frames will be analyzed for average brightness.",
"value_source.scene_behavior": "Behavior:",
"value_source.scene_behavior.hint": "Complement: dark scene = high brightness (ideal for ambient backlight). Match: bright scene = high brightness.",
"value_source.scene_behavior.complement": "Complement (dark → bright)",
"value_source.scene_behavior.match": "Match (bright → bright)",
"value_source.adaptive_min_value": "Min Value:",
"value_source.adaptive_min_value.hint": "Minimum output brightness",
"value_source.adaptive_max_value": "Max Value:",
"value_source.adaptive_max_value.hint": "Maximum output brightness",
"value_source.error.schedule_min": "Schedule requires at least 2 time points",
"value_source.description": "Description (optional):",
"value_source.description.placeholder": "Describe this value source...",
"value_source.description.hint": "Optional notes about this value source",

View File

@@ -774,10 +774,11 @@
"value_source.name.placeholder": "Пульс яркости",
"value_source.name.hint": "Описательное имя для этого источника значений",
"value_source.type": "Тип:",
"value_source.type.hint": "Статический выдаёт постоянное значение. Анимированный циклически меняет форму волны. Аудио реагирует на звук.",
"value_source.type.hint": "Статический выдаёт постоянное значение. Анимированный циклически меняет форму волны. Аудио реагирует на звук. Адаптивный подстраивается под время суток или яркость сцены.",
"value_source.type.static": "Статический",
"value_source.type.animated": "Анимированный",
"value_source.type.audio": "Аудио",
"value_source.type.adaptive": "Адаптивный",
"value_source.value": "Значение:",
"value_source.value.hint": "Постоянное выходное значение (0.0 = выкл, 1.0 = полная яркость)",
"value_source.waveform": "Форма волны:",
@@ -803,6 +804,25 @@
"value_source.sensitivity.hint": "Множитель усиления аудиосигнала (выше = более реактивный)",
"value_source.smoothing": "Сглаживание:",
"value_source.smoothing.hint": "Временное сглаживание (0 = мгновенный отклик, 1 = очень плавный/медленный)",
"value_source.adaptive_mode": "Адаптивный режим:",
"value_source.adaptive_mode.hint": "Время суток регулирует яркость по дневному расписанию. Сцена анализирует яркость изображения в реальном времени.",
"value_source.adaptive_mode.time_of_day": "Время суток",
"value_source.adaptive_mode.scene": "Яркость сцены",
"value_source.schedule": "Расписание:",
"value_source.schedule.hint": "Определите минимум 2 временные точки. Яркость линейно интерполируется между ними, с переходом через полночь.",
"value_source.schedule.add": "+ Добавить точку",
"value_source.schedule.points": "точек",
"value_source.picture_source": "Источник изображения:",
"value_source.picture_source.hint": "Источник изображения, кадры которого будут анализироваться на среднюю яркость.",
"value_source.scene_behavior": "Поведение:",
"value_source.scene_behavior.hint": "Дополнение: тёмная сцена = высокая яркость (для фоновой подсветки). Совпадение: яркая сцена = высокая яркость.",
"value_source.scene_behavior.complement": "Дополнение (тёмный → ярко)",
"value_source.scene_behavior.match": "Совпадение (яркий → ярко)",
"value_source.adaptive_min_value": "Мин. значение:",
"value_source.adaptive_min_value.hint": "Минимальная выходная яркость",
"value_source.adaptive_max_value": "Макс. значение:",
"value_source.adaptive_max_value.hint": "Максимальная выходная яркость",
"value_source.error.schedule_min": "Расписание требует минимум 2 временные точки",
"value_source.description": "Описание (необязательно):",
"value_source.description.placeholder": "Опишите этот источник значений...",
"value_source.description.hint": "Необязательные заметки об этом источнике значений",

View File

@@ -1,15 +1,16 @@
"""Value source data model with inheritance-based source types.
A ValueSource produces a scalar float (0.0–1.0) that can drive target
parameters like brightness. Three types:
parameters like brightness. Four types:
StaticValueSource — constant float value
AnimatedValueSource — periodic waveform (sine, triangle, square, sawtooth)
AudioValueSource — audio-reactive scalar (RMS, peak, beat detection)
AdaptiveValueSource — adapts to external conditions (time of day, scene brightness)
"""
from dataclasses import dataclass
from dataclasses import dataclass, field
from datetime import datetime
from typing import Optional
from typing import List, Optional
@dataclass
@@ -42,6 +43,10 @@ class ValueSource:
"mode": None,
"sensitivity": None,
"smoothing": None,
"adaptive_mode": None,
"schedule": None,
"picture_source_id": None,
"scene_behavior": None,
}
@staticmethod
@@ -87,6 +92,20 @@ class ValueSource:
smoothing=float(data.get("smoothing") or 0.3),
)
if source_type == "adaptive":
return AdaptiveValueSource(
id=sid, name=name, source_type="adaptive",
created_at=created_at, updated_at=updated_at, description=description,
adaptive_mode=data.get("adaptive_mode") or "time_of_day",
schedule=data.get("schedule") or [],
picture_source_id=data.get("picture_source_id") or "",
scene_behavior=data.get("scene_behavior") or "complement",
sensitivity=float(data.get("sensitivity") or 1.0),
smoothing=float(data.get("smoothing") or 0.3),
min_value=float(data.get("min_value") or 0.0),
max_value=float(data["max_value"]) if data.get("max_value") is not None else 1.0,
)
# Default: "static" type
return StaticValueSource(
id=sid, name=name, source_type="static",
@@ -152,3 +171,34 @@ class AudioValueSource(ValueSource):
d["sensitivity"] = self.sensitivity
d["smoothing"] = self.smoothing
return d
@dataclass
class AdaptiveValueSource(ValueSource):
    """Value source that adapts to external conditions.

    Two sub-modes:
        time_of_day — interpolates brightness along a 24-hour schedule
        scene       — derives brightness from a picture source's frame luminance
    """
    adaptive_mode: str = "time_of_day"  # "time_of_day" | "scene"
    schedule: List[dict] = field(default_factory=list)  # [{time: "HH:MM", value: 0.0-1.0}]
    picture_source_id: str = ""  # for scene mode
    scene_behavior: str = "complement"  # "complement" | "match"
    sensitivity: float = 1.0  # gain multiplier (0.1-5.0)
    smoothing: float = 0.3  # temporal smoothing (0.0-1.0)
    min_value: float = 0.0  # output range min
    max_value: float = 1.0  # output range max

    def to_dict(self) -> dict:
        """Serialize: base fields plus all adaptive-specific fields."""
        d = super().to_dict()
        d.update(
            adaptive_mode=self.adaptive_mode,
            schedule=self.schedule,
            picture_source_id=self.picture_source_id,
            scene_behavior=self.scene_behavior,
            sensitivity=self.sensitivity,
            smoothing=self.smoothing,
            min_value=self.min_value,
            max_value=self.max_value,
        )
        return d

View File

@@ -7,6 +7,7 @@ from pathlib import Path
from typing import Dict, List, Optional
from wled_controller.storage.value_source import (
AdaptiveValueSource,
AnimatedValueSource,
AudioValueSource,
StaticValueSource,
@@ -101,11 +102,15 @@ class ValueSourceStore:
sensitivity: Optional[float] = None,
smoothing: Optional[float] = None,
description: Optional[str] = None,
adaptive_mode: Optional[str] = None,
schedule: Optional[list] = None,
picture_source_id: Optional[str] = None,
scene_behavior: Optional[str] = None,
) -> ValueSource:
if not name or not name.strip():
raise ValueError("Name is required")
if source_type not in ("static", "animated", "audio"):
if source_type not in ("static", "animated", "audio", "adaptive"):
raise ValueError(f"Invalid source type: {source_type}")
for source in self._sources.values():
@@ -139,6 +144,23 @@ class ValueSourceStore:
sensitivity=sensitivity if sensitivity is not None else 1.0,
smoothing=smoothing if smoothing is not None else 0.3,
)
elif source_type == "adaptive":
am = adaptive_mode or "time_of_day"
schedule_data = schedule or []
if am == "time_of_day" and len(schedule_data) < 2:
raise ValueError("Time of day schedule requires at least 2 points")
source = AdaptiveValueSource(
id=sid, name=name, source_type="adaptive",
created_at=now, updated_at=now, description=description,
adaptive_mode=am,
schedule=schedule_data,
picture_source_id=picture_source_id or "",
scene_behavior=scene_behavior or "complement",
sensitivity=sensitivity if sensitivity is not None else 1.0,
smoothing=smoothing if smoothing is not None else 0.3,
min_value=min_value if min_value is not None else 0.0,
max_value=max_value if max_value is not None else 1.0,
)
self._sources[sid] = source
self._save()
@@ -160,6 +182,10 @@ class ValueSourceStore:
sensitivity: Optional[float] = None,
smoothing: Optional[float] = None,
description: Optional[str] = None,
adaptive_mode: Optional[str] = None,
schedule: Optional[list] = None,
picture_source_id: Optional[str] = None,
scene_behavior: Optional[str] = None,
) -> ValueSource:
if source_id not in self._sources:
raise ValueError(f"Value source not found: {source_id}")
@@ -196,6 +222,25 @@ class ValueSourceStore:
source.sensitivity = sensitivity
if smoothing is not None:
source.smoothing = smoothing
elif isinstance(source, AdaptiveValueSource):
if adaptive_mode is not None:
source.adaptive_mode = adaptive_mode
if schedule is not None:
if source.adaptive_mode == "time_of_day" and len(schedule) < 2:
raise ValueError("Time of day schedule requires at least 2 points")
source.schedule = schedule
if picture_source_id is not None:
source.picture_source_id = picture_source_id
if scene_behavior is not None:
source.scene_behavior = scene_behavior
if sensitivity is not None:
source.sensitivity = sensitivity
if smoothing is not None:
source.smoothing = smoothing
if min_value is not None:
source.min_value = min_value
if max_value is not None:
source.max_value = max_value
source.updated_at = datetime.utcnow()
self._save()

View File

@@ -32,6 +32,7 @@
<option value="static" data-i18n="value_source.type.static">Static</option>
<option value="animated" data-i18n="value_source.type.animated">Animated</option>
<option value="audio" data-i18n="value_source.type.audio">Audio</option>
<option value="adaptive" data-i18n="value_source.type.adaptive">Adaptive</option>
</select>
</div>
@@ -160,6 +161,114 @@
</div>
</div>
<!-- Adaptive fields -->
<div id="value-source-adaptive-section" style="display:none">
<!-- Sub-mode selector -->
<div class="form-group">
<div class="label-row">
<label for="value-source-adaptive-mode" data-i18n="value_source.adaptive_mode">Adaptive Mode:</label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?" data-i18n-aria-label="aria.hint">?</button>
</div>
<small class="input-hint" style="display:none" data-i18n="value_source.adaptive_mode.hint">Time of Day adjusts brightness on a daily schedule. Scene analyzes picture brightness in real time.</small>
<select id="value-source-adaptive-mode" onchange="onAdaptiveModeChange()">
<option value="time_of_day" data-i18n="value_source.adaptive_mode.time_of_day">Time of Day</option>
<option value="scene" data-i18n="value_source.adaptive_mode.scene">Scene Brightness</option>
</select>
</div>
<!-- Time of Day sub-section -->
<div id="value-source-tod-section">
<div class="form-group">
<div class="label-row">
<label data-i18n="value_source.schedule">Schedule:</label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?" data-i18n-aria-label="aria.hint">?</button>
</div>
<small class="input-hint" style="display:none" data-i18n="value_source.schedule.hint">Define at least 2 time points. Brightness interpolates linearly between them, wrapping at midnight.</small>
<div id="value-source-schedule-list" class="schedule-list"></div>
<button type="button" class="btn btn-secondary btn-sm" onclick="addSchedulePoint()" data-i18n="value_source.schedule.add">+ Add Point</button>
</div>
</div>
<!-- Scene sub-section -->
<div id="value-source-scene-section" style="display:none">
<div class="form-group">
<div class="label-row">
<label for="value-source-picture-source" data-i18n="value_source.picture_source">Picture Source:</label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?" data-i18n-aria-label="aria.hint">?</button>
</div>
<small class="input-hint" style="display:none" data-i18n="value_source.picture_source.hint">The picture source whose frames will be analyzed for average brightness.</small>
<select id="value-source-picture-source">
<!-- populated dynamically -->
</select>
</div>
<div class="form-group">
<div class="label-row">
<label for="value-source-scene-behavior" data-i18n="value_source.scene_behavior">Behavior:</label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?" data-i18n-aria-label="aria.hint">?</button>
</div>
<small class="input-hint" style="display:none" data-i18n="value_source.scene_behavior.hint">Complement: dark scene = high brightness (ideal for ambient backlight). Match: bright scene = high brightness.</small>
<select id="value-source-scene-behavior">
<option value="complement" data-i18n="value_source.scene_behavior.complement">Complement (dark → bright)</option>
<option value="match" data-i18n="value_source.scene_behavior.match">Match (bright → bright)</option>
</select>
</div>
<div class="form-group">
<div class="label-row">
<label for="value-source-scene-sensitivity" data-i18n="value_source.sensitivity">Sensitivity:</label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?" data-i18n-aria-label="aria.hint">?</button>
</div>
<small class="input-hint" style="display:none" data-i18n="value_source.sensitivity.hint">Gain multiplier for the luminance signal (higher = more reactive to brightness changes)</small>
<div class="range-with-value">
<input type="range" id="value-source-scene-sensitivity" min="0.1" max="5" step="0.1" value="1.0"
oninput="document.getElementById('value-source-scene-sensitivity-display').textContent = this.value">
<span id="value-source-scene-sensitivity-display">1.0</span>
</div>
</div>
<div class="form-group">
<div class="label-row">
<label for="value-source-scene-smoothing" data-i18n="value_source.smoothing">Smoothing:</label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?" data-i18n-aria-label="aria.hint">?</button>
</div>
<small class="input-hint" style="display:none" data-i18n="value_source.smoothing.hint">Temporal smoothing (0 = instant response, 1 = very smooth transitions)</small>
<div class="range-with-value">
<input type="range" id="value-source-scene-smoothing" min="0" max="1" step="0.05" value="0.3"
oninput="document.getElementById('value-source-scene-smoothing-display').textContent = this.value">
<span id="value-source-scene-smoothing-display">0.3</span>
</div>
</div>
</div>
<!-- Shared: output range -->
<div class="form-group">
<div class="label-row">
<label for="value-source-adaptive-min-value" data-i18n="value_source.adaptive_min_value">Min Value:</label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?" data-i18n-aria-label="aria.hint">?</button>
</div>
<small class="input-hint" style="display:none" data-i18n="value_source.adaptive_min_value.hint">Minimum output brightness</small>
<div class="range-with-value">
<input type="range" id="value-source-adaptive-min-value" min="0" max="1" step="0.01" value="0"
oninput="document.getElementById('value-source-adaptive-min-value-display').textContent = this.value">
<span id="value-source-adaptive-min-value-display">0</span>
</div>
</div>
<div class="form-group">
<div class="label-row">
<label for="value-source-adaptive-max-value" data-i18n="value_source.adaptive_max_value">Max Value:</label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?" data-i18n-aria-label="aria.hint">?</button>
</div>
<small class="input-hint" style="display:none" data-i18n="value_source.adaptive_max_value.hint">Maximum output brightness</small>
<div class="range-with-value">
<input type="range" id="value-source-adaptive-max-value" min="0" max="1" step="0.01" value="1"
oninput="document.getElementById('value-source-adaptive-max-value-display').textContent = this.value">
<span id="value-source-adaptive-max-value-display">1</span>
</div>
</div>
</div>
<!-- Description -->
<div class="form-group">
<div class="label-row">