Add video picture source: file, URL, YouTube, sync clock, trim, test preview

Backend:
- VideoCaptureSource dataclass with url, loop, playback_speed, start/end_time,
  resolution_limit, clock_id, target_fps fields
- VideoCaptureStream: OpenCV decode thread with frame-accurate sync clock seeking,
  loop, trim range, resolution downscale at decode time
- YouTube URL resolution via yt-dlp (auto-detects youtube.com, youtu.be, shorts)
- Thumbnail extraction from first frame (GET /picture-sources/{id}/thumbnail)
- Video test WS preview: streams JPEG frames with elapsed/frame_count metadata
- Run video_stream.start() in executor to avoid blocking event loop during
  yt-dlp resolution
- Full CRUD via existing picture source API (stream_type: "video")
- Wired into LiveStreamManager for target streaming

Frontend:
- Video icon (film) in picture source type map and graph node subtypes
- Video tree nav node in Sources tab with CardSection
- Video fields in stream add/edit modal: URL, loop toggle, playback speed slider,
  target FPS, start/end trim times, resolution limit
- Video card rendering with URL, FPS, loop, speed badges
- Clone data support for video sources
- i18n keys for video source in en/ru/zh

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
2026-03-15 23:48:43 +03:00
parent 0bbaf81e26
commit 0bb4d7c3aa
14 changed files with 826 additions and 23 deletions

View File

@@ -43,6 +43,19 @@ def _encode_jpeg(pil_image: Image.Image, quality: int = 85) -> str:
return f"data:image/jpeg;base64,{b64}"
def encode_preview_frame(image: np.ndarray, max_width: "int | None" = None, quality: int = 80) -> bytes:
    """Encode a numpy RGB image to JPEG bytes, optionally downscaling.

    Args:
        image: HxWx3 RGB uint8 array.
        max_width: If set and the image is wider, downscale to this width,
            preserving aspect ratio. ``None`` disables downscaling.
        quality: JPEG quality (0-100).

    Returns:
        JPEG-encoded bytes.

    Raises:
        RuntimeError: If OpenCV fails to encode the image.
    """
    import cv2  # local import: cv2 is only needed on the preview path
    if max_width and image.shape[1] > max_width:
        scale = max_width / image.shape[1]
        # Guard against a 0-pixel height for extreme aspect ratios.
        new_h = max(1, int(image.shape[0] * scale))
        image = cv2.resize(image, (max_width, new_h), interpolation=cv2.INTER_AREA)
    # RGB → BGR: OpenCV's JPEG encoder expects BGR channel order.
    bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    ok, buf = cv2.imencode('.jpg', bgr, [cv2.IMWRITE_JPEG_QUALITY, quality])
    if not ok:
        # Previously the success flag was discarded; surface the failure.
        raise RuntimeError("cv2.imencode failed to encode preview frame")
    return buf.tobytes()
def _make_thumbnail(pil_image: Image.Image, max_width: int) -> Image.Image:
"""Create a thumbnail copy of the image, preserving aspect ratio."""
thumb = pil_image.copy()

View File

@@ -39,7 +39,7 @@ from wled_controller.storage.output_target_store import OutputTargetStore
from wled_controller.storage.template_store import TemplateStore
from wled_controller.storage.postprocessing_template_store import PostprocessingTemplateStore
from wled_controller.storage.picture_source_store import PictureSourceStore
from wled_controller.storage.picture_source import ScreenCapturePictureSource, StaticImagePictureSource
from wled_controller.storage.picture_source import ScreenCapturePictureSource, StaticImagePictureSource, VideoCaptureSource
from wled_controller.utils import get_logger
logger = get_logger(__name__)
@@ -63,6 +63,14 @@ def _stream_to_response(s) -> PictureSourceResponse:
updated_at=s.updated_at,
description=s.description,
tags=getattr(s, 'tags', []),
# Video fields
url=getattr(s, "url", None),
loop=getattr(s, "loop", None),
playback_speed=getattr(s, "playback_speed", None),
start_time=getattr(s, "start_time", None),
end_time=getattr(s, "end_time", None),
resolution_limit=getattr(s, "resolution_limit", None),
clock_id=getattr(s, "clock_id", None),
)
@@ -207,6 +215,14 @@ async def create_picture_source(
image_source=data.image_source,
description=data.description,
tags=data.tags,
# Video fields
url=data.url,
loop=data.loop,
playback_speed=data.playback_speed,
start_time=data.start_time,
end_time=data.end_time,
resolution_limit=data.resolution_limit,
clock_id=data.clock_id,
)
fire_entity_event("picture_source", "created", stream.id)
return _stream_to_response(stream)
@@ -253,6 +269,14 @@ async def update_picture_source(
image_source=data.image_source,
description=data.description,
tags=data.tags,
# Video fields
url=data.url,
loop=data.loop,
playback_speed=data.playback_speed,
start_time=data.start_time,
end_time=data.end_time,
resolution_limit=data.resolution_limit,
clock_id=data.clock_id,
)
fire_entity_event("picture_source", "updated", stream_id)
return _stream_to_response(stream)
@@ -292,6 +316,52 @@ async def delete_picture_source(
raise HTTPException(status_code=500, detail=str(e))
@router.get("/api/v1/picture-sources/{stream_id}/thumbnail", tags=["Picture Sources"])
async def get_video_thumbnail(
    stream_id: str,
    _auth: AuthRequired,
    store: PictureSourceStore = Depends(get_picture_source_store),
):
    """Get a thumbnail for a video picture source (first frame).

    Returns a base64 JPEG data URI plus the thumbnail dimensions.
    Responds 404 when the source does not exist or no frame can be
    extracted, 400 when the source is not a video source.
    """
    import base64
    from io import BytesIO
    from PIL import Image
    from wled_controller.core.processing.video_stream import extract_thumbnail
    from wled_controller.storage.picture_source import VideoCaptureSource
    try:
        source = store.get_stream(stream_id)
        # A missing stream should be 404, not the misleading 400 below.
        # (NOTE(review): assumes get_stream returns None for unknown IDs —
        # harmless if it raises instead.)
        if source is None:
            raise HTTPException(status_code=404, detail="Picture source not found")
        if not isinstance(source, VideoCaptureSource):
            raise HTTPException(status_code=400, detail="Not a video source")
        # Run extraction in a worker thread: it may block on disk/network
        # I/O (and yt-dlp resolution for YouTube URLs).
        frame = await asyncio.get_running_loop().run_in_executor(
            None, extract_thumbnail, source.url, source.resolution_limit
        )
        if frame is None:
            raise HTTPException(status_code=404, detail="Could not extract thumbnail")
        # Encode as JPEG, capped at 320px wide to keep the payload small.
        pil_img = Image.fromarray(frame)
        if pil_img.width > 320:
            ratio = 320 / pil_img.width
            pil_img = pil_img.resize((320, int(pil_img.height * ratio)), Image.LANCZOS)
        buf = BytesIO()
        pil_img.save(buf, format="JPEG", quality=80)
        b64 = base64.b64encode(buf.getvalue()).decode()
        return {"thumbnail": f"data:image/jpeg;base64,{b64}", "width": pil_img.width, "height": pil_img.height}
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to extract video thumbnail: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.post("/api/v1/picture-sources/{stream_id}/test", response_model=TemplateTestResponse, tags=["Picture Sources"])
async def test_picture_source(
stream_id: str,
@@ -530,6 +600,86 @@ async def test_picture_source_ws(
await websocket.close(code=4003, reason="Static image streams don't support live test")
return
# Video sources: use VideoCaptureLiveStream for test preview
if isinstance(raw_stream, VideoCaptureSource):
from wled_controller.core.processing.video_stream import VideoCaptureLiveStream
await websocket.accept()
logger.info(f"Video source test WS connected for {stream_id} ({duration}s)")
video_stream = VideoCaptureLiveStream(
url=raw_stream.url,
loop=raw_stream.loop,
playback_speed=raw_stream.playback_speed,
start_time=raw_stream.start_time,
end_time=raw_stream.end_time,
resolution_limit=raw_stream.resolution_limit,
target_fps=raw_stream.target_fps,
)
def _encode_video_frame(image, pw):
    """Encode a numpy RGB frame as a JPEG base64 data URI.

    Downscales to at most ``pw`` pixels wide (aspect ratio preserved)
    when ``pw`` is truthy. Returns (data_uri, width, height).
    """
    from PIL import Image as PILImage
    frame_img = PILImage.fromarray(image)
    if pw and frame_img.width > pw:
        scaled_h = int(frame_img.height * (pw / frame_img.width))
        frame_img = frame_img.resize((pw, scaled_h), PILImage.LANCZOS)
    out = io.BytesIO()
    frame_img.save(out, format="JPEG", quality=80)
    encoded = base64.b64encode(out.getvalue()).decode()
    return f"data:image/jpeg;base64,{encoded}", frame_img.width, frame_img.height
try:
await asyncio.get_event_loop().run_in_executor(None, video_stream.start)
import time as _time
fps = min(raw_stream.target_fps or 30, 30)
frame_time = 1.0 / fps
end_at = _time.monotonic() + duration
frame_count = 0
last_frame = None
while _time.monotonic() < end_at:
frame = video_stream.get_latest_frame()
if frame is not None and frame.image is not None and frame is not last_frame:
last_frame = frame
frame_count += 1
thumb, w, h = await asyncio.get_event_loop().run_in_executor(
None, _encode_video_frame, frame.image, preview_width or None,
)
elapsed = duration - (end_at - _time.monotonic())
await websocket.send_json({
"type": "frame",
"thumbnail": thumb,
"width": w, "height": h,
"elapsed": round(elapsed, 1),
"frame_count": frame_count,
})
await asyncio.sleep(frame_time)
# Send final result
if last_frame is not None:
full_img, fw, fh = await asyncio.get_event_loop().run_in_executor(
None, _encode_video_frame, last_frame.image, None,
)
await websocket.send_json({
"type": "result",
"full_image": full_img,
"width": fw, "height": fh,
"total_frames": frame_count,
"duration": duration,
"avg_fps": round(frame_count / max(duration, 0.001), 1),
})
except WebSocketDisconnect:
pass
except Exception as e:
logger.error(f"Video source test WS error for {stream_id}: {e}")
try:
await websocket.send_json({"type": "error", "detail": str(e)})
except Exception:
pass
finally:
video_stream.stop()
logger.info(f"Video source test WS disconnected for {stream_id}")
return
if not isinstance(raw_stream, ScreenCapturePictureSource):
await websocket.close(code=4003, reason="Unsupported stream type for live test")
return

View File

@@ -10,15 +10,23 @@ class PictureSourceCreate(BaseModel):
"""Request to create a picture source."""
name: str = Field(description="Stream name", min_length=1, max_length=100)
stream_type: Literal["raw", "processed", "static_image"] = Field(description="Stream type")
stream_type: Literal["raw", "processed", "static_image", "video"] = Field(description="Stream type")
display_index: Optional[int] = Field(None, description="Display index (raw streams)", ge=0)
capture_template_id: Optional[str] = Field(None, description="Capture template ID (raw streams)")
target_fps: Optional[int] = Field(None, description="Target FPS (raw streams)", ge=1, le=90)
target_fps: Optional[int] = Field(None, description="Target FPS", ge=1, le=90)
source_stream_id: Optional[str] = Field(None, description="Source stream ID (processed streams)")
postprocessing_template_id: Optional[str] = Field(None, description="Postprocessing template ID (processed streams)")
image_source: Optional[str] = Field(None, description="Image URL or file path (static_image streams)")
description: Optional[str] = Field(None, description="Stream description", max_length=500)
tags: List[str] = Field(default_factory=list, description="User-defined tags")
# Video fields
url: Optional[str] = Field(None, description="Video URL, file path, or YouTube URL")
loop: bool = Field(True, description="Loop video playback")
playback_speed: float = Field(1.0, description="Playback speed multiplier", ge=0.1, le=10.0)
start_time: Optional[float] = Field(None, description="Trim start time in seconds", ge=0)
end_time: Optional[float] = Field(None, description="Trim end time in seconds", ge=0)
resolution_limit: Optional[int] = Field(None, description="Max width in pixels for decode downscale", ge=64, le=7680)
clock_id: Optional[str] = Field(None, description="Sync clock ID for frame-accurate timing")
class PictureSourceUpdate(BaseModel):
@@ -27,12 +35,20 @@ class PictureSourceUpdate(BaseModel):
name: Optional[str] = Field(None, description="Stream name", min_length=1, max_length=100)
display_index: Optional[int] = Field(None, description="Display index (raw streams)", ge=0)
capture_template_id: Optional[str] = Field(None, description="Capture template ID (raw streams)")
target_fps: Optional[int] = Field(None, description="Target FPS (raw streams)", ge=1, le=90)
target_fps: Optional[int] = Field(None, description="Target FPS", ge=1, le=90)
source_stream_id: Optional[str] = Field(None, description="Source stream ID (processed streams)")
postprocessing_template_id: Optional[str] = Field(None, description="Postprocessing template ID (processed streams)")
image_source: Optional[str] = Field(None, description="Image URL or file path (static_image streams)")
description: Optional[str] = Field(None, description="Stream description", max_length=500)
tags: Optional[List[str]] = None
# Video fields
url: Optional[str] = Field(None, description="Video URL, file path, or YouTube URL")
loop: Optional[bool] = Field(None, description="Loop video playback")
playback_speed: Optional[float] = Field(None, description="Playback speed multiplier", ge=0.1, le=10.0)
start_time: Optional[float] = Field(None, description="Trim start time in seconds", ge=0)
end_time: Optional[float] = Field(None, description="Trim end time in seconds", ge=0)
resolution_limit: Optional[int] = Field(None, description="Max width in pixels for decode downscale", ge=64, le=7680)
clock_id: Optional[str] = Field(None, description="Sync clock ID for frame-accurate timing")
class PictureSourceResponse(BaseModel):
@@ -40,7 +56,7 @@ class PictureSourceResponse(BaseModel):
id: str = Field(description="Stream ID")
name: str = Field(description="Stream name")
stream_type: str = Field(description="Stream type (raw, processed, or static_image)")
stream_type: str = Field(description="Stream type (raw, processed, static_image, or video)")
display_index: Optional[int] = Field(None, description="Display index")
capture_template_id: Optional[str] = Field(None, description="Capture template ID")
target_fps: Optional[int] = Field(None, description="Target FPS")
@@ -51,6 +67,14 @@ class PictureSourceResponse(BaseModel):
created_at: datetime = Field(description="Creation timestamp")
updated_at: datetime = Field(description="Last update timestamp")
description: Optional[str] = Field(None, description="Stream description")
# Video fields
url: Optional[str] = Field(None, description="Video URL")
loop: Optional[bool] = Field(None, description="Loop video playback")
playback_speed: Optional[float] = Field(None, description="Playback speed multiplier")
start_time: Optional[float] = Field(None, description="Trim start time in seconds")
end_time: Optional[float] = Field(None, description="Trim end time in seconds")
resolution_limit: Optional[int] = Field(None, description="Max width for decode")
clock_id: Optional[str] = Field(None, description="Sync clock ID")
class PictureSourceListResponse(BaseModel):

View File

@@ -22,6 +22,7 @@ from wled_controller.core.processing.live_stream import (
ScreenCaptureLiveStream,
StaticImageLiveStream,
)
from wled_controller.core.processing.video_stream import VideoCaptureLiveStream
from wled_controller.utils import get_logger
logger = get_logger(__name__)
@@ -178,6 +179,7 @@ class LiveStreamManager:
ProcessedPictureSource,
ScreenCapturePictureSource,
StaticImagePictureSource,
VideoCaptureSource,
)
stream_config = self._picture_source_store.get_stream(picture_source_id)
@@ -191,6 +193,9 @@ class LiveStreamManager:
elif isinstance(stream_config, StaticImagePictureSource):
return self._create_static_image_live_stream(stream_config), None
elif isinstance(stream_config, VideoCaptureSource):
return self._create_video_live_stream(stream_config), None
else:
raise ValueError(f"Unknown picture source type: {type(stream_config)}")
@@ -259,6 +264,31 @@ class LiveStreamManager:
logger.warning(f"Skipping unknown filter '{fi.filter_id}': {e}")
return resolved
def _create_video_live_stream(self, config) -> VideoCaptureLiveStream:
    """Create a VideoCaptureLiveStream from a VideoCaptureSource config."""
    video_stream = VideoCaptureLiveStream(
        url=config.url,
        loop=config.loop,
        playback_speed=config.playback_speed,
        start_time=config.start_time,
        end_time=config.end_time,
        resolution_limit=config.resolution_limit,
        target_fps=config.target_fps,
    )
    if not config.clock_id:
        return video_stream
    # Best-effort clock attachment: a missing or broken clock must not
    # prevent the stream itself from being created.
    try:
        from wled_controller.core.processing.processor_manager import ProcessorManager
        pm = ProcessorManager.instance()
        if pm and hasattr(pm, '_sync_clock_manager'):
            video_stream.set_clock(pm._sync_clock_manager.acquire(config.clock_id))
    except Exception as e:
        logger.warning(f"Could not attach clock {config.clock_id} to video stream: {e}")
    return video_stream
def _create_static_image_live_stream(self, config) -> StaticImageLiveStream:
"""Create a StaticImageLiveStream from a StaticImagePictureSource config."""
image = self._load_static_image(config.image_source)

View File

@@ -0,0 +1,356 @@
"""Video file/URL live stream — decodes video frames via OpenCV.
Supports local files, HTTP URLs, RTSP streams, and YouTube URLs (via yt-dlp).
Optional sync clock integration for frame-accurate seeking.
"""
import re
import threading
import time
from typing import Optional
import cv2
import numpy as np
from wled_controller.core.capture_engines.base import ScreenCapture
from wled_controller.core.processing.live_stream import LiveStream
from wled_controller.utils import get_logger
logger = get_logger(__name__)
# YouTube URL patterns — the 11-character video ID is captured in group 1.
# The watch pattern allows other query parameters before v= (the previous
# form only matched when v= was the first parameter); live/embed share the
# same path-segment shape as shorts.
_YT_PATTERNS = [
    re.compile(r"(?:https?://)?(?:www\.|m\.)?youtube\.com/watch\?(?:[^#\s]*&)?v=([a-zA-Z0-9_-]{11})"),
    re.compile(r"(?:https?://)?youtu\.be/([a-zA-Z0-9_-]{11})"),
    re.compile(r"(?:https?://)?(?:www\.|m\.)?youtube\.com/(?:shorts|live|embed)/([a-zA-Z0-9_-]{11})"),
    re.compile(r"youtube://([a-zA-Z0-9_-]{11})"),
]


def is_youtube_url(url: str) -> bool:
    """Return True if *url* looks like a YouTube video URL.

    Recognizes watch, youtu.be, shorts, live, embed, and youtube:// forms;
    used to decide whether a URL needs yt-dlp resolution before decoding.
    """
    return any(p.search(url) for p in _YT_PATTERNS)
def resolve_youtube_url(url: str, resolution_limit: Optional[int] = None) -> str:
    """Resolve a YouTube URL to a direct stream URL using yt-dlp.

    Prefers an mp4 stream no taller than *resolution_limit* (default 720).
    Raises RuntimeError when yt-dlp is missing or no stream URL is found.
    """
    try:
        import yt_dlp
    except ImportError:
        raise RuntimeError("yt-dlp is required for YouTube support: pip install yt-dlp")
    max_h = resolution_limit or 720
    # Fallback chain: capped mp4 video → capped mp4 → anything capped → best.
    format_spec = (
        f"bestvideo[height<={max_h}][ext=mp4]/best[height<={max_h}][ext=mp4]"
        f"/best[height<={max_h}]/best"
    )
    ydl_opts = {"format": format_spec, "quiet": True, "no_warnings": True, "extract_flat": False}
    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
        info = ydl.extract_info(url, download=False)
        stream_url = info.get("url")
        if not stream_url:
            # Merged/multi-format results: pick the first format with video.
            for fmt in info.get("requested_formats") or []:
                if fmt.get("vcodec") != "none":
                    stream_url = fmt["url"]
                    break
        if not stream_url:
            raise RuntimeError(f"Could not extract video stream URL from: {url}")
        logger.info(
            f"Resolved YouTube URL: {info.get('title', '?')} "
            f"({info.get('width', '?')}x{info.get('height', '?')})"
        )
        return stream_url
def extract_thumbnail(url: str, resolution_limit: Optional[int] = None) -> Optional[np.ndarray]:
    """Extract the first frame of a video as a thumbnail (RGB numpy array).

    For YouTube URLs, resolves via yt-dlp first.

    Args:
        url: Local path, HTTP/RTSP URL, or YouTube URL.
        resolution_limit: Max width in pixels; wider frames are downscaled.

    Returns:
        RGB uint8 array, or None on any failure (logged as a warning).
    """
    try:
        actual_url = url
        if is_youtube_url(url):
            actual_url = resolve_youtube_url(url, resolution_limit)
        cap = cv2.VideoCapture(actual_url)
        try:
            if not cap.isOpened():
                return None
            ret, frame = cap.read()
        finally:
            # Always release the decoder handle, even if read() raises —
            # the previous code leaked the capture on a read exception.
            cap.release()
        if not ret or frame is None:
            return None
        # Convert BGR → RGB (OpenCV decodes to BGR).
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # Downscale if the frame exceeds the configured width limit.
        if resolution_limit and frame.shape[1] > resolution_limit:
            scale = resolution_limit / frame.shape[1]
            new_h = int(frame.shape[0] * scale)
            frame = cv2.resize(frame, (resolution_limit, new_h), interpolation=cv2.INTER_AREA)
        return frame
    except Exception as e:
        logger.warning(f"Failed to extract thumbnail from {url}: {e}")
        return None
class VideoCaptureLiveStream(LiveStream):
    """Live stream that decodes video frames from a file, URL, or YouTube link.

    A background thread decodes frames at the video's native FPS (or target FPS).
    Supports loop, trim (start_time/end_time), playback speed, resolution limit,
    and optional sync clock for frame-accurate seeking.

    When a sync clock is attached:
    - clock.get_time() determines the current playback position
    - clock pause/resume pauses/resumes playback
    - NOTE(review): the decode loop only reads clock.get_time(); the claim
      that clock.speed overrides playback_speed holds only if get_time()
      already applies the clock's own speed — confirm against the clock impl.
    """

    def __init__(
        self,
        url: str,
        loop: bool = True,
        playback_speed: float = 1.0,
        start_time: Optional[float] = None,
        end_time: Optional[float] = None,
        resolution_limit: Optional[int] = None,
        target_fps: int = 30,
    ):
        self._original_url = url
        self._resolved_url: Optional[str] = None  # set in start() after yt-dlp
        self._loop = loop
        self._playback_speed = playback_speed
        self._start_time = start_time or 0.0
        self._end_time = end_time
        self._resolution_limit = resolution_limit
        self._target_fps = target_fps
        self._cap: Optional[cv2.VideoCapture] = None
        # Video metadata, populated in start().
        self._video_fps: float = 30.0
        self._total_frames: int = 0
        self._video_duration: float = 0.0
        self._video_width: int = 0
        self._video_height: int = 0
        # Latest decoded frame, guarded by _frame_lock.
        self._latest_frame: Optional[ScreenCapture] = None
        self._frame_lock = threading.Lock()
        self._running = False
        self._thread: Optional[threading.Thread] = None
        # Sync clock (set externally via set_clock)
        self._clock = None

    @property
    def target_fps(self) -> int:
        return self._target_fps

    @property
    def display_index(self) -> Optional[int]:
        return None  # Not a screen capture

    def set_clock(self, clock) -> None:
        """Attach a SyncClockRuntime for frame-accurate seek."""
        self._clock = clock

    def start(self) -> None:
        """Open the video (resolving YouTube URLs first) and start decoding.

        Raises:
            RuntimeError: If the video cannot be opened.
        """
        if self._running:
            return
        # Resolve YouTube URL if needed (may block on network — callers run
        # this in an executor).
        actual_url = self._original_url
        if is_youtube_url(actual_url):
            actual_url = resolve_youtube_url(actual_url, self._resolution_limit)
        self._resolved_url = actual_url
        # Open capture
        self._cap = cv2.VideoCapture(actual_url)
        if not self._cap.isOpened():
            raise RuntimeError(f"Failed to open video: {self._original_url}")
        self._video_fps = self._cap.get(cv2.CAP_PROP_FPS) or 30.0
        self._total_frames = int(self._cap.get(cv2.CAP_PROP_FRAME_COUNT) or 0)
        self._video_width = int(self._cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self._video_height = int(self._cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        if self._total_frames > 0 and self._video_fps > 0:
            self._video_duration = self._total_frames / self._video_fps
        else:
            self._video_duration = 0.0  # Live stream or unknown duration
        # Use the video's own FPS when no target set, clamped to [1, 60]
        # (the lower bound guards against int(fps) truncating to 0).
        if self._target_fps <= 0:
            self._target_fps = max(1, min(int(self._video_fps), 60))
        # Seek to start_time if a trim start is configured.
        if self._start_time > 0:
            self._seek_to(self._start_time)
        self._running = True
        self._thread = threading.Thread(
            target=self._decode_loop,
            name="video-capture",
            daemon=True,
        )
        self._thread.start()
        logger.info(
            f"VideoCaptureLiveStream started: {self._original_url} "
            f"({self._video_width}x{self._video_height} @ {self._video_fps:.1f}fps, "
            f"duration={self._video_duration:.1f}s)"
        )

    def stop(self) -> None:
        """Stop the decode thread and release the capture handle."""
        if not self._running:
            return
        self._running = False
        if self._thread:
            self._thread.join(timeout=5.0)
            self._thread = None
        if self._cap:
            self._cap.release()
            self._cap = None
        # Clear under the lock — consumers may be reading concurrently.
        with self._frame_lock:
            self._latest_frame = None
        logger.info(f"VideoCaptureLiveStream stopped: {self._original_url}")

    def get_latest_frame(self) -> Optional[ScreenCapture]:
        """Return the most recently decoded frame, or None before the first."""
        with self._frame_lock:
            return self._latest_frame

    def _seek_to(self, time_sec: float) -> None:
        """Seek to a specific time in seconds (no-op for non-seekable streams)."""
        if self._cap and self._total_frames > 0:
            self._cap.set(cv2.CAP_PROP_POS_MSEC, time_sec * 1000.0)

    def _get_effective_end_time(self) -> float:
        """Get the effective end time (end_time, video duration, or +inf)."""
        if self._end_time is not None:
            return self._end_time
        if self._video_duration > 0:
            return self._video_duration
        return float("inf")

    def _decode_loop(self) -> None:
        """Background thread: decode frames, apply speed/clock, handle loop."""
        frame_time = 1.0 / self._target_fps if self._target_fps > 0 else 1.0 / 30
        playback_start = time.perf_counter()
        last_seek_time = -1.0
        consecutive_errors = 0
        try:
            while self._running:
                loop_start = time.perf_counter()
                # Determine current playback position: clock time when a sync
                # clock is attached, otherwise wall time scaled by speed.
                if self._clock is not None:
                    current_time = self._start_time + self._clock.get_time()
                else:
                    wall_elapsed = time.perf_counter() - playback_start
                    current_time = self._start_time + wall_elapsed * self._playback_speed
                end_time = self._get_effective_end_time()
                # Handle end of trim range.
                if current_time >= end_time:
                    if self._loop:
                        if self._clock is not None:
                            # We can't rewind an external clock — wrap the
                            # position into the trim range instead.
                            span = max(end_time - self._start_time, 0.001)
                            current_time = self._start_time + (current_time - self._start_time) % span
                        else:
                            playback_start = time.perf_counter()
                            current_time = self._start_time
                            self._seek_to(current_time)
                            last_seek_time = -1.0
                    else:
                        # End without loop — hold the last frame.
                        time.sleep(frame_time)
                        continue
                # Clock-based seeking: only reseek when the clock position
                # jumped by more than ~2 frames, to avoid decoder thrashing.
                if self._clock is not None and self._total_frames > 0:
                    threshold = 2.0 / self._video_fps if self._video_fps > 0 else 2.0 * frame_time
                    if last_seek_time < 0 or abs(current_time - last_seek_time) > threshold:
                        self._seek_to(current_time)
                        last_seek_time = current_time
                # Decode next frame.
                try:
                    ret, frame = self._cap.read()
                    if not ret or frame is None:
                        if self._loop and self._total_frames > 0:
                            # EOF on a seekable file: restart from trim start.
                            self._seek_to(self._start_time)
                            if self._clock is None:
                                playback_start = time.perf_counter()
                            last_seek_time = -1.0
                            continue
                        else:
                            time.sleep(frame_time)
                            continue
                    consecutive_errors = 0
                    # BGR → RGB in place (cap.read() returns a fresh array
                    # each call, so mutating it here is safe).
                    cv2.cvtColor(frame, cv2.COLOR_BGR2RGB, dst=frame)
                    # Downscale at decode time if a resolution limit is set.
                    if self._resolution_limit and frame.shape[1] > self._resolution_limit:
                        scale = self._resolution_limit / frame.shape[1]
                        new_h = int(frame.shape[0] * scale)
                        frame = cv2.resize(
                            frame, (self._resolution_limit, new_h), interpolation=cv2.INTER_AREA
                        )
                    h, w = frame.shape[:2]
                    # Publish the freshly decoded array directly. (The old
                    # code copied into one reusable buffer, which mutated
                    # frames already handed out via get_latest_frame() — a
                    # data race producing torn frames for consumers.)
                    sc = ScreenCapture(
                        image=frame, width=w, height=h, display_index=-1
                    )
                    with self._frame_lock:
                        self._latest_frame = sc
                except Exception as e:
                    consecutive_errors += 1
                    logger.error(f"Video decode error: {e}")
                    if consecutive_errors > 10:
                        # Back off progressively after repeated failures.
                        time.sleep(min(1.0, 0.1 * (consecutive_errors - 10)))
                    continue
                # Throttle to target FPS.
                remaining = frame_time - (time.perf_counter() - loop_start)
                if remaining > 0:
                    time.sleep(remaining)
        except Exception as e:
            logger.error(f"Fatal video decode loop error: {e}", exc_info=True)
        finally:
            self._running = False

View File

@@ -50,7 +50,7 @@ const SUBTYPE_ICONS = {
api_input: P.send, notification: P.bellRing, daylight: P.sun, candlelight: P.flame,
processed: P.sparkles,
},
picture_source: { raw: P.monitor, processed: P.palette, static_image: P.image },
picture_source: { raw: P.monitor, processed: P.palette, static_image: P.image, video: P.film },
value_source: {
static: P.layoutDashboard, animated: P.refreshCw, audio: P.music,
adaptive_time: P.clock, adaptive_scene: P.cloudSun, daylight: P.sun,

View File

@@ -15,7 +15,7 @@ const _svg = (d) => `<svg class="icon" viewBox="0 0 24 24">${d}</svg>`;
// ── Type-resolution maps (private) ──────────────────────────
const _targetTypeIcons = { led: _svg(P.lightbulb), wled: _svg(P.lightbulb), key_colors: _svg(P.palette) };
const _pictureSourceTypeIcons = { raw: _svg(P.monitor), processed: _svg(P.palette), static_image: _svg(P.image) };
const _pictureSourceTypeIcons = { raw: _svg(P.monitor), processed: _svg(P.palette), static_image: _svg(P.image), video: _svg(P.film) };
const _colorStripTypeIcons = {
picture_advanced: _svg(P.monitor),
static: _svg(P.palette), color_cycle: _svg(P.refreshCw), gradient: _svg(P.rainbow),

View File

@@ -77,6 +77,7 @@ const csProcTemplates = new CardSection('proc-templates', { titleKey: 'postproce
const csAudioMulti = new CardSection('audio-multi', { titleKey: 'audio_source.group.multichannel', gridClass: 'templates-grid', addCardOnclick: "showAudioSourceModal('multichannel')", keyAttr: 'data-id' });
const csAudioMono = new CardSection('audio-mono', { titleKey: 'audio_source.group.mono', gridClass: 'templates-grid', addCardOnclick: "showAudioSourceModal('mono')", keyAttr: 'data-id' });
const csStaticStreams = new CardSection('static-streams', { titleKey: 'streams.group.static_image', gridClass: 'templates-grid', addCardOnclick: "showAddStreamModal('static_image')", keyAttr: 'data-stream-id' });
const csVideoStreams = new CardSection('video-streams', { titleKey: 'streams.group.video', gridClass: 'templates-grid', addCardOnclick: "showAddStreamModal('video')", keyAttr: 'data-stream-id' });
const csAudioTemplates = new CardSection('audio-templates', { titleKey: 'audio_template.title', gridClass: 'templates-grid', addCardOnclick: "showAddAudioTemplateModal()", keyAttr: 'data-audio-template-id' });
const csColorStrips = new CardSection('color-strips', { titleKey: 'targets.section.color_strips', gridClass: 'templates-grid', addCardOnclick: "showCSSEditor()", keyAttr: 'data-css-id' });
const csValueSources = new CardSection('value-sources', { titleKey: 'value_source.group.title', gridClass: 'templates-grid', addCardOnclick: "showValueSourceModal()", keyAttr: 'data-id' });
@@ -1250,6 +1251,7 @@ const _streamSectionMap = {
raw: [csRawStreams],
raw_templates: [csRawTemplates],
static_image: [csStaticStreams],
video: [csVideoStreams],
processed: [csProcStreams],
proc_templates: [csProcTemplates],
css_processing: [csCSPTemplates],
@@ -1307,6 +1309,15 @@ function renderPictureSourcesList(streams) {
detailsHtml = `<div class="stream-card-props">
<span class="stream-card-prop stream-card-prop-full" title="${escapeHtml(src)}">${ICON_WEB} ${escapeHtml(src)}</span>
</div>`;
} else if (stream.stream_type === 'video') {
const url = stream.url || '';
const shortUrl = url.length > 40 ? url.slice(0, 37) + '...' : url;
detailsHtml = `<div class="stream-card-props">
<span class="stream-card-prop stream-card-prop-full" title="${escapeHtml(url)}">${ICON_WEB} ${escapeHtml(shortUrl)}</span>
<span class="stream-card-prop" title="${t('streams.target_fps')}">${ICON_FPS} ${stream.target_fps ?? 30}</span>
${stream.loop !== false ? `<span class="stream-card-prop">↻</span>` : ''}
${stream.playback_speed && stream.playback_speed !== 1.0 ? `<span class="stream-card-prop">${stream.playback_speed}×</span>` : ''}
</div>`;
}
return wrapCard({
@@ -1427,6 +1438,7 @@ function renderPictureSourcesList(streams) {
const rawStreams = streams.filter(s => s.stream_type === 'raw');
const processedStreams = streams.filter(s => s.stream_type === 'processed');
const staticImageStreams = streams.filter(s => s.stream_type === 'static_image');
const videoStreams = streams.filter(s => s.stream_type === 'video');
const multichannelSources = _cachedAudioSources.filter(s => s.source_type === 'multichannel');
const monoSources = _cachedAudioSources.filter(s => s.source_type === 'mono');
@@ -1445,6 +1457,7 @@ function renderPictureSourcesList(streams) {
{ key: 'raw', icon: getPictureSourceIcon('raw'), titleKey: 'streams.group.raw', count: rawStreams.length },
{ key: 'raw_templates', icon: ICON_CAPTURE_TEMPLATE, titleKey: 'streams.group.raw_templates', count: _cachedCaptureTemplates.length },
{ key: 'static_image', icon: getPictureSourceIcon('static_image'), titleKey: 'streams.group.static_image', count: staticImageStreams.length },
{ key: 'video', icon: getPictureSourceIcon('video'), titleKey: 'streams.group.video', count: videoStreams.length },
{ key: 'processed', icon: getPictureSourceIcon('processed'), titleKey: 'streams.group.processed', count: processedStreams.length },
{ key: 'proc_templates', icon: ICON_PP_TEMPLATE, titleKey: 'streams.group.proc_templates', count: _cachedPPTemplates.length },
{ key: 'css_processing', icon: ICON_CSPT, titleKey: 'streams.group.css_processing', count: csptTemplates.length },
@@ -1467,6 +1480,10 @@ function renderPictureSourcesList(streams) {
key: 'static_image', icon: getPictureSourceIcon('static_image'), titleKey: 'streams.group.static_image',
count: staticImageStreams.length,
},
{
key: 'video', icon: getPictureSourceIcon('video'), titleKey: 'streams.group.video',
count: videoStreams.length,
},
{
key: 'processing_group', icon: getPictureSourceIcon('processed'), titleKey: 'tree.group.processing',
children: [
@@ -1590,6 +1607,7 @@ function renderPictureSourcesList(streams) {
const monoItems = csAudioMono.applySortOrder(monoSources.map(s => ({ key: s.id, html: renderAudioSourceCard(s) })));
const audioTemplateItems = csAudioTemplates.applySortOrder(_cachedAudioTemplates.map(t => ({ key: t.id, html: renderAudioTemplateCard(t) })));
const staticItems = csStaticStreams.applySortOrder(staticImageStreams.map(s => ({ key: s.id, html: renderStreamCard(s) })));
const videoItems = csVideoStreams.applySortOrder(videoStreams.map(s => ({ key: s.id, html: renderStreamCard(s) })));
const colorStripItems = csColorStrips.applySortOrder(colorStrips.map(s => ({ key: s.id, html: createColorStripCard(s, pictureSourceMap, audioSourceMap) })));
const valueItems = csValueSources.applySortOrder(_cachedValueSources.map(s => ({ key: s.id, html: createValueSourceCard(s) })));
const syncClockItems = csSyncClocks.applySortOrder(_cachedSyncClocks.map(s => ({ key: s.id, html: createSyncClockCard(s) })));
@@ -1601,6 +1619,7 @@ function renderPictureSourcesList(streams) {
raw: rawStreams.length,
raw_templates: _cachedCaptureTemplates.length,
static_image: staticImageStreams.length,
video: videoStreams.length,
processed: processedStreams.length,
proc_templates: _cachedPPTemplates.length,
css_processing: csptTemplates.length,
@@ -1619,6 +1638,7 @@ function renderPictureSourcesList(streams) {
csAudioMono.reconcile(monoItems);
csAudioTemplates.reconcile(audioTemplateItems);
csStaticStreams.reconcile(staticItems);
csVideoStreams.reconcile(videoItems);
csValueSources.reconcile(valueItems);
csSyncClocks.reconcile(syncClockItems);
} else {
@@ -1634,12 +1654,13 @@ function renderPictureSourcesList(streams) {
else if (tab.key === 'audio') panelContent = csAudioMulti.render(multiItems) + csAudioMono.render(monoItems) + csAudioTemplates.render(audioTemplateItems);
else if (tab.key === 'value') panelContent = csValueSources.render(valueItems);
else if (tab.key === 'sync') panelContent = csSyncClocks.render(syncClockItems);
else if (tab.key === 'video') panelContent = csVideoStreams.render(videoItems);
else panelContent = csStaticStreams.render(staticItems);
return `<div class="stream-tab-panel${tab.key === activeTab ? ' active' : ''}" id="stream-tab-${tab.key}">${panelContent}</div>`;
}).join('');
container.innerHTML = panels;
CardSection.bindAll([csRawStreams, csRawTemplates, csProcStreams, csProcTemplates, csCSPTemplates, csColorStrips, csAudioMulti, csAudioMono, csAudioTemplates, csStaticStreams, csValueSources, csSyncClocks]);
CardSection.bindAll([csRawStreams, csRawTemplates, csProcStreams, csProcTemplates, csCSPTemplates, csColorStrips, csAudioMulti, csAudioMono, csAudioTemplates, csStaticStreams, csVideoStreams, csValueSources, csSyncClocks]);
// Render tree sidebar with expand/collapse buttons
_streamsTree.setExtraHtml(`<button class="btn-expand-collapse" onclick="expandAllStreamSections()" data-i18n-title="section.expand_all" title="${t('section.expand_all')}">⊞</button><button class="btn-expand-collapse" onclick="collapseAllStreamSections()" data-i18n-title="section.collapse_all" title="${t('section.collapse_all')}">⊟</button><button class="tutorial-trigger-btn" onclick="startSourcesTutorial()" data-i18n-title="tour.restart" title="${t('tour.restart')}">${ICON_HELP}</button>`);
@@ -1647,6 +1668,7 @@ function renderPictureSourcesList(streams) {
_streamsTree.observeSections('streams-list', {
'raw-streams': 'raw', 'raw-templates': 'raw_templates',
'static-streams': 'static_image',
'video-streams': 'video',
'proc-streams': 'processed', 'proc-templates': 'proc_templates',
'css-proc-templates': 'css_processing',
'color-strips': 'color_strip',
@@ -1662,6 +1684,7 @@ export function onStreamTypeChange() {
document.getElementById('stream-raw-fields').style.display = streamType === 'raw' ? '' : 'none';
document.getElementById('stream-processed-fields').style.display = streamType === 'processed' ? '' : 'none';
document.getElementById('stream-static-image-fields').style.display = streamType === 'static_image' ? '' : 'none';
document.getElementById('stream-video-fields').style.display = streamType === 'video' ? '' : 'none';
}
export function onStreamDisplaySelected(displayIndex, display) {
@@ -1705,7 +1728,7 @@ function _autoGenerateStreamName() {
export async function showAddStreamModal(presetType, cloneData = null) {
const streamType = (cloneData && cloneData.stream_type) || presetType || 'raw';
const titleKeys = { raw: 'streams.add.raw', processed: 'streams.add.processed', static_image: 'streams.add.static_image' };
const titleKeys = { raw: 'streams.add.raw', processed: 'streams.add.processed', static_image: 'streams.add.static_image', video: 'streams.add.video' };
document.getElementById('stream-modal-title').innerHTML = `${getPictureSourceIcon(streamType)} ${t(titleKeys[streamType] || 'streams.add')}`;
document.getElementById('stream-form').reset();
document.getElementById('stream-id').value = '';
@@ -1754,6 +1777,16 @@ export async function showAddStreamModal(presetType, cloneData = null) {
} else if (streamType === 'static_image') {
document.getElementById('stream-image-source').value = cloneData.image_source || '';
if (cloneData.image_source) validateStaticImage();
} else if (streamType === 'video') {
document.getElementById('stream-video-url').value = cloneData.url || '';
document.getElementById('stream-video-loop').checked = cloneData.loop !== false;
document.getElementById('stream-video-speed').value = cloneData.playback_speed || 1.0;
const cloneSpeedLabel = document.getElementById('stream-video-speed-value');
if (cloneSpeedLabel) cloneSpeedLabel.textContent = cloneData.playback_speed || 1.0;
document.getElementById('stream-video-fps').value = cloneData.target_fps || 30;
document.getElementById('stream-video-start').value = cloneData.start_time || '';
document.getElementById('stream-video-end').value = cloneData.end_time || '';
document.getElementById('stream-video-resolution').value = cloneData.resolution_limit || '';
}
}
@@ -1780,7 +1813,7 @@ export async function editStream(streamId) {
if (!response.ok) throw new Error(`Failed to load stream: ${response.status}`);
const stream = await response.json();
const editTitleKeys = { raw: 'streams.edit.raw', processed: 'streams.edit.processed', static_image: 'streams.edit.static_image' };
const editTitleKeys = { raw: 'streams.edit.raw', processed: 'streams.edit.processed', static_image: 'streams.edit.static_image', video: 'streams.edit.video' };
document.getElementById('stream-modal-title').innerHTML = `${getPictureSourceIcon(stream.stream_type)} ${t(editTitleKeys[stream.stream_type] || 'streams.edit')}`;
document.getElementById('stream-id').value = streamId;
document.getElementById('stream-name').value = stream.name;
@@ -1814,6 +1847,16 @@ export async function editStream(streamId) {
} else if (stream.stream_type === 'static_image') {
document.getElementById('stream-image-source').value = stream.image_source || '';
if (stream.image_source) validateStaticImage();
} else if (stream.stream_type === 'video') {
document.getElementById('stream-video-url').value = stream.url || '';
document.getElementById('stream-video-loop').checked = stream.loop !== false;
document.getElementById('stream-video-speed').value = stream.playback_speed || 1.0;
const speedLabel = document.getElementById('stream-video-speed-value');
if (speedLabel) speedLabel.textContent = stream.playback_speed || 1.0;
document.getElementById('stream-video-fps').value = stream.target_fps || 30;
document.getElementById('stream-video-start').value = stream.start_time || '';
document.getElementById('stream-video-end').value = stream.end_time || '';
document.getElementById('stream-video-resolution').value = stream.resolution_limit || '';
}
_showStreamModalLoading(false);
@@ -1993,6 +2036,19 @@ export async function saveStream() {
const imageSource = document.getElementById('stream-image-source').value.trim();
if (!imageSource) { showToast(t('streams.error.required'), 'error'); return; }
payload.image_source = imageSource;
} else if (streamType === 'video') {
const url = document.getElementById('stream-video-url').value.trim();
if (!url) { showToast(t('streams.error.required'), 'error'); return; }
payload.url = url;
payload.loop = document.getElementById('stream-video-loop').checked;
payload.playback_speed = parseFloat(document.getElementById('stream-video-speed').value) || 1.0;
payload.target_fps = parseInt(document.getElementById('stream-video-fps').value) || 30;
const startTime = parseFloat(document.getElementById('stream-video-start').value);
if (!isNaN(startTime) && startTime > 0) payload.start_time = startTime;
const endTime = parseFloat(document.getElementById('stream-video-end').value);
if (!isNaN(endTime) && endTime > 0) payload.end_time = endTime;
const resLimit = parseInt(document.getElementById('stream-video-resolution').value);
if (!isNaN(resLimit) && resLimit > 0) payload.resolution_limit = resLimit;
}
try {

View File

@@ -553,6 +553,20 @@
"streams.add.static_image": "Add Static Image Source",
"streams.edit.static_image": "Edit Static Image Source",
"streams.type.static_image": "Static Image",
"streams.group.video": "Video",
"streams.add.video": "Add Video Source",
"streams.edit.video": "Edit Video Source",
"picture_source.type.video": "Video",
"picture_source.type.video.desc": "Stream frames from video file, URL, or YouTube",
"picture_source.video.url": "Video URL:",
"picture_source.video.url.hint": "Local file path, HTTP URL, or YouTube URL",
"picture_source.video.url.placeholder": "https://example.com/video.mp4",
"picture_source.video.loop": "Loop:",
"picture_source.video.speed": "Playback Speed:",
"picture_source.video.start_time": "Start Time (s):",
"picture_source.video.end_time": "End Time (s):",
"picture_source.video.resolution_limit": "Max Width (px):",
"picture_source.video.resolution_limit.hint": "Downscale video at decode time for performance",
"streams.image_source": "Image Source:",
"streams.image_source.placeholder": "https://example.com/image.jpg or C:\\path\\to\\image.png",
"streams.image_source.hint": "Enter a URL (http/https) or local file path to an image",

View File

@@ -553,6 +553,20 @@
"streams.add.static_image": "Добавить статическое изображение (источник)",
"streams.edit.static_image": "Редактировать статическое изображение (источник)",
"streams.type.static_image": "Статическое изображение",
"streams.group.video": "Видео",
"streams.add.video": "Добавить видеоисточник",
"streams.edit.video": "Редактировать видеоисточник",
"picture_source.type.video": "Видео",
"picture_source.type.video.desc": "Потоковые кадры из видеофайла, URL или YouTube",
"picture_source.video.url": "URL видео:",
"picture_source.video.url.hint": "Локальный файл, HTTP URL или YouTube URL",
"picture_source.video.url.placeholder": "https://example.com/video.mp4",
"picture_source.video.loop": "Зацикливание:",
"picture_source.video.speed": "Скорость воспроизведения:",
"picture_source.video.start_time": "Время начала (с):",
"picture_source.video.end_time": "Время окончания (с):",
"picture_source.video.resolution_limit": "Макс. ширина (px):",
"picture_source.video.resolution_limit.hint": "Уменьшение видео при декодировании для производительности",
"streams.image_source": "Источник изображения:",
"streams.image_source.placeholder": "https://example.com/image.jpg или C:\\path\\to\\image.png",
"streams.image_source.hint": "Введите URL (http/https) или локальный путь к изображению",

View File

@@ -553,6 +553,20 @@
"streams.add.static_image": "添加静态图片源",
"streams.edit.static_image": "编辑静态图片源",
"streams.type.static_image": "静态图片",
"streams.group.video": "视频",
"streams.add.video": "添加视频源",
"streams.edit.video": "编辑视频源",
"picture_source.type.video": "视频",
"picture_source.type.video.desc": "从视频文件、URL或YouTube流式传输帧",
"picture_source.video.url": "视频URL",
"picture_source.video.url.hint": "本地文件路径、HTTP URL或YouTube URL",
"picture_source.video.url.placeholder": "https://example.com/video.mp4",
"picture_source.video.loop": "循环:",
"picture_source.video.speed": "播放速度:",
"picture_source.video.start_time": "开始时间(秒):",
"picture_source.video.end_time": "结束时间(秒):",
"picture_source.video.resolution_limit": "最大宽度(像素):",
"picture_source.video.resolution_limit.hint": "解码时缩小视频以提高性能",
"streams.image_source": "图片源:",
"streams.image_source.placeholder": "https://example.com/image.jpg 或 C:\\path\\to\\image.png",
"streams.image_source.hint": "输入图片的 URLhttp/https或本地文件路径",

View File

@@ -13,11 +13,12 @@ class PictureSource:
- "raw": captures from a display using a capture engine template at a target FPS
- "processed": applies postprocessing to another picture source
- "static_image": returns a static frame from a URL or local file path
- "video": decodes frames from a video file, URL, or YouTube link
"""
id: str
name: str
stream_type: str # "raw", "processed", or "static_image"
stream_type: str # "raw", "processed", "static_image", or "video"
created_at: datetime
updated_at: datetime
description: Optional[str] = None
@@ -40,6 +41,14 @@ class PictureSource:
"source_stream_id": None,
"postprocessing_template_id": None,
"image_source": None,
# Video fields
"url": None,
"loop": None,
"playback_speed": None,
"start_time": None,
"end_time": None,
"resolution_limit": None,
"clock_id": None,
}
@staticmethod
@@ -79,6 +88,19 @@ class PictureSource:
created_at=created_at, updated_at=updated_at, description=description, tags=tags,
image_source=data.get("image_source") or "",
)
elif stream_type == "video":
return VideoCaptureSource(
id=sid, name=name, stream_type=stream_type,
created_at=created_at, updated_at=updated_at, description=description, tags=tags,
url=data.get("url") or "",
loop=data.get("loop", True),
playback_speed=data.get("playback_speed", 1.0),
start_time=data.get("start_time"),
end_time=data.get("end_time"),
resolution_limit=data.get("resolution_limit"),
clock_id=data.get("clock_id"),
target_fps=data.get("target_fps") or 30,
)
else:
return ScreenCapturePictureSource(
id=sid, name=name, stream_type=stream_type,
@@ -129,3 +151,29 @@ class StaticImagePictureSource(PictureSource):
d = super().to_dict()
d["image_source"] = self.image_source
return d
@dataclass
class VideoCaptureSource(PictureSource):
    """Picture source that decodes frames from a video file, HTTP URL, or YouTube link."""

    # Location of the video: local file path, direct HTTP(S) URL, or YouTube link.
    url: str = ""
    # Restart playback from the start of the trim range once the end is reached.
    loop: bool = True
    # Multiplier on the video's native rate (1.0 = realtime).
    playback_speed: float = 1.0
    # Optional trim range in seconds; None means "from the beginning" / "to the end".
    start_time: Optional[float] = None
    end_time: Optional[float] = None
    # Optional maximum frame width in pixels; frames wider than this are
    # downscaled at decode time.
    resolution_limit: Optional[int] = None
    # Optional sync-clock id for frame-accurate seeking.
    clock_id: Optional[str] = None
    # Frames per second delivered to consumers.
    target_fps: int = 30

    def to_dict(self) -> dict:
        """Serialize to a plain dict, extending the base representation with video fields."""
        result = super().to_dict()
        result.update(
            url=self.url,
            loop=self.loop,
            playback_speed=self.playback_speed,
            start_time=self.start_time,
            end_time=self.end_time,
            resolution_limit=self.resolution_limit,
            clock_id=self.clock_id,
            target_fps=self.target_fps,
        )
        return result

View File

@@ -11,6 +11,7 @@ from wled_controller.storage.picture_source import (
ProcessedPictureSource,
ScreenCapturePictureSource,
StaticImagePictureSource,
VideoCaptureSource,
)
from wled_controller.utils import get_logger
@@ -83,24 +84,21 @@ class PictureSourceStore(BaseJsonStore[PictureSource]):
image_source: Optional[str] = None,
description: Optional[str] = None,
tags: Optional[List[str]] = None,
# Video fields
url: Optional[str] = None,
loop: bool = True,
playback_speed: float = 1.0,
start_time: Optional[float] = None,
end_time: Optional[float] = None,
resolution_limit: Optional[int] = None,
clock_id: Optional[str] = None,
) -> PictureSource:
"""Create a new picture source.
Args:
name: Stream name
stream_type: "raw", "processed", or "static_image"
display_index: Display index (raw streams)
capture_template_id: Capture template ID (raw streams)
target_fps: Target FPS (raw streams)
source_stream_id: Source stream ID (processed streams)
postprocessing_template_id: Postprocessing template ID (processed streams)
image_source: URL or file path (static_image streams)
description: Optional description
Raises:
ValueError: If validation fails or cycle detected
"""
if stream_type not in ("raw", "processed", "static_image"):
if stream_type not in ("raw", "processed", "static_image", "video"):
raise ValueError(f"Invalid stream type: {stream_type}")
if stream_type == "raw":
@@ -124,6 +122,9 @@ class PictureSourceStore(BaseJsonStore[PictureSource]):
elif stream_type == "static_image":
if not image_source:
raise ValueError("Static image streams require image_source")
elif stream_type == "video":
if not url:
raise ValueError("Video streams require url")
# Check for duplicate name
self._check_name_unique(name)
@@ -151,6 +152,18 @@ class PictureSourceStore(BaseJsonStore[PictureSource]):
source_stream_id=source_stream_id, # type: ignore[arg-type]
postprocessing_template_id=postprocessing_template_id, # type: ignore[arg-type]
)
elif stream_type == "video":
stream = VideoCaptureSource(
**common,
url=url, # type: ignore[arg-type]
loop=loop,
playback_speed=playback_speed,
start_time=start_time,
end_time=end_time,
resolution_limit=resolution_limit,
clock_id=clock_id,
target_fps=target_fps or 30,
)
else:
stream = StaticImagePictureSource(
**common,
@@ -175,6 +188,14 @@ class PictureSourceStore(BaseJsonStore[PictureSource]):
image_source: Optional[str] = None,
description: Optional[str] = None,
tags: Optional[List[str]] = None,
# Video fields
url: Optional[str] = None,
loop: Optional[bool] = None,
playback_speed: Optional[float] = None,
start_time: Optional[float] = None,
end_time: Optional[float] = None,
resolution_limit: Optional[int] = None,
clock_id: Optional[str] = None,
) -> PictureSource:
"""Update an existing picture source.
@@ -214,6 +235,23 @@ class PictureSourceStore(BaseJsonStore[PictureSource]):
elif isinstance(stream, StaticImagePictureSource):
if image_source is not None:
stream.image_source = image_source
elif isinstance(stream, VideoCaptureSource):
if url is not None:
stream.url = url
if loop is not None:
stream.loop = loop
if playback_speed is not None:
stream.playback_speed = playback_speed
if start_time is not None:
stream.start_time = start_time if start_time > 0 else None
if end_time is not None:
stream.end_time = end_time if end_time > 0 else None
if resolution_limit is not None:
stream.resolution_limit = resolution_limit if resolution_limit > 0 else None
if clock_id is not None:
stream.clock_id = resolve_ref(clock_id, stream.clock_id)
if target_fps is not None:
stream.target_fps = target_fps
stream.updated_at = datetime.now(timezone.utc)

View File

@@ -92,6 +92,52 @@
<div id="stream-image-validation-status" class="validation-status" style="display: none;"></div>
</div>
<div id="stream-video-fields" style="display: none;">
<!-- Video source URL (local path, HTTP URL, or YouTube) -->
<div class="form-group">
<div class="label-row">
<label for="stream-video-url" data-i18n="picture_source.video.url">Video URL:</label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?">?</button>
</div>
<small class="input-hint" style="display:none" data-i18n="picture_source.video.url.hint">Local file path, HTTP URL, or YouTube URL</small>
<input type="text" id="stream-video-url" data-i18n-placeholder="picture_source.video.url.placeholder" placeholder="https://example.com/video.mp4">
</div>
<!-- Loop playback toggle -->
<div class="form-group settings-toggle-group">
<label data-i18n="picture_source.video.loop">Loop:</label>
<label class="settings-toggle">
<input type="checkbox" id="stream-video-loop" checked>
<span class="settings-toggle-slider"></span>
</label>
</div>
<div class="form-row">
<div class="form-group" style="flex:1">
<!-- data-i18n sits on an inner text-only span so translation cannot replace the
     live #stream-video-speed-value span (the slider's oninput writes into it) -->
<label for="stream-video-speed"><span data-i18n="picture_source.video.speed">Playback Speed:</span> <span id="stream-video-speed-value">1.0</span>×</label>
<input type="range" id="stream-video-speed" min="0.1" max="5" step="0.1" value="1.0" oninput="document.getElementById('stream-video-speed-value').textContent=this.value">
</div>
<div class="form-group" style="flex:1">
<label for="stream-video-fps" data-i18n="streams.target_fps">Target FPS:</label>
<input type="number" id="stream-video-fps" min="1" max="60" step="1" value="30">
</div>
</div>
<!-- Optional trim range in seconds; empty = full video -->
<div class="form-row">
<div class="form-group" style="flex:1">
<label for="stream-video-start" data-i18n="picture_source.video.start_time">Start Time (s):</label>
<input type="number" id="stream-video-start" min="0" step="0.1">
</div>
<div class="form-group" style="flex:1">
<label for="stream-video-end" data-i18n="picture_source.video.end_time">End Time (s):</label>
<input type="number" id="stream-video-end" min="0" step="0.1">
</div>
</div>
<!-- Optional decode-time downscale limit -->
<div class="form-group">
<div class="label-row">
<label for="stream-video-resolution" data-i18n="picture_source.video.resolution_limit">Max Width (px):</label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?">?</button>
</div>
<small class="input-hint" style="display:none" data-i18n="picture_source.video.resolution_limit.hint">Downscale video at decode time for performance</small>
<input type="number" id="stream-video-resolution" min="64" max="7680" step="1" placeholder="720">
</div>
</div>
<div class="form-group">
<label for="stream-description" data-i18n="streams.description_label">Description (optional):</label>
<input type="text" id="stream-description" data-i18n-placeholder="streams.description_placeholder" placeholder="Describe this source...">