Add static image picture stream type with auto-validating UI

Introduces a new "static_image" stream type that loads a frame from a URL
or local file path, enabling LED testing with known images or displaying
static content. Includes a validate-image API endpoint, auto-validation on
blur/enter/paste with caching, shows capture template names on stream
cards, and conditionally displays test stats for single-frame results.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-11 19:57:43 +03:00
parent 4f9c30ef06
commit e0877a9b16
10 changed files with 566 additions and 181 deletions

View File

@@ -54,6 +54,8 @@ from wled_controller.api.schemas import (
PictureStreamListResponse, PictureStreamListResponse,
PictureStreamTestRequest, PictureStreamTestRequest,
PPTemplateTestRequest, PPTemplateTestRequest,
ImageValidateRequest,
ImageValidateResponse,
) )
from wled_controller.config import get_config from wled_controller.config import get_config
from wled_controller.core.processor_manager import ProcessorManager, ProcessingSettings from wled_controller.core.processor_manager import ProcessorManager, ProcessingSettings
@@ -1284,7 +1286,29 @@ async def test_pp_template(
raw_stream = chain["raw_stream"] raw_stream = chain["raw_stream"]
# Get capture template from raw stream if raw_stream.stream_type == "static_image":
# Static image: load directly
from pathlib import Path
source = raw_stream.image_source
start_time = time.perf_counter()
if source.startswith(("http://", "https://")):
async with httpx.AsyncClient(timeout=15, follow_redirects=True) as client:
resp = await client.get(source)
resp.raise_for_status()
pil_image = Image.open(io.BytesIO(resp.content)).convert("RGB")
else:
path = Path(source)
if not path.exists():
raise HTTPException(status_code=400, detail=f"Image file not found: {source}")
pil_image = Image.open(path).convert("RGB")
actual_duration = time.perf_counter() - start_time
frame_count = 1
total_capture_time = actual_duration
else:
# Raw capture stream: use engine
try: try:
capture_template = template_store.get_template(raw_stream.capture_template_id) capture_template = template_store.get_template(raw_stream.capture_template_id)
except ValueError: except ValueError:
@@ -1295,14 +1319,12 @@ async def test_pp_template(
display_index = raw_stream.display_index display_index = raw_stream.display_index
# Validate engine
if capture_template.engine_type not in EngineRegistry.get_available_engines(): if capture_template.engine_type not in EngineRegistry.get_available_engines():
raise HTTPException( raise HTTPException(
status_code=400, status_code=400,
detail=f"Engine '{capture_template.engine_type}' is not available on this system", detail=f"Engine '{capture_template.engine_type}' is not available on this system",
) )
# Check display lock
locked_device_id = processor_manager.get_display_lock_info(display_index) locked_device_id = processor_manager.get_display_lock_info(display_index)
if locked_device_id: if locked_device_id:
try: try:
@@ -1316,7 +1338,6 @@ async def test_pp_template(
f"Please stop the device processing before testing.", f"Please stop the device processing before testing.",
) )
# Create engine and run test
engine = EngineRegistry.create_engine(capture_template.engine_type, capture_template.engine_config) engine = EngineRegistry.create_engine(capture_template.engine_type, capture_template.engine_config)
logger.info(f"Starting {test_request.capture_duration}s PP template test for {template_id} using stream {test_request.source_stream_id}") logger.info(f"Starting {test_request.capture_duration}s PP template test for {template_id} using stream {test_request.source_stream_id}")
@@ -1342,7 +1363,6 @@ async def test_pp_template(
if last_frame is None: if last_frame is None:
raise RuntimeError("No frames captured during test") raise RuntimeError("No frames captured during test")
# Convert to PIL Image
if isinstance(last_frame.image, np.ndarray): if isinstance(last_frame.image, np.ndarray):
pil_image = Image.fromarray(last_frame.image) pil_image = Image.fromarray(last_frame.image)
else: else:
@@ -1435,6 +1455,7 @@ def _stream_to_response(s) -> PictureStreamResponse:
target_fps=s.target_fps, target_fps=s.target_fps,
source_stream_id=s.source_stream_id, source_stream_id=s.source_stream_id,
postprocessing_template_id=s.postprocessing_template_id, postprocessing_template_id=s.postprocessing_template_id,
image_source=s.image_source,
created_at=s.created_at, created_at=s.created_at,
updated_at=s.updated_at, updated_at=s.updated_at,
description=s.description, description=s.description,
@@ -1456,6 +1477,53 @@ async def list_picture_streams(
raise HTTPException(status_code=500, detail=str(e)) raise HTTPException(status_code=500, detail=str(e))
@router.post("/api/v1/picture-streams/validate-image", response_model=ImageValidateResponse, tags=["Picture Streams"])
async def validate_image(
    data: ImageValidateRequest,
    _auth: AuthRequired,
):
    """Validate an image source (URL or file path) and return a preview thumbnail.

    Accepts either an http(s) URL or a local filesystem path. On success the
    response carries the image dimensions plus a base64 JPEG data-URI preview
    (long side capped at 320px). On failure, ``valid`` is False and ``error``
    describes the problem; HTTP errors are never raised to the caller.
    """
    try:
        from pathlib import Path

        source = data.image_source.strip()
        if not source:
            return ImageValidateResponse(valid=False, error="Image source is empty")

        if source.startswith(("http://", "https://")):
            # Remote source: bounded timeout, redirects followed.
            async with httpx.AsyncClient(timeout=15, follow_redirects=True) as client:
                fetched = await client.get(source)
                fetched.raise_for_status()
                loaded = Image.open(io.BytesIO(fetched.content))
        else:
            local_path = Path(source)
            if not local_path.exists():
                return ImageValidateResponse(valid=False, error=f"File not found: {source}")
            loaded = Image.open(local_path)

        # Normalize to RGB before measuring/encoding so mode quirks (P, RGBA, L)
        # don't affect the JPEG preview.
        rgb_image = loaded.convert("RGB")
        width, height = rgb_image.size

        # Build the thumbnail preview on a copy so the original stays intact.
        preview_img = rgb_image.copy()
        preview_img.thumbnail((320, 320), Image.Resampling.LANCZOS)
        encoded = io.BytesIO()
        preview_img.save(encoded, format="JPEG", quality=80)
        data_uri = f"data:image/jpeg;base64,{base64.b64encode(encoded.getvalue()).decode()}"

        return ImageValidateResponse(
            valid=True, width=width, height=height, preview=data_uri
        )
    except httpx.HTTPStatusError as e:
        return ImageValidateResponse(valid=False, error=f"HTTP {e.response.status_code}: {e.response.reason_phrase}")
    except httpx.RequestError as e:
        return ImageValidateResponse(valid=False, error=f"Request failed: {e}")
    except Exception as e:
        # Best-effort validation endpoint: report any other failure as invalid
        # rather than surfacing a 500 to the UI.
        return ImageValidateResponse(valid=False, error=str(e))
@router.post("/api/v1/picture-streams", response_model=PictureStreamResponse, tags=["Picture Streams"], status_code=201) @router.post("/api/v1/picture-streams", response_model=PictureStreamResponse, tags=["Picture Streams"], status_code=201)
async def create_picture_stream( async def create_picture_stream(
data: PictureStreamCreate, data: PictureStreamCreate,
@@ -1493,6 +1561,7 @@ async def create_picture_stream(
target_fps=data.target_fps, target_fps=data.target_fps,
source_stream_id=data.source_stream_id, source_stream_id=data.source_stream_id,
postprocessing_template_id=data.postprocessing_template_id, postprocessing_template_id=data.postprocessing_template_id,
image_source=data.image_source,
description=data.description, description=data.description,
) )
return _stream_to_response(stream) return _stream_to_response(stream)
@@ -1536,6 +1605,7 @@ async def update_picture_stream(
target_fps=data.target_fps, target_fps=data.target_fps,
source_stream_id=data.source_stream_id, source_stream_id=data.source_stream_id,
postprocessing_template_id=data.postprocessing_template_id, postprocessing_template_id=data.postprocessing_template_id,
image_source=data.image_source,
description=data.description, description=data.description,
) )
return _stream_to_response(stream) return _stream_to_response(stream)
@@ -1600,7 +1670,30 @@ async def test_picture_stream(
raw_stream = chain["raw_stream"] raw_stream = chain["raw_stream"]
# Get capture template from raw stream if raw_stream.stream_type == "static_image":
# Static image stream: load image directly, no engine needed
from pathlib import Path
source = raw_stream.image_source
start_time = time.perf_counter()
if source.startswith(("http://", "https://")):
async with httpx.AsyncClient(timeout=15, follow_redirects=True) as client:
resp = await client.get(source)
resp.raise_for_status()
pil_image = Image.open(io.BytesIO(resp.content)).convert("RGB")
else:
path = Path(source)
if not path.exists():
raise HTTPException(status_code=400, detail=f"Image file not found: {source}")
pil_image = Image.open(path).convert("RGB")
actual_duration = time.perf_counter() - start_time
frame_count = 1
total_capture_time = actual_duration
else:
# Raw capture stream: use engine
try: try:
capture_template = template_store.get_template(raw_stream.capture_template_id) capture_template = template_store.get_template(raw_stream.capture_template_id)
except ValueError: except ValueError:
@@ -1611,14 +1704,12 @@ async def test_picture_stream(
display_index = raw_stream.display_index display_index = raw_stream.display_index
# Validate engine
if capture_template.engine_type not in EngineRegistry.get_available_engines(): if capture_template.engine_type not in EngineRegistry.get_available_engines():
raise HTTPException( raise HTTPException(
status_code=400, status_code=400,
detail=f"Engine '{capture_template.engine_type}' is not available on this system", detail=f"Engine '{capture_template.engine_type}' is not available on this system",
) )
# Check display lock
locked_device_id = processor_manager.get_display_lock_info(display_index) locked_device_id = processor_manager.get_display_lock_info(display_index)
if locked_device_id: if locked_device_id:
try: try:
@@ -1632,7 +1723,6 @@ async def test_picture_stream(
f"Please stop the device processing before testing.", f"Please stop the device processing before testing.",
) )
# Create engine and run test
engine = EngineRegistry.create_engine(capture_template.engine_type, capture_template.engine_config) engine = EngineRegistry.create_engine(capture_template.engine_type, capture_template.engine_config)
logger.info(f"Starting {test_request.capture_duration}s stream test for {stream_id}") logger.info(f"Starting {test_request.capture_duration}s stream test for {stream_id}")
@@ -1658,7 +1748,6 @@ async def test_picture_stream(
if last_frame is None: if last_frame is None:
raise RuntimeError("No frames captured during test") raise RuntimeError("No frames captured during test")
# Convert to PIL Image
if isinstance(last_frame.image, np.ndarray): if isinstance(last_frame.image, np.ndarray):
pil_image = Image.fromarray(last_frame.image) pil_image = Image.fromarray(last_frame.image)
else: else:

View File

@@ -405,12 +405,13 @@ class PictureStreamCreate(BaseModel):
"""Request to create a picture stream.""" """Request to create a picture stream."""
name: str = Field(description="Stream name", min_length=1, max_length=100) name: str = Field(description="Stream name", min_length=1, max_length=100)
stream_type: Literal["raw", "processed"] = Field(description="Stream type") stream_type: Literal["raw", "processed", "static_image"] = Field(description="Stream type")
display_index: Optional[int] = Field(None, description="Display index (raw streams)", ge=0) display_index: Optional[int] = Field(None, description="Display index (raw streams)", ge=0)
capture_template_id: Optional[str] = Field(None, description="Capture template ID (raw streams)") capture_template_id: Optional[str] = Field(None, description="Capture template ID (raw streams)")
target_fps: Optional[int] = Field(None, description="Target FPS (raw streams)", ge=10, le=90) target_fps: Optional[int] = Field(None, description="Target FPS (raw streams)", ge=10, le=90)
source_stream_id: Optional[str] = Field(None, description="Source stream ID (processed streams)") source_stream_id: Optional[str] = Field(None, description="Source stream ID (processed streams)")
postprocessing_template_id: Optional[str] = Field(None, description="Postprocessing template ID (processed streams)") postprocessing_template_id: Optional[str] = Field(None, description="Postprocessing template ID (processed streams)")
image_source: Optional[str] = Field(None, description="Image URL or file path (static_image streams)")
description: Optional[str] = Field(None, description="Stream description", max_length=500) description: Optional[str] = Field(None, description="Stream description", max_length=500)
@@ -423,6 +424,7 @@ class PictureStreamUpdate(BaseModel):
target_fps: Optional[int] = Field(None, description="Target FPS (raw streams)", ge=10, le=90) target_fps: Optional[int] = Field(None, description="Target FPS (raw streams)", ge=10, le=90)
source_stream_id: Optional[str] = Field(None, description="Source stream ID (processed streams)") source_stream_id: Optional[str] = Field(None, description="Source stream ID (processed streams)")
postprocessing_template_id: Optional[str] = Field(None, description="Postprocessing template ID (processed streams)") postprocessing_template_id: Optional[str] = Field(None, description="Postprocessing template ID (processed streams)")
image_source: Optional[str] = Field(None, description="Image URL or file path (static_image streams)")
description: Optional[str] = Field(None, description="Stream description", max_length=500) description: Optional[str] = Field(None, description="Stream description", max_length=500)
@@ -431,12 +433,13 @@ class PictureStreamResponse(BaseModel):
id: str = Field(description="Stream ID") id: str = Field(description="Stream ID")
name: str = Field(description="Stream name") name: str = Field(description="Stream name")
stream_type: str = Field(description="Stream type (raw or processed)") stream_type: str = Field(description="Stream type (raw, processed, or static_image)")
display_index: Optional[int] = Field(None, description="Display index") display_index: Optional[int] = Field(None, description="Display index")
capture_template_id: Optional[str] = Field(None, description="Capture template ID") capture_template_id: Optional[str] = Field(None, description="Capture template ID")
target_fps: Optional[int] = Field(None, description="Target FPS") target_fps: Optional[int] = Field(None, description="Target FPS")
source_stream_id: Optional[str] = Field(None, description="Source stream ID") source_stream_id: Optional[str] = Field(None, description="Source stream ID")
postprocessing_template_id: Optional[str] = Field(None, description="Postprocessing template ID") postprocessing_template_id: Optional[str] = Field(None, description="Postprocessing template ID")
image_source: Optional[str] = Field(None, description="Image URL or file path")
created_at: datetime = Field(description="Creation timestamp") created_at: datetime = Field(description="Creation timestamp")
updated_at: datetime = Field(description="Last update timestamp") updated_at: datetime = Field(description="Last update timestamp")
description: Optional[str] = Field(None, description="Stream description") description: Optional[str] = Field(None, description="Stream description")
@@ -461,3 +464,19 @@ class PPTemplateTestRequest(BaseModel):
source_stream_id: str = Field(description="ID of the source picture stream to capture from") source_stream_id: str = Field(description="ID of the source picture stream to capture from")
capture_duration: float = Field(default=5.0, ge=1.0, le=30.0, description="Duration to capture in seconds") capture_duration: float = Field(default=5.0, ge=1.0, le=30.0, description="Duration to capture in seconds")
class ImageValidateRequest(BaseModel):
    """Request to validate an image source (URL or file path)."""

    # Either an http(s) URL or a local filesystem path; the endpoint
    # dispatches on the "http://" / "https://" prefix.
    image_source: str = Field(description="Image URL or local file path")


class ImageValidateResponse(BaseModel):
    """Response from image validation."""

    # When valid is True, width/height/preview are populated;
    # when False, only error carries information.
    valid: bool = Field(description="Whether the image source is accessible and valid")
    width: Optional[int] = Field(None, description="Image width in pixels")
    height: Optional[int] = Field(None, description="Image height in pixels")
    # Data-URI JPEG thumbnail (long side capped at 320px by the endpoint).
    preview: Optional[str] = Field(None, description="Base64-encoded JPEG thumbnail")
    error: Optional[str] = Field(None, description="Error message if invalid")

View File

@@ -13,7 +13,10 @@ from wled_controller.core.calibration import (
PixelMapper, PixelMapper,
create_default_calibration, create_default_calibration,
) )
import numpy as np
from wled_controller.core.capture_engines import CaptureEngine, EngineRegistry from wled_controller.core.capture_engines import CaptureEngine, EngineRegistry
from wled_controller.core.capture_engines.base import ScreenCapture
from wled_controller.core.filters import FilterInstance, FilterRegistry, ImagePool, PostprocessingFilter from wled_controller.core.filters import FilterInstance, FilterRegistry, ImagePool, PostprocessingFilter
from wled_controller.core.pixel_processor import smooth_colors from wled_controller.core.pixel_processor import smooth_colors
from wled_controller.core.screen_capture import extract_border_pixels from wled_controller.core.screen_capture import extract_border_pixels
@@ -108,6 +111,8 @@ class ProcessorState:
resolved_engine_type: Optional[str] = None resolved_engine_type: Optional[str] = None
resolved_engine_config: Optional[dict] = None resolved_engine_config: Optional[dict] = None
resolved_filters: Optional[List[FilterInstance]] = None resolved_filters: Optional[List[FilterInstance]] = None
# Static image: cached frame for static_image streams (no engine needed)
static_image: Optional[np.ndarray] = None
image_pool: Optional[ImagePool] = None image_pool: Optional[ImagePool] = None
filter_instances: Optional[List[PostprocessingFilter]] = None filter_instances: Optional[List[PostprocessingFilter]] = None
@@ -280,6 +285,15 @@ class ProcessorManager:
raw_stream = chain["raw_stream"] raw_stream = chain["raw_stream"]
pp_template_ids = chain["postprocessing_template_ids"] pp_template_ids = chain["postprocessing_template_ids"]
if raw_stream.stream_type == "static_image":
# Static image stream: load image once, no engine needed
state.resolved_display_index = -1
state.resolved_target_fps = 1
state.resolved_engine_type = None
state.resolved_engine_config = None
state.static_image = self._load_static_image(raw_stream.image_source)
else:
# Raw capture stream
state.resolved_display_index = raw_stream.display_index state.resolved_display_index = raw_stream.display_index
state.resolved_target_fps = raw_stream.target_fps state.resolved_target_fps = raw_stream.target_fps
@@ -337,6 +351,27 @@ class ProcessorManager:
state.resolved_engine_type = "mss" state.resolved_engine_type = "mss"
state.resolved_engine_config = {} state.resolved_engine_config = {}
@staticmethod
def _load_static_image(image_source: str) -> np.ndarray:
    """Load a static image from URL or file path, return as RGB numpy array.

    Raises FileNotFoundError for a missing local path, and lets httpx
    request/status errors propagate for remote sources.
    """
    from io import BytesIO
    from pathlib import Path

    from PIL import Image

    if image_source.startswith(("http://", "https://")):
        # Remote source: synchronous fetch with a bounded timeout,
        # following redirects.
        response = httpx.get(image_source, timeout=15.0, follow_redirects=True)
        response.raise_for_status()
        loaded = Image.open(BytesIO(response.content))
    else:
        local_path = Path(image_source)
        if not local_path.exists():
            raise FileNotFoundError(f"Image file not found: {image_source}")
        loaded = Image.open(local_path)

    # Normalize to RGB so downstream processing always sees a 3-channel frame.
    return np.array(loaded.convert("RGB"))
async def start_processing(self, device_id: str): async def start_processing(self, device_id: str):
"""Start screen processing for a device. """Start screen processing for a device.
@@ -373,7 +408,10 @@ class ProcessorManager:
logger.error(f"Failed to connect to WLED device {device_id}: {e}") logger.error(f"Failed to connect to WLED device {device_id}: {e}")
raise RuntimeError(f"Failed to connect to WLED device: {e}") raise RuntimeError(f"Failed to connect to WLED device: {e}")
# Initialize capture engine from resolved settings # Initialize capture engine from resolved settings (skip for static_image)
if state.static_image is not None:
logger.info(f"Using static image for device {device_id} ({state.static_image.shape[1]}x{state.static_image.shape[0]})")
else:
try: try:
engine_type = state.resolved_engine_type or "mss" engine_type = state.resolved_engine_type or "mss"
engine_config = state.resolved_engine_config or {} engine_config = state.resolved_engine_config or {}
@@ -443,6 +481,9 @@ class ProcessorManager:
state.capture_engine.cleanup() state.capture_engine.cleanup()
state.capture_engine = None state.capture_engine = None
# Release cached static image
state.static_image = None
logger.info(f"Stopped processing for device {device_id}") logger.info(f"Stopped processing for device {device_id}")
async def _processing_loop(self, device_id: str): async def _processing_loop(self, device_id: str):
@@ -502,7 +543,13 @@ class ProcessorManager:
continue continue
try: try:
# Capture screen using engine # Get frame: static image or live capture
if state.static_image is not None:
h, w = state.static_image.shape[:2]
capture = ScreenCapture(
image=state.static_image.copy(), width=w, height=h, display_index=-1
)
else:
capture = await asyncio.to_thread( capture = await asyncio.to_thread(
state.capture_engine.capture_display, state.capture_engine.capture_display,
display_index display_index

View File

@@ -2953,13 +2953,17 @@ async function runTemplateTest() {
function buildTestStatsHtml(result) { function buildTestStatsHtml(result) {
const p = result.performance; const p = result.performance;
const res = `${result.full_capture.width}x${result.full_capture.height}`; const res = `${result.full_capture.width}x${result.full_capture.height}`;
return ` let html = `
<div class="stat-item"><span>${t('templates.test.results.duration')}:</span> <strong>${p.capture_duration_s.toFixed(2)}s</strong></div> <div class="stat-item"><span>${t('templates.test.results.duration')}:</span> <strong>${p.capture_duration_s.toFixed(2)}s</strong></div>
<div class="stat-item"><span>${t('templates.test.results.frame_count')}:</span> <strong>${p.frame_count}</strong></div> <div class="stat-item"><span>${t('templates.test.results.frame_count')}:</span> <strong>${p.frame_count}</strong></div>`;
if (p.frame_count > 1) {
html += `
<div class="stat-item"><span>${t('templates.test.results.actual_fps')}:</span> <strong>${p.actual_fps.toFixed(1)}</strong></div> <div class="stat-item"><span>${t('templates.test.results.actual_fps')}:</span> <strong>${p.actual_fps.toFixed(1)}</strong></div>
<div class="stat-item"><span>${t('templates.test.results.avg_capture_time')}:</span> <strong>${p.avg_capture_time_ms.toFixed(1)}ms</strong></div> <div class="stat-item"><span>${t('templates.test.results.avg_capture_time')}:</span> <strong>${p.avg_capture_time_ms.toFixed(1)}ms</strong></div>`;
<div class="stat-item"><span>Resolution:</span> <strong>${res}</strong></div> }
`; html += `
<div class="stat-item"><span>Resolution:</span> <strong>${res}</strong></div>`;
return html;
} }
// Display test results — opens lightbox with stats overlay // Display test results — opens lightbox with stats overlay
@@ -3046,20 +3050,27 @@ async function deleteTemplate(templateId) {
let _cachedStreams = []; let _cachedStreams = [];
let _cachedPPTemplates = []; let _cachedPPTemplates = [];
let _cachedCaptureTemplates = [];
let _availableFilters = []; // Loaded from GET /filters let _availableFilters = []; // Loaded from GET /filters
async function loadPictureStreams() { async function loadPictureStreams() {
try { try {
// Ensure PP templates are cached so processed stream cards can show filter info // Ensure PP templates and capture templates are cached for stream card display
if (_cachedPPTemplates.length === 0) { if (_cachedPPTemplates.length === 0 || _cachedCaptureTemplates.length === 0) {
try { try {
if (_availableFilters.length === 0) { if (_availableFilters.length === 0) {
const fr = await fetchWithAuth('/filters'); const fr = await fetchWithAuth('/filters');
if (fr.ok) { const fd = await fr.json(); _availableFilters = fd.filters || []; } if (fr.ok) { const fd = await fr.json(); _availableFilters = fd.filters || []; }
} }
if (_cachedPPTemplates.length === 0) {
const pr = await fetchWithAuth('/postprocessing-templates'); const pr = await fetchWithAuth('/postprocessing-templates');
if (pr.ok) { const pd = await pr.json(); _cachedPPTemplates = pd.templates || []; } if (pr.ok) { const pd = await pr.json(); _cachedPPTemplates = pd.templates || []; }
} catch (e) { console.warn('Could not pre-load PP templates for streams:', e); } }
if (_cachedCaptureTemplates.length === 0) {
const cr = await fetchWithAuth('/capture-templates');
if (cr.ok) { const cd = await cr.json(); _cachedCaptureTemplates = cd.templates || []; }
}
} catch (e) { console.warn('Could not pre-load templates for streams:', e); }
} }
const response = await fetchWithAuth('/picture-streams'); const response = await fetchWithAuth('/picture-streams');
if (!response.ok) { if (!response.ok) {
@@ -3094,6 +3105,19 @@ function renderPictureStreamsList(streams) {
</div> </div>
</div> </div>
</div> </div>
<div class="stream-group">
<div class="stream-group-header">
<span class="stream-group-icon">🖼️</span>
<span class="stream-group-title">${t('streams.group.static_image')}</span>
<span class="stream-group-count">0</span>
</div>
<div class="templates-grid">
<div class="template-card add-template-card" onclick="showAddStreamModal('static_image')">
<div class="add-template-icon">+</div>
<div class="add-template-label">${t('streams.add.static_image')}</div>
</div>
</div>
</div>
<div class="stream-group"> <div class="stream-group">
<div class="stream-group-header"> <div class="stream-group-header">
<span class="stream-group-icon">🎨</span> <span class="stream-group-icon">🎨</span>
@@ -3111,13 +3135,24 @@ function renderPictureStreamsList(streams) {
} }
const renderCard = (stream) => { const renderCard = (stream) => {
const typeIcon = stream.stream_type === 'raw' ? '🖥️' : '🎨'; const typeIcons = { raw: '🖥️', processed: '🎨', static_image: '🖼️' };
const typeBadge = stream.stream_type === 'raw' const typeIcon = typeIcons[stream.stream_type] || '📺';
? `<span class="badge badge-raw">${t('streams.type.raw')}</span>` const typeBadges = {
: `<span class="badge badge-processed">${t('streams.type.processed')}</span>`; raw: `<span class="badge badge-raw">${t('streams.type.raw')}</span>`,
processed: `<span class="badge badge-processed">${t('streams.type.processed')}</span>`,
static_image: `<span class="badge badge-processed">${t('streams.type.static_image')}</span>`,
};
const typeBadge = typeBadges[stream.stream_type] || '';
let detailsHtml = ''; let detailsHtml = '';
if (stream.stream_type === 'raw') { if (stream.stream_type === 'raw') {
let captureTemplateHtml = '';
if (stream.capture_template_id) {
const capTmpl = _cachedCaptureTemplates.find(t => t.id === stream.capture_template_id);
if (capTmpl) {
captureTemplateHtml = `<div class="template-config"><strong>${t('streams.capture_template')}</strong> ${escapeHtml(capTmpl.name)}</div>`;
}
}
detailsHtml = ` detailsHtml = `
<div class="template-config"> <div class="template-config">
<strong>${t('streams.display')}</strong> ${stream.display_index ?? 0} <strong>${t('streams.display')}</strong> ${stream.display_index ?? 0}
@@ -3125,8 +3160,9 @@ function renderPictureStreamsList(streams) {
<div class="template-config"> <div class="template-config">
<strong>${t('streams.target_fps')}</strong> ${stream.target_fps ?? 30} <strong>${t('streams.target_fps')}</strong> ${stream.target_fps ?? 30}
</div> </div>
${captureTemplateHtml}
`; `;
} else { } else if (stream.stream_type === 'processed') {
// Find source stream name and PP template name // Find source stream name and PP template name
const sourceStream = _cachedStreams.find(s => s.id === stream.source_stream_id); const sourceStream = _cachedStreams.find(s => s.id === stream.source_stream_id);
const sourceName = sourceStream ? escapeHtml(sourceStream.name) : (stream.source_stream_id || '-'); const sourceName = sourceStream ? escapeHtml(sourceStream.name) : (stream.source_stream_id || '-');
@@ -3144,6 +3180,15 @@ function renderPictureStreamsList(streams) {
</div> </div>
${ppTemplateHtml} ${ppTemplateHtml}
`; `;
} else if (stream.stream_type === 'static_image') {
const src = stream.image_source || '';
const truncated = src.length > 50 ? src.substring(0, 47) + '...' : src;
detailsHtml = `
<div class="template-config">
<strong>${t('streams.image_source')}</strong>
</div>
<div class="stream-card-image-source" title="${escapeHtml(src)}">${escapeHtml(truncated)}</div>
`;
} }
return ` return `
@@ -3171,6 +3216,7 @@ function renderPictureStreamsList(streams) {
const rawStreams = streams.filter(s => s.stream_type === 'raw'); const rawStreams = streams.filter(s => s.stream_type === 'raw');
const processedStreams = streams.filter(s => s.stream_type === 'processed'); const processedStreams = streams.filter(s => s.stream_type === 'processed');
const staticImageStreams = streams.filter(s => s.stream_type === 'static_image');
let html = ''; let html = '';
@@ -3190,6 +3236,22 @@ function renderPictureStreamsList(streams) {
</div> </div>
</div>`; </div>`;
// Static Image streams section
html += `<div class="stream-group">
<div class="stream-group-header">
<span class="stream-group-icon">🖼️</span>
<span class="stream-group-title">${t('streams.group.static_image')}</span>
<span class="stream-group-count">${staticImageStreams.length}</span>
</div>
<div class="templates-grid">
${staticImageStreams.map(renderCard).join('')}
<div class="template-card add-template-card" onclick="showAddStreamModal('static_image')">
<div class="add-template-icon">+</div>
<div class="add-template-label">${t('streams.add.static_image')}</div>
</div>
</div>
</div>`;
// Processed streams section // Processed streams section
html += `<div class="stream-group"> html += `<div class="stream-group">
<div class="stream-group-header"> <div class="stream-group-header">
@@ -3213,18 +3275,28 @@ function onStreamTypeChange() {
const streamType = document.getElementById('stream-type').value; const streamType = document.getElementById('stream-type').value;
document.getElementById('stream-raw-fields').style.display = streamType === 'raw' ? '' : 'none'; document.getElementById('stream-raw-fields').style.display = streamType === 'raw' ? '' : 'none';
document.getElementById('stream-processed-fields').style.display = streamType === 'processed' ? '' : 'none'; document.getElementById('stream-processed-fields').style.display = streamType === 'processed' ? '' : 'none';
document.getElementById('stream-static-image-fields').style.display = streamType === 'static_image' ? '' : 'none';
} }
async function showAddStreamModal(presetType) { async function showAddStreamModal(presetType) {
const streamType = presetType || 'raw'; const streamType = presetType || 'raw';
const titleKey = streamType === 'raw' ? 'streams.add.raw' : 'streams.add.processed'; const titleKeys = { raw: 'streams.add.raw', processed: 'streams.add.processed', static_image: 'streams.add.static_image' };
document.getElementById('stream-modal-title').textContent = t(titleKey); document.getElementById('stream-modal-title').textContent = t(titleKeys[streamType] || 'streams.add');
document.getElementById('stream-form').reset(); document.getElementById('stream-form').reset();
document.getElementById('stream-id').value = ''; document.getElementById('stream-id').value = '';
document.getElementById('stream-display-index').value = ''; document.getElementById('stream-display-index').value = '';
document.getElementById('stream-display-picker-label').textContent = t('displays.picker.select'); document.getElementById('stream-display-picker-label').textContent = t('displays.picker.select');
document.getElementById('stream-error').style.display = 'none'; document.getElementById('stream-error').style.display = 'none';
document.getElementById('stream-type').value = streamType; document.getElementById('stream-type').value = streamType;
// Clear static image preview and wire up auto-validation
_lastValidatedImageSource = '';
const imgSrcInput = document.getElementById('stream-image-source');
imgSrcInput.value = '';
document.getElementById('stream-image-preview-container').style.display = 'none';
document.getElementById('stream-image-validation-status').style.display = 'none';
imgSrcInput.onblur = () => validateStaticImage();
imgSrcInput.onkeydown = (e) => { if (e.key === 'Enter') { e.preventDefault(); validateStaticImage(); } };
imgSrcInput.onpaste = () => setTimeout(() => validateStaticImage(), 0);
onStreamTypeChange(); onStreamTypeChange();
// Populate dropdowns // Populate dropdowns
@@ -3242,8 +3314,8 @@ async function editStream(streamId) {
if (!response.ok) throw new Error(`Failed to load stream: ${response.status}`); if (!response.ok) throw new Error(`Failed to load stream: ${response.status}`);
const stream = await response.json(); const stream = await response.json();
const editTitleKey = stream.stream_type === 'raw' ? 'streams.edit.raw' : 'streams.edit.processed'; const editTitleKeys = { raw: 'streams.edit.raw', processed: 'streams.edit.processed', static_image: 'streams.edit.static_image' };
document.getElementById('stream-modal-title').textContent = t(editTitleKey); document.getElementById('stream-modal-title').textContent = t(editTitleKeys[stream.stream_type] || 'streams.edit');
document.getElementById('stream-id').value = streamId; document.getElementById('stream-id').value = streamId;
document.getElementById('stream-name').value = stream.name; document.getElementById('stream-name').value = stream.name;
document.getElementById('stream-description').value = stream.description || ''; document.getElementById('stream-description').value = stream.description || '';
@@ -3251,6 +3323,9 @@ async function editStream(streamId) {
// Set type (hidden input) // Set type (hidden input)
document.getElementById('stream-type').value = stream.stream_type; document.getElementById('stream-type').value = stream.stream_type;
// Clear static image preview
document.getElementById('stream-image-preview-container').style.display = 'none';
document.getElementById('stream-image-validation-status').style.display = 'none';
onStreamTypeChange(); onStreamTypeChange();
// Populate dropdowns before setting values // Populate dropdowns before setting values
@@ -3264,9 +3339,15 @@ async function editStream(streamId) {
const fps = stream.target_fps ?? 30; const fps = stream.target_fps ?? 30;
document.getElementById('stream-target-fps').value = fps; document.getElementById('stream-target-fps').value = fps;
document.getElementById('stream-target-fps-value').textContent = fps; document.getElementById('stream-target-fps-value').textContent = fps;
} else { } else if (stream.stream_type === 'processed') {
document.getElementById('stream-source').value = stream.source_stream_id || ''; document.getElementById('stream-source').value = stream.source_stream_id || '';
document.getElementById('stream-pp-template').value = stream.postprocessing_template_id || ''; document.getElementById('stream-pp-template').value = stream.postprocessing_template_id || '';
} else if (stream.stream_type === 'static_image') {
document.getElementById('stream-image-source').value = stream.image_source || '';
// Auto-validate to show preview
if (stream.image_source) {
validateStaticImage();
}
} }
const modal = document.getElementById('stream-modal'); const modal = document.getElementById('stream-modal');
@@ -3324,7 +3405,8 @@ async function populateStreamModalDropdowns() {
if (s.id === editingId) return; if (s.id === editingId) return;
const opt = document.createElement('option'); const opt = document.createElement('option');
opt.value = s.id; opt.value = s.id;
const typeLabel = s.stream_type === 'raw' ? '🖥️' : '🎨'; const typeLabels = { raw: '🖥️', processed: '🎨', static_image: '🖼️' };
const typeLabel = typeLabels[s.stream_type] || '📺';
opt.textContent = `${typeLabel} ${s.name}`; opt.textContent = `${typeLabel} ${s.name}`;
sourceSelect.appendChild(opt); sourceSelect.appendChild(opt);
}); });
@@ -3367,9 +3449,16 @@ async function saveStream() {
payload.display_index = parseInt(document.getElementById('stream-display-index').value) || 0; payload.display_index = parseInt(document.getElementById('stream-display-index').value) || 0;
payload.capture_template_id = document.getElementById('stream-capture-template').value; payload.capture_template_id = document.getElementById('stream-capture-template').value;
payload.target_fps = parseInt(document.getElementById('stream-target-fps').value) || 30; payload.target_fps = parseInt(document.getElementById('stream-target-fps').value) || 30;
} else { } else if (streamType === 'processed') {
payload.source_stream_id = document.getElementById('stream-source').value; payload.source_stream_id = document.getElementById('stream-source').value;
payload.postprocessing_template_id = document.getElementById('stream-pp-template').value; payload.postprocessing_template_id = document.getElementById('stream-pp-template').value;
} else if (streamType === 'static_image') {
const imageSource = document.getElementById('stream-image-source').value.trim();
if (!imageSource) {
showToast(t('streams.error.required'), 'error');
return;
}
payload.image_source = imageSource;
} }
try { try {
@@ -3429,6 +3518,56 @@ function closeStreamModal() {
unlockBody(); unlockBody();
} }
let _lastValidatedImageSource = '';

/**
 * Validate the static-image source (URL or local file path) entered in the
 * stream modal against the backend `/picture-streams/validate-image`
 * endpoint, then show an inline preview and a success/error status.
 *
 * Triggered on blur, Enter, and paste of #stream-image-source (wired up in
 * showAddStreamModal). Only SUCCESSFUL validations are cached in
 * _lastValidatedImageSource, so repeated events for the same value skip the
 * round-trip but a failed source can be retried without editing the field.
 * (Previously failures were cached too, silently blocking retries.)
 */
async function validateStaticImage() {
    const source = document.getElementById('stream-image-source').value.trim();
    const previewContainer = document.getElementById('stream-image-preview-container');
    const previewImg = document.getElementById('stream-image-preview');
    const infoEl = document.getElementById('stream-image-info');
    const statusEl = document.getElementById('stream-image-validation-status');

    if (!source) {
        // Field cleared: reset the cache and hide preview + status.
        _lastValidatedImageSource = '';
        previewContainer.style.display = 'none';
        statusEl.style.display = 'none';
        return;
    }
    // Skip the API call if this exact source already validated successfully.
    if (source === _lastValidatedImageSource) return;

    // Show loading state while the backend fetches/opens the image.
    statusEl.textContent = t('streams.validate_image.validating');
    statusEl.className = 'validation-status loading';
    statusEl.style.display = 'block';
    previewContainer.style.display = 'none';

    try {
        const response = await fetchWithAuth('/picture-streams/validate-image', {
            method: 'POST',
            body: JSON.stringify({ image_source: source }),
        });
        // Surface HTTP failures (500, auth, etc.) instead of letting a
        // non-JSON error page blow up inside response.json().
        if (!response.ok) {
            throw new Error(`HTTP ${response.status}`);
        }
        const data = await response.json();
        if (data.valid) {
            // Cache only on success so failing sources remain retryable.
            _lastValidatedImageSource = source;
            previewImg.src = data.preview;
            infoEl.textContent = `${data.width} × ${data.height} px`;
            previewContainer.style.display = '';
            statusEl.textContent = t('streams.validate_image.valid');
            statusEl.className = 'validation-status success';
        } else {
            previewContainer.style.display = 'none';
            statusEl.textContent = `${t('streams.validate_image.invalid')}: ${data.error}`;
            statusEl.className = 'validation-status error';
        }
    } catch (err) {
        // Network failure, HTTP error, or malformed response body.
        previewContainer.style.display = 'none';
        statusEl.textContent = `${t('streams.validate_image.invalid')}: ${err.message}`;
        statusEl.className = 'validation-status error';
    }
}
// ===== Picture Stream Test ===== // ===== Picture Stream Test =====
let _currentTestStreamId = null; let _currentTestStreamId = null;

View File

@@ -569,6 +569,20 @@
</div> </div>
</div> </div>
<!-- Static image fields -->
<!-- Shown only when the stream-type selector is "static_image" (toggled by
     onStreamTypeChange). The source input is auto-validated on blur/Enter/paste;
     the preview container and validation status are filled in by
     validateStaticImage() and stay hidden until a validation runs. -->
<div id="stream-static-image-fields" style="display: none;">
<div class="form-group">
<label for="stream-image-source" data-i18n="streams.image_source">Image Source:</label>
<input type="text" id="stream-image-source" data-i18n-placeholder="streams.image_source.placeholder" placeholder="https://example.com/image.jpg or C:\path\to\image.png">
<small class="form-hint" data-i18n="streams.image_source.hint">Enter a URL (http/https) or local file path to an image</small>
</div>
<!-- Populated with the backend-generated preview and "W × H px" info. -->
<div id="stream-image-preview-container" class="image-preview-container" style="display: none;">
<img id="stream-image-preview" class="stream-image-preview" src="" alt="Preview">
<div id="stream-image-info" class="stream-image-info"></div>
</div>
<!-- Gets .loading / .success / .error modifier classes during validation. -->
<div id="stream-image-validation-status" class="validation-status" style="display: none;"></div>
</div>
<div class="form-group"> <div class="form-group">
<label for="stream-description" data-i18n="streams.description_label">Description (optional):</label> <label for="stream-description" data-i18n="streams.description_label">Description (optional):</label>
<input type="text" id="stream-description" data-i18n-placeholder="streams.description_placeholder" placeholder="Describe this stream..."> <input type="text" id="stream-description" data-i18n-placeholder="streams.description_placeholder" placeholder="Describe this stream...">

View File

@@ -280,5 +280,15 @@
"device.stream_settings.interpolation_hint": "How to calculate LED color from sampled pixels", "device.stream_settings.interpolation_hint": "How to calculate LED color from sampled pixels",
"device.stream_settings.smoothing": "Smoothing:", "device.stream_settings.smoothing": "Smoothing:",
"device.stream_settings.smoothing_hint": "Temporal blending between frames (0=none, 1=full). Reduces flicker.", "device.stream_settings.smoothing_hint": "Temporal blending between frames (0=none, 1=full). Reduces flicker.",
"device.tip.stream_selector": "Configure picture stream and LED projection settings for this device" "device.tip.stream_selector": "Configure picture stream and LED projection settings for this device",
"streams.group.static_image": "Static Image Streams",
"streams.add.static_image": "Add Static Image",
"streams.edit.static_image": "Edit Static Image",
"streams.type.static_image": "Static Image",
"streams.image_source": "Image Source:",
"streams.image_source.placeholder": "https://example.com/image.jpg or C:\\path\\to\\image.png",
"streams.image_source.hint": "Enter a URL (http/https) or local file path to an image",
"streams.validate_image.validating": "Validating...",
"streams.validate_image.valid": "Image accessible",
"streams.validate_image.invalid": "Image not accessible"
} }

View File

@@ -280,5 +280,15 @@
"device.stream_settings.interpolation_hint": "Как вычислять цвет LED из выбранных пикселей", "device.stream_settings.interpolation_hint": "Как вычислять цвет LED из выбранных пикселей",
"device.stream_settings.smoothing": "Сглаживание:", "device.stream_settings.smoothing": "Сглаживание:",
"device.stream_settings.smoothing_hint": "Временное смешивание между кадрами (0=нет, 1=полное). Уменьшает мерцание.", "device.stream_settings.smoothing_hint": "Временное смешивание между кадрами (0=нет, 1=полное). Уменьшает мерцание.",
"device.tip.stream_selector": "Настройки видеопотока и проекции LED для этого устройства" "device.tip.stream_selector": "Настройки видеопотока и проекции LED для этого устройства",
"streams.group.static_image": "Статические изображения",
"streams.add.static_image": "Добавить статическое изображение",
"streams.edit.static_image": "Редактировать статическое изображение",
"streams.type.static_image": "Статическое изображение",
"streams.image_source": "Источник изображения:",
"streams.image_source.placeholder": "https://example.com/image.jpg или C:\\path\\to\\image.png",
"streams.image_source.hint": "Введите URL (http/https) или локальный путь к изображению",
"streams.validate_image.validating": "Проверка...",
"streams.validate_image.valid": "Изображение доступно",
"streams.validate_image.invalid": "Изображение недоступно"
} }

View File

@@ -2360,3 +2360,45 @@ input:-webkit-autofill:focus {
} }
} }
/* Static image stream styles */

/* Card wrapping the validated-image preview inside the stream modal. */
.image-preview-container {
text-align: center;
margin: 12px 0;
padding: 12px;
background: var(--bg-secondary);
border-radius: 8px;
border: 1px solid var(--border-color);
}

/* The preview <img> itself; capped height keeps tall images from
   pushing the modal off-screen. */
.stream-image-preview {
max-width: 100%;
max-height: 200px;
border-radius: 4px;
border: 1px solid var(--border-color);
}

/* "W × H px" caption under the preview image. */
.stream-image-info {
font-size: 0.75rem;
color: var(--text-muted);
margin-top: 6px;
}

/* Inline validation message below the source input; colored via the
   .loading / .success / .error modifiers set from JS. */
.validation-status {
font-size: 0.8rem;
margin-top: 4px;
padding: 4px 8px;
border-radius: 4px;
}

.validation-status.success {
color: #4caf50;
}

.validation-status.error {
color: #f44336;
}

.validation-status.loading {
color: var(--text-muted);
}

/* Image URL/path shown on stream cards; break-all prevents long
   unbroken URLs from overflowing the card. */
.stream-card-image-source {
font-size: 0.7rem;
color: var(--text-muted);
word-break: break-all;
margin-top: 4px;
}

View File

@@ -12,11 +12,12 @@ class PictureStream:
A picture stream is either: A picture stream is either:
- "raw": captures from a display using a capture engine template at a target FPS - "raw": captures from a display using a capture engine template at a target FPS
- "processed": applies postprocessing to another picture stream - "processed": applies postprocessing to another picture stream
- "static_image": returns a static frame from a URL or local file path
""" """
id: str id: str
name: str name: str
stream_type: str # "raw" or "processed" stream_type: str # "raw", "processed", or "static_image"
created_at: datetime created_at: datetime
updated_at: datetime updated_at: datetime
@@ -29,6 +30,9 @@ class PictureStream:
source_stream_id: Optional[str] = None source_stream_id: Optional[str] = None
postprocessing_template_id: Optional[str] = None postprocessing_template_id: Optional[str] = None
# Static image fields (used when stream_type == "static_image")
image_source: Optional[str] = None
description: Optional[str] = None description: Optional[str] = None
def to_dict(self) -> dict: def to_dict(self) -> dict:
@@ -42,6 +46,7 @@ class PictureStream:
"target_fps": self.target_fps, "target_fps": self.target_fps,
"source_stream_id": self.source_stream_id, "source_stream_id": self.source_stream_id,
"postprocessing_template_id": self.postprocessing_template_id, "postprocessing_template_id": self.postprocessing_template_id,
"image_source": self.image_source,
"created_at": self.created_at.isoformat(), "created_at": self.created_at.isoformat(),
"updated_at": self.updated_at.isoformat(), "updated_at": self.updated_at.isoformat(),
"description": self.description, "description": self.description,
@@ -59,6 +64,7 @@ class PictureStream:
target_fps=data.get("target_fps"), target_fps=data.get("target_fps"),
source_stream_id=data.get("source_stream_id"), source_stream_id=data.get("source_stream_id"),
postprocessing_template_id=data.get("postprocessing_template_id"), postprocessing_template_id=data.get("postprocessing_template_id"),
image_source=data.get("image_source"),
created_at=datetime.fromisoformat(data["created_at"]) created_at=datetime.fromisoformat(data["created_at"])
if isinstance(data.get("created_at"), str) if isinstance(data.get("created_at"), str)
else data.get("created_at", datetime.utcnow()), else data.get("created_at", datetime.utcnow()),

View File

@@ -105,7 +105,7 @@ class PictureStreamStore:
current_stream = self._streams.get(current_id) current_stream = self._streams.get(current_id)
if not current_stream: if not current_stream:
break break
if current_stream.stream_type == "raw": if current_stream.stream_type != "processed":
break break
current_id = current_stream.source_stream_id current_id = current_stream.source_stream_id
@@ -134,24 +134,26 @@ class PictureStreamStore:
target_fps: Optional[int] = None, target_fps: Optional[int] = None,
source_stream_id: Optional[str] = None, source_stream_id: Optional[str] = None,
postprocessing_template_id: Optional[str] = None, postprocessing_template_id: Optional[str] = None,
image_source: Optional[str] = None,
description: Optional[str] = None, description: Optional[str] = None,
) -> PictureStream: ) -> PictureStream:
"""Create a new picture stream. """Create a new picture stream.
Args: Args:
name: Stream name name: Stream name
stream_type: "raw" or "processed" stream_type: "raw", "processed", or "static_image"
display_index: Display index (raw streams) display_index: Display index (raw streams)
capture_template_id: Capture template ID (raw streams) capture_template_id: Capture template ID (raw streams)
target_fps: Target FPS (raw streams) target_fps: Target FPS (raw streams)
source_stream_id: Source stream ID (processed streams) source_stream_id: Source stream ID (processed streams)
postprocessing_template_id: Postprocessing template ID (processed streams) postprocessing_template_id: Postprocessing template ID (processed streams)
image_source: URL or file path (static_image streams)
description: Optional description description: Optional description
Raises: Raises:
ValueError: If validation fails or cycle detected ValueError: If validation fails or cycle detected
""" """
if stream_type not in ("raw", "processed"): if stream_type not in ("raw", "processed", "static_image"):
raise ValueError(f"Invalid stream type: {stream_type}") raise ValueError(f"Invalid stream type: {stream_type}")
if stream_type == "raw": if stream_type == "raw":
@@ -172,6 +174,9 @@ class PictureStreamStore:
# Check for cycles # Check for cycles
if self._detect_cycle(source_stream_id): if self._detect_cycle(source_stream_id):
raise ValueError("Cycle detected in stream chain") raise ValueError("Cycle detected in stream chain")
elif stream_type == "static_image":
if not image_source:
raise ValueError("Static image streams require image_source")
# Check for duplicate name # Check for duplicate name
for stream in self._streams.values(): for stream in self._streams.values():
@@ -190,6 +195,7 @@ class PictureStreamStore:
target_fps=target_fps, target_fps=target_fps,
source_stream_id=source_stream_id, source_stream_id=source_stream_id,
postprocessing_template_id=postprocessing_template_id, postprocessing_template_id=postprocessing_template_id,
image_source=image_source,
created_at=now, created_at=now,
updated_at=now, updated_at=now,
description=description, description=description,
@@ -210,6 +216,7 @@ class PictureStreamStore:
target_fps: Optional[int] = None, target_fps: Optional[int] = None,
source_stream_id: Optional[str] = None, source_stream_id: Optional[str] = None,
postprocessing_template_id: Optional[str] = None, postprocessing_template_id: Optional[str] = None,
image_source: Optional[str] = None,
description: Optional[str] = None, description: Optional[str] = None,
) -> PictureStream: ) -> PictureStream:
"""Update an existing picture stream. """Update an existing picture stream.
@@ -241,6 +248,8 @@ class PictureStreamStore:
stream.source_stream_id = source_stream_id stream.source_stream_id = source_stream_id
if postprocessing_template_id is not None: if postprocessing_template_id is not None:
stream.postprocessing_template_id = postprocessing_template_id stream.postprocessing_template_id = postprocessing_template_id
if image_source is not None:
stream.image_source = image_source
if description is not None: if description is not None:
stream.description = description stream.description = description
@@ -289,9 +298,9 @@ class PictureStreamStore:
return False return False
def resolve_stream_chain(self, stream_id: str) -> dict: def resolve_stream_chain(self, stream_id: str) -> dict:
"""Resolve a stream chain to get the final raw stream and collected postprocessing templates. """Resolve a stream chain to get the terminal stream and collected postprocessing templates.
Walks the chain from the given stream to the root raw stream, Walks the chain from the given stream to a terminal stream (raw or static_image),
collecting postprocessing template IDs along the way. collecting postprocessing template IDs along the way.
Args: Args:
@@ -299,7 +308,7 @@ class PictureStreamStore:
Returns: Returns:
Dict with: Dict with:
- raw_stream: The root raw PictureStream - raw_stream: The terminal PictureStream (raw or static_image)
- postprocessing_template_ids: List of PP template IDs (in chain order) - postprocessing_template_ids: List of PP template IDs (in chain order)
Raises: Raises:
@@ -316,7 +325,7 @@ class PictureStreamStore:
stream = self.get_stream(current_id) stream = self.get_stream(current_id)
if stream.stream_type == "raw": if stream.stream_type != "processed":
return { return {
"raw_stream": stream, "raw_stream": stream,
"postprocessing_template_ids": postprocessing_template_ids, "postprocessing_template_ids": postprocessing_template_ids,