Refactor capture engine architecture, rename PictureStream to PictureSource, and split API modules

- Separate CaptureEngine into stateless factory + stateful CaptureStream session
- Add LiveStream/LiveStreamManager for shared capture with reference counting
- Rename PictureStream to PictureSource across storage, API, and UI
- Remove legacy migration logic and unused compatibility code
- Split monolithic routes.py (1935 lines) into 5 focused route modules
- Split schemas.py (480 lines) into 7 schema modules with re-exports
- Extract dependency injection into dedicated dependencies.py

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-12 14:27:00 +03:00
parent b8389f080a
commit c3828e10fa
42 changed files with 4047 additions and 3797 deletions

View File

@@ -1,5 +1,18 @@
"""API routes and schemas.""" """API routes and schemas."""
from .routes import router from fastapi import APIRouter
from .routes.system import router as system_router
from .routes.devices import router as devices_router
from .routes.templates import router as templates_router
from .routes.postprocessing import router as postprocessing_router
from .routes.picture_sources import router as picture_sources_router
router = APIRouter()
router.include_router(system_router)
router.include_router(devices_router)
router.include_router(templates_router)
router.include_router(postprocessing_router)
router.include_router(picture_sources_router)
__all__ = ["router"] __all__ = ["router"]

View File

@@ -0,0 +1,65 @@
"""Dependency injection for API routes."""
from wled_controller.core.processor_manager import ProcessorManager
from wled_controller.storage import DeviceStore
from wled_controller.storage.template_store import TemplateStore
from wled_controller.storage.postprocessing_template_store import PostprocessingTemplateStore
from wled_controller.storage.picture_source_store import PictureSourceStore
# Global instances (initialized in main.py)
# Module-level singletons consumed by the get_*() dependency providers below.
# Each stays None until init_dependencies() is called during app startup.
_device_store: DeviceStore | None = None
_template_store: TemplateStore | None = None
_pp_template_store: PostprocessingTemplateStore | None = None
_picture_source_store: PictureSourceStore | None = None
_processor_manager: ProcessorManager | None = None
def get_device_store() -> DeviceStore:
    """FastAPI dependency provider for the global device store.

    Raises:
        RuntimeError: if init_dependencies() has not run yet.
    """
    store = _device_store
    if store is None:
        raise RuntimeError("Device store not initialized")
    return store
def get_template_store() -> TemplateStore:
    """Return the global template store, failing fast if startup never ran.

    Raises:
        RuntimeError: if init_dependencies() has not run yet.
    """
    ts = _template_store
    if ts is None:
        raise RuntimeError("Template store not initialized")
    return ts
def get_pp_template_store() -> PostprocessingTemplateStore:
    """Dependency provider for the postprocessing-template store.

    Raises:
        RuntimeError: if init_dependencies() has not run yet.
    """
    pp = _pp_template_store
    if pp is None:
        raise RuntimeError("Postprocessing template store not initialized")
    return pp
def get_picture_source_store() -> PictureSourceStore:
    """Dependency provider for the picture-source store.

    Raises:
        RuntimeError: if init_dependencies() has not run yet.
    """
    ps = _picture_source_store
    if ps is None:
        raise RuntimeError("Picture source store not initialized")
    return ps
def get_processor_manager() -> ProcessorManager:
    """Dependency provider for the processor manager.

    Raises:
        RuntimeError: if init_dependencies() has not run yet.
    """
    pm = _processor_manager
    if pm is None:
        raise RuntimeError("Processor manager not initialized")
    return pm
def init_dependencies(
    device_store: DeviceStore,
    template_store: TemplateStore,
    processor_manager: ProcessorManager,
    pp_template_store: PostprocessingTemplateStore | None = None,
    picture_source_store: PictureSourceStore | None = None,
) -> None:
    """Initialize global dependencies.

    Called once during application startup (from main.py, per the module
    comment) before any request handler resolves the get_*() providers.

    Args:
        device_store: Store of attached WLED devices.
        template_store: Store of capture templates.
        processor_manager: Orchestrator for per-device processing.
        pp_template_store: Optional store of postprocessing templates.
        picture_source_store: Optional store of picture sources.
    """
    # Rebind the module-level singletons consumed by the getters above.
    global _device_store, _template_store, _processor_manager, _pp_template_store, _picture_source_store
    _device_store = device_store
    _template_store = template_store
    _processor_manager = processor_manager
    _pp_template_store = pp_template_store
    _picture_source_store = picture_source_store

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1 @@
"""API route modules."""

View File

@@ -0,0 +1,663 @@
"""Device routes: CRUD, processing control, settings, brightness, calibration, metrics."""
from datetime import datetime
import httpx
from fastapi import APIRouter, HTTPException, Depends
from wled_controller.api.auth import AuthRequired
from wled_controller.api.dependencies import (
get_device_store,
get_processor_manager,
)
from wled_controller.api.schemas.devices import (
Calibration as CalibrationSchema,
CalibrationTestModeRequest,
CalibrationTestModeResponse,
DeviceCreate,
DeviceListResponse,
DeviceResponse,
DeviceUpdate,
MetricsResponse,
ProcessingSettings as ProcessingSettingsSchema,
ProcessingState,
)
from wled_controller.core.calibration import (
calibration_from_dict,
calibration_to_dict,
)
from wled_controller.core.processor_manager import ProcessorManager, ProcessingSettings
from wled_controller.storage import DeviceStore
from wled_controller.utils import get_logger
# Module-level logger and the router this module contributes to the API.
logger = get_logger(__name__)
router = APIRouter()
# ===== DEVICE MANAGEMENT ENDPOINTS =====
@router.post("/api/v1/devices", response_model=DeviceResponse, tags=["Devices"], status_code=201)
async def create_device(
device_data: DeviceCreate,
_auth: AuthRequired,
store: DeviceStore = Depends(get_device_store),
manager: ProcessorManager = Depends(get_processor_manager),
):
"""Create and attach a new WLED device."""
try:
logger.info(f"Creating device: {device_data.name}")
# Validate WLED device is reachable before adding
device_url = device_data.url.rstrip("/")
try:
async with httpx.AsyncClient(timeout=5) as client:
response = await client.get(f"{device_url}/json/info")
response.raise_for_status()
wled_info = response.json()
wled_led_count = wled_info.get("leds", {}).get("count")
if not wled_led_count or wled_led_count < 1:
raise HTTPException(
status_code=422,
detail=f"WLED device at {device_url} reported invalid LED count: {wled_led_count}"
)
logger.info(
f"WLED device reachable: {wled_info.get('name', 'Unknown')} "
f"v{wled_info.get('ver', '?')} ({wled_led_count} LEDs)"
)
except httpx.ConnectError:
raise HTTPException(
status_code=422,
detail=f"Cannot reach WLED device at {device_url}. Check the URL and ensure the device is powered on."
)
except httpx.TimeoutException:
raise HTTPException(
status_code=422,
detail=f"Connection to {device_url} timed out. Check network connectivity."
)
except Exception as e:
raise HTTPException(
status_code=422,
detail=f"Failed to connect to WLED device at {device_url}: {e}"
)
# Create device in storage (LED count auto-detected from WLED)
device = store.create_device(
name=device_data.name,
url=device_data.url,
led_count=wled_led_count,
)
# Add to processor manager
manager.add_device(
device_id=device.id,
device_url=device.url,
led_count=device.led_count,
settings=device.settings,
calibration=device.calibration,
)
return DeviceResponse(
id=device.id,
name=device.name,
url=device.url,
led_count=device.led_count,
enabled=device.enabled,
status="disconnected",
settings=ProcessingSettingsSchema(
display_index=device.settings.display_index,
fps=device.settings.fps,
border_width=device.settings.border_width,
interpolation_mode=device.settings.interpolation_mode,
brightness=device.settings.brightness,
smoothing=device.settings.smoothing,
state_check_interval=device.settings.state_check_interval,
),
calibration=CalibrationSchema(**calibration_to_dict(device.calibration)),
picture_source_id=device.picture_source_id,
created_at=device.created_at,
updated_at=device.updated_at,
)
except Exception as e:
logger.error(f"Failed to create device: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.get("/api/v1/devices", response_model=DeviceListResponse, tags=["Devices"])
async def list_devices(
_auth: AuthRequired,
store: DeviceStore = Depends(get_device_store),
):
"""List all attached WLED devices."""
try:
devices = store.get_all_devices()
device_responses = [
DeviceResponse(
id=device.id,
name=device.name,
url=device.url,
led_count=device.led_count,
enabled=device.enabled,
status="disconnected",
settings=ProcessingSettingsSchema(
display_index=device.settings.display_index,
fps=device.settings.fps,
border_width=device.settings.border_width,
interpolation_mode=device.settings.interpolation_mode,
brightness=device.settings.brightness,
smoothing=device.settings.smoothing,
state_check_interval=device.settings.state_check_interval,
),
calibration=CalibrationSchema(**calibration_to_dict(device.calibration)),
picture_source_id=device.picture_source_id,
created_at=device.created_at,
updated_at=device.updated_at,
)
for device in devices
]
return DeviceListResponse(devices=device_responses, count=len(device_responses))
except Exception as e:
logger.error(f"Failed to list devices: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.get("/api/v1/devices/{device_id}", response_model=DeviceResponse, tags=["Devices"])
async def get_device(
device_id: str,
_auth: AuthRequired,
store: DeviceStore = Depends(get_device_store),
manager: ProcessorManager = Depends(get_processor_manager),
):
"""Get device details by ID."""
device = store.get_device(device_id)
if not device:
raise HTTPException(status_code=404, detail=f"Device {device_id} not found")
# Determine status
status = "connected" if manager.is_processing(device_id) else "disconnected"
return DeviceResponse(
id=device.id,
name=device.name,
url=device.url,
led_count=device.led_count,
enabled=device.enabled,
status=status,
settings=ProcessingSettingsSchema(
display_index=device.settings.display_index,
fps=device.settings.fps,
border_width=device.settings.border_width,
interpolation_mode=device.settings.interpolation_mode,
brightness=device.settings.brightness,
smoothing=device.settings.smoothing,
state_check_interval=device.settings.state_check_interval,
),
calibration=CalibrationSchema(**calibration_to_dict(device.calibration)),
picture_source_id=device.picture_source_id,
created_at=device.created_at,
updated_at=device.updated_at,
)
@router.put("/api/v1/devices/{device_id}", response_model=DeviceResponse, tags=["Devices"])
async def update_device(
device_id: str,
update_data: DeviceUpdate,
_auth: AuthRequired,
store: DeviceStore = Depends(get_device_store),
manager: ProcessorManager = Depends(get_processor_manager),
):
"""Update device information."""
try:
# Check if stream changed and device is processing (for hot-swap)
old_device = store.get_device(device_id)
stream_changed = (
update_data.picture_source_id is not None
and update_data.picture_source_id != old_device.picture_source_id
)
was_processing = manager.is_processing(device_id)
# Update device
device = store.update_device(
device_id=device_id,
name=update_data.name,
url=update_data.url,
enabled=update_data.enabled,
picture_source_id=update_data.picture_source_id,
)
# Sync processor state when stream changed
if stream_changed:
if was_processing:
# Hot-swap: restart with new settings
logger.info(f"Hot-swapping stream for device {device_id}")
try:
await manager.stop_processing(device_id)
manager.remove_device(device_id)
manager.add_device(
device_id=device.id,
device_url=device.url,
led_count=device.led_count,
settings=device.settings,
calibration=device.calibration,
picture_source_id=device.picture_source_id,
)
await manager.start_processing(device_id)
logger.info(f"Successfully hot-swapped stream for device {device_id}")
except Exception as e:
logger.error(f"Error during stream hot-swap: {e}")
else:
# Not processing -- update processor state so next start uses new values
manager.remove_device(device_id)
manager.add_device(
device_id=device.id,
device_url=device.url,
led_count=device.led_count,
settings=device.settings,
calibration=device.calibration,
picture_source_id=device.picture_source_id,
)
return DeviceResponse(
id=device.id,
name=device.name,
url=device.url,
led_count=device.led_count,
enabled=device.enabled,
status="disconnected",
settings=ProcessingSettingsSchema(
display_index=device.settings.display_index,
fps=device.settings.fps,
border_width=device.settings.border_width,
interpolation_mode=device.settings.interpolation_mode,
brightness=device.settings.brightness,
smoothing=device.settings.smoothing,
state_check_interval=device.settings.state_check_interval,
),
calibration=CalibrationSchema(**calibration_to_dict(device.calibration)),
picture_source_id=device.picture_source_id,
created_at=device.created_at,
updated_at=device.updated_at,
)
except ValueError as e:
raise HTTPException(status_code=404, detail=str(e))
except Exception as e:
logger.error(f"Failed to update device: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.delete("/api/v1/devices/{device_id}", status_code=204, tags=["Devices"])
async def delete_device(
device_id: str,
_auth: AuthRequired,
store: DeviceStore = Depends(get_device_store),
manager: ProcessorManager = Depends(get_processor_manager),
):
"""Delete/detach a device."""
try:
# Stop processing if running
if manager.is_processing(device_id):
await manager.stop_processing(device_id)
# Remove from manager
manager.remove_device(device_id)
# Delete from storage
store.delete_device(device_id)
logger.info(f"Deleted device {device_id}")
except ValueError as e:
raise HTTPException(status_code=404, detail=str(e))
except Exception as e:
logger.error(f"Failed to delete device: {e}")
raise HTTPException(status_code=500, detail=str(e))
# ===== PROCESSING CONTROL ENDPOINTS =====
@router.post("/api/v1/devices/{device_id}/start", tags=["Processing"])
async def start_processing(
device_id: str,
_auth: AuthRequired,
store: DeviceStore = Depends(get_device_store),
manager: ProcessorManager = Depends(get_processor_manager),
):
"""Start screen processing for a device."""
try:
# Verify device exists
device = store.get_device(device_id)
if not device:
raise HTTPException(status_code=404, detail=f"Device {device_id} not found")
await manager.start_processing(device_id)
logger.info(f"Started processing for device {device_id}")
return {"status": "started", "device_id": device_id}
except RuntimeError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
logger.error(f"Failed to start processing: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.post("/api/v1/devices/{device_id}/stop", tags=["Processing"])
async def stop_processing(
device_id: str,
_auth: AuthRequired,
manager: ProcessorManager = Depends(get_processor_manager),
):
"""Stop screen processing for a device."""
try:
await manager.stop_processing(device_id)
logger.info(f"Stopped processing for device {device_id}")
return {"status": "stopped", "device_id": device_id}
except ValueError as e:
raise HTTPException(status_code=404, detail=str(e))
except Exception as e:
logger.error(f"Failed to stop processing: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.get("/api/v1/devices/{device_id}/state", response_model=ProcessingState, tags=["Processing"])
async def get_processing_state(
device_id: str,
_auth: AuthRequired,
manager: ProcessorManager = Depends(get_processor_manager),
):
"""Get current processing state for a device."""
try:
state = manager.get_state(device_id)
return ProcessingState(**state)
except ValueError as e:
raise HTTPException(status_code=404, detail=str(e))
except Exception as e:
logger.error(f"Failed to get state: {e}")
raise HTTPException(status_code=500, detail=str(e))
# ===== SETTINGS ENDPOINTS =====
@router.get("/api/v1/devices/{device_id}/settings", response_model=ProcessingSettingsSchema, tags=["Settings"])
async def get_settings(
device_id: str,
_auth: AuthRequired,
store: DeviceStore = Depends(get_device_store),
):
"""Get processing settings for a device."""
device = store.get_device(device_id)
if not device:
raise HTTPException(status_code=404, detail=f"Device {device_id} not found")
return ProcessingSettingsSchema(
display_index=device.settings.display_index,
fps=device.settings.fps,
border_width=device.settings.border_width,
interpolation_mode=device.settings.interpolation_mode,
brightness=device.settings.brightness,
smoothing=device.settings.smoothing,
state_check_interval=device.settings.state_check_interval,
)
@router.put("/api/v1/devices/{device_id}/settings", response_model=ProcessingSettingsSchema, tags=["Settings"])
async def update_settings(
device_id: str,
settings: ProcessingSettingsSchema,
_auth: AuthRequired,
store: DeviceStore = Depends(get_device_store),
manager: ProcessorManager = Depends(get_processor_manager),
):
"""Update processing settings for a device.
Merges with existing settings so callers can send partial updates.
Only fields explicitly included in the request body are applied.
"""
try:
# Get existing device to merge settings
device = store.get_device(device_id)
if not device:
raise HTTPException(status_code=404, detail=f"Device {device_id} not found")
existing = device.settings
sent = settings.model_fields_set # fields the client actually sent
# Merge: only override fields the client explicitly provided
new_settings = ProcessingSettings(
display_index=settings.display_index if 'display_index' in sent else existing.display_index,
fps=settings.fps if 'fps' in sent else existing.fps,
border_width=settings.border_width if 'border_width' in sent else existing.border_width,
interpolation_mode=settings.interpolation_mode if 'interpolation_mode' in sent else existing.interpolation_mode,
brightness=settings.brightness if 'brightness' in sent else existing.brightness,
gamma=existing.gamma,
saturation=existing.saturation,
smoothing=settings.smoothing if 'smoothing' in sent else existing.smoothing,
state_check_interval=settings.state_check_interval if 'state_check_interval' in sent else existing.state_check_interval,
)
# Apply color_correction fields if explicitly sent
if 'color_correction' in sent and settings.color_correction:
cc_sent = settings.color_correction.model_fields_set
if 'brightness' in cc_sent:
new_settings.brightness = settings.color_correction.brightness
if 'gamma' in cc_sent:
new_settings.gamma = settings.color_correction.gamma
if 'saturation' in cc_sent:
new_settings.saturation = settings.color_correction.saturation
# Update in storage
device = store.update_device(device_id, settings=new_settings)
# Update in manager if device exists
try:
manager.update_settings(device_id, new_settings)
except ValueError:
# Device not in manager yet, that's ok
pass
return ProcessingSettingsSchema(
display_index=device.settings.display_index,
fps=device.settings.fps,
border_width=device.settings.border_width,
interpolation_mode=device.settings.interpolation_mode,
brightness=device.settings.brightness,
smoothing=device.settings.smoothing,
state_check_interval=device.settings.state_check_interval,
)
except ValueError as e:
raise HTTPException(status_code=404, detail=str(e))
except Exception as e:
logger.error(f"Failed to update settings: {e}")
raise HTTPException(status_code=500, detail=str(e))
# ===== WLED BRIGHTNESS ENDPOINT =====
@router.get("/api/v1/devices/{device_id}/brightness", tags=["Settings"])
async def get_device_brightness(
device_id: str,
_auth: AuthRequired,
store: DeviceStore = Depends(get_device_store),
):
"""Get current brightness from the WLED device."""
device = store.get_device(device_id)
if not device:
raise HTTPException(status_code=404, detail=f"Device {device_id} not found")
try:
async with httpx.AsyncClient(timeout=5.0) as http_client:
resp = await http_client.get(f"{device.url}/json/state")
resp.raise_for_status()
state = resp.json()
bri = state.get("bri", 255)
return {"brightness": bri}
except Exception as e:
logger.error(f"Failed to get WLED brightness for {device_id}: {e}")
raise HTTPException(status_code=502, detail=f"Failed to reach WLED device: {e}")
@router.put("/api/v1/devices/{device_id}/brightness", tags=["Settings"])
async def set_device_brightness(
device_id: str,
body: dict,
_auth: AuthRequired,
store: DeviceStore = Depends(get_device_store),
):
"""Set brightness on the WLED device directly."""
device = store.get_device(device_id)
if not device:
raise HTTPException(status_code=404, detail=f"Device {device_id} not found")
bri = body.get("brightness")
if bri is None or not isinstance(bri, int) or not 0 <= bri <= 255:
raise HTTPException(status_code=400, detail="brightness must be an integer 0-255")
try:
async with httpx.AsyncClient(timeout=5.0) as http_client:
resp = await http_client.post(
f"{device.url}/json/state",
json={"bri": bri},
)
resp.raise_for_status()
return {"brightness": bri}
except Exception as e:
logger.error(f"Failed to set WLED brightness for {device_id}: {e}")
raise HTTPException(status_code=502, detail=f"Failed to reach WLED device: {e}")
# ===== CALIBRATION ENDPOINTS =====
@router.get("/api/v1/devices/{device_id}/calibration", response_model=CalibrationSchema, tags=["Calibration"])
async def get_calibration(
device_id: str,
_auth: AuthRequired,
store: DeviceStore = Depends(get_device_store),
):
"""Get calibration configuration for a device."""
device = store.get_device(device_id)
if not device:
raise HTTPException(status_code=404, detail=f"Device {device_id} not found")
return CalibrationSchema(**calibration_to_dict(device.calibration))
@router.put("/api/v1/devices/{device_id}/calibration", response_model=CalibrationSchema, tags=["Calibration"])
async def update_calibration(
device_id: str,
calibration_data: CalibrationSchema,
_auth: AuthRequired,
store: DeviceStore = Depends(get_device_store),
manager: ProcessorManager = Depends(get_processor_manager),
):
"""Update calibration configuration for a device."""
try:
# Convert schema to CalibrationConfig
calibration_dict = calibration_data.model_dump()
calibration = calibration_from_dict(calibration_dict)
# Update in storage
device = store.update_device(device_id, calibration=calibration)
# Update in manager if device exists
try:
manager.update_calibration(device_id, calibration)
except ValueError:
# Device not in manager yet, that's ok
pass
return CalibrationSchema(**calibration_to_dict(device.calibration))
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
logger.error(f"Failed to update calibration: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.put(
    "/api/v1/devices/{device_id}/calibration/test",
    response_model=CalibrationTestModeResponse,
    tags=["Calibration"],
)
async def set_calibration_test_mode(
    device_id: str,
    body: CalibrationTestModeRequest,
    _auth: AuthRequired,
    store: DeviceStore = Depends(get_device_store),
    manager: ProcessorManager = Depends(get_processor_manager),
):
    """Toggle calibration test mode for specific edges.

    Send edges with colors to light them up, or empty edges dict to exit test mode.
    While test mode is active, screen capture processing is paused.
    """
    try:
        if not store.get_device(device_id):
            raise HTTPException(status_code=404, detail=f"Device {device_id} not found")
        # Validate edge names and colors before touching the manager.
        valid_edges = {"top", "right", "bottom", "left"}
        for edge_name, color in body.edges.items():
            if edge_name not in valid_edges:
                raise HTTPException(
                    status_code=400,
                    detail=f"Invalid edge '{edge_name}'. Must be one of: {', '.join(valid_edges)}"
                )
            # Colors must be exactly [R, G, B] with each channel in 0-255.
            if len(color) != 3 or not all(0 <= c <= 255 for c in color):
                raise HTTPException(
                    status_code=400,
                    detail=f"Invalid color for edge '{edge_name}'. Must be [R, G, B] with values 0-255."
                )
        await manager.set_test_mode(device_id, body.edges)
        active_edges = list(body.edges)
        logger.info(
            f"Test mode {'activated' if active_edges else 'deactivated'} "
            f"for device {device_id}: {active_edges}"
        )
        return CalibrationTestModeResponse(
            test_mode=bool(active_edges),
            active_edges=active_edges,
            device_id=device_id,
        )
    except HTTPException:
        raise
    except ValueError as e:
        raise HTTPException(status_code=404, detail=str(e))
    except Exception as e:
        logger.error(f"Failed to set test mode: {e}")
        raise HTTPException(status_code=500, detail=str(e))
# ===== METRICS ENDPOINTS =====
@router.get("/api/v1/devices/{device_id}/metrics", response_model=MetricsResponse, tags=["Metrics"])
async def get_metrics(
device_id: str,
_auth: AuthRequired,
manager: ProcessorManager = Depends(get_processor_manager),
):
"""Get processing metrics for a device."""
try:
metrics = manager.get_metrics(device_id)
return MetricsResponse(**metrics)
except ValueError as e:
raise HTTPException(status_code=404, detail=str(e))
except Exception as e:
logger.error(f"Failed to get metrics: {e}")
raise HTTPException(status_code=500, detail=str(e))

View File

@@ -0,0 +1,475 @@
"""Picture source routes."""
import base64
import io
import time
import httpx
import numpy as np
from PIL import Image
from fastapi import APIRouter, HTTPException, Depends, Query
from fastapi.responses import Response
from wled_controller.api.auth import AuthRequired
from wled_controller.api.dependencies import (
get_device_store,
get_picture_source_store,
get_pp_template_store,
get_processor_manager,
get_template_store,
)
from wled_controller.api.schemas.common import (
CaptureImage,
PerformanceMetrics,
TemplateTestResponse,
)
from wled_controller.api.schemas.picture_sources import (
ImageValidateRequest,
ImageValidateResponse,
PictureSourceCreate,
PictureSourceListResponse,
PictureSourceResponse,
PictureSourceTestRequest,
PictureSourceUpdate,
)
from wled_controller.core.capture_engines import EngineRegistry
from wled_controller.core.filters import FilterRegistry, ImagePool
from wled_controller.core.processor_manager import ProcessorManager
from wled_controller.storage import DeviceStore
from wled_controller.storage.template_store import TemplateStore
from wled_controller.storage.postprocessing_template_store import PostprocessingTemplateStore
from wled_controller.storage.picture_source_store import PictureSourceStore
from wled_controller.storage.picture_source import ScreenCapturePictureSource, StaticImagePictureSource
from wled_controller.utils import get_logger
# Module-level logger and the router this module contributes to the API.
logger = get_logger(__name__)
router = APIRouter()
def _stream_to_response(s) -> PictureSourceResponse:
    """Convert a PictureSource storage object into its API response model.

    Type-specific attributes are read with getattr so any source subtype can
    be serialized; absent attributes become None.
    """
    optional = {
        attr: getattr(s, attr, None)
        for attr in (
            "display_index",
            "capture_template_id",
            "target_fps",
            "source_stream_id",
            "postprocessing_template_id",
            "image_source",
        )
    }
    return PictureSourceResponse(
        id=s.id,
        name=s.name,
        stream_type=s.stream_type,
        created_at=s.created_at,
        updated_at=s.updated_at,
        description=s.description,
        **optional,
    )
@router.get("/api/v1/picture-sources", response_model=PictureSourceListResponse, tags=["Picture Sources"])
async def list_picture_sources(
_auth: AuthRequired,
store: PictureSourceStore = Depends(get_picture_source_store),
):
"""List all picture sources."""
try:
streams = store.get_all_streams()
responses = [_stream_to_response(s) for s in streams]
return PictureSourceListResponse(streams=responses, count=len(responses))
except Exception as e:
logger.error(f"Failed to list picture sources: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.post("/api/v1/picture-sources/validate-image", response_model=ImageValidateResponse, tags=["Picture Sources"])
async def validate_image(
data: ImageValidateRequest,
_auth: AuthRequired,
):
"""Validate an image source (URL or file path) and return a preview thumbnail."""
try:
from pathlib import Path
source = data.image_source.strip()
if not source:
return ImageValidateResponse(valid=False, error="Image source is empty")
if source.startswith(("http://", "https://")):
async with httpx.AsyncClient(timeout=15, follow_redirects=True) as client:
response = await client.get(source)
response.raise_for_status()
pil_image = Image.open(io.BytesIO(response.content))
else:
path = Path(source)
if not path.exists():
return ImageValidateResponse(valid=False, error=f"File not found: {source}")
pil_image = Image.open(path)
pil_image = pil_image.convert("RGB")
width, height = pil_image.size
# Create thumbnail preview (max 320px wide)
thumb = pil_image.copy()
thumb.thumbnail((320, 320), Image.Resampling.LANCZOS)
buf = io.BytesIO()
thumb.save(buf, format="JPEG", quality=80)
buf.seek(0)
preview = f"data:image/jpeg;base64,{base64.b64encode(buf.getvalue()).decode()}"
return ImageValidateResponse(
valid=True, width=width, height=height, preview=preview
)
except httpx.HTTPStatusError as e:
return ImageValidateResponse(valid=False, error=f"HTTP {e.response.status_code}: {e.response.reason_phrase}")
except httpx.RequestError as e:
return ImageValidateResponse(valid=False, error=f"Request failed: {e}")
except Exception as e:
return ImageValidateResponse(valid=False, error=str(e))
@router.get("/api/v1/picture-sources/full-image", tags=["Picture Sources"])
async def get_full_image(
_auth: AuthRequired,
source: str = Query(..., description="Image URL or local file path"),
):
"""Serve the full-resolution image for lightbox preview."""
from pathlib import Path
try:
if source.startswith(("http://", "https://")):
async with httpx.AsyncClient(timeout=15, follow_redirects=True) as client:
response = await client.get(source)
response.raise_for_status()
pil_image = Image.open(io.BytesIO(response.content))
else:
path = Path(source)
if not path.exists():
raise HTTPException(status_code=404, detail="File not found")
pil_image = Image.open(path)
pil_image = pil_image.convert("RGB")
buf = io.BytesIO()
pil_image.save(buf, format="JPEG", quality=90)
buf.seek(0)
return Response(content=buf.getvalue(), media_type="image/jpeg")
except HTTPException:
raise
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
@router.post("/api/v1/picture-sources", response_model=PictureSourceResponse, tags=["Picture Sources"], status_code=201)
async def create_picture_source(
data: PictureSourceCreate,
_auth: AuthRequired,
store: PictureSourceStore = Depends(get_picture_source_store),
template_store: TemplateStore = Depends(get_template_store),
pp_store: PostprocessingTemplateStore = Depends(get_pp_template_store),
):
"""Create a new picture source."""
try:
# Validate referenced entities
if data.stream_type == "raw" and data.capture_template_id:
try:
template_store.get_template(data.capture_template_id)
except ValueError:
raise HTTPException(
status_code=400,
detail=f"Capture template not found: {data.capture_template_id}",
)
if data.stream_type == "processed" and data.postprocessing_template_id:
try:
pp_store.get_template(data.postprocessing_template_id)
except ValueError:
raise HTTPException(
status_code=400,
detail=f"Postprocessing template not found: {data.postprocessing_template_id}",
)
stream = store.create_stream(
name=data.name,
stream_type=data.stream_type,
display_index=data.display_index,
capture_template_id=data.capture_template_id,
target_fps=data.target_fps,
source_stream_id=data.source_stream_id,
postprocessing_template_id=data.postprocessing_template_id,
image_source=data.image_source,
description=data.description,
)
return _stream_to_response(stream)
except HTTPException:
raise
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
logger.error(f"Failed to create picture source: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.get("/api/v1/picture-sources/{stream_id}", response_model=PictureSourceResponse, tags=["Picture Sources"])
async def get_picture_source(
stream_id: str,
_auth: AuthRequired,
store: PictureSourceStore = Depends(get_picture_source_store),
):
"""Get picture source by ID."""
try:
stream = store.get_stream(stream_id)
return _stream_to_response(stream)
except ValueError:
raise HTTPException(status_code=404, detail=f"Picture source {stream_id} not found")
@router.put("/api/v1/picture-sources/{stream_id}", response_model=PictureSourceResponse, tags=["Picture Sources"])
async def update_picture_source(
stream_id: str,
data: PictureSourceUpdate,
_auth: AuthRequired,
store: PictureSourceStore = Depends(get_picture_source_store),
):
"""Update a picture source."""
try:
stream = store.update_stream(
stream_id=stream_id,
name=data.name,
display_index=data.display_index,
capture_template_id=data.capture_template_id,
target_fps=data.target_fps,
source_stream_id=data.source_stream_id,
postprocessing_template_id=data.postprocessing_template_id,
image_source=data.image_source,
description=data.description,
)
return _stream_to_response(stream)
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
logger.error(f"Failed to update picture source: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.delete("/api/v1/picture-sources/{stream_id}", status_code=204, tags=["Picture Sources"])
async def delete_picture_source(
    stream_id: str,
    _auth: AuthRequired,
    store: PictureSourceStore = Depends(get_picture_source_store),
    device_store: DeviceStore = Depends(get_device_store),
):
    """Remove a picture source.

    Refuses with 409 while any device still references the source, so a
    running device can never be left pointing at a deleted stream.
    """
    try:
        in_use = store.is_referenced_by_device(stream_id, device_store)
        if in_use:
            raise HTTPException(
                status_code=409,
                detail="Cannot delete picture source: it is assigned to one or more devices. "
                "Please reassign those devices before deleting.",
            )
        store.delete_stream(stream_id)
    except HTTPException:
        raise
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error(f"Failed to delete picture source: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.post("/api/v1/picture-sources/{stream_id}/test", response_model=TemplateTestResponse, tags=["Picture Sources"])
async def test_picture_source(
    stream_id: str,
    test_request: PictureSourceTestRequest,
    _auth: AuthRequired,
    store: PictureSourceStore = Depends(get_picture_source_store),
    template_store: TemplateStore = Depends(get_template_store),
    processor_manager: ProcessorManager = Depends(get_processor_manager),
    device_store: DeviceStore = Depends(get_device_store),
    pp_store: PostprocessingTemplateStore = Depends(get_pp_template_store),
):
    """Test a picture source by resolving its chain and running a capture test.

    Resolves the stream chain to the raw stream, captures frames,
    and returns preview image + performance metrics.

    For processed streams, applies postprocessing (gamma, saturation, brightness)
    to the preview image.

    Raises:
        HTTPException 400: unresolvable chain, missing capture template,
            unavailable engine, or missing image file.
        HTTPException 409: the target display is locked by a running device.
        HTTPException 500: engine failure or any unexpected error.
    """
    # Capture-session handle; only set for screen-capture sources so the
    # finally block knows whether hardware state needs releasing.
    stream = None
    try:
        # Resolve stream chain
        try:
            chain = store.resolve_stream_chain(stream_id)
        except ValueError as e:
            raise HTTPException(status_code=400, detail=str(e))
        raw_stream = chain["raw_stream"]
        # NOTE(review): no else branch below — an unrecognized raw stream type
        # would leave pil_image unset and surface as a 500 (UnboundLocalError);
        # confirm the store can only yield these two types.
        if isinstance(raw_stream, StaticImagePictureSource):
            # Static image stream: load image directly, no engine needed
            from pathlib import Path
            source = raw_stream.image_source
            start_time = time.perf_counter()
            if source.startswith(("http://", "https://")):
                # Remote image: fetch over HTTP with a bounded timeout.
                async with httpx.AsyncClient(timeout=15, follow_redirects=True) as client:
                    resp = await client.get(source)
                    resp.raise_for_status()
                    pil_image = Image.open(io.BytesIO(resp.content)).convert("RGB")
            else:
                # Local image: validate the path before opening.
                path = Path(source)
                if not path.exists():
                    raise HTTPException(status_code=400, detail=f"Image file not found: {source}")
                pil_image = Image.open(path).convert("RGB")
            actual_duration = time.perf_counter() - start_time
            # A static image counts as a single "frame" for the metrics below.
            frame_count = 1
            total_capture_time = actual_duration
        elif isinstance(raw_stream, ScreenCapturePictureSource):
            # Screen capture stream: use engine
            try:
                capture_template = template_store.get_template(raw_stream.capture_template_id)
            except ValueError:
                raise HTTPException(
                    status_code=400,
                    detail=f"Capture template not found: {raw_stream.capture_template_id}",
                )
            display_index = raw_stream.display_index
            if capture_template.engine_type not in EngineRegistry.get_available_engines():
                raise HTTPException(
                    status_code=400,
                    detail=f"Engine '{capture_template.engine_type}' is not available on this system",
                )
            # Refuse to test while a device holds the display lock — two
            # concurrent capture sessions on one display would conflict.
            locked_device_id = processor_manager.get_display_lock_info(display_index)
            if locked_device_id:
                try:
                    device = device_store.get_device(locked_device_id)
                    device_name = device.name
                except Exception:
                    # Fall back to the raw ID if the device record is gone.
                    device_name = locked_device_id
                raise HTTPException(
                    status_code=409,
                    detail=f"Display {display_index} is currently being captured by device '{device_name}'. "
                    f"Please stop the device processing before testing.",
                )
            stream = EngineRegistry.create_stream(
                capture_template.engine_type, display_index, capture_template.engine_config
            )
            stream.initialize()
            logger.info(f"Starting {test_request.capture_duration}s stream test for {stream_id}")
            # Timed capture loop: count frames and accumulate per-frame cost.
            frame_count = 0
            total_capture_time = 0.0
            last_frame = None
            start_time = time.perf_counter()
            end_time = start_time + test_request.capture_duration
            while time.perf_counter() < end_time:
                capture_start = time.perf_counter()
                screen_capture = stream.capture_frame()
                capture_elapsed = time.perf_counter() - capture_start
                # None means no new frame (screen unchanged) — not counted.
                if screen_capture is None:
                    continue
                total_capture_time += capture_elapsed
                frame_count += 1
                last_frame = screen_capture
            actual_duration = time.perf_counter() - start_time
            if last_frame is None:
                raise RuntimeError("No frames captured during test")
            if isinstance(last_frame.image, np.ndarray):
                pil_image = Image.fromarray(last_frame.image)
            else:
                raise ValueError("Unexpected image format from engine")
        # Create thumbnail
        thumbnail_width = 640
        aspect_ratio = pil_image.height / pil_image.width
        thumbnail_height = int(thumbnail_width * aspect_ratio)
        thumbnail = pil_image.copy()
        thumbnail.thumbnail((thumbnail_width, thumbnail_height), Image.Resampling.LANCZOS)
        # Apply postprocessing filters if this is a processed stream
        pp_template_ids = chain["postprocessing_template_ids"]
        if pp_template_ids:
            try:
                # Only the first PP template in the chain is previewed here.
                pp_template = pp_store.get_template(pp_template_ids[0])
                pool = ImagePool()
                def apply_filters(img):
                    # Run the template's filter chain over a PIL image;
                    # filters returning None are treated as in-place.
                    arr = np.array(img)
                    for fi in pp_template.filters:
                        f = FilterRegistry.create_instance(fi.filter_id, fi.options)
                        result = f.process_image(arr, pool)
                        if result is not None:
                            arr = result
                    return Image.fromarray(arr)
                thumbnail = apply_filters(thumbnail)
                pil_image = apply_filters(pil_image)
            except ValueError:
                # Missing PP template is non-fatal for a preview.
                logger.warning(f"PP template {pp_template_ids[0]} not found, skipping postprocessing preview")
        # Encode thumbnail
        img_buffer = io.BytesIO()
        thumbnail.save(img_buffer, format='JPEG', quality=85)
        img_buffer.seek(0)
        thumbnail_b64 = base64.b64encode(img_buffer.getvalue()).decode('utf-8')
        thumbnail_data_uri = f"data:image/jpeg;base64,{thumbnail_b64}"
        # Encode full-resolution image
        full_buffer = io.BytesIO()
        pil_image.save(full_buffer, format='JPEG', quality=90)
        full_buffer.seek(0)
        full_b64 = base64.b64encode(full_buffer.getvalue()).decode('utf-8')
        full_data_uri = f"data:image/jpeg;base64,{full_b64}"
        # Derived metrics; guards avoid division by zero on degenerate runs.
        actual_fps = frame_count / actual_duration if actual_duration > 0 else 0
        avg_capture_time_ms = (total_capture_time / frame_count * 1000) if frame_count > 0 else 0
        width, height = pil_image.size
        return TemplateTestResponse(
            full_capture=CaptureImage(
                image=thumbnail_data_uri,
                full_image=full_data_uri,
                width=width,
                height=height,
                thumbnail_width=thumbnail_width,
                thumbnail_height=thumbnail_height,
            ),
            border_extraction=None,
            performance=PerformanceMetrics(
                capture_duration_s=actual_duration,
                frame_count=frame_count,
                actual_fps=actual_fps,
                avg_capture_time_ms=avg_capture_time_ms,
            ),
        )
    except HTTPException:
        raise
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except RuntimeError as e:
        raise HTTPException(status_code=500, detail=f"Engine error: {str(e)}")
    except Exception as e:
        logger.error(f"Failed to test picture source: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        # Always release the capture engine, even on error paths.
        if stream:
            try:
                stream.cleanup()
            except Exception as e:
                logger.error(f"Error cleaning up test stream: {e}")

View File

@@ -0,0 +1,348 @@
"""Postprocessing template routes."""
import base64
import io
import time
import httpx
import numpy as np
from PIL import Image
from fastapi import APIRouter, HTTPException, Depends
from wled_controller.api.auth import AuthRequired
from wled_controller.api.dependencies import (
get_device_store,
get_picture_source_store,
get_pp_template_store,
get_processor_manager,
get_template_store,
)
from wled_controller.api.schemas.common import (
CaptureImage,
PerformanceMetrics,
TemplateTestResponse,
)
from wled_controller.api.schemas.filters import FilterInstanceSchema
from wled_controller.api.schemas.postprocessing import (
PostprocessingTemplateCreate,
PostprocessingTemplateListResponse,
PostprocessingTemplateResponse,
PostprocessingTemplateUpdate,
PPTemplateTestRequest,
)
from wled_controller.core.capture_engines import EngineRegistry
from wled_controller.core.filters import FilterRegistry, FilterInstance, ImagePool
from wled_controller.core.processor_manager import ProcessorManager
from wled_controller.storage import DeviceStore
from wled_controller.storage.template_store import TemplateStore
from wled_controller.storage.postprocessing_template_store import PostprocessingTemplateStore
from wled_controller.storage.picture_source_store import PictureSourceStore
from wled_controller.storage.picture_source import ScreenCapturePictureSource, StaticImagePictureSource
from wled_controller.utils import get_logger
logger = get_logger(__name__)
router = APIRouter()
def _pp_template_to_response(t) -> PostprocessingTemplateResponse:
    """Map a stored PostprocessingTemplate onto its API response model."""
    filter_schemas = [
        FilterInstanceSchema(filter_id=f.filter_id, options=f.options)
        for f in t.filters
    ]
    return PostprocessingTemplateResponse(
        id=t.id,
        name=t.name,
        filters=filter_schemas,
        created_at=t.created_at,
        updated_at=t.updated_at,
        description=t.description,
    )
@router.get("/api/v1/postprocessing-templates", response_model=PostprocessingTemplateListResponse, tags=["Postprocessing Templates"])
async def list_pp_templates(
    _auth: AuthRequired,
    store: PostprocessingTemplateStore = Depends(get_pp_template_store),
):
    """Return every stored postprocessing template."""
    try:
        items = [_pp_template_to_response(t) for t in store.get_all_templates()]
        return PostprocessingTemplateListResponse(templates=items, count=len(items))
    except Exception as e:
        logger.error(f"Failed to list postprocessing templates: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.post("/api/v1/postprocessing-templates", response_model=PostprocessingTemplateResponse, tags=["Postprocessing Templates"], status_code=201)
async def create_pp_template(
    data: PostprocessingTemplateCreate,
    _auth: AuthRequired,
    store: PostprocessingTemplateStore = Depends(get_pp_template_store),
):
    """Persist a new postprocessing template and return it."""
    try:
        filter_instances = [FilterInstance(f.filter_id, f.options) for f in data.filters]
        created = store.create_template(
            name=data.name,
            filters=filter_instances,
            description=data.description,
        )
        return _pp_template_to_response(created)
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error(f"Failed to create postprocessing template: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/api/v1/postprocessing-templates/{template_id}", response_model=PostprocessingTemplateResponse, tags=["Postprocessing Templates"])
async def get_pp_template(
    template_id: str,
    _auth: AuthRequired,
    store: PostprocessingTemplateStore = Depends(get_pp_template_store),
):
    """Fetch a single postprocessing template; 404 if the ID is unknown."""
    try:
        return _pp_template_to_response(store.get_template(template_id))
    except ValueError:
        raise HTTPException(status_code=404, detail=f"Postprocessing template {template_id} not found")
@router.put("/api/v1/postprocessing-templates/{template_id}", response_model=PostprocessingTemplateResponse, tags=["Postprocessing Templates"])
async def update_pp_template(
    template_id: str,
    data: PostprocessingTemplateUpdate,
    _auth: AuthRequired,
    store: PostprocessingTemplateStore = Depends(get_pp_template_store),
):
    """Apply the submitted changes to an existing postprocessing template.

    A filters value of None means "leave the filter list unchanged".
    """
    try:
        filter_instances = None
        if data.filters is not None:
            filter_instances = [FilterInstance(f.filter_id, f.options) for f in data.filters]
        updated = store.update_template(
            template_id=template_id,
            name=data.name,
            filters=filter_instances,
            description=data.description,
        )
        return _pp_template_to_response(updated)
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error(f"Failed to update postprocessing template: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.delete("/api/v1/postprocessing-templates/{template_id}", status_code=204, tags=["Postprocessing Templates"])
async def delete_pp_template(
    template_id: str,
    _auth: AuthRequired,
    store: PostprocessingTemplateStore = Depends(get_pp_template_store),
    stream_store: PictureSourceStore = Depends(get_picture_source_store),
):
    """Remove a postprocessing template.

    Refuses with 409 while any picture source still references the template.
    """
    try:
        referenced = store.is_referenced_by(template_id, stream_store)
        if referenced:
            raise HTTPException(
                status_code=409,
                detail="Cannot delete postprocessing template: it is referenced by one or more picture sources. "
                "Please reassign those streams before deleting.",
            )
        store.delete_template(template_id)
    except HTTPException:
        raise
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error(f"Failed to delete postprocessing template: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.post("/api/v1/postprocessing-templates/{template_id}/test", response_model=TemplateTestResponse, tags=["Postprocessing Templates"])
async def test_pp_template(
    template_id: str,
    test_request: PPTemplateTestRequest,
    _auth: AuthRequired,
    pp_store: PostprocessingTemplateStore = Depends(get_pp_template_store),
    stream_store: PictureSourceStore = Depends(get_picture_source_store),
    template_store: TemplateStore = Depends(get_template_store),
    processor_manager: ProcessorManager = Depends(get_processor_manager),
    device_store: DeviceStore = Depends(get_device_store),
):
    """Test a postprocessing template by capturing from a source stream and applying filters.

    Captures (or loads) a frame from the requested source stream, runs this
    template's filter chain over it, and returns the filtered preview plus
    capture performance metrics.

    Raises:
        HTTPException 404: unknown postprocessing template.
        HTTPException 400: unresolvable source chain, missing capture
            template, unavailable engine, or missing image file.
        HTTPException 409: the target display is locked by a running device.
        HTTPException 500: any unexpected failure.
    """
    # Capture-session handle; only set for screen-capture sources so the
    # finally block knows whether hardware state needs releasing.
    stream = None
    try:
        # Get the PP template
        try:
            pp_template = pp_store.get_template(template_id)
        except ValueError as e:
            raise HTTPException(status_code=404, detail=str(e))
        # Resolve source stream chain to get the raw stream
        try:
            chain = stream_store.resolve_stream_chain(test_request.source_stream_id)
        except ValueError as e:
            raise HTTPException(status_code=400, detail=str(e))
        raw_stream = chain["raw_stream"]
        # NOTE(review): no else branch below — an unrecognized raw stream type
        # would leave pil_image unset (UnboundLocalError -> 500); confirm the
        # store can only yield these two types.
        if isinstance(raw_stream, StaticImagePictureSource):
            # Static image: load directly
            from pathlib import Path
            source = raw_stream.image_source
            start_time = time.perf_counter()
            if source.startswith(("http://", "https://")):
                # Remote image: fetch over HTTP with a bounded timeout.
                async with httpx.AsyncClient(timeout=15, follow_redirects=True) as client:
                    resp = await client.get(source)
                    resp.raise_for_status()
                    pil_image = Image.open(io.BytesIO(resp.content)).convert("RGB")
            else:
                # Local image: validate the path before opening.
                path = Path(source)
                if not path.exists():
                    raise HTTPException(status_code=400, detail=f"Image file not found: {source}")
                pil_image = Image.open(path).convert("RGB")
            actual_duration = time.perf_counter() - start_time
            # A static image counts as a single "frame" for the metrics below.
            frame_count = 1
            total_capture_time = actual_duration
        elif isinstance(raw_stream, ScreenCapturePictureSource):
            # Screen capture stream: use engine
            try:
                capture_template = template_store.get_template(raw_stream.capture_template_id)
            except ValueError:
                raise HTTPException(
                    status_code=400,
                    detail=f"Capture template not found: {raw_stream.capture_template_id}",
                )
            display_index = raw_stream.display_index
            if capture_template.engine_type not in EngineRegistry.get_available_engines():
                raise HTTPException(
                    status_code=400,
                    detail=f"Engine '{capture_template.engine_type}' is not available on this system",
                )
            # Refuse to test while a device holds the display lock.
            locked_device_id = processor_manager.get_display_lock_info(display_index)
            if locked_device_id:
                try:
                    device = device_store.get_device(locked_device_id)
                    device_name = device.name
                except Exception:
                    # Fall back to the raw ID if the device record is gone.
                    device_name = locked_device_id
                raise HTTPException(
                    status_code=409,
                    detail=f"Display {display_index} is currently being captured by device '{device_name}'. "
                    f"Please stop the device processing before testing.",
                )
            stream = EngineRegistry.create_stream(
                capture_template.engine_type, display_index, capture_template.engine_config
            )
            stream.initialize()
            logger.info(f"Starting {test_request.capture_duration}s PP template test for {template_id} using stream {test_request.source_stream_id}")
            # Timed capture loop: count frames and accumulate per-frame cost.
            frame_count = 0
            total_capture_time = 0.0
            last_frame = None
            start_time = time.perf_counter()
            end_time = start_time + test_request.capture_duration
            while time.perf_counter() < end_time:
                capture_start = time.perf_counter()
                screen_capture = stream.capture_frame()
                capture_elapsed = time.perf_counter() - capture_start
                # None means no new frame (screen unchanged) — not counted.
                if screen_capture is None:
                    continue
                total_capture_time += capture_elapsed
                frame_count += 1
                last_frame = screen_capture
            actual_duration = time.perf_counter() - start_time
            if last_frame is None:
                raise RuntimeError("No frames captured during test")
            if isinstance(last_frame.image, np.ndarray):
                pil_image = Image.fromarray(last_frame.image)
            else:
                raise ValueError("Unexpected image format from engine")
        # Create thumbnail
        thumbnail_width = 640
        aspect_ratio = pil_image.height / pil_image.width
        thumbnail_height = int(thumbnail_width * aspect_ratio)
        thumbnail = pil_image.copy()
        thumbnail.thumbnail((thumbnail_width, thumbnail_height), Image.Resampling.LANCZOS)
        # Apply postprocessing filters
        if pp_template.filters:
            pool = ImagePool()
            def apply_filters(img):
                # Run the template's filter chain over a PIL image;
                # filters returning None are treated as in-place.
                arr = np.array(img)
                for fi in pp_template.filters:
                    f = FilterRegistry.create_instance(fi.filter_id, fi.options)
                    result = f.process_image(arr, pool)
                    if result is not None:
                        arr = result
                return Image.fromarray(arr)
            thumbnail = apply_filters(thumbnail)
            pil_image = apply_filters(pil_image)
        # Encode thumbnail
        img_buffer = io.BytesIO()
        thumbnail.save(img_buffer, format='JPEG', quality=85)
        img_buffer.seek(0)
        thumbnail_b64 = base64.b64encode(img_buffer.getvalue()).decode('utf-8')
        thumbnail_data_uri = f"data:image/jpeg;base64,{thumbnail_b64}"
        # Encode full-resolution image
        full_buffer = io.BytesIO()
        pil_image.save(full_buffer, format='JPEG', quality=90)
        full_buffer.seek(0)
        full_b64 = base64.b64encode(full_buffer.getvalue()).decode('utf-8')
        full_data_uri = f"data:image/jpeg;base64,{full_b64}"
        # Derived metrics; guards avoid division by zero on degenerate runs.
        actual_fps = frame_count / actual_duration if actual_duration > 0 else 0
        avg_capture_time_ms = (total_capture_time / frame_count * 1000) if frame_count > 0 else 0
        width, height = pil_image.size
        return TemplateTestResponse(
            full_capture=CaptureImage(
                image=thumbnail_data_uri,
                full_image=full_data_uri,
                width=width,
                height=height,
                thumbnail_width=thumbnail_width,
                thumbnail_height=thumbnail_height,
            ),
            border_extraction=None,
            performance=PerformanceMetrics(
                capture_duration_s=actual_duration,
                frame_count=frame_count,
                actual_fps=actual_fps,
                avg_capture_time_ms=avg_capture_time_ms,
            ),
        )
    except HTTPException:
        raise
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error(f"Postprocessing template test failed: {e}")
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        # Always release the capture engine; cleanup errors are best-effort
        # and deliberately swallowed here.
        if stream:
            try:
                stream.cleanup()
            except Exception:
                pass

View File

@@ -0,0 +1,93 @@
"""System routes: health, version, displays."""
import sys
from datetime import datetime, timezone

from fastapi import APIRouter, HTTPException

from wled_controller import __version__
from wled_controller.api.auth import AuthRequired
from wled_controller.api.schemas.system import (
    DisplayInfo,
    DisplayListResponse,
    HealthResponse,
    VersionResponse,
)
from wled_controller.core.screen_capture import get_available_displays
from wled_controller.utils import get_logger
logger = get_logger(__name__)
router = APIRouter()
@router.get("/health", response_model=HealthResponse, tags=["Health"])
async def health_check():
    """Check service health status.

    Returns basic health information including status, version, and a
    timezone-aware UTC timestamp.
    """
    logger.info("Health check requested")
    # datetime.utcnow() is deprecated (Python 3.12+) and returns a naive
    # datetime; use an explicit aware UTC timestamp instead.
    return HealthResponse(
        status="healthy",
        timestamp=datetime.now(timezone.utc),
        version=__version__,
    )
@router.get("/api/v1/version", response_model=VersionResponse, tags=["Info"])
async def get_version():
    """Get version information.

    Reports the application version, the interpreter's Python version,
    and the API version string.
    """
    logger.info("Version info requested")
    py = sys.version_info
    return VersionResponse(
        version=__version__,
        python_version=f"{py.major}.{py.minor}.{py.micro}",
        api_version="v1",
    )
@router.get("/api/v1/config/displays", response_model=DisplayListResponse, tags=["Config"])
async def get_displays(_: AuthRequired):
    """Get list of available displays.

    Enumerates every monitor that can be captured and converts each
    detected display record into its API schema.
    """
    logger.info("Listing available displays")
    try:
        displays = []
        # get_available_displays() yields dataclass records; re-shape each
        # one into the Pydantic DisplayInfo used on the wire.
        for d in get_available_displays():
            displays.append(
                DisplayInfo(
                    index=d.index,
                    name=d.name,
                    width=d.width,
                    height=d.height,
                    x=d.x,
                    y=d.y,
                    is_primary=d.is_primary,
                    refresh_rate=d.refresh_rate,
                )
            )
        logger.info(f"Found {len(displays)} displays")
        return DisplayListResponse(
            displays=displays,
            count=len(displays),
        )
    except Exception as e:
        logger.error(f"Failed to get displays: {e}")
        raise HTTPException(
            status_code=500,
            detail=f"Failed to retrieve display information: {str(e)}"
        )

View File

@@ -0,0 +1,410 @@
"""Capture template, engine, and filter routes."""
import base64
import io
import time
import numpy as np
from PIL import Image
from fastapi import APIRouter, HTTPException, Depends
from wled_controller.api.auth import AuthRequired
from wled_controller.api.dependencies import (
get_device_store,
get_picture_source_store,
get_processor_manager,
get_template_store,
)
from wled_controller.api.schemas.common import (
CaptureImage,
PerformanceMetrics,
TemplateTestResponse,
)
from wled_controller.api.schemas.templates import (
EngineInfo,
EngineListResponse,
TemplateCreate,
TemplateListResponse,
TemplateResponse,
TemplateTestRequest,
TemplateUpdate,
)
from wled_controller.api.schemas.filters import (
FilterOptionDefSchema,
FilterTypeListResponse,
FilterTypeResponse,
)
from wled_controller.core.capture_engines import EngineRegistry
from wled_controller.core.filters import FilterRegistry
from wled_controller.core.processor_manager import ProcessorManager
from wled_controller.storage import DeviceStore
from wled_controller.storage.template_store import TemplateStore
from wled_controller.storage.picture_source_store import PictureSourceStore
from wled_controller.storage.picture_source import ScreenCapturePictureSource
from wled_controller.utils import get_logger
logger = get_logger(__name__)
router = APIRouter()
# ===== CAPTURE TEMPLATE ENDPOINTS =====
@router.get("/api/v1/capture-templates", response_model=TemplateListResponse, tags=["Templates"])
async def list_templates(
    _auth: AuthRequired,
    template_store: TemplateStore = Depends(get_template_store),
):
    """Return every stored capture template."""
    try:
        responses = []
        for t in template_store.get_all_templates():
            responses.append(
                TemplateResponse(
                    id=t.id,
                    name=t.name,
                    engine_type=t.engine_type,
                    engine_config=t.engine_config,
                    created_at=t.created_at,
                    updated_at=t.updated_at,
                    description=t.description,
                )
            )
        return TemplateListResponse(
            templates=responses,
            count=len(responses),
        )
    except Exception as e:
        logger.error(f"Failed to list templates: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.post("/api/v1/capture-templates", response_model=TemplateResponse, tags=["Templates"], status_code=201)
async def create_template(
    template_data: TemplateCreate,
    _auth: AuthRequired,
    template_store: TemplateStore = Depends(get_template_store),
):
    """Persist a new capture template and return it."""
    try:
        created = template_store.create_template(
            name=template_data.name,
            engine_type=template_data.engine_type,
            engine_config=template_data.engine_config,
            description=template_data.description,
        )
        return TemplateResponse(
            id=created.id,
            name=created.name,
            engine_type=created.engine_type,
            engine_config=created.engine_config,
            created_at=created.created_at,
            updated_at=created.updated_at,
            description=created.description,
        )
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error(f"Failed to create template: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/api/v1/capture-templates/{template_id}", response_model=TemplateResponse, tags=["Templates"])
async def get_template(
    template_id: str,
    _auth: AuthRequired,
    template_store: TemplateStore = Depends(get_template_store),
):
    """Fetch a single capture template; 404 if the ID is unknown."""
    try:
        found = template_store.get_template(template_id)
    except ValueError:
        raise HTTPException(status_code=404, detail=f"Template {template_id} not found")
    return TemplateResponse(
        id=found.id,
        name=found.name,
        engine_type=found.engine_type,
        engine_config=found.engine_config,
        created_at=found.created_at,
        updated_at=found.updated_at,
        description=found.description,
    )
@router.put("/api/v1/capture-templates/{template_id}", response_model=TemplateResponse, tags=["Templates"])
async def update_template(
    template_id: str,
    update_data: TemplateUpdate,
    _auth: AuthRequired,
    template_store: TemplateStore = Depends(get_template_store),
):
    """Apply the submitted changes to an existing capture template."""
    try:
        updated = template_store.update_template(
            template_id=template_id,
            name=update_data.name,
            engine_type=update_data.engine_type,
            engine_config=update_data.engine_config,
            description=update_data.description,
        )
        return TemplateResponse(
            id=updated.id,
            name=updated.name,
            engine_type=updated.engine_type,
            engine_config=updated.engine_config,
            created_at=updated.created_at,
            updated_at=updated.updated_at,
            description=updated.description,
        )
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error(f"Failed to update template: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.delete("/api/v1/capture-templates/{template_id}", status_code=204, tags=["Templates"])
async def delete_template(
    template_id: str,
    _auth: AuthRequired,
    template_store: TemplateStore = Depends(get_template_store),
    stream_store: PictureSourceStore = Depends(get_picture_source_store),
):
    """Delete a capture template.

    Refuses with 409 while any screen-capture picture source still points
    at the template, naming the blocking streams in the error detail.
    """
    try:
        # Collect the names of picture sources still bound to this template.
        streams_using_template = [
            s.name
            for s in stream_store.get_all_streams()
            if isinstance(s, ScreenCapturePictureSource) and s.capture_template_id == template_id
        ]
        if streams_using_template:
            stream_list = ", ".join(streams_using_template)
            raise HTTPException(
                status_code=409,
                detail=f"Cannot delete template: it is used by the following stream(s): {stream_list}. "
                f"Please reassign these streams to a different template before deleting."
            )
        # No references remain — safe to delete.
        template_store.delete_template(template_id)
    except HTTPException:
        raise
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error(f"Failed to delete template: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/api/v1/capture-engines", response_model=EngineListResponse, tags=["Templates"])
async def list_engines(_auth: AuthRequired):
    """List available capture engines on this system.

    Every engine returned is usable on the current platform, with its
    default configuration attached.
    """
    try:
        engines = [
            EngineInfo(
                type=engine_type,
                name=engine_type.upper(),
                default_config=EngineRegistry.get_engine(engine_type).get_default_config(),
                available=True,
            )
            for engine_type in EngineRegistry.get_available_engines()
        ]
        return EngineListResponse(engines=engines, count=len(engines))
    except Exception as e:
        logger.error(f"Failed to list engines: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.post("/api/v1/capture-templates/test", response_model=TemplateTestResponse, tags=["Templates"])
async def test_template(
    test_request: TemplateTestRequest,
    _auth: AuthRequired,
    processor_manager: ProcessorManager = Depends(get_processor_manager),
    device_store: DeviceStore = Depends(get_device_store),
):
    """Test a capture template configuration.

    Temporarily instantiates an engine with the provided configuration,
    captures frames for the specified duration, and returns actual FPS metrics.

    Raises:
        HTTPException 400: the requested engine is unavailable, or the
            engine rejected the configuration (ValueError).
        HTTPException 409: the target display is locked by a running device.
        HTTPException 500: engine runtime failure or any unexpected error.
    """
    # Capture-session handle, released in finally regardless of outcome.
    stream = None
    try:
        # Validate engine type
        if test_request.engine_type not in EngineRegistry.get_available_engines():
            raise HTTPException(
                status_code=400,
                detail=f"Engine '{test_request.engine_type}' is not available on this system"
            )
        # Check if display is already being captured
        locked_device_id = processor_manager.get_display_lock_info(test_request.display_index)
        if locked_device_id:
            # Get device info for better error message
            try:
                device = device_store.get_device(locked_device_id)
                device_name = device.name
            except Exception:
                # Fall back to the raw ID if the device record is gone.
                device_name = locked_device_id
            raise HTTPException(
                status_code=409,
                detail=(
                    f"Display {test_request.display_index} is currently being captured by device "
                    f"'{device_name}'. Please stop the device processing before testing this template."
                )
            )
        # Create and initialize capture stream
        stream = EngineRegistry.create_stream(
            test_request.engine_type, test_request.display_index, test_request.engine_config
        )
        stream.initialize()
        # Run sustained capture test
        logger.info(f"Starting {test_request.capture_duration}s capture test with {test_request.engine_type}")
        frame_count = 0
        total_capture_time = 0.0
        last_frame = None
        start_time = time.perf_counter()
        end_time = start_time + test_request.capture_duration
        while time.perf_counter() < end_time:
            capture_start = time.perf_counter()
            screen_capture = stream.capture_frame()
            capture_elapsed = time.perf_counter() - capture_start
            # Skip if no new frame (screen unchanged)
            if screen_capture is None:
                continue
            total_capture_time += capture_elapsed
            frame_count += 1
            last_frame = screen_capture
        actual_duration = time.perf_counter() - start_time
        logger.info(f"Captured {frame_count} frames in {actual_duration:.2f}s")
        # Use the last captured frame for preview
        if last_frame is None:
            raise RuntimeError("No frames captured during test")
        # Convert numpy array to PIL Image
        if isinstance(last_frame.image, np.ndarray):
            pil_image = Image.fromarray(last_frame.image)
        else:
            raise ValueError("Unexpected image format from engine")
        # Create thumbnail (640px wide, maintain aspect ratio)
        thumbnail_width = 640
        aspect_ratio = pil_image.height / pil_image.width
        thumbnail_height = int(thumbnail_width * aspect_ratio)
        thumbnail = pil_image.copy()
        thumbnail.thumbnail((thumbnail_width, thumbnail_height), Image.Resampling.LANCZOS)
        # Encode thumbnail as JPEG
        img_buffer = io.BytesIO()
        thumbnail.save(img_buffer, format='JPEG', quality=85)
        img_buffer.seek(0)
        thumbnail_b64 = base64.b64encode(img_buffer.getvalue()).decode('utf-8')
        thumbnail_data_uri = f"data:image/jpeg;base64,{thumbnail_b64}"
        # Encode full-resolution image as JPEG
        full_buffer = io.BytesIO()
        pil_image.save(full_buffer, format='JPEG', quality=90)
        full_buffer.seek(0)
        full_b64 = base64.b64encode(full_buffer.getvalue()).decode('utf-8')
        full_data_uri = f"data:image/jpeg;base64,{full_b64}"
        # Calculate metrics; guards avoid division by zero on degenerate runs.
        actual_fps = frame_count / actual_duration if actual_duration > 0 else 0
        avg_capture_time_ms = (total_capture_time / frame_count * 1000) if frame_count > 0 else 0
        width, height = pil_image.size
        return TemplateTestResponse(
            full_capture=CaptureImage(
                image=thumbnail_data_uri,
                full_image=full_data_uri,
                width=width,
                height=height,
                thumbnail_width=thumbnail_width,
                thumbnail_height=thumbnail_height,
            ),
            border_extraction=None,
            performance=PerformanceMetrics(
                capture_duration_s=actual_duration,
                frame_count=frame_count,
                actual_fps=actual_fps,
                avg_capture_time_ms=avg_capture_time_ms,
            ),
        )
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except RuntimeError as e:
        raise HTTPException(status_code=500, detail=f"Engine error: {str(e)}")
    except Exception as e:
        logger.error(f"Failed to test template: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        # Always release the capture engine, even on error paths.
        if stream:
            try:
                stream.cleanup()
            except Exception as e:
                logger.error(f"Error cleaning up test stream: {e}")
# ===== FILTER TYPE ENDPOINTS =====
@router.get("/api/v1/filters", response_model=FilterTypeListResponse, tags=["Filters"])
async def list_filter_types(_auth: AuthRequired):
    """List all available postprocessing filter types and their options schemas."""
    responses = []
    for filter_cls in FilterRegistry.get_all().values():
        option_defs = [
            FilterOptionDefSchema(
                key=opt.key,
                label=opt.label,
                type=opt.option_type,
                default=opt.default,
                min_value=opt.min_value,
                max_value=opt.max_value,
                step=opt.step,
            )
            for opt in filter_cls.get_options_schema()
        ]
        responses.append(FilterTypeResponse(
            filter_id=filter_cls.filter_id,
            filter_name=filter_cls.filter_name,
            options_schema=option_defs,
        ))
    return FilterTypeListResponse(filters=responses, count=len(responses))

View File

@@ -1,482 +0,0 @@
"""Pydantic schemas for API request and response models."""
from datetime import datetime
from typing import Any, Dict, List, Literal, Optional
from pydantic import BaseModel, Field, HttpUrl
from wled_controller.core.processor_manager import DEFAULT_STATE_CHECK_INTERVAL
# Health and Version Schemas
class HealthResponse(BaseModel):
    """Health check response.

    All three fields are required; ``status`` is constrained to the two
    literal values so clients can branch on it safely.
    """
    status: Literal["healthy", "unhealthy"] = Field(description="Service health status")
    timestamp: datetime = Field(description="Current server time")
    version: str = Field(description="Application version")
class VersionResponse(BaseModel):
    """Version information response.

    Reports application, Python interpreter, and API versions as plain strings.
    """
    version: str = Field(description="Application version")
    python_version: str = Field(description="Python version")
    api_version: str = Field(description="API version")
# Display Schemas
class DisplayInfo(BaseModel):
    """Display/monitor information.

    Geometry (width/height and the x/y origin) is in pixels; ``is_primary``
    defaults to False and must be set explicitly for the primary display.
    """
    index: int = Field(description="Display index")
    name: str = Field(description="Display name")
    width: int = Field(description="Display width in pixels")
    height: int = Field(description="Display height in pixels")
    x: int = Field(description="Display X position")
    y: int = Field(description="Display Y position")
    is_primary: bool = Field(default=False, description="Whether this is the primary display")
    refresh_rate: int = Field(description="Display refresh rate in Hz")
class DisplayListResponse(BaseModel):
    """List of available displays.

    ``count`` is supplied by the handler; no validator derives it from
    ``displays`` here.
    """
    displays: List[DisplayInfo] = Field(description="Available displays")
    count: int = Field(description="Number of displays")
# Device Schemas
class DeviceCreate(BaseModel):
    """Request to create/attach a WLED device.

    ``name`` and ``url`` are required; the capture template is optional.
    """
    name: str = Field(description="Device name", min_length=1, max_length=100)
    url: str = Field(description="WLED device URL (e.g., http://192.168.1.100)")
    capture_template_id: Optional[str] = Field(None, description="Capture template ID (uses first available if not set or invalid)")
class DeviceUpdate(BaseModel):
    """Request to update device information.

    Every field defaults to ``None`` so callers can send a partial update.
    Carries both the legacy ``capture_template_id`` and the newer
    ``picture_stream_id`` reference.
    """
    name: Optional[str] = Field(None, description="Device name", min_length=1, max_length=100)
    url: Optional[str] = Field(None, description="WLED device URL")
    enabled: Optional[bool] = Field(None, description="Whether device is enabled")
    capture_template_id: Optional[str] = Field(None, description="Capture template ID (legacy)")
    picture_stream_id: Optional[str] = Field(None, description="Picture stream ID")
class ColorCorrection(BaseModel):
    """Color correction settings.

    All three knobs have sane defaults, so an empty payload is valid.
    """
    gamma: float = Field(default=2.2, description="Gamma correction", ge=0.1, le=5.0)
    saturation: float = Field(default=1.0, description="Saturation multiplier", ge=0.0, le=2.0)
    brightness: float = Field(default=1.0, description="Brightness multiplier", ge=0.0, le=1.0)
class ProcessingSettings(BaseModel):
    """Processing settings for a device.

    Every field carries a default, so a bare ``ProcessingSettings()`` is a
    complete, valid configuration.  ``state_check_interval`` defaults to the
    shared DEFAULT_STATE_CHECK_INTERVAL constant from the processor manager.
    """
    display_index: int = Field(default=0, description="Display to capture", ge=0)
    fps: int = Field(default=30, description="Target frames per second", ge=10, le=90)
    border_width: int = Field(default=10, description="Border width in pixels", ge=1, le=100)
    interpolation_mode: str = Field(default="average", description="LED color interpolation mode (average, median, dominant)")
    brightness: float = Field(default=1.0, description="Global brightness (0.0-1.0)", ge=0.0, le=1.0)
    smoothing: float = Field(default=0.3, description="Temporal smoothing factor (0.0=none, 1.0=full)", ge=0.0, le=1.0)
    state_check_interval: int = Field(
        default=DEFAULT_STATE_CHECK_INTERVAL, ge=5, le=600,
        description="Seconds between WLED health checks"
    )
    color_correction: Optional[ColorCorrection] = Field(
        default_factory=ColorCorrection,
        description="Color correction settings"
    )
class Calibration(BaseModel):
    """Calibration configuration for pixel-to-LED mapping.

    Per-edge LED counts default to 0 and per-edge spans default to the full
    edge (0.0 to 1.0), so an empty payload describes a no-LED calibration.
    """
    layout: Literal["clockwise", "counterclockwise"] = Field(
        default="clockwise",
        description="LED strip layout direction"
    )
    start_position: Literal["top_left", "top_right", "bottom_left", "bottom_right"] = Field(
        default="bottom_left",
        description="Starting corner of the LED strip"
    )
    offset: int = Field(
        default=0,
        ge=0,
        description="Number of LEDs from physical LED 0 to start corner (along strip direction)"
    )
    leds_top: int = Field(default=0, ge=0, description="Number of LEDs on the top edge")
    leds_right: int = Field(default=0, ge=0, description="Number of LEDs on the right edge")
    leds_bottom: int = Field(default=0, ge=0, description="Number of LEDs on the bottom edge")
    leds_left: int = Field(default=0, ge=0, description="Number of LEDs on the left edge")
    # Per-edge span: fraction of screen side covered by LEDs (0.0-1.0)
    span_top_start: float = Field(default=0.0, ge=0.0, le=1.0, description="Start of top edge coverage")
    span_top_end: float = Field(default=1.0, ge=0.0, le=1.0, description="End of top edge coverage")
    span_right_start: float = Field(default=0.0, ge=0.0, le=1.0, description="Start of right edge coverage")
    span_right_end: float = Field(default=1.0, ge=0.0, le=1.0, description="End of right edge coverage")
    span_bottom_start: float = Field(default=0.0, ge=0.0, le=1.0, description="Start of bottom edge coverage")
    span_bottom_end: float = Field(default=1.0, ge=0.0, le=1.0, description="End of bottom edge coverage")
    span_left_start: float = Field(default=0.0, ge=0.0, le=1.0, description="Start of left edge coverage")
    span_left_end: float = Field(default=1.0, ge=0.0, le=1.0, description="End of left edge coverage")
class CalibrationTestModeRequest(BaseModel):
    """Request to set calibration test mode with multiple edges.

    An empty ``edges`` mapping (the default) means "exit test mode".
    """
    edges: Dict[str, List[int]] = Field(
        default_factory=dict,
        description="Map of active edge names to RGB colors. "
                    "E.g. {'top': [255, 0, 0], 'left': [255, 255, 0]}. "
                    "Empty dict = exit test mode."
    )
class CalibrationTestModeResponse(BaseModel):
    """Response for calibration test mode.

    ``active_edges`` defaults to an empty list when test mode is off.
    """
    test_mode: bool = Field(description="Whether test mode is active")
    active_edges: List[str] = Field(default_factory=list, description="Currently lit edges")
    device_id: str = Field(description="Device ID")
class DeviceResponse(BaseModel):
    """Device information response.

    Carries both the legacy ``capture_template_id`` and the newer
    ``picture_stream_id`` (empty string when unassigned).
    """
    id: str = Field(description="Device ID")
    name: str = Field(description="Device name")
    url: str = Field(description="WLED device URL")
    led_count: int = Field(description="Total number of LEDs")
    enabled: bool = Field(description="Whether device is enabled")
    status: Literal["connected", "disconnected", "error"] = Field(
        description="Connection status"
    )
    settings: ProcessingSettings = Field(description="Processing settings")
    calibration: Optional[Calibration] = Field(None, description="Calibration configuration")
    capture_template_id: str = Field(description="ID of assigned capture template (legacy)")
    picture_stream_id: str = Field(default="", description="ID of assigned picture stream")
    created_at: datetime = Field(description="Creation timestamp")
    updated_at: datetime = Field(description="Last update timestamp")
class DeviceListResponse(BaseModel):
    """List of devices response.

    ``count`` is supplied by the handler; it is not derived from ``devices``.
    """
    devices: List[DeviceResponse] = Field(description="List of devices")
    count: int = Field(description="Number of devices")
# Processing State Schemas
class ProcessingState(BaseModel):
    """Processing state for a device.

    The ``wled_*`` fields mirror the most recent WLED health check; they all
    default to None/False so the model is valid before the first check runs.
    """
    device_id: str = Field(description="Device ID")
    processing: bool = Field(description="Whether processing is active")
    fps_actual: Optional[float] = Field(None, description="Actual FPS achieved")
    fps_target: int = Field(description="Target FPS")
    display_index: int = Field(description="Current display index")
    last_update: Optional[datetime] = Field(None, description="Last successful update")
    errors: List[str] = Field(default_factory=list, description="Recent errors")
    wled_online: bool = Field(default=False, description="Whether WLED device is reachable")
    wled_latency_ms: Optional[float] = Field(None, description="WLED health check latency in ms")
    wled_name: Optional[str] = Field(None, description="WLED device name")
    wled_version: Optional[str] = Field(None, description="WLED firmware version")
    wled_led_count: Optional[int] = Field(None, description="LED count reported by WLED device")
    wled_rgbw: Optional[bool] = Field(None, description="Whether WLED device uses RGBW LEDs")
    wled_led_type: Optional[str] = Field(None, description="LED chip type (e.g. WS2812B, SK6812 RGBW)")
    wled_last_checked: Optional[datetime] = Field(None, description="Last health check time")
    wled_error: Optional[str] = Field(None, description="Last health check error")
class MetricsResponse(BaseModel):
    """Device metrics response.

    Cumulative counters (frames, errors, uptime) plus the latest FPS readings.
    """
    device_id: str = Field(description="Device ID")
    processing: bool = Field(description="Whether processing is active")
    fps_actual: Optional[float] = Field(None, description="Actual FPS")
    fps_target: int = Field(description="Target FPS")
    uptime_seconds: float = Field(description="Processing uptime in seconds")
    frames_processed: int = Field(description="Total frames processed")
    errors_count: int = Field(description="Total error count")
    last_error: Optional[str] = Field(None, description="Last error message")
    last_update: Optional[datetime] = Field(None, description="Last update timestamp")
# Error Schemas
class ErrorResponse(BaseModel):
    """Error response.

    NOTE(review): ``datetime.utcnow`` is deprecated since Python 3.12 and
    produces a naive datetime; prefer ``datetime.now(timezone.utc)``.
    """
    error: str = Field(description="Error type")
    message: str = Field(description="Error message")
    detail: Optional[Dict] = Field(None, description="Additional error details")
    timestamp: datetime = Field(default_factory=datetime.utcnow, description="Error timestamp")
# Capture Template Schemas
class TemplateCreate(BaseModel):
    """Request to create a capture template.

    ``name`` and ``engine_type`` are required; ``engine_config`` defaults to
    an empty dict.
    """
    name: str = Field(description="Template name", min_length=1, max_length=100)
    engine_type: str = Field(description="Engine type (e.g., 'mss', 'dxcam', 'wgc')", min_length=1)
    engine_config: Dict = Field(default_factory=dict, description="Engine-specific configuration")
    description: Optional[str] = Field(None, description="Template description", max_length=500)
class TemplateUpdate(BaseModel):
    """Request to update a template.

    Every field defaults to ``None`` so callers can send a partial update.
    """
    name: Optional[str] = Field(None, description="Template name", min_length=1, max_length=100)
    engine_type: Optional[str] = Field(None, description="Capture engine type (mss, dxcam, wgc)")
    engine_config: Optional[Dict] = Field(None, description="Engine-specific configuration")
    description: Optional[str] = Field(None, description="Template description", max_length=500)
class TemplateResponse(BaseModel):
    """Template information response.

    All fields except ``description`` are required.
    """
    id: str = Field(description="Template ID")
    name: str = Field(description="Template name")
    engine_type: str = Field(description="Engine type identifier")
    engine_config: Dict = Field(description="Engine-specific configuration")
    created_at: datetime = Field(description="Creation timestamp")
    updated_at: datetime = Field(description="Last update timestamp")
    description: Optional[str] = Field(None, description="Template description")
class TemplateListResponse(BaseModel):
    """List of templates response.

    ``count`` is supplied by the handler; it is not derived from ``templates``.
    """
    templates: List[TemplateResponse] = Field(description="List of templates")
    count: int = Field(description="Number of templates")
class EngineInfo(BaseModel):
    """Capture engine information.

    ``available`` reflects whether the engine can run on the current system.
    """
    type: str = Field(description="Engine type identifier (e.g., 'mss', 'dxcam')")
    name: str = Field(description="Human-readable engine name")
    default_config: Dict = Field(description="Default configuration for this engine")
    available: bool = Field(description="Whether engine is available on this system")
class EngineListResponse(BaseModel):
    """List of available engines response.

    ``count`` is supplied by the handler; it is not derived from ``engines``.
    """
    engines: List[EngineInfo] = Field(description="Available capture engines")
    count: int = Field(description="Number of engines")
class TemplateAssignment(BaseModel):
    """Request to assign template to device.

    Single required field: the template to assign.
    """
    template_id: str = Field(description="Template ID to assign")
class TemplateTestRequest(BaseModel):
    """Request to test a capture template.

    NOTE(review): pydantic deep-copies mutable field defaults per instance, so
    ``default={}`` is safe here, but ``default_factory=dict`` is the
    conventional spelling used elsewhere in this module.
    """
    engine_type: str = Field(description="Capture engine type to test")
    engine_config: Dict = Field(default={}, description="Engine configuration")
    display_index: int = Field(description="Display index to capture")
    border_width: int = Field(default=10, ge=1, le=100, description="Border width in pixels")
    capture_duration: float = Field(default=5.0, ge=1.0, le=30.0, description="Duration to capture in seconds")
class CaptureImage(BaseModel):
    """Captured image with metadata.

    ``width``/``height`` describe the original capture; the thumbnail
    dimensions are only set when the image was resized.
    """
    image: str = Field(description="Base64-encoded thumbnail image data")
    full_image: Optional[str] = Field(None, description="Base64-encoded full-resolution image data")
    width: int = Field(description="Original image width in pixels")
    height: int = Field(description="Original image height in pixels")
    thumbnail_width: Optional[int] = Field(None, description="Thumbnail width (if resized)")
    thumbnail_height: Optional[int] = Field(None, description="Thumbnail height (if resized)")
class BorderExtraction(BaseModel):
    """Extracted border images.

    One base64-encoded image per screen edge; all four are required.
    """
    top: str = Field(description="Base64-encoded top border image")
    right: str = Field(description="Base64-encoded right border image")
    bottom: str = Field(description="Base64-encoded bottom border image")
    left: str = Field(description="Base64-encoded left border image")
class PerformanceMetrics(BaseModel):
    """Performance metrics for template test.

    ``actual_fps`` is frame_count divided by the capture duration.
    """
    capture_duration_s: float = Field(description="Total capture duration in seconds")
    frame_count: int = Field(description="Number of frames captured")
    actual_fps: float = Field(description="Actual FPS (frame_count / duration)")
    avg_capture_time_ms: float = Field(description="Average time per frame capture in milliseconds")
class TemplateTestResponse(BaseModel):
    """Response from template test.

    ``border_extraction`` is deprecated and may be None.
    """
    full_capture: CaptureImage = Field(description="Full screen capture with thumbnail")
    border_extraction: Optional[BorderExtraction] = Field(None, description="Extracted border images (deprecated)")
    performance: PerformanceMetrics = Field(description="Performance metrics")
# Filter Schemas
class FilterInstanceSchema(BaseModel):
    """A single filter instance with its configuration.

    ``options`` defaults to an empty dict (filter-type defaults apply).
    """
    filter_id: str = Field(description="Filter type identifier")
    options: Dict[str, Any] = Field(default_factory=dict, description="Filter-specific options")
class FilterOptionDefSchema(BaseModel):
    """Describes a configurable option for a filter type.

    ``default``/``min_value``/``max_value``/``step`` are typed ``Any`` —
    presumably numeric or None depending on ``type``; confirm against the
    filter option definitions.
    """
    key: str = Field(description="Option key")
    label: str = Field(description="Display label")
    # `type` shadows the builtin only inside this class namespace.
    type: str = Field(description="Option type (float or int)")
    default: Any = Field(description="Default value")
    min_value: Any = Field(description="Minimum value")
    max_value: Any = Field(description="Maximum value")
    step: Any = Field(description="Step increment")
class FilterTypeResponse(BaseModel):
    """Available filter type with its options schema.

    All fields are required.
    """
    filter_id: str = Field(description="Filter type identifier")
    filter_name: str = Field(description="Display name")
    options_schema: List[FilterOptionDefSchema] = Field(description="Configurable options")
class FilterTypeListResponse(BaseModel):
    """List of available filter types.

    ``count`` is supplied by the handler; it is not derived from ``filters``.
    """
    filters: List[FilterTypeResponse] = Field(description="Available filter types")
    count: int = Field(description="Number of filter types")
# Postprocessing Template Schemas
class PostprocessingTemplateCreate(BaseModel):
    """Request to create a postprocessing template.

    ``filters`` is an ordered pipeline; it defaults to an empty list.
    """
    name: str = Field(description="Template name", min_length=1, max_length=100)
    filters: List[FilterInstanceSchema] = Field(default_factory=list, description="Ordered list of filter instances")
    description: Optional[str] = Field(None, description="Template description", max_length=500)
class PostprocessingTemplateUpdate(BaseModel):
    """Request to update a postprocessing template.

    Every field defaults to ``None`` so callers can send a partial update.
    """
    name: Optional[str] = Field(None, description="Template name", min_length=1, max_length=100)
    filters: Optional[List[FilterInstanceSchema]] = Field(None, description="Ordered list of filter instances")
    description: Optional[str] = Field(None, description="Template description", max_length=500)
class PostprocessingTemplateResponse(BaseModel):
    """Postprocessing template information response.

    All fields except ``description`` are required.
    """
    id: str = Field(description="Template ID")
    name: str = Field(description="Template name")
    filters: List[FilterInstanceSchema] = Field(description="Ordered list of filter instances")
    created_at: datetime = Field(description="Creation timestamp")
    updated_at: datetime = Field(description="Last update timestamp")
    description: Optional[str] = Field(None, description="Template description")
class PostprocessingTemplateListResponse(BaseModel):
    """List of postprocessing templates response.

    ``count`` is supplied by the handler; it is not derived from ``templates``.
    """
    templates: List[PostprocessingTemplateResponse] = Field(description="List of postprocessing templates")
    count: int = Field(description="Number of templates")
# Picture Stream Schemas
class PictureStreamCreate(BaseModel):
    """Request to create a picture stream.

    Which optional fields apply depends on ``stream_type``: raw streams use
    display/template/fps, processed streams use source/postprocessing IDs,
    and static_image streams use ``image_source``.
    """
    name: str = Field(description="Stream name", min_length=1, max_length=100)
    stream_type: Literal["raw", "processed", "static_image"] = Field(description="Stream type")
    display_index: Optional[int] = Field(None, description="Display index (raw streams)", ge=0)
    capture_template_id: Optional[str] = Field(None, description="Capture template ID (raw streams)")
    target_fps: Optional[int] = Field(None, description="Target FPS (raw streams)", ge=10, le=90)
    source_stream_id: Optional[str] = Field(None, description="Source stream ID (processed streams)")
    postprocessing_template_id: Optional[str] = Field(None, description="Postprocessing template ID (processed streams)")
    image_source: Optional[str] = Field(None, description="Image URL or file path (static_image streams)")
    description: Optional[str] = Field(None, description="Stream description", max_length=500)
class PictureStreamUpdate(BaseModel):
    """Request to update a picture stream.

    Every field defaults to ``None`` so callers can send a partial update;
    note ``stream_type`` itself cannot be changed here.
    """
    name: Optional[str] = Field(None, description="Stream name", min_length=1, max_length=100)
    display_index: Optional[int] = Field(None, description="Display index (raw streams)", ge=0)
    capture_template_id: Optional[str] = Field(None, description="Capture template ID (raw streams)")
    target_fps: Optional[int] = Field(None, description="Target FPS (raw streams)", ge=10, le=90)
    source_stream_id: Optional[str] = Field(None, description="Source stream ID (processed streams)")
    postprocessing_template_id: Optional[str] = Field(None, description="Postprocessing template ID (processed streams)")
    image_source: Optional[str] = Field(None, description="Image URL or file path (static_image streams)")
    description: Optional[str] = Field(None, description="Stream description", max_length=500)
class PictureStreamResponse(BaseModel):
    """Picture stream information response.

    Type-specific fields are None when they do not apply to ``stream_type``.
    """
    id: str = Field(description="Stream ID")
    name: str = Field(description="Stream name")
    stream_type: str = Field(description="Stream type (raw, processed, or static_image)")
    display_index: Optional[int] = Field(None, description="Display index")
    capture_template_id: Optional[str] = Field(None, description="Capture template ID")
    target_fps: Optional[int] = Field(None, description="Target FPS")
    source_stream_id: Optional[str] = Field(None, description="Source stream ID")
    postprocessing_template_id: Optional[str] = Field(None, description="Postprocessing template ID")
    image_source: Optional[str] = Field(None, description="Image URL or file path")
    created_at: datetime = Field(description="Creation timestamp")
    updated_at: datetime = Field(description="Last update timestamp")
    description: Optional[str] = Field(None, description="Stream description")
class PictureStreamListResponse(BaseModel):
    """List of picture streams response.

    ``count`` is supplied by the handler; it is not derived from ``streams``.
    """
    streams: List[PictureStreamResponse] = Field(description="List of picture streams")
    count: int = Field(description="Number of streams")
class PictureStreamTestRequest(BaseModel):
    """Request to test a picture stream.

    Both fields have defaults, so an empty payload is valid.
    """
    capture_duration: float = Field(default=5.0, ge=1.0, le=30.0, description="Duration to capture in seconds")
    border_width: int = Field(default=10, ge=1, le=100, description="Border width in pixels for preview")
class PPTemplateTestRequest(BaseModel):
    """Request to test a postprocessing template against a source stream.

    ``source_stream_id`` is required; duration defaults to 5 seconds.
    """
    source_stream_id: str = Field(description="ID of the source picture stream to capture from")
    capture_duration: float = Field(default=5.0, ge=1.0, le=30.0, description="Duration to capture in seconds")
class ImageValidateRequest(BaseModel):
    """Request to validate an image source (URL or file path).

    Single required field; no URL/path validation is done at schema level.
    """
    image_source: str = Field(description="Image URL or local file path")
class ImageValidateResponse(BaseModel):
    """Response from image validation.

    When ``valid`` is False the metadata/preview fields are expected to be
    None and ``error`` set, and vice versa (not enforced by the schema).
    """
    valid: bool = Field(description="Whether the image source is accessible and valid")
    width: Optional[int] = Field(None, description="Image width in pixels")
    height: Optional[int] = Field(None, description="Image height in pixels")
    preview: Optional[str] = Field(None, description="Base64-encoded JPEG thumbnail")
    error: Optional[str] = Field(None, description="Error message if invalid")

View File

@@ -0,0 +1,107 @@
"""Pydantic schemas for API request and response models."""
from .common import (
CaptureImage,
BorderExtraction,
ErrorResponse,
PerformanceMetrics,
TemplateTestResponse,
)
from .system import (
DisplayInfo,
DisplayListResponse,
HealthResponse,
VersionResponse,
)
from .devices import (
Calibration,
CalibrationTestModeRequest,
CalibrationTestModeResponse,
ColorCorrection,
DeviceCreate,
DeviceListResponse,
DeviceResponse,
DeviceUpdate,
MetricsResponse,
ProcessingSettings,
ProcessingState,
)
from .templates import (
EngineInfo,
EngineListResponse,
TemplateAssignment,
TemplateCreate,
TemplateListResponse,
TemplateResponse,
TemplateTestRequest,
TemplateUpdate,
)
from .filters import (
FilterInstanceSchema,
FilterOptionDefSchema,
FilterTypeListResponse,
FilterTypeResponse,
)
from .postprocessing import (
PostprocessingTemplateCreate,
PostprocessingTemplateListResponse,
PostprocessingTemplateResponse,
PostprocessingTemplateUpdate,
PPTemplateTestRequest,
)
from .picture_sources import (
ImageValidateRequest,
ImageValidateResponse,
PictureSourceCreate,
PictureSourceListResponse,
PictureSourceResponse,
PictureSourceTestRequest,
PictureSourceUpdate,
)
# Public re-export list; kept in the same name order as the import groups
# above so the two lists can be diffed against each other when a schema is
# added or renamed.
__all__ = [
    "CaptureImage",
    "BorderExtraction",
    "ErrorResponse",
    "PerformanceMetrics",
    "TemplateTestResponse",
    "DisplayInfo",
    "DisplayListResponse",
    "HealthResponse",
    "VersionResponse",
    "Calibration",
    "CalibrationTestModeRequest",
    "CalibrationTestModeResponse",
    "ColorCorrection",
    "DeviceCreate",
    "DeviceListResponse",
    "DeviceResponse",
    "DeviceUpdate",
    "MetricsResponse",
    "ProcessingSettings",
    "ProcessingState",
    "EngineInfo",
    "EngineListResponse",
    "TemplateAssignment",
    "TemplateCreate",
    "TemplateListResponse",
    "TemplateResponse",
    "TemplateTestRequest",
    "TemplateUpdate",
    "FilterInstanceSchema",
    "FilterOptionDefSchema",
    "FilterTypeListResponse",
    "FilterTypeResponse",
    "PostprocessingTemplateCreate",
    "PostprocessingTemplateListResponse",
    "PostprocessingTemplateResponse",
    "PostprocessingTemplateUpdate",
    "PPTemplateTestRequest",
    "ImageValidateRequest",
    "ImageValidateResponse",
    "PictureSourceCreate",
    "PictureSourceListResponse",
    "PictureSourceResponse",
    "PictureSourceTestRequest",
    "PictureSourceUpdate",
]

View File

@@ -0,0 +1,52 @@
"""Shared schemas used across multiple route modules."""
from datetime import datetime, timezone
from typing import Dict, List, Optional

from pydantic import BaseModel, Field
class ErrorResponse(BaseModel):
    """Error response.

    Standard error envelope: a machine-readable ``error`` type, a human
    message, optional structured detail, and the time the error was produced.
    """
    error: str = Field(description="Error type")
    message: str = Field(description="Error message")
    detail: Optional[Dict] = Field(None, description="Additional error details")
    # datetime.utcnow() is deprecated since Python 3.12 and yields a naive
    # datetime; emit an explicit timezone-aware UTC timestamp instead.
    timestamp: datetime = Field(
        default_factory=lambda: datetime.now(timezone.utc),
        description="Error timestamp",
    )
class CaptureImage(BaseModel):
    """Captured image with metadata.

    ``width``/``height`` describe the original capture; the thumbnail
    dimensions are only set when the image was resized.
    """
    image: str = Field(description="Base64-encoded thumbnail image data")
    full_image: Optional[str] = Field(None, description="Base64-encoded full-resolution image data")
    width: int = Field(description="Original image width in pixels")
    height: int = Field(description="Original image height in pixels")
    thumbnail_width: Optional[int] = Field(None, description="Thumbnail width (if resized)")
    thumbnail_height: Optional[int] = Field(None, description="Thumbnail height (if resized)")
class BorderExtraction(BaseModel):
    """Extracted border images.

    One base64-encoded image per screen edge; all four are required.
    """
    top: str = Field(description="Base64-encoded top border image")
    right: str = Field(description="Base64-encoded right border image")
    bottom: str = Field(description="Base64-encoded bottom border image")
    left: str = Field(description="Base64-encoded left border image")
class PerformanceMetrics(BaseModel):
    """Performance metrics for template test.

    ``actual_fps`` is frame_count divided by the capture duration.
    """
    capture_duration_s: float = Field(description="Total capture duration in seconds")
    frame_count: int = Field(description="Number of frames captured")
    actual_fps: float = Field(description="Actual FPS (frame_count / duration)")
    avg_capture_time_ms: float = Field(description="Average time per frame capture in milliseconds")
class TemplateTestResponse(BaseModel):
    """Response from template test.

    ``border_extraction`` is deprecated and may be None.
    """
    full_capture: CaptureImage = Field(description="Full screen capture with thumbnail")
    border_extraction: Optional[BorderExtraction] = Field(None, description="Extracted border images (deprecated)")
    performance: PerformanceMetrics = Field(description="Performance metrics")

View File

@@ -0,0 +1,161 @@
"""Device-related schemas (CRUD, settings, calibration, processing state, metrics)."""
from datetime import datetime
from typing import Dict, List, Literal, Optional
from pydantic import BaseModel, Field
from wled_controller.core.processor_manager import DEFAULT_STATE_CHECK_INTERVAL
class DeviceCreate(BaseModel):
    """Request to create/attach a WLED device.

    Both fields are required; capture configuration is assigned separately
    via a picture source.
    """
    name: str = Field(description="Device name", min_length=1, max_length=100)
    url: str = Field(description="WLED device URL (e.g., http://192.168.1.100)")
class DeviceUpdate(BaseModel):
    """Request to update device information.

    Every field defaults to ``None`` so callers can send a partial update.
    """
    name: Optional[str] = Field(None, description="Device name", min_length=1, max_length=100)
    url: Optional[str] = Field(None, description="WLED device URL")
    enabled: Optional[bool] = Field(None, description="Whether device is enabled")
    picture_source_id: Optional[str] = Field(None, description="Picture source ID")
class ColorCorrection(BaseModel):
    """Color correction settings.

    All three knobs have sane defaults, so an empty payload is valid.
    """
    gamma: float = Field(default=2.2, description="Gamma correction", ge=0.1, le=5.0)
    saturation: float = Field(default=1.0, description="Saturation multiplier", ge=0.0, le=2.0)
    brightness: float = Field(default=1.0, description="Brightness multiplier", ge=0.0, le=1.0)
class ProcessingSettings(BaseModel):
    """Processing settings for a device.

    Every field carries a default, so a bare ``ProcessingSettings()`` is a
    complete, valid configuration.  ``state_check_interval`` defaults to the
    shared DEFAULT_STATE_CHECK_INTERVAL constant from the processor manager.
    """
    display_index: int = Field(default=0, description="Display to capture", ge=0)
    fps: int = Field(default=30, description="Target frames per second", ge=10, le=90)
    border_width: int = Field(default=10, description="Border width in pixels", ge=1, le=100)
    interpolation_mode: str = Field(default="average", description="LED color interpolation mode (average, median, dominant)")
    brightness: float = Field(default=1.0, description="Global brightness (0.0-1.0)", ge=0.0, le=1.0)
    smoothing: float = Field(default=0.3, description="Temporal smoothing factor (0.0=none, 1.0=full)", ge=0.0, le=1.0)
    state_check_interval: int = Field(
        default=DEFAULT_STATE_CHECK_INTERVAL, ge=5, le=600,
        description="Seconds between WLED health checks"
    )
    color_correction: Optional[ColorCorrection] = Field(
        default_factory=ColorCorrection,
        description="Color correction settings"
    )
class Calibration(BaseModel):
    """Calibration configuration for pixel-to-LED mapping.

    Per-edge LED counts default to 0 and per-edge spans default to the full
    edge (0.0 to 1.0), so an empty payload describes a no-LED calibration.
    """
    layout: Literal["clockwise", "counterclockwise"] = Field(
        default="clockwise",
        description="LED strip layout direction"
    )
    start_position: Literal["top_left", "top_right", "bottom_left", "bottom_right"] = Field(
        default="bottom_left",
        description="Starting corner of the LED strip"
    )
    offset: int = Field(
        default=0,
        ge=0,
        description="Number of LEDs from physical LED 0 to start corner (along strip direction)"
    )
    leds_top: int = Field(default=0, ge=0, description="Number of LEDs on the top edge")
    leds_right: int = Field(default=0, ge=0, description="Number of LEDs on the right edge")
    leds_bottom: int = Field(default=0, ge=0, description="Number of LEDs on the bottom edge")
    leds_left: int = Field(default=0, ge=0, description="Number of LEDs on the left edge")
    # Per-edge span: fraction of screen side covered by LEDs (0.0-1.0)
    span_top_start: float = Field(default=0.0, ge=0.0, le=1.0, description="Start of top edge coverage")
    span_top_end: float = Field(default=1.0, ge=0.0, le=1.0, description="End of top edge coverage")
    span_right_start: float = Field(default=0.0, ge=0.0, le=1.0, description="Start of right edge coverage")
    span_right_end: float = Field(default=1.0, ge=0.0, le=1.0, description="End of right edge coverage")
    span_bottom_start: float = Field(default=0.0, ge=0.0, le=1.0, description="Start of bottom edge coverage")
    span_bottom_end: float = Field(default=1.0, ge=0.0, le=1.0, description="End of bottom edge coverage")
    span_left_start: float = Field(default=0.0, ge=0.0, le=1.0, description="Start of left edge coverage")
    span_left_end: float = Field(default=1.0, ge=0.0, le=1.0, description="End of left edge coverage")
class CalibrationTestModeRequest(BaseModel):
    """Request to set calibration test mode with multiple edges.

    An empty ``edges`` mapping (the default) means "exit test mode".
    """
    edges: Dict[str, List[int]] = Field(
        default_factory=dict,
        description="Map of active edge names to RGB colors. "
                    "E.g. {'top': [255, 0, 0], 'left': [255, 255, 0]}. "
                    "Empty dict = exit test mode."
    )
class CalibrationTestModeResponse(BaseModel):
    """Response for calibration test mode.

    ``active_edges`` defaults to an empty list when test mode is off.
    """
    test_mode: bool = Field(description="Whether test mode is active")
    active_edges: List[str] = Field(default_factory=list, description="Currently lit edges")
    device_id: str = Field(description="Device ID")
class DeviceResponse(BaseModel):
    """Device information response.

    ``picture_source_id`` is an empty string when no source is assigned.
    """
    id: str = Field(description="Device ID")
    name: str = Field(description="Device name")
    url: str = Field(description="WLED device URL")
    led_count: int = Field(description="Total number of LEDs")
    enabled: bool = Field(description="Whether device is enabled")
    status: Literal["connected", "disconnected", "error"] = Field(
        description="Connection status"
    )
    settings: ProcessingSettings = Field(description="Processing settings")
    calibration: Optional[Calibration] = Field(None, description="Calibration configuration")
    picture_source_id: str = Field(default="", description="ID of assigned picture source")
    created_at: datetime = Field(description="Creation timestamp")
    updated_at: datetime = Field(description="Last update timestamp")
class DeviceListResponse(BaseModel):
    """List of devices response.

    ``count`` is supplied by the handler; it is not derived from ``devices``.
    """
    devices: List[DeviceResponse] = Field(description="List of devices")
    count: int = Field(description="Number of devices")
class ProcessingState(BaseModel):
    """Processing state for a device.

    The ``wled_*`` fields mirror the most recent WLED health check; they all
    default to None/False so the model is valid before the first check runs.
    """
    device_id: str = Field(description="Device ID")
    processing: bool = Field(description="Whether processing is active")
    fps_actual: Optional[float] = Field(None, description="Actual FPS achieved")
    fps_target: int = Field(description="Target FPS")
    display_index: int = Field(description="Current display index")
    last_update: Optional[datetime] = Field(None, description="Last successful update")
    errors: List[str] = Field(default_factory=list, description="Recent errors")
    wled_online: bool = Field(default=False, description="Whether WLED device is reachable")
    wled_latency_ms: Optional[float] = Field(None, description="WLED health check latency in ms")
    wled_name: Optional[str] = Field(None, description="WLED device name")
    wled_version: Optional[str] = Field(None, description="WLED firmware version")
    wled_led_count: Optional[int] = Field(None, description="LED count reported by WLED device")
    wled_rgbw: Optional[bool] = Field(None, description="Whether WLED device uses RGBW LEDs")
    wled_led_type: Optional[str] = Field(None, description="LED chip type (e.g. WS2812B, SK6812 RGBW)")
    wled_last_checked: Optional[datetime] = Field(None, description="Last health check time")
    wled_error: Optional[str] = Field(None, description="Last health check error")
class MetricsResponse(BaseModel):
    """Device metrics response."""

    # Snapshot of per-device processing counters and error state.
    device_id: str = Field(..., description="Device ID")
    processing: bool = Field(..., description="Whether processing is active")
    fps_actual: Optional[float] = Field(default=None, description="Actual FPS")
    fps_target: int = Field(..., description="Target FPS")
    uptime_seconds: float = Field(..., description="Processing uptime in seconds")
    frames_processed: int = Field(..., description="Total frames processed")
    errors_count: int = Field(..., description="Total error count")
    last_error: Optional[str] = Field(default=None, description="Last error message")
    last_update: Optional[datetime] = Field(default=None, description="Last update timestamp")

View File

@@ -0,0 +1,39 @@
"""Filter-related schemas."""
from typing import Any, Dict, List
from pydantic import BaseModel, Field
class FilterInstanceSchema(BaseModel):
    """A single filter instance with its configuration."""

    # Identifies the filter type; options are free-form and filter-specific.
    filter_id: str = Field(..., description="Filter type identifier")
    options: Dict[str, Any] = Field(default_factory=dict, description="Filter-specific options")
class FilterOptionDefSchema(BaseModel):
    """Describes a configurable option for a filter type."""

    # All value fields are typed Any because the option may be float or int.
    key: str = Field(..., description="Option key")
    label: str = Field(..., description="Display label")
    type: str = Field(..., description="Option type (float or int)")
    default: Any = Field(..., description="Default value")
    min_value: Any = Field(..., description="Minimum value")
    max_value: Any = Field(..., description="Maximum value")
    step: Any = Field(..., description="Step increment")
class FilterTypeResponse(BaseModel):
    """Available filter type with its options schema."""

    filter_id: str = Field(..., description="Filter type identifier")
    filter_name: str = Field(..., description="Display name")
    options_schema: List[FilterOptionDefSchema] = Field(..., description="Configurable options")
class FilterTypeListResponse(BaseModel):
    """List of available filter types."""

    # Collection wrapper: the filter types plus an explicit element count.
    filters: List[FilterTypeResponse] = Field(..., description="Available filter types")
    count: int = Field(..., description="Number of filter types")

View File

@@ -0,0 +1,80 @@
"""Picture source schemas."""
from datetime import datetime
from typing import List, Literal, Optional
from pydantic import BaseModel, Field
class PictureSourceCreate(BaseModel):
    """Request to create a picture source."""

    # Only name and stream_type are required; the remaining fields apply
    # depending on stream_type, as noted in each description.
    name: str = Field(..., description="Stream name", min_length=1, max_length=100)
    stream_type: Literal["raw", "processed", "static_image"] = Field(..., description="Stream type")
    display_index: Optional[int] = Field(default=None, description="Display index (raw streams)", ge=0)
    capture_template_id: Optional[str] = Field(default=None, description="Capture template ID (raw streams)")
    target_fps: Optional[int] = Field(default=None, description="Target FPS (raw streams)", ge=10, le=90)
    source_stream_id: Optional[str] = Field(default=None, description="Source stream ID (processed streams)")
    postprocessing_template_id: Optional[str] = Field(default=None, description="Postprocessing template ID (processed streams)")
    image_source: Optional[str] = Field(default=None, description="Image URL or file path (static_image streams)")
    description: Optional[str] = Field(default=None, description="Stream description", max_length=500)
class PictureSourceUpdate(BaseModel):
    """Request to update a picture source."""

    # Partial-update semantics: every field is optional; stream_type itself
    # is not updatable through this schema.
    name: Optional[str] = Field(default=None, description="Stream name", min_length=1, max_length=100)
    display_index: Optional[int] = Field(default=None, description="Display index (raw streams)", ge=0)
    capture_template_id: Optional[str] = Field(default=None, description="Capture template ID (raw streams)")
    target_fps: Optional[int] = Field(default=None, description="Target FPS (raw streams)", ge=10, le=90)
    source_stream_id: Optional[str] = Field(default=None, description="Source stream ID (processed streams)")
    postprocessing_template_id: Optional[str] = Field(default=None, description="Postprocessing template ID (processed streams)")
    image_source: Optional[str] = Field(default=None, description="Image URL or file path (static_image streams)")
    description: Optional[str] = Field(default=None, description="Stream description", max_length=500)
class PictureSourceResponse(BaseModel):
    """Picture source information response."""

    # Type-specific fields (display_index, source_stream_id, image_source, ...)
    # are None when they do not apply to this stream_type.
    id: str = Field(..., description="Stream ID")
    name: str = Field(..., description="Stream name")
    stream_type: str = Field(..., description="Stream type (raw, processed, or static_image)")
    display_index: Optional[int] = Field(default=None, description="Display index")
    capture_template_id: Optional[str] = Field(default=None, description="Capture template ID")
    target_fps: Optional[int] = Field(default=None, description="Target FPS")
    source_stream_id: Optional[str] = Field(default=None, description="Source stream ID")
    postprocessing_template_id: Optional[str] = Field(default=None, description="Postprocessing template ID")
    image_source: Optional[str] = Field(default=None, description="Image URL or file path")
    created_at: datetime = Field(..., description="Creation timestamp")
    updated_at: datetime = Field(..., description="Last update timestamp")
    description: Optional[str] = Field(default=None, description="Stream description")
class PictureSourceListResponse(BaseModel):
    """List of picture sources response."""

    # Collection wrapper: the sources plus an explicit element count.
    streams: List[PictureSourceResponse] = Field(..., description="List of picture sources")
    count: int = Field(..., description="Number of streams")
class PictureSourceTestRequest(BaseModel):
    """Request to test a picture source."""

    # Both fields are bounded to keep test captures short and previews sane.
    capture_duration: float = Field(default=5.0, ge=1.0, le=30.0, description="Duration to capture in seconds")
    border_width: int = Field(default=10, ge=1, le=100, description="Border width in pixels for preview")
class ImageValidateRequest(BaseModel):
    """Request to validate an image source (URL or file path)."""

    image_source: str = Field(..., description="Image URL or local file path")
class ImageValidateResponse(BaseModel):
    """Response from image validation."""

    # On success: valid=True with dimensions and preview; on failure:
    # valid=False with error populated.
    valid: bool = Field(..., description="Whether the image source is accessible and valid")
    width: Optional[int] = Field(default=None, description="Image width in pixels")
    height: Optional[int] = Field(default=None, description="Image height in pixels")
    preview: Optional[str] = Field(default=None, description="Base64-encoded JPEG thumbnail")
    error: Optional[str] = Field(default=None, description="Error message if invalid")

View File

@@ -0,0 +1,49 @@
"""Postprocessing template schemas."""
from datetime import datetime
from typing import List, Optional
from pydantic import BaseModel, Field
from .filters import FilterInstanceSchema
class PostprocessingTemplateCreate(BaseModel):
    """Request to create a postprocessing template."""

    # Filters are applied in list order; an empty list is a valid no-op template.
    name: str = Field(..., description="Template name", min_length=1, max_length=100)
    filters: List[FilterInstanceSchema] = Field(default_factory=list, description="Ordered list of filter instances")
    description: Optional[str] = Field(default=None, description="Template description", max_length=500)
class PostprocessingTemplateUpdate(BaseModel):
    """Request to update a postprocessing template."""

    # Partial-update semantics: omitted (None) fields are left unchanged.
    name: Optional[str] = Field(default=None, description="Template name", min_length=1, max_length=100)
    filters: Optional[List[FilterInstanceSchema]] = Field(default=None, description="Ordered list of filter instances")
    description: Optional[str] = Field(default=None, description="Template description", max_length=500)
class PostprocessingTemplateResponse(BaseModel):
    """Postprocessing template information response."""

    id: str = Field(..., description="Template ID")
    name: str = Field(..., description="Template name")
    filters: List[FilterInstanceSchema] = Field(..., description="Ordered list of filter instances")
    created_at: datetime = Field(..., description="Creation timestamp")
    updated_at: datetime = Field(..., description="Last update timestamp")
    description: Optional[str] = Field(default=None, description="Template description")
class PostprocessingTemplateListResponse(BaseModel):
    """List of postprocessing templates response."""

    # Collection wrapper: the templates plus an explicit element count.
    templates: List[PostprocessingTemplateResponse] = Field(..., description="List of postprocessing templates")
    count: int = Field(..., description="Number of templates")
class PPTemplateTestRequest(BaseModel):
    """Request to test a postprocessing template against a source stream."""

    # Duration is bounded to keep test captures short.
    source_stream_id: str = Field(..., description="ID of the source picture source to capture from")
    capture_duration: float = Field(default=5.0, ge=1.0, le=30.0, description="Duration to capture in seconds")

View File

@@ -0,0 +1,42 @@
"""System-related schemas (health, version, displays)."""
from datetime import datetime
from typing import List, Literal
from pydantic import BaseModel, Field
class HealthResponse(BaseModel):
    """Health check response."""

    status: Literal["healthy", "unhealthy"] = Field(..., description="Service health status")
    timestamp: datetime = Field(..., description="Current server time")
    version: str = Field(..., description="Application version")
class VersionResponse(BaseModel):
    """Version information response."""

    version: str = Field(..., description="Application version")
    python_version: str = Field(..., description="Python version")
    api_version: str = Field(..., description="API version")
class DisplayInfo(BaseModel):
    """Display/monitor information."""

    # Geometry (width/height/x/y) is in pixels in the virtual desktop space.
    index: int = Field(..., description="Display index")
    name: str = Field(..., description="Display name")
    width: int = Field(..., description="Display width in pixels")
    height: int = Field(..., description="Display height in pixels")
    x: int = Field(..., description="Display X position")
    y: int = Field(..., description="Display Y position")
    is_primary: bool = Field(default=False, description="Whether this is the primary display")
    refresh_rate: int = Field(..., description="Display refresh rate in Hz")
class DisplayListResponse(BaseModel):
    """List of available displays."""

    # Collection wrapper: the displays plus an explicit element count.
    displays: List[DisplayInfo] = Field(..., description="Available displays")
    count: int = Field(..., description="Number of displays")

View File

@@ -0,0 +1,75 @@
"""Capture template and engine schemas."""
from datetime import datetime
from typing import Dict, List, Optional
from pydantic import BaseModel, Field
class TemplateCreate(BaseModel):
    """Request to create a capture template."""

    # engine_config defaults to an empty dict; its keys are engine-specific.
    name: str = Field(..., description="Template name", min_length=1, max_length=100)
    engine_type: str = Field(..., description="Engine type (e.g., 'mss', 'dxcam', 'wgc')", min_length=1)
    engine_config: Dict = Field(default_factory=dict, description="Engine-specific configuration")
    description: Optional[str] = Field(default=None, description="Template description", max_length=500)
class TemplateUpdate(BaseModel):
    """Request to update a template."""

    # Partial-update semantics: omitted (None) fields are left unchanged.
    name: Optional[str] = Field(default=None, description="Template name", min_length=1, max_length=100)
    engine_type: Optional[str] = Field(default=None, description="Capture engine type (mss, dxcam, wgc)")
    engine_config: Optional[Dict] = Field(default=None, description="Engine-specific configuration")
    description: Optional[str] = Field(default=None, description="Template description", max_length=500)
class TemplateResponse(BaseModel):
    """Template information response."""

    id: str = Field(..., description="Template ID")
    name: str = Field(..., description="Template name")
    engine_type: str = Field(..., description="Engine type identifier")
    engine_config: Dict = Field(..., description="Engine-specific configuration")
    created_at: datetime = Field(..., description="Creation timestamp")
    updated_at: datetime = Field(..., description="Last update timestamp")
    description: Optional[str] = Field(default=None, description="Template description")
class TemplateListResponse(BaseModel):
    """List of templates response."""

    # Collection wrapper: the templates plus an explicit element count.
    templates: List[TemplateResponse] = Field(..., description="List of templates")
    count: int = Field(..., description="Number of templates")
class EngineInfo(BaseModel):
    """Capture engine information."""

    # `available` reflects whether the engine can run on the current platform.
    type: str = Field(..., description="Engine type identifier (e.g., 'mss', 'dxcam')")
    name: str = Field(..., description="Human-readable engine name")
    default_config: Dict = Field(..., description="Default configuration for this engine")
    available: bool = Field(..., description="Whether engine is available on this system")
class EngineListResponse(BaseModel):
    """List of available engines response."""

    # Collection wrapper: the engines plus an explicit element count.
    engines: List[EngineInfo] = Field(..., description="Available capture engines")
    count: int = Field(..., description="Number of engines")
class TemplateAssignment(BaseModel):
    """Request to assign template to device."""

    template_id: str = Field(..., description="Template ID to assign")
class TemplateTestRequest(BaseModel):
    """Request to test a capture template."""

    engine_type: str = Field(..., description="Capture engine type to test")
    # default_factory avoids a shared mutable-literal default and matches the
    # convention used by TemplateCreate.engine_config elsewhere in this API.
    engine_config: Dict = Field(default_factory=dict, description="Engine configuration")
    display_index: int = Field(..., description="Display index to capture")
    border_width: int = Field(default=10, ge=1, le=100, description="Border width in pixels")
    capture_duration: float = Field(default=5.0, ge=1.0, le=30.0, description="Duration to capture in seconds")

View File

@@ -56,7 +56,7 @@ class StorageConfig(BaseSettings):
devices_file: str = "data/devices.json" devices_file: str = "data/devices.json"
templates_file: str = "data/capture_templates.json" templates_file: str = "data/capture_templates.json"
postprocessing_templates_file: str = "data/postprocessing_templates.json" postprocessing_templates_file: str = "data/postprocessing_templates.json"
picture_streams_file: str = "data/picture_streams.json" picture_sources_file: str = "data/picture_sources.json"
class LoggingConfig(BaseSettings): class LoggingConfig(BaseSettings):

View File

@@ -349,9 +349,6 @@ def create_default_calibration(led_count: int) -> CalibrationConfig:
def calibration_from_dict(data: dict) -> CalibrationConfig: def calibration_from_dict(data: dict) -> CalibrationConfig:
"""Create calibration configuration from dictionary. """Create calibration configuration from dictionary.
Supports both new format (leds_top/right/bottom/left) and legacy format
(segments list) for backward compatibility.
Args: Args:
data: Dictionary with calibration data data: Dictionary with calibration data

View File

@@ -2,14 +2,15 @@
from wled_controller.core.capture_engines.base import ( from wled_controller.core.capture_engines.base import (
CaptureEngine, CaptureEngine,
CaptureStream,
DisplayInfo, DisplayInfo,
ScreenCapture, ScreenCapture,
) )
from wled_controller.core.capture_engines.factory import EngineRegistry from wled_controller.core.capture_engines.factory import EngineRegistry
from wled_controller.core.capture_engines.mss_engine import MSSEngine from wled_controller.core.capture_engines.mss_engine import MSSEngine, MSSCaptureStream
from wled_controller.core.capture_engines.dxcam_engine import DXcamEngine from wled_controller.core.capture_engines.dxcam_engine import DXcamEngine, DXcamCaptureStream
from wled_controller.core.capture_engines.bettercam_engine import BetterCamEngine from wled_controller.core.capture_engines.bettercam_engine import BetterCamEngine, BetterCamCaptureStream
from wled_controller.core.capture_engines.wgc_engine import WGCEngine from wled_controller.core.capture_engines.wgc_engine import WGCEngine, WGCCaptureStream
# Auto-register available engines # Auto-register available engines
EngineRegistry.register(MSSEngine) EngineRegistry.register(MSSEngine)
@@ -19,11 +20,16 @@ EngineRegistry.register(WGCEngine)
__all__ = [ __all__ = [
"CaptureEngine", "CaptureEngine",
"CaptureStream",
"DisplayInfo", "DisplayInfo",
"ScreenCapture", "ScreenCapture",
"EngineRegistry", "EngineRegistry",
"MSSEngine", "MSSEngine",
"MSSCaptureStream",
"DXcamEngine", "DXcamEngine",
"DXcamCaptureStream",
"BetterCamEngine", "BetterCamEngine",
"BetterCamCaptureStream",
"WGCEngine", "WGCEngine",
"WGCCaptureStream",
] ]

View File

@@ -31,31 +31,33 @@ class ScreenCapture:
display_index: int display_index: int
class CaptureEngine(ABC): class CaptureStream(ABC):
"""Abstract base class for screen capture engines. """Abstract base class for a display capture session.
All screen capture engines must implement this interface to be A CaptureStream is a stateful session bound to a specific display.
compatible with the WLED Grab system. It holds all display-specific resources and provides frame capture.
Created by CaptureEngine.create_stream().
Lifecycle:
stream = engine.create_stream(display_index, config)
stream.initialize()
frame = stream.capture_frame()
stream.cleanup()
Or via context manager:
with engine.create_stream(display_index, config) as stream:
frame = stream.capture_frame()
""" """
ENGINE_TYPE: str = "base" # Override in subclasses def __init__(self, display_index: int, config: Dict[str, Any]):
ENGINE_PRIORITY: int = 0 # Higher = preferred. Override in subclasses. self.display_index = display_index
def __init__(self, config: Dict[str, Any]):
"""Initialize engine with configuration.
Args:
config: Engine-specific configuration dict
"""
self.config = config self.config = config
self._initialized = False self._initialized = False
@abstractmethod @abstractmethod
def initialize(self) -> None: def initialize(self) -> None:
"""Initialize the capture engine. """Initialize capture resources for this display.
This method should prepare any resources needed for screen capture
(e.g., creating capture objects, allocating buffers).
Raises: Raises:
RuntimeError: If initialization fails RuntimeError: If initialization fails
@@ -64,17 +66,64 @@ class CaptureEngine(ABC):
@abstractmethod @abstractmethod
def cleanup(self) -> None: def cleanup(self) -> None:
"""Cleanup engine resources. """Release all capture resources for this display."""
This method should release any resources allocated during
initialization or capture operations.
"""
pass pass
@abstractmethod @abstractmethod
def get_available_displays(self) -> List[DisplayInfo]: def capture_frame(self) -> Optional[ScreenCapture]:
"""Capture one frame from the bound display.
Returns:
ScreenCapture with image data (RGB), or None if screen unchanged.
Raises:
RuntimeError: If capture fails
"""
pass
def __enter__(self):
"""Context manager entry - initialize stream."""
self.initialize()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Context manager exit - cleanup stream."""
self.cleanup()
class CaptureEngine(ABC):
"""Abstract base class for screen capture engines.
A CaptureEngine is a stateless factory that knows about a capture
technology. It can enumerate displays, check availability, and create
CaptureStream instances for specific displays.
All methods are classmethods — no instance creation needed.
"""
ENGINE_TYPE: str = "base"
ENGINE_PRIORITY: int = 0
@classmethod
@abstractmethod
def is_available(cls) -> bool:
"""Check if this engine is available on current system."""
pass
@classmethod
@abstractmethod
def get_default_config(cls) -> Dict[str, Any]:
"""Get default configuration for this engine."""
pass
@classmethod
@abstractmethod
def get_available_displays(cls) -> List[DisplayInfo]:
"""Get list of available displays. """Get list of available displays.
This method works without prior initialization — implementations
create temporary resources as needed.
Returns: Returns:
List of DisplayInfo objects describing available displays List of DisplayInfo objects describing available displays
@@ -83,61 +132,17 @@ class CaptureEngine(ABC):
""" """
pass pass
@classmethod
@abstractmethod @abstractmethod
def capture_display(self, display_index: int) -> Optional[ScreenCapture]: def create_stream(cls, display_index: int, config: Dict[str, Any]) -> CaptureStream:
"""Capture the specified display. """Create a capture stream for the specified display.
Args: Args:
display_index: Index of display to capture (0-based) display_index: Index of display to capture (0-based)
config: Engine-specific configuration dict
Returns: Returns:
ScreenCapture object with image data as numpy array (RGB format), Uninitialized CaptureStream. Caller must call initialize()
or None if no new frame is available (screen unchanged). or use as context manager.
Raises:
ValueError: If display_index is invalid
RuntimeError: If capture fails
""" """
pass pass
@classmethod
@abstractmethod
def is_available(cls) -> bool:
"""Check if this engine is available on current system.
Returns:
True if engine can be used on this platform
Examples:
>>> MSSEngine.is_available()
True # MSS is available on all platforms
>>> DXcamEngine.is_available()
True # On Windows 8.1+
False # On Linux/macOS
"""
pass
@classmethod
@abstractmethod
def get_default_config(cls) -> Dict[str, Any]:
"""Get default configuration for this engine.
Returns:
Default config dict with engine-specific options
Examples:
>>> MSSEngine.get_default_config()
{}
>>> DXcamEngine.get_default_config()
{'device_idx': 0, 'output_color': 'RGB', 'max_buffer_len': 64}
"""
pass
def __enter__(self):
"""Context manager entry - initialize engine."""
self.initialize()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Context manager exit - cleanup engine."""
self.cleanup()

View File

@@ -7,6 +7,7 @@ import numpy as np
from wled_controller.core.capture_engines.base import ( from wled_controller.core.capture_engines.base import (
CaptureEngine, CaptureEngine,
CaptureStream,
DisplayInfo, DisplayInfo,
ScreenCapture, ScreenCapture,
) )
@@ -15,32 +16,15 @@ from wled_controller.utils import get_logger
logger = get_logger(__name__) logger = get_logger(__name__)
class BetterCamEngine(CaptureEngine): class BetterCamCaptureStream(CaptureStream):
"""BetterCam-based screen capture engine. """BetterCam capture stream for a specific display."""
Uses the bettercam library (a high-performance fork of DXCam) which leverages def __init__(self, display_index: int, config: Dict[str, Any]):
DXGI Desktop Duplication API for ultra-fast screen capture on Windows. super().__init__(display_index, config)
Offers better performance than DXCam with multi-GPU support.
Requires: Windows 8.1+
"""
ENGINE_TYPE = "bettercam"
ENGINE_PRIORITY = 4
def __init__(self, config: Dict[str, Any]):
"""Initialize BetterCam engine."""
super().__init__(config)
self._camera = None self._camera = None
self._bettercam = None self._bettercam = None
self._current_output = None
def initialize(self) -> None: def initialize(self) -> None:
"""Initialize BetterCam capture.
Raises:
RuntimeError: If bettercam not installed or initialization fails
"""
try: try:
import bettercam import bettercam
self._bettercam = bettercam self._bettercam = bettercam
@@ -49,49 +33,24 @@ class BetterCamEngine(CaptureEngine):
"BetterCam not installed. Install with: pip install bettercam" "BetterCam not installed. Install with: pip install bettercam"
) )
self._initialized = True # Clear global camera cache for fresh DXGI state
logger.info("BetterCam engine initialized")
def _ensure_camera(self, display_index: int) -> None:
"""Ensure camera is created for the requested display.
Creates or recreates the BetterCam camera if needed.
"""
if self._camera and self._current_output == display_index:
return
# Stop and release existing camera
if self._camera:
try:
if self._camera.is_capturing:
self._camera.stop()
except Exception:
pass
try:
self._camera.release()
except Exception:
pass
self._camera = None
# Clear global camera cache to avoid stale DXGI state
try: try:
self._bettercam.__factory.clean_up() self._bettercam.__factory.clean_up()
except Exception: except Exception:
pass pass
self._camera = self._bettercam.create( self._camera = self._bettercam.create(
output_idx=display_index, output_idx=self.display_index,
output_color="RGB", output_color="RGB",
) )
if not self._camera: if not self._camera:
raise RuntimeError(f"Failed to create BetterCam camera for display {display_index}") raise RuntimeError(f"Failed to create BetterCam camera for display {self.display_index}")
self._current_output = display_index self._initialized = True
logger.info(f"BetterCam camera created (output={display_index})") logger.info(f"BetterCam capture stream initialized (display={self.display_index})")
def cleanup(self) -> None: def cleanup(self) -> None:
"""Cleanup BetterCam resources."""
if self._camera: if self._camera:
try: try:
if self._camera.is_capturing: if self._camera.is_capturing:
@@ -104,94 +63,27 @@ class BetterCamEngine(CaptureEngine):
logger.error(f"Error releasing BetterCam camera: {e}") logger.error(f"Error releasing BetterCam camera: {e}")
self._camera = None self._camera = None
# Clear global cache so next create() gets fresh DXGI state
if self._bettercam: if self._bettercam:
try: try:
self._bettercam.__factory.clean_up() self._bettercam.__factory.clean_up()
except Exception: except Exception:
pass pass
self._current_output = None
self._initialized = False self._initialized = False
logger.info("BetterCam engine cleaned up") logger.info(f"BetterCam capture stream cleaned up (display={self.display_index})")
def get_available_displays(self) -> List[DisplayInfo]: def capture_frame(self) -> Optional[ScreenCapture]:
"""Get list of available displays using BetterCam.
Returns:
List of DisplayInfo objects
Raises:
RuntimeError: If not initialized or detection fails
"""
if not self._initialized:
raise RuntimeError("Engine not initialized")
try:
displays = []
output_idx = self._current_output or 0
if self._camera and hasattr(self._camera, "width") and hasattr(self._camera, "height"):
display_info = DisplayInfo(
index=output_idx,
name=f"BetterCam Display {output_idx}",
width=self._camera.width,
height=self._camera.height,
x=0,
y=0,
is_primary=(output_idx == 0),
refresh_rate=60,
)
displays.append(display_info)
else:
display_info = DisplayInfo(
index=output_idx,
name=f"BetterCam Display {output_idx}",
width=1920,
height=1080,
x=0,
y=0,
is_primary=(output_idx == 0),
refresh_rate=60,
)
displays.append(display_info)
logger.debug(f"BetterCam detected {len(displays)} display(s)")
return displays
except Exception as e:
logger.error(f"Failed to detect displays with BetterCam: {e}")
raise RuntimeError(f"Failed to detect displays: {e}")
def capture_display(self, display_index: int) -> Optional[ScreenCapture]:
"""Capture display using BetterCam.
Args:
display_index: Index of display to capture (0-based).
Returns:
ScreenCapture object with image data, or None if screen unchanged.
Raises:
RuntimeError: If capture fails
"""
# Auto-initialize if not already initialized
if not self._initialized: if not self._initialized:
self.initialize() self.initialize()
# Ensure camera is ready for the requested display
self._ensure_camera(display_index)
try: try:
# grab() uses AcquireNextFrame with timeout=0 (non-blocking).
# Returns None if screen content hasn't changed since last grab.
frame = self._camera.grab() frame = self._camera.grab()
if frame is None: if frame is None:
return None return None
logger.debug( logger.debug(
f"BetterCam captured display {display_index}: " f"BetterCam captured display {self.display_index}: "
f"{frame.shape[1]}x{frame.shape[0]}" f"{frame.shape[1]}x{frame.shape[0]}"
) )
@@ -199,27 +91,32 @@ class BetterCamEngine(CaptureEngine):
image=frame, image=frame,
width=frame.shape[1], width=frame.shape[1],
height=frame.shape[0], height=frame.shape[0],
display_index=display_index, display_index=self.display_index,
) )
except ValueError: except ValueError:
raise raise
except Exception as e: except Exception as e:
logger.error(f"Failed to capture display {display_index} with BetterCam: {e}") logger.error(f"Failed to capture display {self.display_index} with BetterCam: {e}")
raise RuntimeError(f"Screen capture failed: {e}") raise RuntimeError(f"Screen capture failed: {e}")
class BetterCamEngine(CaptureEngine):
"""BetterCam-based screen capture engine.
Uses the bettercam library (a high-performance fork of DXCam) which leverages
DXGI Desktop Duplication API for ultra-fast screen capture on Windows.
Requires: Windows 8.1+
"""
ENGINE_TYPE = "bettercam"
ENGINE_PRIORITY = 4
@classmethod @classmethod
def is_available(cls) -> bool: def is_available(cls) -> bool:
"""Check if BetterCam is available.
BetterCam requires Windows 8.1+ and the bettercam package.
Returns:
True if bettercam is available on this system
"""
if sys.platform != "win32": if sys.platform != "win32":
return False return False
try: try:
import bettercam import bettercam
return True return True
@@ -228,9 +125,34 @@ class BetterCamEngine(CaptureEngine):
@classmethod @classmethod
def get_default_config(cls) -> Dict[str, Any]: def get_default_config(cls) -> Dict[str, Any]:
"""Get default BetterCam configuration.
Returns:
Default config dict with BetterCam options
"""
return {} return {}
@classmethod
def get_available_displays(cls) -> List[DisplayInfo]:
try:
import mss
displays = []
with mss.mss() as sct:
for i, monitor in enumerate(sct.monitors[1:], start=0):
displays.append(DisplayInfo(
index=i,
name=f"Display {i}",
width=monitor["width"],
height=monitor["height"],
x=monitor["left"],
y=monitor["top"],
is_primary=(i == 0),
refresh_rate=60,
))
logger.debug(f"BetterCam detected {len(displays)} display(s)")
return displays
except Exception as e:
logger.error(f"Failed to detect displays with BetterCam: {e}")
raise RuntimeError(f"Failed to detect displays: {e}")
@classmethod
def create_stream(cls, display_index: int, config: Dict[str, Any]) -> BetterCamCaptureStream:
return BetterCamCaptureStream(display_index, config)

View File

@@ -7,6 +7,7 @@ import numpy as np
from wled_controller.core.capture_engines.base import ( from wled_controller.core.capture_engines.base import (
CaptureEngine, CaptureEngine,
CaptureStream,
DisplayInfo, DisplayInfo,
ScreenCapture, ScreenCapture,
) )
@@ -15,32 +16,15 @@ from wled_controller.utils import get_logger
logger = get_logger(__name__) logger = get_logger(__name__)
class DXcamEngine(CaptureEngine): class DXcamCaptureStream(CaptureStream):
"""DXcam-based screen capture engine. """DXcam capture stream for a specific display."""
Uses the dxcam library which leverages DXGI Desktop Duplication API for def __init__(self, display_index: int, config: Dict[str, Any]):
ultra-fast screen capture on Windows. Offers significantly better performance super().__init__(display_index, config)
than MSS and eliminates cursor flickering.
Requires: Windows 8.1+
"""
ENGINE_TYPE = "dxcam"
ENGINE_PRIORITY = 3
def __init__(self, config: Dict[str, Any]):
"""Initialize DXcam engine."""
super().__init__(config)
self._camera = None self._camera = None
self._dxcam = None self._dxcam = None
self._current_output = None
def initialize(self) -> None: def initialize(self) -> None:
"""Initialize DXcam capture.
Raises:
RuntimeError: If DXcam not installed or initialization fails
"""
try: try:
import dxcam import dxcam
self._dxcam = dxcam self._dxcam = dxcam
@@ -49,51 +33,24 @@ class DXcamEngine(CaptureEngine):
"DXcam not installed. Install with: pip install dxcam" "DXcam not installed. Install with: pip install dxcam"
) )
self._initialized = True # Clear global camera cache for fresh DXGI state
logger.info("DXcam engine initialized")
def _ensure_camera(self, display_index: int) -> None:
"""Ensure camera is created for the requested display.
Creates or recreates the DXcam camera if needed.
DXcam caches cameras globally per (device, output). We clear the
cache before creating to avoid stale DXGI state from prior requests.
"""
if self._camera and self._current_output == display_index:
return
# Stop and release existing camera
if self._camera:
try:
if self._camera.is_capturing:
self._camera.stop()
except Exception:
pass
try:
self._camera.release()
except Exception:
pass
self._camera = None
# Clear dxcam's global camera cache to avoid stale DXGI state
try: try:
self._dxcam.__factory.clean_up() self._dxcam.__factory.clean_up()
except Exception: except Exception:
pass pass
self._camera = self._dxcam.create( self._camera = self._dxcam.create(
output_idx=display_index, output_idx=self.display_index,
output_color="RGB", output_color="RGB",
) )
if not self._camera: if not self._camera:
raise RuntimeError(f"Failed to create DXcam camera for display {display_index}") raise RuntimeError(f"Failed to create DXcam camera for display {self.display_index}")
self._current_output = display_index self._initialized = True
logger.info(f"DXcam camera created (output={display_index})") logger.info(f"DXcam capture stream initialized (display={self.display_index})")
def cleanup(self) -> None: def cleanup(self) -> None:
"""Cleanup DXcam resources."""
if self._camera: if self._camera:
try: try:
if self._camera.is_capturing: if self._camera.is_capturing:
@@ -106,104 +63,27 @@ class DXcamEngine(CaptureEngine):
logger.error(f"Error releasing DXcam camera: {e}") logger.error(f"Error releasing DXcam camera: {e}")
self._camera = None self._camera = None
# Clear dxcam's global cache so next create() gets fresh DXGI state
if self._dxcam: if self._dxcam:
try: try:
self._dxcam.__factory.clean_up() self._dxcam.__factory.clean_up()
except Exception: except Exception:
pass pass
self._current_output = None
self._initialized = False self._initialized = False
logger.info("DXcam engine cleaned up") logger.info(f"DXcam capture stream cleaned up (display={self.display_index})")
def get_available_displays(self) -> List[DisplayInfo]: def capture_frame(self) -> Optional[ScreenCapture]:
"""Get list of available displays using DXcam.
Note: DXcam provides limited display enumeration. This method
returns basic information for the configured output.
Returns:
List of DisplayInfo objects
Raises:
RuntimeError: If not initialized or detection fails
"""
if not self._initialized:
raise RuntimeError("Engine not initialized")
try:
displays = []
# Get output information from DXcam
# Note: DXcam doesn't provide comprehensive display enumeration
# We report the single configured output
output_idx = self._current_output or 0
# DXcam camera has basic output info
if self._camera and hasattr(self._camera, "width") and hasattr(self._camera, "height"):
display_info = DisplayInfo(
index=output_idx,
name=f"DXcam Display {output_idx}",
width=self._camera.width,
height=self._camera.height,
x=0, # DXcam doesn't provide position info
y=0,
is_primary=(output_idx == 0),
refresh_rate=60, # DXcam doesn't report refresh rate
)
displays.append(display_info)
else:
# Fallback if camera doesn't have dimensions
display_info = DisplayInfo(
index=output_idx,
name=f"DXcam Display {output_idx}",
width=1920, # Reasonable default
height=1080,
x=0,
y=0,
is_primary=(output_idx == 0),
refresh_rate=60,
)
displays.append(display_info)
logger.debug(f"DXcam detected {len(displays)} display(s)")
return displays
except Exception as e:
logger.error(f"Failed to detect displays with DXcam: {e}")
raise RuntimeError(f"Failed to detect displays: {e}")
def capture_display(self, display_index: int) -> Optional[ScreenCapture]:
"""Capture display using DXcam.
Args:
display_index: Index of display to capture (0-based).
Returns:
ScreenCapture object with image data, or None if screen unchanged.
Raises:
RuntimeError: If capture fails
"""
# Auto-initialize if not already initialized
if not self._initialized: if not self._initialized:
self.initialize() self.initialize()
# Ensure camera is ready for the requested display
self._ensure_camera(display_index)
try: try:
# grab() uses AcquireNextFrame with timeout=0 (non-blocking).
# Returns None if screen content hasn't changed since last grab.
frame = self._camera.grab() frame = self._camera.grab()
if frame is None: if frame is None:
return None return None
# DXcam returns numpy array directly in configured color format
logger.debug( logger.debug(
f"DXcam captured display {display_index}: " f"DXcam captured display {self.display_index}: "
f"{frame.shape[1]}x{frame.shape[0]}" f"{frame.shape[1]}x{frame.shape[0]}"
) )
@@ -211,29 +91,32 @@ class DXcamEngine(CaptureEngine):
image=frame, image=frame,
width=frame.shape[1], width=frame.shape[1],
height=frame.shape[0], height=frame.shape[0],
display_index=display_index, display_index=self.display_index,
) )
except ValueError: except ValueError:
raise raise
except Exception as e: except Exception as e:
logger.error(f"Failed to capture display {display_index} with DXcam: {e}") logger.error(f"Failed to capture display {self.display_index} with DXcam: {e}")
raise RuntimeError(f"Screen capture failed: {e}") raise RuntimeError(f"Screen capture failed: {e}")
class DXcamEngine(CaptureEngine):
"""DXcam-based screen capture engine.
Uses the dxcam library which leverages DXGI Desktop Duplication API for
ultra-fast screen capture on Windows.
Requires: Windows 8.1+
"""
ENGINE_TYPE = "dxcam"
ENGINE_PRIORITY = 3
@classmethod @classmethod
def is_available(cls) -> bool: def is_available(cls) -> bool:
"""Check if DXcam is available.
DXcam requires Windows 8.1+ and the dxcam package.
Returns:
True if dxcam is available on this system
"""
# Check platform
if sys.platform != "win32": if sys.platform != "win32":
return False return False
# Check if dxcam is installed
try: try:
import dxcam import dxcam
return True return True
@@ -242,9 +125,34 @@ class DXcamEngine(CaptureEngine):
@classmethod @classmethod
def get_default_config(cls) -> Dict[str, Any]: def get_default_config(cls) -> Dict[str, Any]:
"""Get default DXcam configuration.
Returns:
Default config dict with DXcam options
"""
return {} return {}
@classmethod
def get_available_displays(cls) -> List[DisplayInfo]:
try:
import mss
displays = []
with mss.mss() as sct:
for i, monitor in enumerate(sct.monitors[1:], start=0):
displays.append(DisplayInfo(
index=i,
name=f"Display {i}",
width=monitor["width"],
height=monitor["height"],
x=monitor["left"],
y=monitor["top"],
is_primary=(i == 0),
refresh_rate=60,
))
logger.debug(f"DXcam detected {len(displays)} display(s)")
return displays
except Exception as e:
logger.error(f"Failed to detect displays with DXcam: {e}")
raise RuntimeError(f"Failed to detect displays: {e}")
@classmethod
def create_stream(cls, display_index: int, config: Dict[str, Any]) -> DXcamCaptureStream:
return DXcamCaptureStream(display_index, config)

View File

@@ -2,7 +2,7 @@
from typing import Any, Dict, List, Optional, Type from typing import Any, Dict, List, Optional, Type
from wled_controller.core.capture_engines.base import CaptureEngine from wled_controller.core.capture_engines.base import CaptureEngine, CaptureStream
from wled_controller.utils import get_logger from wled_controller.utils import get_logger
logger = get_logger(__name__) logger = get_logger(__name__)
@@ -11,8 +11,8 @@ logger = get_logger(__name__)
class EngineRegistry: class EngineRegistry:
"""Registry for available capture engines. """Registry for available capture engines.
This class maintains a registry of all capture engine implementations Maintains a registry of all capture engine implementations
and provides factory methods for creating engine instances. and provides factory methods for creating capture streams.
""" """
_engines: Dict[str, Type[CaptureEngine]] = {} _engines: Dict[str, Type[CaptureEngine]] = {}
@@ -26,7 +26,6 @@ class EngineRegistry:
Raises: Raises:
ValueError: If engine_class is not a subclass of CaptureEngine ValueError: If engine_class is not a subclass of CaptureEngine
ValueError: If an engine with the same ENGINE_TYPE is already registered
""" """
if not issubclass(engine_class, CaptureEngine): if not issubclass(engine_class, CaptureEngine):
raise ValueError(f"{engine_class} must be a subclass of CaptureEngine") raise ValueError(f"{engine_class} must be a subclass of CaptureEngine")
@@ -66,12 +65,7 @@ class EngineRegistry:
"""Get list of available engine types on this system. """Get list of available engine types on this system.
Returns: Returns:
List of engine type identifiers that are available on the current platform List of engine type identifiers that are available
Examples:
>>> EngineRegistry.get_available_engines()
['mss'] # On Linux
['mss', 'dxcam', 'wgc'] # On Windows 10+
""" """
available = [] available = []
for engine_type, engine_class in cls._engines.items(): for engine_type, engine_class in cls._engines.items():
@@ -115,19 +109,27 @@ class EngineRegistry:
return cls._engines.copy() return cls._engines.copy()
@classmethod @classmethod
def create_engine(cls, engine_type: str, config: Dict[str, Any]) -> CaptureEngine: def create_stream(
"""Create engine instance with configuration. cls,
engine_type: str,
display_index: int,
config: Dict[str, Any],
) -> CaptureStream:
"""Create a CaptureStream for the specified engine and display.
Looks up the engine class, validates availability, and creates
an uninitialized CaptureStream for the specified display.
Args: Args:
engine_type: Engine type identifier engine_type: Engine type identifier
display_index: Display index for the stream
config: Engine-specific configuration config: Engine-specific configuration
Returns: Returns:
Initialized engine instance Uninitialized CaptureStream instance
Raises: Raises:
ValueError: If engine type not found or not available ValueError: If engine type not found or not available
RuntimeError: If engine initialization fails
""" """
engine_class = cls.get_engine(engine_type) engine_class = cls.get_engine(engine_type)
@@ -137,18 +139,15 @@ class EngineRegistry:
) )
try: try:
engine = engine_class(config) stream = engine_class.create_stream(display_index, config)
logger.debug(f"Created engine instance: {engine_type}") logger.debug(f"Created capture stream: {engine_type} (display={display_index})")
return engine return stream
except Exception as e: except Exception as e:
logger.error(f"Failed to create engine '{engine_type}': {e}") logger.error(f"Failed to create stream for engine '{engine_type}': {e}")
raise RuntimeError(f"Failed to create engine '{engine_type}': {e}") raise RuntimeError(f"Failed to create stream for engine '{engine_type}': {e}")
@classmethod @classmethod
def clear_registry(cls): def clear_registry(cls):
"""Clear all registered engines. """Clear all registered engines (for testing)."""
This is primarily useful for testing.
"""
cls._engines.clear() cls._engines.clear()
logger.debug("Cleared engine registry") logger.debug("Cleared engine registry")

View File

@@ -1,6 +1,6 @@
"""MSS-based screen capture engine (cross-platform).""" """MSS-based screen capture engine (cross-platform)."""
from typing import Any, Dict, List from typing import Any, Dict, List, Optional
import mss import mss
import numpy as np import numpy as np
@@ -8,6 +8,7 @@ from PIL import Image
from wled_controller.core.capture_engines.base import ( from wled_controller.core.capture_engines.base import (
CaptureEngine, CaptureEngine,
CaptureStream,
DisplayInfo, DisplayInfo,
ScreenCapture, ScreenCapture,
) )
@@ -16,126 +17,43 @@ from wled_controller.utils import get_logger, get_monitor_names, get_monitor_ref
logger = get_logger(__name__) logger = get_logger(__name__)
class MSSEngine(CaptureEngine): class MSSCaptureStream(CaptureStream):
"""MSS-based screen capture engine. """MSS capture stream for a specific display."""
Uses the mss library for cross-platform screen capture support. def __init__(self, display_index: int, config: Dict[str, Any]):
Works on Windows, macOS, and Linux. super().__init__(display_index, config)
Note: May experience cursor flickering on some systems.
"""
ENGINE_TYPE = "mss"
ENGINE_PRIORITY = 1
def __init__(self, config: Dict[str, Any]):
"""Initialize MSS engine.
Args:
config: Engine configuration (currently unused for MSS)
"""
super().__init__(config)
self._sct = None self._sct = None
def initialize(self) -> None: def initialize(self) -> None:
"""Initialize MSS capture context.
Raises:
RuntimeError: If MSS initialization fails
"""
try: try:
self._sct = mss.mss() self._sct = mss.mss()
self._initialized = True self._initialized = True
logger.info("MSS engine initialized") logger.info(f"MSS capture stream initialized (display={self.display_index})")
except Exception as e: except Exception as e:
raise RuntimeError(f"Failed to initialize MSS: {e}") raise RuntimeError(f"Failed to initialize MSS: {e}")
def cleanup(self) -> None: def cleanup(self) -> None:
"""Cleanup MSS resources."""
if self._sct: if self._sct:
self._sct.close() self._sct.close()
self._sct = None self._sct = None
self._initialized = False self._initialized = False
logger.info("MSS engine cleaned up") logger.info(f"MSS capture stream cleaned up (display={self.display_index})")
def get_available_displays(self) -> List[DisplayInfo]: def capture_frame(self) -> Optional[ScreenCapture]:
"""Get list of available displays using MSS.
Returns:
List of DisplayInfo objects for each available monitor
Raises:
RuntimeError: If not initialized or display detection fails
"""
if not self._initialized:
raise RuntimeError("Engine not initialized")
try:
# Get friendly monitor names (Windows only, falls back to generic names)
monitor_names = get_monitor_names()
# Get monitor refresh rates (Windows only, falls back to 60Hz)
refresh_rates = get_monitor_refresh_rates()
displays = []
# Skip the first monitor (combined virtual screen on multi-monitor setups)
for idx, monitor in enumerate(self._sct.monitors[1:], start=0):
# Use friendly name from WMI if available, otherwise generic name
friendly_name = monitor_names.get(idx, f"Display {idx}")
# Use detected refresh rate or default to 60Hz
refresh_rate = refresh_rates.get(idx, 60)
display_info = DisplayInfo(
index=idx,
name=friendly_name,
width=monitor["width"],
height=monitor["height"],
x=monitor["left"],
y=monitor["top"],
is_primary=(idx == 0),
refresh_rate=refresh_rate,
)
displays.append(display_info)
logger.debug(f"MSS detected {len(displays)} display(s)")
return displays
except Exception as e:
logger.error(f"Failed to detect displays with MSS: {e}")
raise RuntimeError(f"Failed to detect displays: {e}")
def capture_display(self, display_index: int) -> ScreenCapture:
"""Capture display using MSS.
Args:
display_index: Index of display to capture (0-based)
Returns:
ScreenCapture object with image data
Raises:
ValueError: If display_index is invalid
RuntimeError: If capture fails
"""
# Auto-initialize if not already initialized
if not self._initialized: if not self._initialized:
self.initialize() self.initialize()
try: try:
# mss monitors[0] is the combined screen, monitors[1+] are individual displays # mss monitors[0] is the combined screen, monitors[1+] are individual displays
monitor_index = display_index + 1 monitor_index = self.display_index + 1
if monitor_index >= len(self._sct.monitors): if monitor_index >= len(self._sct.monitors):
raise ValueError( raise ValueError(
f"Invalid display index {display_index}. " f"Invalid display index {self.display_index}. "
f"Available displays: 0-{len(self._sct.monitors) - 2}" f"Available displays: 0-{len(self._sct.monitors) - 2}"
) )
monitor = self._sct.monitors[monitor_index] monitor = self._sct.monitors[monitor_index]
# Capture screenshot
screenshot = self._sct.grab(monitor) screenshot = self._sct.grab(monitor)
# Convert to numpy array (RGB) # Convert to numpy array (RGB)
@@ -143,31 +61,35 @@ class MSSEngine(CaptureEngine):
img_array = np.array(img) img_array = np.array(img)
logger.debug( logger.debug(
f"MSS captured display {display_index}: {monitor['width']}x{monitor['height']}" f"MSS captured display {self.display_index}: {monitor['width']}x{monitor['height']}"
) )
return ScreenCapture( return ScreenCapture(
image=img_array, image=img_array,
width=monitor["width"], width=monitor["width"],
height=monitor["height"], height=monitor["height"],
display_index=display_index, display_index=self.display_index,
) )
except ValueError: except ValueError:
raise raise
except Exception as e: except Exception as e:
logger.error(f"Failed to capture display {display_index} with MSS: {e}") logger.error(f"Failed to capture display {self.display_index} with MSS: {e}")
raise RuntimeError(f"Screen capture failed: {e}") raise RuntimeError(f"Screen capture failed: {e}")
class MSSEngine(CaptureEngine):
"""MSS-based screen capture engine.
Uses the mss library for cross-platform screen capture support.
Works on Windows, macOS, and Linux.
"""
ENGINE_TYPE = "mss"
ENGINE_PRIORITY = 1
@classmethod @classmethod
def is_available(cls) -> bool: def is_available(cls) -> bool:
"""Check if MSS is available.
MSS is cross-platform and should always be available.
Returns:
True if mss library is available
"""
try: try:
import mss import mss
return True return True
@@ -176,11 +98,38 @@ class MSSEngine(CaptureEngine):
@classmethod @classmethod
def get_default_config(cls) -> Dict[str, Any]: def get_default_config(cls) -> Dict[str, Any]:
"""Get default MSS configuration.
MSS has no configurable options.
Returns:
Empty dict (MSS has no configuration)
"""
return {} return {}
@classmethod
def get_available_displays(cls) -> List[DisplayInfo]:
try:
monitor_names = get_monitor_names()
refresh_rates = get_monitor_refresh_rates()
displays = []
with mss.mss() as sct:
for idx, monitor in enumerate(sct.monitors[1:], start=0):
friendly_name = monitor_names.get(idx, f"Display {idx}")
refresh_rate = refresh_rates.get(idx, 60)
displays.append(DisplayInfo(
index=idx,
name=friendly_name,
width=monitor["width"],
height=monitor["height"],
x=monitor["left"],
y=monitor["top"],
is_primary=(idx == 0),
refresh_rate=refresh_rate,
))
logger.debug(f"MSS detected {len(displays)} display(s)")
return displays
except Exception as e:
logger.error(f"Failed to detect displays with MSS: {e}")
raise RuntimeError(f"Failed to detect displays: {e}")
@classmethod
def create_stream(cls, display_index: int, config: Dict[str, Any]) -> MSSCaptureStream:
return MSSCaptureStream(display_index, config)

View File

@@ -2,14 +2,14 @@
import gc import gc
import sys import sys
import time
import threading import threading
from typing import Any, Dict, List from typing import Any, Dict, List, Optional
import numpy as np import numpy as np
from wled_controller.core.capture_engines.base import ( from wled_controller.core.capture_engines.base import (
CaptureEngine, CaptureEngine,
CaptureStream,
DisplayInfo, DisplayInfo,
ScreenCapture, ScreenCapture,
) )
@@ -18,54 +18,20 @@ from wled_controller.utils import get_logger
logger = get_logger(__name__) logger = get_logger(__name__)
class WGCEngine(CaptureEngine): class WGCCaptureStream(CaptureStream):
"""Windows Graphics Capture engine. """WGC capture stream for a specific display."""
Uses the windows-capture library which leverages Windows.Graphics.Capture API. def __init__(self, display_index: int, config: Dict[str, Any]):
This is Microsoft's recommended modern screen capture API for Windows 10+. super().__init__(display_index, config)
Features:
- Cross-GPU support (works regardless of GPU routing)
- Hardware cursor exclusion (no cursor flickering)
- GPU-accelerated with direct texture sharing
- Modern, future-proof API
Requires: Windows 10 1803+
"""
ENGINE_TYPE = "wgc"
ENGINE_PRIORITY = 2
def __init__(self, config: Dict[str, Any]):
"""Initialize WGC engine.
Args:
config: Engine configuration
- capture_cursor (bool): Include cursor in capture (default: False)
- draw_border (bool): Draw border around capture (default: False)
Note: monitor_index is NOT in config - WGC maintains separate instances per monitor
to support simultaneous capture from multiple monitors.
"""
super().__init__(config)
self._wgc = None self._wgc = None
# Per-monitor capture instances: {monitor_index: (instance, control, frame, frame_event)} self._capture_instance = None
self._monitor_captures = {} self._capture_control = None
self._latest_frame = None
self._frame_event = threading.Event()
self._closed_event = threading.Event()
self._frame_lock = threading.Lock() self._frame_lock = threading.Lock()
def initialize(self, monitor_index: int = 0) -> None: def initialize(self) -> None:
"""Initialize WGC capture for a specific monitor.
Maintains separate capture instances per monitor to support simultaneous
capture from multiple monitors.
Args:
monitor_index: Monitor index to capture (0-based)
Raises:
RuntimeError: If windows-capture not installed or initialization fails
"""
# Import windows_capture if not already imported
if self._wgc is None: if self._wgc is None:
try: try:
import windows_capture import windows_capture
@@ -75,249 +41,110 @@ class WGCEngine(CaptureEngine):
"windows-capture not installed. Install with: pip install windows-capture" "windows-capture not installed. Install with: pip install windows-capture"
) )
# Skip if already initialized for this monitor
if monitor_index in self._monitor_captures:
logger.debug(f"WGC already initialized for monitor {monitor_index}")
return
try: try:
capture_cursor = self.config.get("capture_cursor", False) capture_cursor = self.config.get("capture_cursor", False)
# Note: draw_border is not supported by WGC API on most platforms
# WGC uses 1-based monitor indexing (1, 2, 3...) while we use 0-based (0, 1, 2...) # WGC uses 1-based monitor indexing
wgc_monitor_index = monitor_index + 1 wgc_monitor_index = self.display_index + 1
# Create per-monitor events and storage self._capture_instance = self._wgc.WindowsCapture(
frame_event = threading.Event()
closed_event = threading.Event()
latest_frame = None
# Create capture instance
# Note: draw_border parameter not supported on all platforms
capture_instance = self._wgc.WindowsCapture(
cursor_capture=capture_cursor, cursor_capture=capture_cursor,
monitor_index=wgc_monitor_index, monitor_index=wgc_monitor_index,
) )
# Define event handlers as local functions that capture monitor_index
def on_frame_arrived(frame, capture_control): def on_frame_arrived(frame, capture_control):
"""Called when a new frame is captured."""
nonlocal latest_frame
try: try:
logger.debug(f"WGC frame callback triggered for monitor {monitor_index}")
# Get frame buffer as numpy array
frame_buffer = frame.frame_buffer frame_buffer = frame.frame_buffer
width = frame.width width = frame.width
height = frame.height height = frame.height
# Reshape to image dimensions (height, width, channels) # WGC provides BGRA format, convert to RGB
# WGC provides BGRA format
frame_array = frame_buffer.reshape((height, width, 4)) frame_array = frame_buffer.reshape((height, width, 4))
frame_rgb = frame_array[:, :, [2, 1, 0]]
# Convert BGRA to RGB
frame_rgb = frame_array[:, :, [2, 1, 0]] # Take BGR channels
# Store the latest frame for this monitor
with self._frame_lock: with self._frame_lock:
if monitor_index in self._monitor_captures: self._latest_frame = frame_rgb.copy()
self._monitor_captures[monitor_index]['latest_frame'] = frame_rgb.copy() self._frame_event.set()
self._monitor_captures[monitor_index]['frame_event'].set()
except Exception as e: except Exception as e:
logger.error(f"Error processing WGC frame for monitor {monitor_index}: {e}", exc_info=True) logger.error(f"Error processing WGC frame: {e}", exc_info=True)
def on_closed(): def on_closed():
"""Called when capture session is closed.""" logger.debug(f"WGC capture session closed for display {self.display_index}")
logger.debug(f"WGC capture session closed for monitor {monitor_index}") self._closed_event.set()
# Signal that the capture session has fully closed and resources are released
with self._frame_lock:
if monitor_index in self._monitor_captures:
self._monitor_captures[monitor_index]['closed_event'].set()
# Set handlers directly as attributes self._capture_instance.frame_handler = on_frame_arrived
capture_instance.frame_handler = on_frame_arrived self._capture_instance.closed_handler = on_closed
capture_instance.closed_handler = on_closed
# Start capture using free-threaded mode (non-blocking) logger.debug(f"Starting WGC capture for display {self.display_index} (free-threaded mode)...")
# IMPORTANT: start_free_threaded() returns a CaptureControl object for cleanup self._capture_control = self._capture_instance.start_free_threaded()
logger.debug(f"Starting WGC capture for monitor {monitor_index} (free-threaded mode)...")
capture_control = capture_instance.start_free_threaded()
# Store all per-monitor data # Wait for first frame
self._monitor_captures[monitor_index] = { logger.debug(f"Waiting for first WGC frame from display {self.display_index}...")
'instance': capture_instance, frame_received = self._frame_event.wait(timeout=5.0)
'control': capture_control,
'latest_frame': None,
'frame_event': frame_event,
'closed_event': closed_event,
}
# Wait for first frame to arrive (with timeout) if not frame_received or self._latest_frame is None:
logger.debug(f"Waiting for first WGC frame from monitor {monitor_index}...") self._cleanup_internal()
frame_received = frame_event.wait(timeout=5.0)
if not frame_received or self._monitor_captures[monitor_index]['latest_frame'] is None:
# Cleanup on failure
with self._frame_lock:
if monitor_index in self._monitor_captures:
del self._monitor_captures[monitor_index]
raise RuntimeError( raise RuntimeError(
f"WGC capture started for monitor {monitor_index} but no frames received within 5 seconds. " f"WGC capture started for display {self.display_index} but no frames received within 5 seconds."
"This may indicate the capture session failed to start or "
"the display is not actively updating."
) )
self._initialized = True
logger.info( logger.info(
f"WGC engine initialized (monitor={monitor_index}, " f"WGC capture stream initialized (display={self.display_index}, "
f"cursor={capture_cursor})" f"cursor={capture_cursor})"
) )
except RuntimeError:
raise
except Exception as e: except Exception as e:
logger.error(f"Failed to initialize WGC for monitor {monitor_index}: {e}", exc_info=True) logger.error(f"Failed to initialize WGC for display {self.display_index}: {e}", exc_info=True)
raise RuntimeError(f"Failed to initialize WGC for monitor {monitor_index}: {e}") raise RuntimeError(f"Failed to initialize WGC for display {self.display_index}: {e}")
def _cleanup_internal(self) -> None:
"""Internal cleanup helper."""
if self._capture_control:
try:
logger.debug(f"Stopping WGC capture thread for display {self.display_index}...")
self._capture_control.stop()
self._capture_control.wait()
logger.debug(f"WGC capture thread finished for display {self.display_index}")
except Exception as e:
logger.error(f"Error during WGC capture control cleanup: {e}", exc_info=True)
self._capture_control = None
if self._capture_instance:
try:
del self._capture_instance
except Exception:
pass
self._capture_instance = None
self._frame_event.clear()
self._closed_event.clear()
self._latest_frame = None
def cleanup(self) -> None: def cleanup(self) -> None:
"""Cleanup WGC resources for all monitors.""" self._cleanup_internal()
# Proper cleanup for free-threaded captures: self._initialized = False
# 1. Stop capture via CaptureControl.stop() (signals thread to stop)
# 2. Wait for thread to finish using CaptureControl.wait() (blocks until done)
# 3. Delete capture instance (releases COM objects)
# 4. Force garbage collection (ensures COM cleanup)
with self._frame_lock:
monitors_to_cleanup = list(self._monitor_captures.keys())
for monitor_index in monitors_to_cleanup:
logger.debug(f"Cleaning up WGC resources for monitor {monitor_index}...")
with self._frame_lock:
if monitor_index not in self._monitor_captures:
continue
monitor_data = self._monitor_captures[monitor_index]
# Stop and wait for capture thread
capture_control = monitor_data.get('control')
if capture_control:
try:
logger.debug(f"Stopping WGC capture thread for monitor {monitor_index}...")
capture_control.stop()
logger.debug(f"Waiting for WGC capture thread to finish (monitor {monitor_index})...")
# This will block until the capture thread actually finishes
capture_control.wait()
logger.debug(f"WGC capture thread finished successfully for monitor {monitor_index}")
except Exception as e:
logger.error(f"Error during WGC capture control cleanup for monitor {monitor_index}: {e}", exc_info=True)
# Delete capture instance
capture_instance = monitor_data.get('instance')
if capture_instance:
try:
logger.debug(f"Deleting WGC capture instance for monitor {monitor_index}...")
del capture_instance
logger.debug(f"WGC capture instance deleted for monitor {monitor_index}")
except Exception as e:
logger.error(f"Error deleting WGC capture instance for monitor {monitor_index}: {e}", exc_info=True)
# Clear events
frame_event = monitor_data.get('frame_event')
if frame_event:
frame_event.clear()
closed_event = monitor_data.get('closed_event')
if closed_event:
closed_event.clear()
# Remove from dictionary
with self._frame_lock:
if monitor_index in self._monitor_captures:
del self._monitor_captures[monitor_index]
logger.info(f"WGC engine cleaned up for monitor {monitor_index}")
# Force garbage collection to release COM objects # Force garbage collection to release COM objects
logger.debug("Running garbage collection for COM cleanup...")
gc.collect() gc.collect()
logger.debug("Garbage collection completed") logger.info(f"WGC capture stream cleaned up (display={self.display_index})")
def get_available_displays(self) -> List[DisplayInfo]: def capture_frame(self) -> Optional[ScreenCapture]:
"""Get list of available displays using MSS. if not self._initialized:
self.initialize()
Note: WGC doesn't provide a direct API for enumerating monitors,
so we use MSS for display detection.
Returns:
List of DisplayInfo objects
Raises:
RuntimeError: If detection fails
"""
try:
import mss
with mss.mss() as sct:
displays = []
# Skip monitor 0 (all monitors combined)
for i, monitor in enumerate(sct.monitors[1:], start=0):
displays.append(
DisplayInfo(
index=i,
name=f"Monitor {i+1}",
width=monitor["width"],
height=monitor["height"],
x=monitor["left"],
y=monitor["top"],
is_primary=(i == 0),
refresh_rate=60,
)
)
logger.debug(f"WGC detected {len(displays)} display(s)")
return displays
except Exception as e:
logger.error(f"Failed to detect displays: {e}")
raise RuntimeError(f"Failed to detect displays: {e}")
def capture_display(self, display_index: int) -> ScreenCapture:
"""Capture display using WGC.
WGC dynamically initializes for the requested display if needed.
Supports simultaneous capture from multiple monitors.
Args:
display_index: Index of display to capture (0-based)
Returns:
ScreenCapture object with image data
Raises:
RuntimeError: If initialization or capture fails
"""
# Initialize for this monitor if not already initialized
self.initialize(display_index)
try: try:
# Get the latest frame for this monitor
with self._frame_lock: with self._frame_lock:
if display_index not in self._monitor_captures: if self._latest_frame is None:
raise RuntimeError( raise RuntimeError(
f"Monitor {display_index} not initialized. This should not happen." f"No frame available yet for display {self.display_index}."
) )
frame = self._latest_frame.copy()
monitor_data = self._monitor_captures[display_index]
latest_frame = monitor_data.get('latest_frame')
if latest_frame is None:
raise RuntimeError(
f"No frame available yet for monitor {display_index}. "
"The capture may not have started or the screen hasn't updated. "
"Wait a moment and try again."
)
frame = latest_frame.copy()
logger.debug( logger.debug(
f"WGC captured display {display_index}: " f"WGC captured display {self.display_index}: "
f"{frame.shape[1]}x{frame.shape[0]}" f"{frame.shape[1]}x{frame.shape[0]}"
) )
@@ -325,46 +152,51 @@ class WGCEngine(CaptureEngine):
image=frame, image=frame,
width=frame.shape[1], width=frame.shape[1],
height=frame.shape[0], height=frame.shape[0],
display_index=display_index, display_index=self.display_index,
) )
except ValueError: except RuntimeError:
raise raise
except Exception as e: except Exception as e:
logger.error(f"Failed to capture display {display_index} with WGC: {e}") logger.error(f"Failed to capture display {self.display_index} with WGC: {e}")
raise RuntimeError(f"Screen capture failed: {e}") raise RuntimeError(f"Screen capture failed: {e}")
class WGCEngine(CaptureEngine):
"""Windows Graphics Capture engine.
Uses the windows-capture library which leverages Windows.Graphics.Capture API.
This is Microsoft's recommended modern screen capture API for Windows 10+.
Features:
- Cross-GPU support
- Hardware cursor exclusion
- GPU-accelerated with direct texture sharing
Requires: Windows 10 1803+
"""
ENGINE_TYPE = "wgc"
ENGINE_PRIORITY = 2
@classmethod @classmethod
def is_available(cls) -> bool: def is_available(cls) -> bool:
"""Check if WGC is available.
WGC requires Windows 10 1803+ and the windows-capture package.
Returns:
True if windows-capture is available on this system
"""
# Check platform
if sys.platform != "win32": if sys.platform != "win32":
return False return False
# Check Windows version (Windows 10 1803 = version 10.0.17134)
try: try:
import platform import platform
version = platform.version() version = platform.version()
# Parse version string like "10.0.19045"
parts = version.split(".") parts = version.split(".")
if len(parts) >= 3: if len(parts) >= 3:
major = int(parts[0]) major = int(parts[0])
minor = int(parts[1]) minor = int(parts[1])
build = int(parts[2]) build = int(parts[2])
# Check for Windows 10 1803+ (build 17134+)
if major < 10 or (major == 10 and minor == 0 and build < 17134): if major < 10 or (major == 10 and minor == 0 and build < 17134):
return False return False
except Exception: except Exception:
# If we can't parse version, assume it might work
pass pass
# Check if windows-capture is installed
try: try:
import windows_capture import windows_capture
return True return True
@@ -373,15 +205,37 @@ class WGCEngine(CaptureEngine):
@classmethod @classmethod
def get_default_config(cls) -> Dict[str, Any]: def get_default_config(cls) -> Dict[str, Any]:
"""Get default WGC configuration.
Note: monitor_index is NOT in config - WGC dynamically initializes
for the requested monitor at capture time.
Returns:
Default config dict with WGC options
"""
return { return {
"capture_cursor": False, # Exclude cursor (hardware exclusion) "capture_cursor": False,
"draw_border": False, # Don't draw border around capture "draw_border": False,
} }
@classmethod
def get_available_displays(cls) -> List[DisplayInfo]:
try:
import mss
with mss.mss() as sct:
displays = []
for i, monitor in enumerate(sct.monitors[1:], start=0):
displays.append(DisplayInfo(
index=i,
name=f"Monitor {i+1}",
width=monitor["width"],
height=monitor["height"],
x=monitor["left"],
y=monitor["top"],
is_primary=(i == 0),
refresh_rate=60,
))
logger.debug(f"WGC detected {len(displays)} display(s)")
return displays
except Exception as e:
logger.error(f"Failed to detect displays: {e}")
raise RuntimeError(f"Failed to detect displays: {e}")
    @classmethod
    def create_stream(cls, display_index: int, config: Dict[str, Any]) -> WGCCaptureStream:
        """Create a WGC capture stream session for one display.

        Args:
            display_index: Zero-based index of the display to capture.
            config: Engine options (see get_default_config for defaults).

        Returns:
            A WGCCaptureStream bound to the display; callers are expected to
            initialize it before capturing (see ScreenCaptureLiveStream.start).
        """
        return WGCCaptureStream(display_index, config)

View File

@@ -0,0 +1,240 @@
"""Runtime live stream abstractions for frame production.
LiveStream is the runtime counterpart of PictureSource (config/storage layer).
Each PictureSource type maps to a LiveStream implementation:
ScreenCapturePictureSource → ScreenCaptureLiveStream
ProcessedPictureSource → ProcessedLiveStream
StaticImagePictureSource → StaticImageLiveStream
LiveStreams are managed by LiveStreamManager which handles sharing and
reference counting — multiple devices using the same PictureSource
share a single LiveStream instance.
"""
import threading
import time
from abc import ABC, abstractmethod
from typing import List, Optional
import numpy as np
from wled_controller.core.capture_engines.base import CaptureStream, ScreenCapture
from wled_controller.core.filters import ImagePool, PostprocessingFilter
from wled_controller.utils import get_logger
logger = get_logger(__name__)
class LiveStream(ABC):
    """Interface for a runtime frame source.

    Implementations produce frames at some cadence; consumers poll the
    newest frame via get_latest_frame() without blocking.
    """

    @abstractmethod
    def start(self) -> None:
        """Begin frame production. Invoked once, when the stream is first needed."""

    @abstractmethod
    def stop(self) -> None:
        """Halt frame production and free any held resources."""

    @abstractmethod
    def get_latest_frame(self) -> Optional[ScreenCapture]:
        """Return the newest available frame.

        Returns:
            The most recent ScreenCapture (RGB image data), or None when no
            frame has been produced yet.
        """

    @property
    @abstractmethod
    def target_fps(self) -> int:
        """Frame rate this stream aims to produce."""

    @property
    @abstractmethod
    def display_index(self) -> Optional[int]:
        """Index of the captured display, or None for non-capture streams."""
class ScreenCaptureLiveStream(LiveStream):
    """Live stream backed by a CaptureStream with a dedicated capture thread.

    Runs a background thread that captures frames at the target FPS and stores
    the latest frame under a lock. Consumers read the cached frame via
    get_latest_frame() (non-blocking).

    The dedicated thread naturally satisfies thread affinity requirements
    for capture libraries (DXGI, GDI, WGC).

    NOTE(review): initialize() and cleanup() run on the caller's thread while
    capture_frame() runs on the worker thread — confirm the capture backends
    tolerate this split.
    """

    def __init__(self, capture_stream: CaptureStream, fps: int):
        """Initialize the stream wrapper.

        Args:
            capture_stream: Underlying capture session (initialized in start()).
            fps: Target capture frame rate; values <= 0 fall back to 1 FPS pacing.
        """
        self._capture_stream = capture_stream
        self._fps = fps
        self._latest_frame: Optional[ScreenCapture] = None
        self._frame_lock = threading.Lock()
        self._running = False
        self._thread: Optional[threading.Thread] = None

    @property
    def target_fps(self) -> int:
        return self._fps

    @property
    def display_index(self) -> Optional[int]:
        return self._capture_stream.display_index

    def start(self) -> None:
        """Initialize the capture session and launch the capture thread.

        Idempotent: does nothing if already running.
        """
        if self._running:
            return
        self._capture_stream.initialize()
        self._running = True
        # Daemon thread so a hung capture backend cannot block interpreter exit.
        self._thread = threading.Thread(
            target=self._capture_loop,
            name=f"live-capture-{self._capture_stream.display_index}",
            daemon=True,
        )
        self._thread.start()
        logger.info(
            f"ScreenCaptureLiveStream started "
            f"(display={self._capture_stream.display_index}, fps={self._fps})"
        )

    def stop(self) -> None:
        """Stop the capture thread and release capture resources.

        Idempotent: does nothing if not running.
        """
        if not self._running:
            return
        self._running = False
        if self._thread:
            self._thread.join(timeout=5.0)
            if self._thread.is_alive():
                logger.warning("Capture thread did not terminate within 5s")
            self._thread = None
        self._capture_stream.cleanup()
        # Clear under the lock: if the join above timed out, the capture
        # thread may still be publishing frames concurrently.
        with self._frame_lock:
            self._latest_frame = None
        logger.info(
            f"ScreenCaptureLiveStream stopped "
            f"(display={self._capture_stream.display_index})"
        )

    def get_latest_frame(self) -> Optional[ScreenCapture]:
        """Return the most recently captured frame (None before first capture)."""
        with self._frame_lock:
            return self._latest_frame

    def _capture_loop(self) -> None:
        """Worker loop: capture at the target FPS until stop() clears _running."""
        frame_time = 1.0 / self._fps if self._fps > 0 else 1.0
        while self._running:
            # Monotonic clock for pacing: wall-clock (time.time) can jump on
            # NTP adjustments and break the frame interval computation.
            loop_start = time.monotonic()
            try:
                frame = self._capture_stream.capture_frame()
                if frame is not None:
                    with self._frame_lock:
                        self._latest_frame = frame
            except Exception as e:
                # Log and keep looping: a transient capture error should not
                # kill the stream.
                logger.error(f"Capture error (display={self._capture_stream.display_index}): {e}")
            elapsed = time.monotonic() - loop_start
            sleep_time = max(0, frame_time - elapsed)
            if sleep_time > 0:
                time.sleep(sleep_time)
class ProcessedLiveStream(LiveStream):
    """Live stream that applies postprocessing filters to a source stream.

    Pulls frames from a source LiveStream and runs them through a chain of
    filters. Results are cached by source-frame identity: while the source
    keeps returning the same frame object, the previously processed result
    is handed back without recomputation.

    Thread-safe: a lock serializes filter application so concurrent
    consumers never process the same frame twice.
    """

    def __init__(
        self,
        source: LiveStream,
        filters: List[PostprocessingFilter],
    ):
        self._source = source
        self._filters = filters
        self._image_pool = ImagePool()
        self._process_lock = threading.Lock()
        self._cached_source_frame: Optional[ScreenCapture] = None
        self._cached_result: Optional[ScreenCapture] = None

    @property
    def target_fps(self) -> int:
        return self._source.target_fps

    @property
    def display_index(self) -> Optional[int]:
        return self._source.display_index

    def start(self) -> None:
        # The source stream's lifecycle belongs to LiveStreamManager.
        pass

    def stop(self) -> None:
        # The source stream's lifecycle belongs to LiveStreamManager;
        # just drop our caches.
        self._cached_source_frame = None
        self._cached_result = None

    def get_latest_frame(self) -> Optional[ScreenCapture]:
        raw = self._source.get_latest_frame()
        if raw is None:
            return None
        with self._process_lock:
            # Identity cache: same source object => reuse the processed result.
            if raw is self._cached_source_frame and self._cached_result is not None:
                return self._cached_result
            # Run the filter chain over a private copy of the source image.
            working = raw.image.copy()
            for flt in self._filters:
                out = flt.process_image(working, self._image_pool)
                if out is not None:
                    working = out
            result = ScreenCapture(
                image=working,
                width=raw.width,
                height=raw.height,
                display_index=raw.display_index,
            )
            self._cached_source_frame = raw
            self._cached_result = result
            return result
class StaticImageLiveStream(LiveStream):
    """Live stream whose frames are always the same static image."""

    def __init__(self, image: np.ndarray):
        self._image = image
        height, width = image.shape[0], image.shape[1]
        # Build the single frame once; get_latest_frame just hands it back.
        self._frame = ScreenCapture(
            image=image, width=width, height=height, display_index=-1
        )

    @property
    def target_fps(self) -> int:
        return 1

    @property
    def display_index(self) -> Optional[int]:
        return None

    def start(self) -> None:
        pass

    def stop(self) -> None:
        pass

    def get_latest_frame(self) -> Optional[ScreenCapture]:
        return self._frame

View File

@@ -0,0 +1,272 @@
"""Shared live stream management with reference counting.
LiveStreamManager creates LiveStream instances from PictureSource configs
and shares them across multiple consumers (devices). When multiple devices
reference the same PictureSource, they share a single LiveStream instance.
Reference counting ensures streams are cleaned up when the last consumer
releases them.
"""
from dataclasses import dataclass
from typing import Dict, Optional
import httpx
import numpy as np
from wled_controller.core.capture_engines import EngineRegistry
from wled_controller.core.filters import FilterRegistry, PostprocessingFilter
from wled_controller.core.live_stream import (
LiveStream,
ProcessedLiveStream,
ScreenCaptureLiveStream,
StaticImageLiveStream,
)
from wled_controller.utils import get_logger
logger = get_logger(__name__)
@dataclass
class _LiveStreamEntry:
    """Internal tracking entry for a managed live stream."""
    # The shared runtime stream instance handed out to consumers.
    live_stream: LiveStream
    # Number of consumers currently holding this stream (acquire/release pairs).
    ref_count: int
    # For ProcessedLiveStream: the source stream ID whose live stream we depend on.
    # Used to recursively release the source when this stream's ref count hits 0.
    source_stream_id: Optional[str] = None
class LiveStreamManager:
    """Manages shared LiveStream instances with reference counting.

    Multiple devices using the same PictureSource share a single LiveStream.
    Streams are created on first acquire and cleaned up when the last
    consumer releases.

    For ProcessedPictureSources, the source stream is recursively acquired,
    enabling sharing at every level of the stream chain.

    NOTE(review): no internal locking — concurrent acquire/release of the
    same id would race on the entry dict; confirm callers serialize access.
    """

    def __init__(self, picture_source_store, capture_template_store=None, pp_template_store=None):
        """Initialize the live stream manager.

        Args:
            picture_source_store: PictureSourceStore for resolving stream configs
            capture_template_store: TemplateStore for resolving capture engine settings
            pp_template_store: PostprocessingTemplateStore for resolving filter chains
        """
        self._picture_source_store = picture_source_store
        self._capture_template_store = capture_template_store
        self._pp_template_store = pp_template_store
        self._streams: Dict[str, _LiveStreamEntry] = {}

    def acquire(self, picture_source_id: str) -> LiveStream:
        """Get or create a LiveStream for the given PictureSource config.

        If a LiveStream already exists for this picture_source_id, increments
        the reference count and returns the existing instance. Otherwise,
        creates a new LiveStream from the PictureSource config, starts it,
        and stores it with ref_count=1.

        Args:
            picture_source_id: ID of the PictureSource config

        Returns:
            LiveStream instance (shared if already exists)

        Raises:
            ValueError: If PictureSource not found or config invalid
            RuntimeError: If stream creation/start fails
        """
        if picture_source_id in self._streams:
            entry = self._streams[picture_source_id]
            entry.ref_count += 1
            logger.info(
                f"Reusing live stream for picture source {picture_source_id} "
                f"(ref_count={entry.ref_count})"
            )
            return entry.live_stream

        # Create new live stream from config
        live_stream, source_stream_id = self._create_live_stream(picture_source_id)
        try:
            live_stream.start()
        except Exception as e:
            # If start fails, release any source dependency we acquired
            if source_stream_id:
                self.release(source_stream_id)
            # Chain the cause so the underlying failure is not lost.
            raise RuntimeError(
                f"Failed to start live stream for picture source {picture_source_id}: {e}"
            ) from e

        self._streams[picture_source_id] = _LiveStreamEntry(
            live_stream=live_stream,
            ref_count=1,
            source_stream_id=source_stream_id,
        )
        logger.info(f"Created live stream for picture source {picture_source_id}")
        return live_stream

    def release(self, picture_source_id: str) -> None:
        """Release a reference to a LiveStream.

        Decrements the reference count. When it reaches 0, stops the
        LiveStream and removes it from the registry. For ProcessedLiveStreams,
        recursively releases the source dependency.

        Args:
            picture_source_id: ID of the PictureSource to release
        """
        entry = self._streams.get(picture_source_id)
        if not entry:
            logger.warning(f"Attempted to release unknown live stream: {picture_source_id}")
            return

        entry.ref_count -= 1
        logger.debug(
            f"Released live stream {picture_source_id} (ref_count={entry.ref_count})"
        )
        if entry.ref_count <= 0:
            # Stop and remove; best-effort so one bad stream can't wedge shutdown
            try:
                entry.live_stream.stop()
            except Exception as e:
                logger.error(f"Error stopping live stream {picture_source_id}: {e}")
            source_stream_id = entry.source_stream_id
            del self._streams[picture_source_id]
            logger.info(f"Removed live stream for picture source {picture_source_id}")
            # Recursively release source dependency
            if source_stream_id:
                self.release(source_stream_id)

    def release_all(self) -> None:
        """Stop and remove all managed live streams. Safety net for shutdown."""
        stream_ids = list(self._streams.keys())
        for stream_id in stream_ids:
            entry = self._streams.get(stream_id)
            if entry:
                try:
                    entry.live_stream.stop()
                except Exception as e:
                    logger.error(f"Error stopping live stream {stream_id}: {e}")
        self._streams.clear()
        logger.info("Released all managed live streams")

    def get_active_stream_ids(self) -> list:
        """Get diagnostics for currently active streams.

        Returns:
            List of dicts with keys "id" and "ref_count", one per managed stream.
        """
        return [
            {"id": sid, "ref_count": entry.ref_count}
            for sid, entry in self._streams.items()
        ]

    def _create_live_stream(self, picture_source_id: str) -> tuple:
        """Create a LiveStream from a PictureSource config.

        Dispatches on the concrete PictureSource subclass.

        Returns:
            Tuple of (LiveStream, source_stream_id or None)

        Raises:
            ValueError: If the PictureSource type is unrecognized.
        """
        # Imported locally — presumably to avoid a circular import with the
        # storage package; confirm before hoisting to module level.
        from wled_controller.storage.picture_source import (
            ProcessedPictureSource,
            ScreenCapturePictureSource,
            StaticImagePictureSource,
        )

        stream_config = self._picture_source_store.get_stream(picture_source_id)
        if isinstance(stream_config, ScreenCapturePictureSource):
            return self._create_screen_capture_live_stream(stream_config), None
        elif isinstance(stream_config, ProcessedPictureSource):
            return self._create_processed_live_stream(stream_config)
        elif isinstance(stream_config, StaticImagePictureSource):
            return self._create_static_image_live_stream(stream_config), None
        else:
            raise ValueError(f"Unknown picture source type: {type(stream_config)}")

    def _create_screen_capture_live_stream(self, config) -> ScreenCaptureLiveStream:
        """Create a ScreenCaptureLiveStream from a ScreenCapturePictureSource config."""
        # Resolve capture engine from template; MSS is the safe fallback.
        engine_type = "mss"
        engine_config = {}
        if config.capture_template_id and self._capture_template_store:
            try:
                tpl = self._capture_template_store.get_template(config.capture_template_id)
                engine_type = tpl.engine_type
                engine_config = tpl.engine_config
            except ValueError:
                logger.warning(
                    f"Capture template {config.capture_template_id} not found, using MSS fallback"
                )
        capture_stream = EngineRegistry.create_stream(
            engine_type, config.display_index, engine_config
        )
        return ScreenCaptureLiveStream(capture_stream, config.target_fps)

    def _create_processed_live_stream(self, config) -> tuple:
        """Create a ProcessedLiveStream from a ProcessedPictureSource config.

        Returns:
            Tuple of (ProcessedLiveStream, source_stream_id)
        """
        # Recursively acquire source stream (with ref counting)
        source_stream_id = config.source_stream_id
        source_live = self.acquire(source_stream_id)

        # Resolve postprocessing filters; unknown filters are skipped, a
        # missing template means no filters at all.
        filters = []
        if config.postprocessing_template_id and self._pp_template_store:
            try:
                pp = self._pp_template_store.get_template(config.postprocessing_template_id)
                for fi in pp.filters:
                    try:
                        filters.append(
                            FilterRegistry.create_instance(fi.filter_id, fi.options)
                        )
                    except ValueError as e:
                        logger.warning(f"Skipping unknown filter '{fi.filter_id}': {e}")
            except ValueError:
                logger.warning(
                    f"PP template {config.postprocessing_template_id} not found, no filters applied"
                )
        return ProcessedLiveStream(source_live, filters), source_stream_id

    def _create_static_image_live_stream(self, config) -> StaticImageLiveStream:
        """Create a StaticImageLiveStream from a StaticImagePictureSource config."""
        image = self._load_static_image(config.image_source)
        return StaticImageLiveStream(image)

    @staticmethod
    def _load_static_image(image_source: str) -> np.ndarray:
        """Load a static image from URL or file path, return as RGB numpy array.

        Raises:
            FileNotFoundError: If a local path does not exist.
            httpx.HTTPStatusError: If a URL fetch returns an error status.
        """
        from io import BytesIO
        from pathlib import Path
        from PIL import Image

        if image_source.startswith(("http://", "https://")):
            response = httpx.get(image_source, timeout=15.0, follow_redirects=True)
            response.raise_for_status()
            pil_image = Image.open(BytesIO(response.content))
        else:
            path = Path(image_source)
            if not path.exists():
                raise FileNotFoundError(f"Image file not found: {image_source}")
            pil_image = Image.open(path)
        # Normalize to RGB so downstream code never sees palette/alpha modes.
        pil_image = pil_image.convert("RGB")
        return np.array(pil_image)

View File

@@ -1,24 +1,22 @@
"""Processing manager for coordinating screen capture and WLED updates.""" """Processing manager for coordinating screen capture and WLED updates."""
import asyncio import asyncio
import concurrent.futures
import time import time
from dataclasses import dataclass, field from dataclasses import dataclass, field
from datetime import datetime from datetime import datetime
from typing import Dict, List, Optional, Tuple from typing import Dict, List, Optional, Tuple
import httpx import httpx
import numpy as np
from wled_controller.core.calibration import ( from wled_controller.core.calibration import (
CalibrationConfig, CalibrationConfig,
PixelMapper, PixelMapper,
create_default_calibration, create_default_calibration,
) )
import numpy as np
from wled_controller.core.capture_engines import CaptureEngine, EngineRegistry
from wled_controller.core.capture_engines.base import ScreenCapture from wled_controller.core.capture_engines.base import ScreenCapture
from wled_controller.core.filters import FilterInstance, FilterRegistry, ImagePool, PostprocessingFilter from wled_controller.core.live_stream import LiveStream
from wled_controller.core.live_stream_manager import LiveStreamManager
from wled_controller.core.pixel_processor import smooth_colors from wled_controller.core.pixel_processor import smooth_colors
from wled_controller.core.screen_capture import extract_border_pixels from wled_controller.core.screen_capture import extract_border_pixels
from wled_controller.core.wled_client import WLEDClient from wled_controller.core.wled_client import WLEDClient
@@ -93,11 +91,9 @@ class ProcessorState:
led_count: int led_count: int
settings: ProcessingSettings settings: ProcessingSettings
calibration: CalibrationConfig calibration: CalibrationConfig
capture_template_id: str = "" picture_source_id: str = ""
picture_stream_id: str = ""
wled_client: Optional[WLEDClient] = None wled_client: Optional[WLEDClient] = None
pixel_mapper: Optional[PixelMapper] = None pixel_mapper: Optional[PixelMapper] = None
capture_engine: Optional[CaptureEngine] = None
is_running: bool = False is_running: bool = False
task: Optional[asyncio.Task] = None task: Optional[asyncio.Task] = None
metrics: ProcessingMetrics = field(default_factory=ProcessingMetrics) metrics: ProcessingMetrics = field(default_factory=ProcessingMetrics)
@@ -111,15 +107,8 @@ class ProcessorState:
resolved_target_fps: Optional[int] = None resolved_target_fps: Optional[int] = None
resolved_engine_type: Optional[str] = None resolved_engine_type: Optional[str] = None
resolved_engine_config: Optional[dict] = None resolved_engine_config: Optional[dict] = None
resolved_filters: Optional[List[FilterInstance]] = None # LiveStream: runtime frame source (shared via LiveStreamManager)
# Static image: cached frame for static_image streams (no engine needed) live_stream: Optional[LiveStream] = None
static_image: Optional[np.ndarray] = None
image_pool: Optional[ImagePool] = None
filter_instances: Optional[List[PostprocessingFilter]] = None
# Dedicated single-thread executor for capture engine calls.
# Capture libraries (BetterCam, MSS, DXcam) use thread-local state,
# so all calls must run on the same thread.
capture_executor: Optional[concurrent.futures.ThreadPoolExecutor] = None
# WLED state snapshot taken before streaming starts (to restore on stop) # WLED state snapshot taken before streaming starts (to restore on stop)
wled_state_before: Optional[dict] = None wled_state_before: Optional[dict] = None
@@ -127,20 +116,23 @@ class ProcessorState:
class ProcessorManager: class ProcessorManager:
"""Manages screen processing for multiple WLED devices.""" """Manages screen processing for multiple WLED devices."""
def __init__(self, picture_stream_store=None, capture_template_store=None, pp_template_store=None): def __init__(self, picture_source_store=None, capture_template_store=None, pp_template_store=None):
"""Initialize processor manager. """Initialize processor manager.
Args: Args:
picture_stream_store: PictureStreamStore instance (for stream resolution) picture_source_store: PictureSourceStore instance (for stream resolution)
capture_template_store: TemplateStore instance (for engine lookup) capture_template_store: TemplateStore instance (for engine lookup)
pp_template_store: PostprocessingTemplateStore instance (for PP settings) pp_template_store: PostprocessingTemplateStore instance (for PP settings)
""" """
self._processors: Dict[str, ProcessorState] = {} self._processors: Dict[str, ProcessorState] = {}
self._health_monitoring_active = False self._health_monitoring_active = False
self._http_client: Optional[httpx.AsyncClient] = None self._http_client: Optional[httpx.AsyncClient] = None
self._picture_stream_store = picture_stream_store self._picture_source_store = picture_source_store
self._capture_template_store = capture_template_store self._capture_template_store = capture_template_store
self._pp_template_store = pp_template_store self._pp_template_store = pp_template_store
self._live_stream_manager = LiveStreamManager(
picture_source_store, capture_template_store, pp_template_store
)
logger.info("Processor manager initialized") logger.info("Processor manager initialized")
async def _get_http_client(self) -> httpx.AsyncClient: async def _get_http_client(self) -> httpx.AsyncClient:
@@ -156,8 +148,7 @@ class ProcessorManager:
led_count: int, led_count: int,
settings: Optional[ProcessingSettings] = None, settings: Optional[ProcessingSettings] = None,
calibration: Optional[CalibrationConfig] = None, calibration: Optional[CalibrationConfig] = None,
capture_template_id: str = "", picture_source_id: str = "",
picture_stream_id: str = "",
): ):
"""Add a device for processing. """Add a device for processing.
@@ -167,8 +158,7 @@ class ProcessorManager:
led_count: Number of LEDs led_count: Number of LEDs
settings: Processing settings (uses defaults if None) settings: Processing settings (uses defaults if None)
calibration: Calibration config (creates default if None) calibration: Calibration config (creates default if None)
capture_template_id: Legacy template ID for screen capture engine picture_source_id: Picture source ID
picture_stream_id: Picture stream ID (preferred over capture_template_id)
""" """
if device_id in self._processors: if device_id in self._processors:
raise ValueError(f"Device {device_id} already exists") raise ValueError(f"Device {device_id} already exists")
@@ -185,8 +175,7 @@ class ProcessorManager:
led_count=led_count, led_count=led_count,
settings=settings, settings=settings,
calibration=calibration, calibration=calibration,
capture_template_id=capture_template_id, picture_source_id=picture_source_id,
picture_stream_id=picture_stream_id,
) )
self._processors[device_id] = state self._processors[device_id] = state
@@ -280,116 +269,56 @@ class ProcessorManager:
logger.info(f"Updated calibration for device {device_id}") logger.info(f"Updated calibration for device {device_id}")
def _resolve_stream_settings(self, state: ProcessorState): def _resolve_stream_settings(self, state: ProcessorState):
"""Resolve picture stream chain to populate resolved_* fields on state. """Resolve picture source chain to populate resolved_* metadata fields.
If device has a picture_stream_id and stores are available, resolves the Resolves metadata (display_index, fps, engine info) for status reporting.
stream chain to get display_index, fps, engine type/config, and PP settings. Actual stream creation is handled by LiveStreamManager.
Otherwise falls back to legacy device settings.
""" """
if state.picture_stream_id and self._picture_stream_store: if not state.picture_source_id or not self._picture_source_store:
try: raise ValueError(f"Device {state.device_id} has no picture source assigned")
chain = self._picture_stream_store.resolve_stream_chain(state.picture_stream_id)
raw_stream = chain["raw_stream"]
pp_template_ids = chain["postprocessing_template_ids"]
if raw_stream.stream_type == "static_image": from wled_controller.storage.picture_source import ScreenCapturePictureSource, StaticImagePictureSource
# Static image stream: load image once, no engine needed
state.resolved_display_index = -1
state.resolved_target_fps = 1
state.resolved_engine_type = None
state.resolved_engine_config = None
state.static_image = self._load_static_image(raw_stream.image_source)
else:
# Raw capture stream
state.resolved_display_index = raw_stream.display_index
state.resolved_target_fps = raw_stream.target_fps
# Resolve capture engine from raw stream's capture template chain = self._picture_source_store.resolve_stream_chain(state.picture_source_id)
if raw_stream.capture_template_id and self._capture_template_store: raw_stream = chain["raw_stream"]
try: pp_template_ids = chain["postprocessing_template_ids"]
tpl = self._capture_template_store.get_template(raw_stream.capture_template_id)
state.resolved_engine_type = tpl.engine_type
state.resolved_engine_config = tpl.engine_config
except ValueError:
logger.warning(f"Capture template {raw_stream.capture_template_id} not found, using MSS fallback")
state.resolved_engine_type = "mss"
state.resolved_engine_config = {}
# Resolve postprocessing: use first PP template in chain if isinstance(raw_stream, StaticImagePictureSource):
if pp_template_ids and self._pp_template_store: state.resolved_display_index = -1
try: state.resolved_target_fps = 1
pp = self._pp_template_store.get_template(pp_template_ids[0]) state.resolved_engine_type = None
state.resolved_filters = pp.filters state.resolved_engine_config = None
except ValueError: elif isinstance(raw_stream, ScreenCapturePictureSource):
logger.warning(f"PP template {pp_template_ids[0]} not found, using defaults") state.resolved_display_index = raw_stream.display_index
state.resolved_target_fps = raw_stream.target_fps
logger.info( if raw_stream.capture_template_id and self._capture_template_store:
f"Resolved stream chain for {state.device_id}: " try:
f"display={state.resolved_display_index}, fps={state.resolved_target_fps}, " tpl = self._capture_template_store.get_template(raw_stream.capture_template_id)
f"engine={state.resolved_engine_type}, pp_templates={len(pp_template_ids)}" state.resolved_engine_type = tpl.engine_type
) state.resolved_engine_config = tpl.engine_config
return except ValueError:
except ValueError as e: logger.warning(f"Capture template {raw_stream.capture_template_id} not found, using MSS fallback")
logger.warning(f"Failed to resolve stream {state.picture_stream_id}: {e}, falling back to legacy settings") state.resolved_engine_type = "mss"
state.resolved_engine_config = {}
# Fallback: use legacy device settings (construct filters from flat fields) logger.info(
state.resolved_display_index = state.settings.display_index f"Resolved stream metadata for {state.device_id}: "
state.resolved_target_fps = state.settings.fps f"display={state.resolved_display_index}, fps={state.resolved_target_fps}, "
legacy_filters = [] f"engine={state.resolved_engine_type}, pp_templates={len(pp_template_ids)}"
if state.settings.brightness != 1.0: )
legacy_filters.append(FilterInstance("brightness", {"value": state.settings.brightness}))
if state.settings.saturation != 1.0:
legacy_filters.append(FilterInstance("saturation", {"value": state.settings.saturation}))
if state.settings.gamma != 1.0:
legacy_filters.append(FilterInstance("gamma", {"value": state.settings.gamma}))
state.resolved_filters = legacy_filters
# Resolve engine from legacy capture_template_id
if state.capture_template_id and self._capture_template_store:
try:
tpl = self._capture_template_store.get_template(state.capture_template_id)
state.resolved_engine_type = tpl.engine_type
state.resolved_engine_config = tpl.engine_config
except ValueError:
logger.warning(f"Capture template {state.capture_template_id} not found, using MSS fallback")
state.resolved_engine_type = "mss"
state.resolved_engine_config = {}
else:
state.resolved_engine_type = "mss"
state.resolved_engine_config = {}
@staticmethod
def _load_static_image(image_source: str) -> np.ndarray:
"""Load a static image from URL or file path, return as RGB numpy array."""
from io import BytesIO
from pathlib import Path
from PIL import Image
if image_source.startswith(("http://", "https://")):
response = httpx.get(image_source, timeout=15.0, follow_redirects=True)
response.raise_for_status()
pil_image = Image.open(BytesIO(response.content))
else:
path = Path(image_source)
if not path.exists():
raise FileNotFoundError(f"Image file not found: {image_source}")
pil_image = Image.open(path)
pil_image = pil_image.convert("RGB")
return np.array(pil_image)
async def start_processing(self, device_id: str): async def start_processing(self, device_id: str):
"""Start screen processing for a device. """Start screen processing for a device.
Resolves the picture stream chain (if assigned) to determine capture engine, Resolves the picture source chain to determine capture engine,
display, FPS, and postprocessing settings. Falls back to legacy device settings. display, FPS, and postprocessing settings.
Args: Args:
device_id: Device identifier device_id: Device identifier
Raises: Raises:
ValueError: If device not found ValueError: If device not found or no picture source assigned
RuntimeError: If processing already running RuntimeError: If processing already running
""" """
if device_id not in self._processors: if device_id not in self._processors:
@@ -433,34 +362,25 @@ class ProcessorManager:
logger.error(f"Failed to connect to WLED device {device_id}: {e}") logger.error(f"Failed to connect to WLED device {device_id}: {e}")
raise RuntimeError(f"Failed to connect to WLED device: {e}") raise RuntimeError(f"Failed to connect to WLED device: {e}")
# Initialize capture engine from resolved settings (skip for static_image) # Acquire live stream via LiveStreamManager (shared across devices)
if state.static_image is not None: try:
logger.info(f"Using static image for device {device_id} ({state.static_image.shape[1]}x{state.static_image.shape[0]})") live_stream = await asyncio.to_thread(
else: self._live_stream_manager.acquire, state.picture_source_id
try: )
engine_type = state.resolved_engine_type or "mss" state.live_stream = live_stream
engine_config = state.resolved_engine_config or {} # Update resolved metadata from actual live stream
engine = EngineRegistry.create_engine(engine_type, engine_config) if live_stream.display_index is not None:
state.resolved_display_index = live_stream.display_index
# Create a dedicated single-thread executor for capture calls. state.resolved_target_fps = live_stream.target_fps
# Capture libraries use thread-local state (DXGI contexts, GDI DCs) logger.info(
# so initialize + capture + cleanup must all run on the same thread. f"Acquired live stream for device {device_id} "
state.capture_executor = concurrent.futures.ThreadPoolExecutor( f"(picture_source={state.picture_source_id})"
max_workers=1, thread_name_prefix=f"capture-{device_id}" )
) except Exception as e:
loop = asyncio.get_event_loop() logger.error(f"Failed to initialize live stream for device {device_id}: {e}")
await loop.run_in_executor(state.capture_executor, engine.initialize) if state.wled_client:
await state.wled_client.disconnect()
state.capture_engine = engine raise RuntimeError(f"Failed to initialize live stream: {e}")
logger.info(f"Initialized capture engine for device {device_id}: {engine_type}")
except Exception as e:
logger.error(f"Failed to initialize capture engine for device {device_id}: {e}")
if state.capture_executor:
state.capture_executor.shutdown(wait=False)
state.capture_executor = None
if state.wled_client:
await state.wled_client.disconnect()
raise RuntimeError(f"Failed to initialize capture engine: {e}")
# Initialize pixel mapper # Initialize pixel mapper
state.pixel_mapper = PixelMapper( state.pixel_mapper = PixelMapper(
@@ -526,39 +446,26 @@ class ProcessorManager:
await state.wled_client.close() await state.wled_client.close()
state.wled_client = None state.wled_client = None
# Cleanup capture engine on the same dedicated thread it was created on # Release live stream
if state.capture_engine: if state.live_stream:
if state.capture_executor: try:
loop = asyncio.get_event_loop() self._live_stream_manager.release(state.picture_source_id)
try: except Exception as e:
await loop.run_in_executor( logger.warning(f"Error releasing live stream: {e}")
state.capture_executor, state.capture_engine.cleanup state.live_stream = None
)
except Exception as e:
logger.warning(f"Error cleaning up capture engine: {e}")
state.capture_executor.shutdown(wait=False)
state.capture_executor = None
else:
state.capture_engine.cleanup()
state.capture_engine = None
# Release cached static image
state.static_image = None
logger.info(f"Stopped processing for device {device_id}") logger.info(f"Stopped processing for device {device_id}")
async def _processing_loop(self, device_id: str): async def _processing_loop(self, device_id: str):
"""Main processing loop for a device. """Main processing loop for a device.
Uses resolved_* fields from stream resolution for display, FPS, Reads frames from the LiveStream (which handles capture and optional
and postprocessing. Falls back to device settings for LED projection PP filters via the picture source chain).
parameters (border_width, interpolation_mode) and WLED brightness.
""" """
state = self._processors[device_id] state = self._processors[device_id]
settings = state.settings settings = state.settings
# Use resolved values (populated by _resolve_stream_settings) # Use resolved values (populated by _resolve_stream_settings)
display_index = state.resolved_display_index or settings.display_index
target_fps = state.resolved_target_fps or settings.fps target_fps = state.resolved_target_fps or settings.fps
smoothing = settings.smoothing smoothing = settings.smoothing
@@ -566,35 +473,13 @@ class ProcessorManager:
border_width = settings.border_width border_width = settings.border_width
wled_brightness = settings.brightness # WLED hardware brightness wled_brightness = settings.brightness # WLED hardware brightness
# Instantiate filter objects once (not per-frame)
resolved_filters = state.resolved_filters or []
image_pool = ImagePool()
state.image_pool = image_pool
filter_objects = []
for fi in resolved_filters:
try:
filter_objects.append(FilterRegistry.create_instance(fi.filter_id, fi.options))
except ValueError as e:
logger.warning(f"Skipping unknown filter '{fi.filter_id}': {e}")
state.filter_instances = filter_objects
logger.info( logger.info(
f"Processing loop started for {device_id} " f"Processing loop started for {device_id} "
f"(display={display_index}, fps={target_fps}, filters={len(filter_objects)})" f"(display={state.resolved_display_index}, fps={target_fps})"
) )
frame_time = 1.0 / target_fps frame_time = 1.0 / target_fps
fps_samples = [] fps_samples = []
loop = asyncio.get_event_loop()
capture_executor = state.capture_executor # dedicated single-thread executor
def _apply_filters(image):
"""Apply all postprocessing filters to the captured image."""
for f in filter_objects:
result = f.process_image(image, image_pool)
if result is not None:
image = result
return image
try: try:
while state.is_running: while state.is_running:
@@ -606,18 +491,8 @@ class ProcessorManager:
continue continue
try: try:
# Get frame: static image or live capture # Get frame from live stream (handles capture + PP filters)
if state.static_image is not None: capture = await asyncio.to_thread(state.live_stream.get_latest_frame)
h, w = state.static_image.shape[:2]
capture = ScreenCapture(
image=state.static_image.copy(), width=w, height=h, display_index=-1
)
else:
capture = await loop.run_in_executor(
capture_executor,
state.capture_engine.capture_display,
display_index
)
# Skip processing if no new frame (screen unchanged) # Skip processing if no new frame (screen unchanged)
if capture is None: if capture is None:
@@ -626,10 +501,6 @@ class ProcessorManager:
await asyncio.sleep(frame_time) await asyncio.sleep(frame_time)
continue continue
# Apply postprocessing filters to the full captured image
if filter_objects:
capture.image = await asyncio.to_thread(_apply_filters, capture.image)
# Extract border pixels # Extract border pixels
border_pixels = await asyncio.to_thread(extract_border_pixels, capture, border_width) border_pixels = await asyncio.to_thread(extract_border_pixels, capture, border_width)
@@ -861,6 +732,9 @@ class ProcessorManager:
except Exception as e: except Exception as e:
logger.error(f"Error stopping device {device_id}: {e}") logger.error(f"Error stopping device {device_id}: {e}")
# Safety net: release any remaining managed live streams
self._live_stream_manager.release_all()
# Close shared HTTP client # Close shared HTTP client
if self._http_client and not self._http_client.is_closed: if self._http_client and not self._http_client.is_closed:
await self._http_client.aclose() await self._http_client.aclose()

View File

@@ -11,13 +11,13 @@ from fastapi.staticfiles import StaticFiles
from wled_controller import __version__ from wled_controller import __version__
from wled_controller.api import router from wled_controller.api import router
from wled_controller.api.routes import init_dependencies from wled_controller.api.dependencies import init_dependencies
from wled_controller.config import get_config from wled_controller.config import get_config
from wled_controller.core.processor_manager import ProcessorManager from wled_controller.core.processor_manager import ProcessorManager
from wled_controller.storage import DeviceStore from wled_controller.storage import DeviceStore
from wled_controller.storage.template_store import TemplateStore from wled_controller.storage.template_store import TemplateStore
from wled_controller.storage.postprocessing_template_store import PostprocessingTemplateStore from wled_controller.storage.postprocessing_template_store import PostprocessingTemplateStore
from wled_controller.storage.picture_stream_store import PictureStreamStore from wled_controller.storage.picture_source_store import PictureSourceStore
from wled_controller.utils import setup_logging, get_logger from wled_controller.utils import setup_logging, get_logger
# Initialize logging # Initialize logging
@@ -31,61 +31,10 @@ config = get_config()
device_store = DeviceStore(config.storage.devices_file) device_store = DeviceStore(config.storage.devices_file)
template_store = TemplateStore(config.storage.templates_file) template_store = TemplateStore(config.storage.templates_file)
pp_template_store = PostprocessingTemplateStore(config.storage.postprocessing_templates_file) pp_template_store = PostprocessingTemplateStore(config.storage.postprocessing_templates_file)
picture_stream_store = PictureStreamStore(config.storage.picture_streams_file) picture_source_store = PictureSourceStore(config.storage.picture_sources_file)
# Assign first available template to devices with missing/invalid template
all_templates = template_store.get_all_templates()
if all_templates:
valid_ids = {t.id for t in all_templates}
for device in device_store.get_all_devices():
if not device.capture_template_id or device.capture_template_id not in valid_ids:
old_id = device.capture_template_id
device_store.update_device(device.id, capture_template_id=all_templates[0].id)
logger.info(
f"Assigned template '{all_templates[0].name}' to device '{device.name}' "
f"(was '{old_id}')"
)
# Migrate devices without picture_stream_id: create streams from legacy settings
for device in device_store.get_all_devices():
if not device.picture_stream_id:
try:
# Create a raw stream from the device's current capture settings
raw_stream = picture_stream_store.create_stream(
name=f"{device.name} - Raw",
stream_type="raw",
display_index=device.settings.display_index,
capture_template_id=device.capture_template_id,
target_fps=device.settings.fps,
description=f"Auto-migrated from device '{device.name}'",
)
# Create a processed stream with the first PP template
pp_templates = pp_template_store.get_all_templates()
if pp_templates:
processed_stream = picture_stream_store.create_stream(
name=f"{device.name} - Processed",
stream_type="processed",
source_stream_id=raw_stream.id,
postprocessing_template_id=pp_templates[0].id,
description=f"Auto-migrated from device '{device.name}'",
)
device_store.update_device(device.id, picture_stream_id=processed_stream.id)
logger.info(
f"Migrated device '{device.name}': created raw stream '{raw_stream.id}' "
f"+ processed stream '{processed_stream.id}'"
)
else:
# No PP templates, assign raw stream directly
device_store.update_device(device.id, picture_stream_id=raw_stream.id)
logger.info(
f"Migrated device '{device.name}': created raw stream '{raw_stream.id}'"
)
except Exception as e:
logger.error(f"Failed to migrate device '{device.name}': {e}")
processor_manager = ProcessorManager( processor_manager = ProcessorManager(
picture_stream_store=picture_stream_store, picture_source_store=picture_source_store,
capture_template_store=template_store, capture_template_store=template_store,
pp_template_store=pp_template_store, pp_template_store=pp_template_store,
) )
@@ -124,7 +73,7 @@ async def lifespan(app: FastAPI):
init_dependencies( init_dependencies(
device_store, template_store, processor_manager, device_store, template_store, processor_manager,
pp_template_store=pp_template_store, pp_template_store=pp_template_store,
picture_stream_store=picture_stream_store, picture_source_store=picture_source_store,
) )
# Load existing devices into processor manager # Load existing devices into processor manager
@@ -137,8 +86,7 @@ async def lifespan(app: FastAPI):
led_count=device.led_count, led_count=device.led_count,
settings=device.settings, settings=device.settings,
calibration=device.calibration, calibration=device.calibration,
capture_template_id=device.capture_template_id, picture_source_id=device.picture_source_id,
picture_stream_id=device.picture_stream_id,
) )
logger.info(f"Loaded device: {device.name} ({device.id})") logger.info(f"Loaded device: {device.name} ({device.id})")
except Exception as e: except Exception as e:

View File

@@ -91,7 +91,7 @@ function closeLightbox(event) {
async function openFullImageLightbox(imageSource) { async function openFullImageLightbox(imageSource) {
try { try {
const resp = await fetch(`${API_BASE}/picture-streams/full-image?source=${encodeURIComponent(imageSource)}`, { const resp = await fetch(`${API_BASE}/picture-sources/full-image?source=${encodeURIComponent(imageSource)}`, {
headers: getHeaders() headers: getHeaders()
}); });
if (!resp.ok) return; if (!resp.ok) return;
@@ -402,7 +402,7 @@ function updateAllText() {
if (apiKey) { if (apiKey) {
loadDisplays(); loadDisplays();
loadDevices(); loadDevices();
loadPictureStreams(); loadPictureSources();
} }
} }
@@ -576,15 +576,11 @@ async function loadDisplays() {
let _cachedDisplays = null; let _cachedDisplays = null;
function switchTab(name) { function switchTab(name) {
// Migrate legacy tab values from localStorage
if (name === 'templates' || name === 'pp-templates') {
name = 'streams';
}
document.querySelectorAll('.tab-btn').forEach(btn => btn.classList.toggle('active', btn.dataset.tab === name)); document.querySelectorAll('.tab-btn').forEach(btn => btn.classList.toggle('active', btn.dataset.tab === name));
document.querySelectorAll('.tab-panel').forEach(panel => panel.classList.toggle('active', panel.id === `tab-${name}`)); document.querySelectorAll('.tab-panel').forEach(panel => panel.classList.toggle('active', panel.id === `tab-${name}`));
localStorage.setItem('activeTab', name); localStorage.setItem('activeTab', name);
if (name === 'streams') { if (name === 'streams') {
loadPictureStreams(); loadPictureSources();
} }
} }
@@ -2532,7 +2528,7 @@ async function loadCaptureTemplates() {
const data = await response.json(); const data = await response.json();
_cachedCaptureTemplates = data.templates || []; _cachedCaptureTemplates = data.templates || [];
// Re-render the streams tab which now contains template sections // Re-render the streams tab which now contains template sections
renderPictureStreamsList(_cachedStreams); renderPictureSourcesList(_cachedStreams);
} catch (error) { } catch (error) {
console.error('Error loading capture templates:', error); console.error('Error loading capture templates:', error);
} }
@@ -2540,7 +2536,7 @@ async function loadCaptureTemplates() {
// Get engine icon // Get engine icon
function getEngineIcon(engineType) { function getEngineIcon(engineType) {
return '🖥️'; return '🚀';
} }
// Show add template modal // Show add template modal
@@ -2786,7 +2782,7 @@ async function loadAvailableEngines() {
availableEngines = data.engines || []; availableEngines = data.engines || [];
const select = document.getElementById('template-engine'); const select = document.getElementById('template-engine');
select.innerHTML = `<option value="">${t('templates.engine.select')}</option>`; select.innerHTML = '';
availableEngines.forEach(engine => { availableEngines.forEach(engine => {
const option = document.createElement('option'); const option = document.createElement('option');
@@ -2798,6 +2794,12 @@ async function loadAvailableEngines() {
} }
select.appendChild(option); select.appendChild(option);
}); });
// Auto-select first available engine if nothing selected
if (!select.value) {
const firstAvailable = availableEngines.find(e => e.available);
if (firstAvailable) select.value = firstAvailable.type;
}
} catch (error) { } catch (error) {
console.error('Error loading engines:', error); console.error('Error loading engines:', error);
showToast(t('templates.error.engines') + ': ' + error.message, 'error'); showToast(t('templates.error.engines') + ': ' + error.message, 'error');
@@ -3085,14 +3087,14 @@ async function deleteTemplate(templateId) {
} }
} }
// ===== Picture Streams ===== // ===== Picture Sources =====
let _cachedStreams = []; let _cachedStreams = [];
let _cachedPPTemplates = []; let _cachedPPTemplates = [];
let _cachedCaptureTemplates = []; let _cachedCaptureTemplates = [];
let _availableFilters = []; // Loaded from GET /filters let _availableFilters = []; // Loaded from GET /filters
async function loadPictureStreams() { async function loadPictureSources() {
try { try {
// Always fetch templates, filters, and streams in parallel // Always fetch templates, filters, and streams in parallel
// since templates are now rendered inside stream sub-tabs // since templates are now rendered inside stream sub-tabs
@@ -3100,7 +3102,7 @@ async function loadPictureStreams() {
_availableFilters.length === 0 ? fetchWithAuth('/filters') : Promise.resolve(null), _availableFilters.length === 0 ? fetchWithAuth('/filters') : Promise.resolve(null),
fetchWithAuth('/postprocessing-templates'), fetchWithAuth('/postprocessing-templates'),
fetchWithAuth('/capture-templates'), fetchWithAuth('/capture-templates'),
fetchWithAuth('/picture-streams') fetchWithAuth('/picture-sources')
]); ]);
if (filtersResp && filtersResp.ok) { if (filtersResp && filtersResp.ok) {
@@ -3120,9 +3122,9 @@ async function loadPictureStreams() {
} }
const data = await streamsResp.json(); const data = await streamsResp.json();
_cachedStreams = data.streams || []; _cachedStreams = data.streams || [];
renderPictureStreamsList(_cachedStreams); renderPictureSourcesList(_cachedStreams);
} catch (error) { } catch (error) {
console.error('Error loading picture streams:', error); console.error('Error loading picture sources:', error);
document.getElementById('streams-list').innerHTML = ` document.getElementById('streams-list').innerHTML = `
<div class="error-message">${t('streams.error.load')}: ${error.message}</div> <div class="error-message">${t('streams.error.load')}: ${error.message}</div>
`; `;
@@ -3139,7 +3141,7 @@ function switchStreamTab(tabKey) {
localStorage.setItem('activeStreamTab', tabKey); localStorage.setItem('activeStreamTab', tabKey);
} }
function renderPictureStreamsList(streams) { function renderPictureSourcesList(streams) {
const container = document.getElementById('streams-list'); const container = document.getElementById('streams-list');
const activeTab = localStorage.getItem('activeStreamTab') || 'raw'; const activeTab = localStorage.getItem('activeStreamTab') || 'raw';
@@ -3157,7 +3159,7 @@ function renderPictureStreamsList(streams) {
detailsHtml = `<div class="stream-card-props"> detailsHtml = `<div class="stream-card-props">
<span class="stream-card-prop" title="${t('streams.display')}">🖥️ ${stream.display_index ?? 0}</span> <span class="stream-card-prop" title="${t('streams.display')}">🖥️ ${stream.display_index ?? 0}</span>
<span class="stream-card-prop" title="${t('streams.target_fps')}">⚡ ${stream.target_fps ?? 30}</span> <span class="stream-card-prop" title="${t('streams.target_fps')}">⚡ ${stream.target_fps ?? 30}</span>
${capTmplName ? `<span class="stream-card-prop" title="${t('streams.capture_template')}">📷 ${capTmplName}</span>` : ''} ${capTmplName ? `<span class="stream-card-prop" title="${t('streams.capture_template')}">📋 ${capTmplName}</span>` : ''}
</div>`; </div>`;
} else if (stream.stream_type === 'processed') { } else if (stream.stream_type === 'processed') {
const sourceStream = _cachedStreams.find(s => s.id === stream.source_stream_id); const sourceStream = _cachedStreams.find(s => s.id === stream.source_stream_id);
@@ -3169,7 +3171,7 @@ function renderPictureStreamsList(streams) {
} }
detailsHtml = `<div class="stream-card-props"> detailsHtml = `<div class="stream-card-props">
<span class="stream-card-prop" title="${t('streams.source')}">📺 ${sourceName}</span> <span class="stream-card-prop" title="${t('streams.source')}">📺 ${sourceName}</span>
${ppTmplName ? `<span class="stream-card-prop" title="${t('streams.pp_template')}">🎨 ${ppTmplName}</span>` : ''} ${ppTmplName ? `<span class="stream-card-prop" title="${t('streams.pp_template')}">📋 ${ppTmplName}</span>` : ''}
</div>`; </div>`;
} else if (stream.stream_type === 'static_image') { } else if (stream.stream_type === 'static_image') {
const src = stream.image_source || ''; const src = stream.image_source || '';
@@ -3208,12 +3210,12 @@ function renderPictureStreamsList(streams) {
<button class="card-remove-btn" onclick="deleteTemplate('${template.id}')" title="${t('common.delete')}">&#x2715;</button> <button class="card-remove-btn" onclick="deleteTemplate('${template.id}')" title="${t('common.delete')}">&#x2715;</button>
<div class="template-card-header"> <div class="template-card-header">
<div class="template-name"> <div class="template-name">
${engineIcon} ${escapeHtml(template.name)} 📋 ${escapeHtml(template.name)}
</div> </div>
</div> </div>
${template.description ? `<div class="template-config" style="opacity:0.7;">${escapeHtml(template.description)}</div>` : ''} ${template.description ? `<div class="template-config" style="opacity:0.7;">${escapeHtml(template.description)}</div>` : ''}
<div class="stream-card-props"> <div class="stream-card-props">
<span class="stream-card-prop" title="${t('templates.engine')}">⚙️ ${template.engine_type.toUpperCase()}</span> <span class="stream-card-prop" title="${t('templates.engine')}">🚀 ${template.engine_type.toUpperCase()}</span>
${configEntries.length > 0 ? `<span class="stream-card-prop" title="${t('templates.config.show')}">🔧 ${configEntries.length}</span>` : ''} ${configEntries.length > 0 ? `<span class="stream-card-prop" title="${t('templates.config.show')}">🔧 ${configEntries.length}</span>` : ''}
</div> </div>
${configEntries.length > 0 ? ` ${configEntries.length > 0 ? `
@@ -3252,7 +3254,7 @@ function renderPictureStreamsList(streams) {
<button class="card-remove-btn" onclick="deletePPTemplate('${tmpl.id}')" title="${t('common.delete')}">&#x2715;</button> <button class="card-remove-btn" onclick="deletePPTemplate('${tmpl.id}')" title="${t('common.delete')}">&#x2715;</button>
<div class="template-card-header"> <div class="template-card-header">
<div class="template-name"> <div class="template-name">
🎨 ${escapeHtml(tmpl.name)} 📋 ${escapeHtml(tmpl.name)}
</div> </div>
</div> </div>
${tmpl.description ? `<div class="template-config" style="opacity:0.7;">${escapeHtml(tmpl.description)}</div>` : ''} ${tmpl.description ? `<div class="template-config" style="opacity:0.7;">${escapeHtml(tmpl.description)}</div>` : ''}
@@ -3390,7 +3392,7 @@ async function showAddStreamModal(presetType) {
async function editStream(streamId) { async function editStream(streamId) {
try { try {
const response = await fetchWithAuth(`/picture-streams/${streamId}`); const response = await fetchWithAuth(`/picture-sources/${streamId}`);
if (!response.ok) throw new Error(`Failed to load stream: ${response.status}`); if (!response.ok) throw new Error(`Failed to load stream: ${response.status}`);
const stream = await response.json(); const stream = await response.json();
@@ -3450,7 +3452,7 @@ async function populateStreamModalDropdowns() {
const [displaysRes, captureTemplatesRes, streamsRes, ppTemplatesRes] = await Promise.all([ const [displaysRes, captureTemplatesRes, streamsRes, ppTemplatesRes] = await Promise.all([
fetch(`${API_BASE}/config/displays`, { headers: getHeaders() }), fetch(`${API_BASE}/config/displays`, { headers: getHeaders() }),
fetchWithAuth('/capture-templates'), fetchWithAuth('/capture-templates'),
fetchWithAuth('/picture-streams'), fetchWithAuth('/picture-sources'),
fetchWithAuth('/postprocessing-templates'), fetchWithAuth('/postprocessing-templates'),
]); ]);
@@ -3556,12 +3558,12 @@ async function saveStream() {
try { try {
let response; let response;
if (streamId) { if (streamId) {
response = await fetchWithAuth(`/picture-streams/${streamId}`, { response = await fetchWithAuth(`/picture-sources/${streamId}`, {
method: 'PUT', method: 'PUT',
body: JSON.stringify(payload) body: JSON.stringify(payload)
}); });
} else { } else {
response = await fetchWithAuth('/picture-streams', { response = await fetchWithAuth('/picture-sources', {
method: 'POST', method: 'POST',
body: JSON.stringify(payload) body: JSON.stringify(payload)
}); });
@@ -3574,7 +3576,7 @@ async function saveStream() {
showToast(streamId ? t('streams.updated') : t('streams.created'), 'success'); showToast(streamId ? t('streams.updated') : t('streams.created'), 'success');
closeStreamModal(); closeStreamModal();
await loadPictureStreams(); await loadPictureSources();
} catch (error) { } catch (error) {
console.error('Error saving stream:', error); console.error('Error saving stream:', error);
errorEl.textContent = error.message; errorEl.textContent = error.message;
@@ -3587,7 +3589,7 @@ async function deleteStream(streamId) {
if (!confirmed) return; if (!confirmed) return;
try { try {
const response = await fetchWithAuth(`/picture-streams/${streamId}`, { const response = await fetchWithAuth(`/picture-sources/${streamId}`, {
method: 'DELETE' method: 'DELETE'
}); });
@@ -3597,7 +3599,7 @@ async function deleteStream(streamId) {
} }
showToast(t('streams.deleted'), 'success'); showToast(t('streams.deleted'), 'success');
await loadPictureStreams(); await loadPictureSources();
} catch (error) { } catch (error) {
console.error('Error deleting stream:', error); console.error('Error deleting stream:', error);
showToast(t('streams.error.delete') + ': ' + error.message, 'error'); showToast(t('streams.error.delete') + ': ' + error.message, 'error');
@@ -3635,7 +3637,7 @@ async function validateStaticImage() {
previewContainer.style.display = 'none'; previewContainer.style.display = 'none';
try { try {
const response = await fetchWithAuth('/picture-streams/validate-image', { const response = await fetchWithAuth('/picture-sources/validate-image', {
method: 'POST', method: 'POST',
body: JSON.stringify({ image_source: source }), body: JSON.stringify({ image_source: source }),
}); });
@@ -3662,7 +3664,7 @@ async function validateStaticImage() {
} }
} }
// ===== Picture Stream Test ===== // ===== Picture Source Test =====
let _currentTestStreamId = null; let _currentTestStreamId = null;
@@ -3701,7 +3703,7 @@ async function runStreamTest() {
showOverlaySpinner(t('streams.test.running'), captureDuration); showOverlaySpinner(t('streams.test.running'), captureDuration);
try { try {
const response = await fetchWithAuth(`/picture-streams/${_currentTestStreamId}/test`, { const response = await fetchWithAuth(`/picture-sources/${_currentTestStreamId}/test`, {
method: 'POST', method: 'POST',
body: JSON.stringify({ capture_duration: captureDuration }) body: JSON.stringify({ capture_duration: captureDuration })
}); });
@@ -3740,7 +3742,7 @@ async function showTestPPTemplateModal(templateId) {
// Ensure streams are cached // Ensure streams are cached
if (_cachedStreams.length === 0) { if (_cachedStreams.length === 0) {
try { try {
const resp = await fetchWithAuth('/picture-streams'); const resp = await fetchWithAuth('/picture-sources');
if (resp.ok) { const d = await resp.json(); _cachedStreams = d.streams || []; } if (resp.ok) { const d = await resp.json(); _cachedStreams = d.streams || []; }
} catch (e) { console.warn('Could not load streams for PP test:', e); } } catch (e) { console.warn('Could not load streams for PP test:', e); }
} }
@@ -3842,7 +3844,7 @@ async function loadPPTemplates() {
const data = await response.json(); const data = await response.json();
_cachedPPTemplates = data.templates || []; _cachedPPTemplates = data.templates || [];
// Re-render the streams tab which now contains template sections // Re-render the streams tab which now contains template sections
renderPictureStreamsList(_cachedStreams); renderPictureSourcesList(_cachedStreams);
} catch (error) { } catch (error) {
console.error('Error loading PP templates:', error); console.error('Error loading PP templates:', error);
} }
@@ -4163,7 +4165,7 @@ async function showStreamSelector(deviceId) {
try { try {
const [deviceResponse, streamsResponse, settingsResponse] = await Promise.all([ const [deviceResponse, streamsResponse, settingsResponse] = await Promise.all([
fetch(`${API_BASE}/devices/${deviceId}`, { headers: getHeaders() }), fetch(`${API_BASE}/devices/${deviceId}`, { headers: getHeaders() }),
fetchWithAuth('/picture-streams'), fetchWithAuth('/picture-sources'),
fetch(`${API_BASE}/devices/${deviceId}/settings`, { headers: getHeaders() }), fetch(`${API_BASE}/devices/${deviceId}/settings`, { headers: getHeaders() }),
]); ]);
@@ -4195,7 +4197,7 @@ async function showStreamSelector(deviceId) {
}); });
} }
const currentStreamId = device.picture_stream_id || ''; const currentStreamId = device.picture_source_id || '';
streamSelect.value = currentStreamId; streamSelect.value = currentStreamId;
// Populate LED projection fields // Populate LED projection fields
@@ -4238,7 +4240,7 @@ async function updateStreamSelectorInfo(streamId) {
} }
try { try {
const response = await fetchWithAuth(`/picture-streams/${streamId}`); const response = await fetchWithAuth(`/picture-sources/${streamId}`);
if (!response.ok) { if (!response.ok) {
infoPanel.style.display = 'none'; infoPanel.style.display = 'none';
return; return;
@@ -4266,12 +4268,12 @@ async function updateStreamSelectorInfo(streamId) {
propsHtml = ` propsHtml = `
<span class="stream-card-prop" title="${t('streams.display')}">🖥️ ${stream.display_index ?? 0}</span> <span class="stream-card-prop" title="${t('streams.display')}">🖥️ ${stream.display_index ?? 0}</span>
<span class="stream-card-prop" title="${t('streams.target_fps')}">⚡ ${stream.target_fps ?? 30}</span> <span class="stream-card-prop" title="${t('streams.target_fps')}">⚡ ${stream.target_fps ?? 30}</span>
${capTmplName ? `<span class="stream-card-prop" title="${t('streams.capture_template')}">📷 ${capTmplName}</span>` : ''} ${capTmplName ? `<span class="stream-card-prop" title="${t('streams.capture_template')}">📋 ${capTmplName}</span>` : ''}
`; `;
} else if (stream.stream_type === 'processed') { } else if (stream.stream_type === 'processed') {
if ((!_cachedStreams || _cachedStreams.length === 0) && stream.source_stream_id) { if ((!_cachedStreams || _cachedStreams.length === 0) && stream.source_stream_id) {
try { try {
const streamsResp = await fetchWithAuth('/picture-streams'); const streamsResp = await fetchWithAuth('/picture-sources');
if (streamsResp.ok) { const d = await streamsResp.json(); _cachedStreams = d.streams || []; } if (streamsResp.ok) { const d = await streamsResp.json(); _cachedStreams = d.streams || []; }
} catch {} } catch {}
} }
@@ -4292,7 +4294,7 @@ async function updateStreamSelectorInfo(streamId) {
} }
propsHtml = ` propsHtml = `
<span class="stream-card-prop" title="${t('streams.source')}">📺 ${sourceName}</span> <span class="stream-card-prop" title="${t('streams.source')}">📺 ${sourceName}</span>
${ppTmplName ? `<span class="stream-card-prop" title="${t('streams.pp_template')}">🎨 ${ppTmplName}</span>` : ''} ${ppTmplName ? `<span class="stream-card-prop" title="${t('streams.pp_template')}">📋 ${ppTmplName}</span>` : ''}
`; `;
} else if (stream.stream_type === 'static_image') { } else if (stream.stream_type === 'static_image') {
const src = stream.image_source || ''; const src = stream.image_source || '';
@@ -4313,18 +4315,18 @@ async function updateStreamSelectorInfo(streamId) {
async function saveStreamSelector() { async function saveStreamSelector() {
const deviceId = document.getElementById('stream-selector-device-id').value; const deviceId = document.getElementById('stream-selector-device-id').value;
const pictureStreamId = document.getElementById('stream-selector-stream').value; const pictureSourceId = document.getElementById('stream-selector-stream').value;
const borderWidth = parseInt(document.getElementById('stream-selector-border-width').value) || 10; const borderWidth = parseInt(document.getElementById('stream-selector-border-width').value) || 10;
const interpolation = document.getElementById('stream-selector-interpolation').value; const interpolation = document.getElementById('stream-selector-interpolation').value;
const smoothing = parseFloat(document.getElementById('stream-selector-smoothing').value); const smoothing = parseFloat(document.getElementById('stream-selector-smoothing').value);
const errorEl = document.getElementById('stream-selector-error'); const errorEl = document.getElementById('stream-selector-error');
try { try {
// Save picture stream assignment // Save picture source assignment
const response = await fetch(`${API_BASE}/devices/${deviceId}`, { const response = await fetch(`${API_BASE}/devices/${deviceId}`, {
method: 'PUT', method: 'PUT',
headers: getHeaders(), headers: getHeaders(),
body: JSON.stringify({ picture_stream_id: pictureStreamId }) body: JSON.stringify({ picture_source_id: pictureSourceId })
}); });
if (response.status === 401) { if (response.status === 401) {

View File

@@ -36,7 +36,7 @@
<div class="tab-bar"> <div class="tab-bar">
<button class="tab-btn active" data-tab="devices" onclick="switchTab('devices')"><span data-i18n="devices.title">💡 Devices</span></button> <button class="tab-btn active" data-tab="devices" onclick="switchTab('devices')"><span data-i18n="devices.title">💡 Devices</span></button>
<button class="tab-btn" data-tab="streams" onclick="switchTab('streams')"><span data-i18n="streams.title">📺 Streams</span></button> <button class="tab-btn" data-tab="streams" onclick="switchTab('streams')"><span data-i18n="streams.title">📺 Sources</span></button>
</div> </div>
<div class="tab-panel active" id="tab-devices"> <div class="tab-panel active" id="tab-devices">
@@ -228,11 +228,11 @@
</div> </div>
</div> </div>
<!-- Stream Settings Modal (picture stream + LED projection settings) --> <!-- Stream Settings Modal (picture source + LED projection settings) -->
<div id="stream-selector-modal" class="modal"> <div id="stream-selector-modal" class="modal">
<div class="modal-content"> <div class="modal-content">
<div class="modal-header"> <div class="modal-header">
<h2 data-i18n="device.stream_settings.title">📺 Stream Settings</h2> <h2 data-i18n="device.stream_settings.title">📺 Source Settings</h2>
<button class="modal-close-btn" onclick="closeStreamSelectorModal()" title="Close">&#x2715;</button> <button class="modal-close-btn" onclick="closeStreamSelectorModal()" title="Close">&#x2715;</button>
</div> </div>
<div class="modal-body"> <div class="modal-body">
@@ -241,10 +241,10 @@
<div class="form-group"> <div class="form-group">
<div class="label-row"> <div class="label-row">
<label for="stream-selector-stream" data-i18n="device.stream_selector.label">Stream:</label> <label for="stream-selector-stream" data-i18n="device.stream_selector.label">Source:</label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?">?</button> <button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?">?</button>
</div> </div>
<small class="input-hint" style="display:none" data-i18n="device.stream_selector.hint">Select a stream that defines what this device captures and processes</small> <small class="input-hint" style="display:none" data-i18n="device.stream_selector.hint">Select a source that defines what this device captures and processes</small>
<select id="stream-selector-stream"></select> <select id="stream-selector-stream"></select>
<div id="stream-selector-info" class="stream-info-panel" style="display: none;"></div> <div id="stream-selector-info" class="stream-info-panel" style="display: none;"></div>
</div> </div>
@@ -405,7 +405,6 @@
</div> </div>
<small class="input-hint" style="display:none" data-i18n="templates.engine.hint">Select the screen capture technology to use</small> <small class="input-hint" style="display:none" data-i18n="templates.engine.hint">Select the screen capture technology to use</small>
<select id="template-engine" onchange="onEngineChange()" required> <select id="template-engine" onchange="onEngineChange()" required>
<option value="" data-i18n="templates.engine.select">Select an engine...</option>
</select> </select>
<small id="engine-availability-hint" class="form-hint" style="display: none;"></small> <small id="engine-availability-hint" class="form-hint" style="display: none;"></small>
</div> </div>
@@ -457,11 +456,11 @@
</div> </div>
</div> </div>
<!-- Test Stream Modal --> <!-- Test Source Modal -->
<div id="test-stream-modal" class="modal"> <div id="test-stream-modal" class="modal">
<div class="modal-content"> <div class="modal-content">
<div class="modal-header"> <div class="modal-header">
<h2 data-i18n="streams.test.title">Test Stream</h2> <h2 data-i18n="streams.test.title">Test Source</h2>
<button class="modal-close-btn" onclick="closeTestStreamModal()" title="Close">&#x2715;</button> <button class="modal-close-btn" onclick="closeTestStreamModal()" title="Close">&#x2715;</button>
</div> </div>
<div class="modal-body"> <div class="modal-body">
@@ -490,7 +489,7 @@
</div> </div>
<div class="modal-body"> <div class="modal-body">
<div class="form-group"> <div class="form-group">
<label data-i18n="postprocessing.test.source_stream">Source Stream:</label> <label data-i18n="postprocessing.test.source_stream">Source:</label>
<select id="test-pp-source-stream"></select> <select id="test-pp-source-stream"></select>
</div> </div>
<div class="form-group"> <div class="form-group">
@@ -509,24 +508,24 @@
</div> </div>
</div> </div>
<!-- Stream Modal --> <!-- Source Modal -->
<div id="stream-modal" class="modal"> <div id="stream-modal" class="modal">
<div class="modal-content"> <div class="modal-content">
<div class="modal-header"> <div class="modal-header">
<h2 id="stream-modal-title" data-i18n="streams.add">Add Stream</h2> <h2 id="stream-modal-title" data-i18n="streams.add">Add Source</h2>
<button class="modal-close-btn" onclick="closeStreamModal()" title="Close">&#x2715;</button> <button class="modal-close-btn" onclick="closeStreamModal()" title="Close">&#x2715;</button>
</div> </div>
<div class="modal-body"> <div class="modal-body">
<input type="hidden" id="stream-id"> <input type="hidden" id="stream-id">
<form id="stream-form"> <form id="stream-form">
<div class="form-group"> <div class="form-group">
<label for="stream-name" data-i18n="streams.name">Stream Name:</label> <label for="stream-name" data-i18n="streams.name">Source Name:</label>
<input type="text" id="stream-name" data-i18n-placeholder="streams.name.placeholder" placeholder="My Stream" required> <input type="text" id="stream-name" data-i18n-placeholder="streams.name.placeholder" placeholder="My Source" required>
</div> </div>
<input type="hidden" id="stream-type" value="raw"> <input type="hidden" id="stream-type" value="raw">
<!-- Raw stream fields --> <!-- Raw source fields -->
<div id="stream-raw-fields"> <div id="stream-raw-fields">
<div class="form-group"> <div class="form-group">
<div class="label-row"> <div class="label-row">
@@ -560,14 +559,14 @@
</div> </div>
</div> </div>
<!-- Processed stream fields --> <!-- Processed source fields -->
<div id="stream-processed-fields" style="display: none;"> <div id="stream-processed-fields" style="display: none;">
<div class="form-group"> <div class="form-group">
<div class="label-row"> <div class="label-row">
<label for="stream-source" data-i18n="streams.source">Source Stream:</label> <label for="stream-source" data-i18n="streams.source">Source:</label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?">?</button> <button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?">?</button>
</div> </div>
<small class="input-hint" style="display:none" data-i18n="streams.source.hint">The stream to apply processing filters to</small> <small class="input-hint" style="display:none" data-i18n="streams.source.hint">The source to apply processing filters to</small>
<select id="stream-source"></select> <select id="stream-source"></select>
</div> </div>
<div class="form-group"> <div class="form-group">
@@ -575,7 +574,7 @@
<label for="stream-pp-template" data-i18n="streams.pp_template">Processing Template:</label> <label for="stream-pp-template" data-i18n="streams.pp_template">Processing Template:</label>
<button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?">?</button> <button type="button" class="hint-toggle" onclick="toggleHint(this)" title="?">?</button>
</div> </div>
<small class="input-hint" style="display:none" data-i18n="streams.pp_template.hint">Filter template to apply to the source stream</small> <small class="input-hint" style="display:none" data-i18n="streams.pp_template.hint">Filter template to apply to the source</small>
<select id="stream-pp-template"></select> <select id="stream-pp-template"></select>
</div> </div>
</div> </div>
@@ -599,7 +598,7 @@
<div class="form-group"> <div class="form-group">
<label for="stream-description" data-i18n="streams.description_label">Description (optional):</label> <label for="stream-description" data-i18n="streams.description_label">Description (optional):</label>
<input type="text" id="stream-description" data-i18n-placeholder="streams.description_placeholder" placeholder="Describe this stream..."> <input type="text" id="stream-description" data-i18n-placeholder="streams.description_placeholder" placeholder="Describe this source...">
</div> </div>
<div id="stream-error" class="error-message" style="display: none;"></div> <div id="stream-error" class="error-message" style="display: none;"></div>

View File

@@ -199,19 +199,19 @@
"confirm.no": "No", "confirm.no": "No",
"common.delete": "Delete", "common.delete": "Delete",
"common.edit": "Edit", "common.edit": "Edit",
"streams.title": "\uD83D\uDCFA Streams", "streams.title": "\uD83D\uDCFA Sources",
"streams.description": "Streams define the capture pipeline. A raw stream captures from a display using a capture template. A processed stream applies postprocessing to another stream. Assign streams to devices.", "streams.description": "Sources define the capture pipeline. A raw source captures from a display using a capture template. A processed source applies postprocessing to another source. Assign sources to devices.",
"streams.group.raw": "Screen Capture", "streams.group.raw": "Screen Capture",
"streams.group.processed": "Processed", "streams.group.processed": "Processed",
"streams.section.streams": "\uD83D\uDCFA Streams", "streams.section.streams": "\uD83D\uDCFA Sources",
"streams.add": "Add Stream", "streams.add": "Add Source",
"streams.add.raw": "Add Screen Capture", "streams.add.raw": "Add Screen Capture",
"streams.add.processed": "Add Processed Stream", "streams.add.processed": "Add Processed Source",
"streams.edit": "Edit Stream", "streams.edit": "Edit Source",
"streams.edit.raw": "Edit Screen Capture", "streams.edit.raw": "Edit Screen Capture",
"streams.edit.processed": "Edit Processed Stream", "streams.edit.processed": "Edit Processed Source",
"streams.name": "Stream Name:", "streams.name": "Source Name:",
"streams.name.placeholder": "My Stream", "streams.name.placeholder": "My Source",
"streams.type": "Type:", "streams.type": "Type:",
"streams.type.raw": "Screen Capture", "streams.type.raw": "Screen Capture",
"streams.type.processed": "Processed", "streams.type.processed": "Processed",
@@ -221,26 +221,26 @@
"streams.capture_template.hint": "Engine template defining how the screen is captured", "streams.capture_template.hint": "Engine template defining how the screen is captured",
"streams.target_fps": "Target FPS:", "streams.target_fps": "Target FPS:",
"streams.target_fps.hint": "Target frames per second for capture (10-90)", "streams.target_fps.hint": "Target frames per second for capture (10-90)",
"streams.source": "Source Stream:", "streams.source": "Source:",
"streams.source.hint": "The stream to apply processing filters to", "streams.source.hint": "The source to apply processing filters to",
"streams.pp_template": "Filter Template:", "streams.pp_template": "Filter Template:",
"streams.pp_template.hint": "Filter template to apply to the source stream", "streams.pp_template.hint": "Filter template to apply to the source",
"streams.description_label": "Description (optional):", "streams.description_label": "Description (optional):",
"streams.description_placeholder": "Describe this stream...", "streams.description_placeholder": "Describe this source...",
"streams.created": "Stream created successfully", "streams.created": "Source created successfully",
"streams.updated": "Stream updated successfully", "streams.updated": "Source updated successfully",
"streams.deleted": "Stream deleted successfully", "streams.deleted": "Source deleted successfully",
"streams.delete.confirm": "Are you sure you want to delete this stream?", "streams.delete.confirm": "Are you sure you want to delete this source?",
"streams.error.load": "Failed to load streams", "streams.error.load": "Failed to load sources",
"streams.error.required": "Please fill in all required fields", "streams.error.required": "Please fill in all required fields",
"streams.error.delete": "Failed to delete stream", "streams.error.delete": "Failed to delete source",
"streams.test.title": "Test Stream", "streams.test.title": "Test Source",
"streams.test.run": "🧪 Run", "streams.test.run": "🧪 Run",
"streams.test.running": "Testing stream...", "streams.test.running": "Testing source...",
"streams.test.duration": "Capture Duration (s):", "streams.test.duration": "Capture Duration (s):",
"streams.test.error.failed": "Stream test failed", "streams.test.error.failed": "Source test failed",
"postprocessing.title": "\uD83D\uDCC4 Filter Templates", "postprocessing.title": "\uD83D\uDCC4 Filter Templates",
"postprocessing.description": "Processing templates define image filters and color correction. Assign them to processed picture streams for consistent postprocessing across devices.", "postprocessing.description": "Processing templates define image filters and color correction. Assign them to processed picture sources for consistent postprocessing across devices.",
"postprocessing.add": "Add Filter Template", "postprocessing.add": "Add Filter Template",
"postprocessing.edit": "Edit Filter Template", "postprocessing.edit": "Edit Filter Template",
"postprocessing.name": "Template Name:", "postprocessing.name": "Template Name:",
@@ -269,16 +269,16 @@
"postprocessing.error.delete": "Failed to delete processing template", "postprocessing.error.delete": "Failed to delete processing template",
"postprocessing.config.show": "Show settings", "postprocessing.config.show": "Show settings",
"postprocessing.test.title": "Test Filter Template", "postprocessing.test.title": "Test Filter Template",
"postprocessing.test.source_stream": "Source Stream:", "postprocessing.test.source_stream": "Source:",
"postprocessing.test.running": "Testing processing template...", "postprocessing.test.running": "Testing processing template...",
"postprocessing.test.error.no_stream": "Please select a source stream", "postprocessing.test.error.no_stream": "Please select a source",
"postprocessing.test.error.failed": "Processing template test failed", "postprocessing.test.error.failed": "Processing template test failed",
"device.button.stream_selector": "Stream Settings", "device.button.stream_selector": "Source Settings",
"device.stream_settings.title": "📺 Stream Settings", "device.stream_settings.title": "📺 Source Settings",
"device.stream_selector.label": "Stream:", "device.stream_selector.label": "Source:",
"device.stream_selector.hint": "Select a stream that defines what this device captures and processes", "device.stream_selector.hint": "Select a source that defines what this device captures and processes",
"device.stream_selector.none": "-- No stream assigned --", "device.stream_selector.none": "-- No source assigned --",
"device.stream_selector.saved": "Stream settings updated", "device.stream_selector.saved": "Source settings updated",
"device.stream_settings.border_width": "Border Width (px):", "device.stream_settings.border_width": "Border Width (px):",
"device.stream_settings.border_width_hint": "How many pixels from the screen edge to sample for LED colors (1-100)", "device.stream_settings.border_width_hint": "How many pixels from the screen edge to sample for LED colors (1-100)",
"device.stream_settings.interpolation": "Interpolation Mode:", "device.stream_settings.interpolation": "Interpolation Mode:",
@@ -288,10 +288,10 @@
"device.stream_settings.interpolation_hint": "How to calculate LED color from sampled pixels", "device.stream_settings.interpolation_hint": "How to calculate LED color from sampled pixels",
"device.stream_settings.smoothing": "Smoothing:", "device.stream_settings.smoothing": "Smoothing:",
"device.stream_settings.smoothing_hint": "Temporal blending between frames (0=none, 1=full). Reduces flicker.", "device.stream_settings.smoothing_hint": "Temporal blending between frames (0=none, 1=full). Reduces flicker.",
"device.tip.stream_selector": "Configure picture stream and LED projection settings for this device", "device.tip.stream_selector": "Configure picture source and LED projection settings for this device",
"streams.group.static_image": "Static Image", "streams.group.static_image": "Static Image",
"streams.add.static_image": "Add Static Image", "streams.add.static_image": "Add Static Image Source",
"streams.edit.static_image": "Edit Static Image", "streams.edit.static_image": "Edit Static Image Source",
"streams.type.static_image": "Static Image", "streams.type.static_image": "Static Image",
"streams.image_source": "Image Source:", "streams.image_source": "Image Source:",
"streams.image_source.placeholder": "https://example.com/image.jpg or C:\\path\\to\\image.png", "streams.image_source.placeholder": "https://example.com/image.jpg or C:\\path\\to\\image.png",

View File

@@ -199,19 +199,19 @@
"confirm.no": "Нет", "confirm.no": "Нет",
"common.delete": "Удалить", "common.delete": "Удалить",
"common.edit": "Редактировать", "common.edit": "Редактировать",
"streams.title": "\uD83D\uDCFA Потоки", "streams.title": "\uD83D\uDCFA Источники",
"streams.description": "Потоки определяют конвейер захвата. Сырой поток захватывает экран с помощью шаблона захвата. Обработанный поток применяет постобработку к другому потоку. Назначайте потоки устройствам.", "streams.description": "Источники определяют конвейер захвата. Сырой источник захватывает экран с помощью шаблона захвата. Обработанный источник применяет постобработку к другому источнику. Назначайте источники устройствам.",
"streams.group.raw": "Захват Экрана", "streams.group.raw": "Захват Экрана",
"streams.group.processed": "Обработанные", "streams.group.processed": "Обработанные",
"streams.section.streams": "\uD83D\uDCFA Потоки", "streams.section.streams": "\uD83D\uDCFA Источники",
"streams.add": "Добавить Поток", "streams.add": "Добавить Источник",
"streams.add.raw": "Добавить Захват Экрана", "streams.add.raw": "Добавить Захват Экрана",
"streams.add.processed": "Добавить Обработанный", "streams.add.processed": "Добавить Обработанный",
"streams.edit": "Редактировать Поток", "streams.edit": "Редактировать Источник",
"streams.edit.raw": "Редактировать Захват Экрана", "streams.edit.raw": "Редактировать Захват Экрана",
"streams.edit.processed": "Редактировать Обработанный Поток", "streams.edit.processed": "Редактировать Обработанный Источник",
"streams.name": "Имя Потока:", "streams.name": "Имя Источника:",
"streams.name.placeholder": "Мой Поток", "streams.name.placeholder": "Мой Источник",
"streams.type": "Тип:", "streams.type": "Тип:",
"streams.type.raw": "Захват экрана", "streams.type.raw": "Захват экрана",
"streams.type.processed": "Обработанный", "streams.type.processed": "Обработанный",
@@ -221,26 +221,26 @@
"streams.capture_template.hint": "Шаблон движка, определяющий способ захвата экрана", "streams.capture_template.hint": "Шаблон движка, определяющий способ захвата экрана",
"streams.target_fps": "Целевой FPS:", "streams.target_fps": "Целевой FPS:",
"streams.target_fps.hint": "Целевое количество кадров в секунду (10-90)", "streams.target_fps.hint": "Целевое количество кадров в секунду (10-90)",
"streams.source": "Исходный Поток:", "streams.source": "Источник:",
"streams.source.hint": "Поток, к которому применяются фильтры обработки", "streams.source.hint": "Источник, к которому применяются фильтры обработки",
"streams.pp_template": "Шаблон Фильтра:", "streams.pp_template": "Шаблон Фильтра:",
"streams.pp_template.hint": "Шаблон фильтра для применения к исходному потоку", "streams.pp_template.hint": "Шаблон фильтра для применения к источнику",
"streams.description_label": "Описание (необязательно):", "streams.description_label": "Описание (необязательно):",
"streams.description_placeholder": "Опишите этот поток...", "streams.description_placeholder": "Опишите этот источник...",
"streams.created": "Поток успешно создан", "streams.created": "Источник успешно создан",
"streams.updated": "Поток успешно обновлён", "streams.updated": "Источник успешно обновлён",
"streams.deleted": "Поток успешно удалён", "streams.deleted": "Источник успешно удалён",
"streams.delete.confirm": "Вы уверены, что хотите удалить этот поток?", "streams.delete.confirm": "Вы уверены, что хотите удалить этот источник?",
"streams.error.load": "Не удалось загрузить потоки", "streams.error.load": "Не удалось загрузить источники",
"streams.error.required": "Пожалуйста, заполните все обязательные поля", "streams.error.required": "Пожалуйста, заполните все обязательные поля",
"streams.error.delete": "Не удалось удалить поток", "streams.error.delete": "Не удалось удалить источник",
"streams.test.title": "Тест Потока", "streams.test.title": "Тест Источника",
"streams.test.run": "🧪 Запустить", "streams.test.run": "🧪 Запустить",
"streams.test.running": "Тестирование потока...", "streams.test.running": "Тестирование источника...",
"streams.test.duration": "Длительность Захвата (с):", "streams.test.duration": "Длительность Захвата (с):",
"streams.test.error.failed": "Тест потока не удался", "streams.test.error.failed": "Тест источника не удался",
"postprocessing.title": "\uD83D\uDCC4 Шаблоны Фильтров", "postprocessing.title": "\uD83D\uDCC4 Шаблоны Фильтров",
"postprocessing.description": "Шаблоны обработки определяют фильтры изображений и цветокоррекцию. Назначайте их обработанным видеопотокам для единообразной постобработки на всех устройствах.", "postprocessing.description": "Шаблоны обработки определяют фильтры изображений и цветокоррекцию. Назначайте их обработанным источникам для единообразной постобработки на всех устройствах.",
"postprocessing.add": "Добавить Шаблон Фильтра", "postprocessing.add": "Добавить Шаблон Фильтра",
"postprocessing.edit": "Редактировать Шаблон Фильтра", "postprocessing.edit": "Редактировать Шаблон Фильтра",
"postprocessing.name": "Имя Шаблона:", "postprocessing.name": "Имя Шаблона:",
@@ -269,16 +269,16 @@
"postprocessing.error.delete": "Не удалось удалить шаблон фильтра", "postprocessing.error.delete": "Не удалось удалить шаблон фильтра",
"postprocessing.config.show": "Показать настройки", "postprocessing.config.show": "Показать настройки",
"postprocessing.test.title": "Тест шаблона фильтра", "postprocessing.test.title": "Тест шаблона фильтра",
"postprocessing.test.source_stream": "Источник потока:", "postprocessing.test.source_stream": "Источник:",
"postprocessing.test.running": "Тестирование шаблона фильтра...", "postprocessing.test.running": "Тестирование шаблона фильтра...",
"postprocessing.test.error.no_stream": "Пожалуйста, выберите источник потока", "postprocessing.test.error.no_stream": "Пожалуйста, выберите источник",
"postprocessing.test.error.failed": "Тест шаблона фильтра не удался", "postprocessing.test.error.failed": "Тест шаблона фильтра не удался",
"device.button.stream_selector": "Настройки потока", "device.button.stream_selector": "Настройки источника",
"device.stream_settings.title": "📺 Настройки потока", "device.stream_settings.title": "📺 Настройки источника",
"device.stream_selector.label": "Поток:", "device.stream_selector.label": "Источник:",
"device.stream_selector.hint": "Выберите поток, определяющий что это устройство захватывает и обрабатывает", "device.stream_selector.hint": "Выберите источник, определяющий что это устройство захватывает и обрабатывает",
"device.stream_selector.none": "-- Поток не назначен --", "device.stream_selector.none": "-- Источник не назначен --",
"device.stream_selector.saved": "Настройки потока обновлены", "device.stream_selector.saved": "Настройки источника обновлены",
"device.stream_settings.border_width": "Ширина границы (px):", "device.stream_settings.border_width": "Ширина границы (px):",
"device.stream_settings.border_width_hint": "Сколько пикселей от края экрана выбирать для цвета LED (1-100)", "device.stream_settings.border_width_hint": "Сколько пикселей от края экрана выбирать для цвета LED (1-100)",
"device.stream_settings.interpolation": "Режим интерполяции:", "device.stream_settings.interpolation": "Режим интерполяции:",
@@ -288,10 +288,10 @@
"device.stream_settings.interpolation_hint": "Как вычислять цвет LED из выбранных пикселей", "device.stream_settings.interpolation_hint": "Как вычислять цвет LED из выбранных пикселей",
"device.stream_settings.smoothing": "Сглаживание:", "device.stream_settings.smoothing": "Сглаживание:",
"device.stream_settings.smoothing_hint": "Временное смешивание между кадрами (0=нет, 1=полное). Уменьшает мерцание.", "device.stream_settings.smoothing_hint": "Временное смешивание между кадрами (0=нет, 1=полное). Уменьшает мерцание.",
"device.tip.stream_selector": "Настройки видеопотока и проекции LED для этого устройства", "device.tip.stream_selector": "Настройки источника и проекции LED для этого устройства",
"streams.group.static_image": "Статические", "streams.group.static_image": "Статические",
"streams.add.static_image": "Добавить статическое изображение", "streams.add.static_image": "Добавить статическое изображение (источник)",
"streams.edit.static_image": "Редактировать статическое изображение", "streams.edit.static_image": "Редактировать статическое изображение (источник)",
"streams.type.static_image": "Статическое изображение", "streams.type.static_image": "Статическое изображение",
"streams.image_source": "Источник изображения:", "streams.image_source": "Источник изображения:",
"streams.image_source.placeholder": "https://example.com/image.jpg или C:\\path\\to\\image.png", "streams.image_source.placeholder": "https://example.com/image.jpg или C:\\path\\to\\image.png",

View File

@@ -1,7 +1,7 @@
"""Storage layer for device and configuration persistence.""" """Storage layer for device and configuration persistence."""
from .device_store import DeviceStore from .device_store import DeviceStore
from .picture_stream_store import PictureStreamStore from .picture_source_store import PictureSourceStore
from .postprocessing_template_store import PostprocessingTemplateStore from .postprocessing_template_store import PostprocessingTemplateStore
__all__ = ["DeviceStore", "PictureStreamStore", "PostprocessingTemplateStore"] __all__ = ["DeviceStore", "PictureSourceStore", "PostprocessingTemplateStore"]

View File

@@ -30,8 +30,7 @@ class Device:
enabled: bool = True, enabled: bool = True,
settings: Optional[ProcessingSettings] = None, settings: Optional[ProcessingSettings] = None,
calibration: Optional[CalibrationConfig] = None, calibration: Optional[CalibrationConfig] = None,
capture_template_id: str = "", picture_source_id: str = "",
picture_stream_id: str = "",
created_at: Optional[datetime] = None, created_at: Optional[datetime] = None,
updated_at: Optional[datetime] = None, updated_at: Optional[datetime] = None,
): ):
@@ -45,8 +44,7 @@ class Device:
enabled: Whether device is enabled enabled: Whether device is enabled
settings: Processing settings settings: Processing settings
calibration: Calibration configuration calibration: Calibration configuration
capture_template_id: ID of assigned capture template (legacy, use picture_stream_id) picture_source_id: ID of assigned picture source
picture_stream_id: ID of assigned picture stream
created_at: Creation timestamp created_at: Creation timestamp
updated_at: Last update timestamp updated_at: Last update timestamp
""" """
@@ -57,8 +55,7 @@ class Device:
self.enabled = enabled self.enabled = enabled
self.settings = settings or ProcessingSettings() self.settings = settings or ProcessingSettings()
self.calibration = calibration or create_default_calibration(led_count) self.calibration = calibration or create_default_calibration(led_count)
self.capture_template_id = capture_template_id self.picture_source_id = picture_source_id
self.picture_stream_id = picture_stream_id
self.created_at = created_at or datetime.utcnow() self.created_at = created_at or datetime.utcnow()
self.updated_at = updated_at or datetime.utcnow() self.updated_at = updated_at or datetime.utcnow()
@@ -86,8 +83,7 @@ class Device:
"state_check_interval": self.settings.state_check_interval, "state_check_interval": self.settings.state_check_interval,
}, },
"calibration": calibration_to_dict(self.calibration), "calibration": calibration_to_dict(self.calibration),
"capture_template_id": self.capture_template_id, "picture_source_id": self.picture_source_id,
"picture_stream_id": self.picture_stream_id,
"created_at": self.created_at.isoformat(), "created_at": self.created_at.isoformat(),
"updated_at": self.updated_at.isoformat(), "updated_at": self.updated_at.isoformat(),
} }
@@ -112,10 +108,7 @@ class Device:
saturation=settings_data.get("saturation", 1.0), saturation=settings_data.get("saturation", 1.0),
smoothing=settings_data.get("smoothing", 0.3), smoothing=settings_data.get("smoothing", 0.3),
interpolation_mode=settings_data.get("interpolation_mode", "average"), interpolation_mode=settings_data.get("interpolation_mode", "average"),
state_check_interval=settings_data.get( state_check_interval=settings_data.get("state_check_interval", DEFAULT_STATE_CHECK_INTERVAL),
"state_check_interval",
settings_data.get("health_check_interval", DEFAULT_STATE_CHECK_INTERVAL),
),
) )
calibration_data = data.get("calibration") calibration_data = data.get("calibration")
@@ -125,8 +118,7 @@ class Device:
else create_default_calibration(data["led_count"]) else create_default_calibration(data["led_count"])
) )
capture_template_id = data.get("capture_template_id", "") picture_source_id = data.get("picture_source_id", "")
picture_stream_id = data.get("picture_stream_id", "")
return cls( return cls(
device_id=data["id"], device_id=data["id"],
@@ -136,8 +128,7 @@ class Device:
enabled=data.get("enabled", True), enabled=data.get("enabled", True),
settings=settings, settings=settings,
calibration=calibration, calibration=calibration,
capture_template_id=capture_template_id, picture_source_id=picture_source_id,
picture_stream_id=picture_stream_id,
created_at=datetime.fromisoformat(data.get("created_at", datetime.utcnow().isoformat())), created_at=datetime.fromisoformat(data.get("created_at", datetime.utcnow().isoformat())),
updated_at=datetime.fromisoformat(data.get("updated_at", datetime.utcnow().isoformat())), updated_at=datetime.fromisoformat(data.get("updated_at", datetime.utcnow().isoformat())),
) )
@@ -219,8 +210,7 @@ class DeviceStore:
led_count: int, led_count: int,
settings: Optional[ProcessingSettings] = None, settings: Optional[ProcessingSettings] = None,
calibration: Optional[CalibrationConfig] = None, calibration: Optional[CalibrationConfig] = None,
capture_template_id: str = "", picture_source_id: str = "",
picture_stream_id: str = "",
) -> Device: ) -> Device:
"""Create a new device. """Create a new device.
@@ -230,7 +220,7 @@ class DeviceStore:
led_count: Number of LEDs led_count: Number of LEDs
settings: Processing settings settings: Processing settings
calibration: Calibration configuration calibration: Calibration configuration
capture_template_id: ID of assigned capture template picture_source_id: ID of assigned picture source
Returns: Returns:
Created device Created device
@@ -249,8 +239,7 @@ class DeviceStore:
led_count=led_count, led_count=led_count,
settings=settings, settings=settings,
calibration=calibration, calibration=calibration,
capture_template_id=capture_template_id, picture_source_id=picture_source_id,
picture_stream_id=picture_stream_id,
) )
# Store # Store
@@ -288,8 +277,7 @@ class DeviceStore:
enabled: Optional[bool] = None, enabled: Optional[bool] = None,
settings: Optional[ProcessingSettings] = None, settings: Optional[ProcessingSettings] = None,
calibration: Optional[CalibrationConfig] = None, calibration: Optional[CalibrationConfig] = None,
capture_template_id: Optional[str] = None, picture_source_id: Optional[str] = None,
picture_stream_id: Optional[str] = None,
) -> Device: ) -> Device:
"""Update device. """Update device.
@@ -301,7 +289,7 @@ class DeviceStore:
enabled: New enabled state (optional) enabled: New enabled state (optional)
settings: New settings (optional) settings: New settings (optional)
calibration: New calibration (optional) calibration: New calibration (optional)
capture_template_id: New capture template ID (optional) picture_source_id: New picture source ID (optional)
Returns: Returns:
Updated device Updated device
@@ -334,10 +322,8 @@ class DeviceStore:
f"does not match device LED count ({device.led_count})" f"does not match device LED count ({device.led_count})"
) )
device.calibration = calibration device.calibration = calibration
if capture_template_id is not None: if picture_source_id is not None:
device.capture_template_id = capture_template_id device.picture_source_id = picture_source_id
if picture_stream_id is not None:
device.picture_stream_id = picture_stream_id
device.updated_at = datetime.utcnow() device.updated_at = datetime.utcnow()

View File

@@ -0,0 +1,128 @@
"""Picture source data model with inheritance-based stream types."""
from dataclasses import dataclass
from datetime import datetime
from typing import Optional
@dataclass
class PictureSource:
"""Base class for picture source configurations.
A picture source is either:
- "raw": captures from a display using a capture engine template at a target FPS
- "processed": applies postprocessing to another picture source
- "static_image": returns a static frame from a URL or local file path
"""
id: str
name: str
stream_type: str # "raw", "processed", or "static_image"
created_at: datetime
updated_at: datetime
description: Optional[str] = None
def to_dict(self) -> dict:
"""Convert stream to dictionary. Subclasses extend this."""
return {
"id": self.id,
"name": self.name,
"stream_type": self.stream_type,
"created_at": self.created_at.isoformat(),
"updated_at": self.updated_at.isoformat(),
"description": self.description,
# Subclass fields default to None for backward compat
"display_index": None,
"capture_template_id": None,
"target_fps": None,
"source_stream_id": None,
"postprocessing_template_id": None,
"image_source": None,
}
@staticmethod
def from_dict(data: dict) -> "PictureSource":
"""Factory: dispatch to the correct subclass based on stream_type."""
stream_type: str = data.get("stream_type", "raw") or "raw"
sid: str = data["id"]
name: str = data["name"]
description: str | None = data.get("description")
raw_created = data.get("created_at")
created_at: datetime = (
datetime.fromisoformat(raw_created)
if isinstance(raw_created, str)
else raw_created if isinstance(raw_created, datetime)
else datetime.utcnow()
)
raw_updated = data.get("updated_at")
updated_at: datetime = (
datetime.fromisoformat(raw_updated)
if isinstance(raw_updated, str)
else raw_updated if isinstance(raw_updated, datetime)
else datetime.utcnow()
)
if stream_type == "processed":
return ProcessedPictureSource(
id=sid, name=name, stream_type=stream_type,
created_at=created_at, updated_at=updated_at, description=description,
source_stream_id=data.get("source_stream_id") or "",
postprocessing_template_id=data.get("postprocessing_template_id") or "",
)
elif stream_type == "static_image":
return StaticImagePictureSource(
id=sid, name=name, stream_type=stream_type,
created_at=created_at, updated_at=updated_at, description=description,
image_source=data.get("image_source") or "",
)
else:
return ScreenCapturePictureSource(
id=sid, name=name, stream_type=stream_type,
created_at=created_at, updated_at=updated_at, description=description,
display_index=data.get("display_index") or 0,
capture_template_id=data.get("capture_template_id") or "",
target_fps=data.get("target_fps") or 30,
)
@dataclass
class ScreenCapturePictureSource(PictureSource):
"""A raw capture stream from a display."""
display_index: int = 0
capture_template_id: str = ""
target_fps: int = 30
def to_dict(self) -> dict:
d = super().to_dict()
d["display_index"] = self.display_index
d["capture_template_id"] = self.capture_template_id
d["target_fps"] = self.target_fps
return d
@dataclass
class ProcessedPictureSource(PictureSource):
"""A processed stream that applies postprocessing to another stream."""
source_stream_id: str = ""
postprocessing_template_id: str = ""
def to_dict(self) -> dict:
d = super().to_dict()
d["source_stream_id"] = self.source_stream_id
d["postprocessing_template_id"] = self.postprocessing_template_id
return d
@dataclass
class StaticImagePictureSource(PictureSource):
"""A static image stream from a URL or file path."""
image_source: str = ""
def to_dict(self) -> dict:
d = super().to_dict()
d["image_source"] = self.image_source
return d

View File

@@ -1,4 +1,4 @@
"""Picture stream storage using JSON files.""" """Picture source storage using JSON files."""
import json import json
import uuid import uuid
@@ -6,27 +6,32 @@ from datetime import datetime
from pathlib import Path from pathlib import Path
from typing import Dict, List, Optional, Set from typing import Dict, List, Optional, Set
from wled_controller.storage.picture_stream import PictureStream from wled_controller.storage.picture_source import (
PictureSource,
ScreenCapturePictureSource,
ProcessedPictureSource,
StaticImagePictureSource,
)
from wled_controller.utils import get_logger from wled_controller.utils import get_logger
logger = get_logger(__name__) logger = get_logger(__name__)
class PictureStreamStore: class PictureSourceStore:
"""Storage for picture streams. """Storage for picture sources.
Supports raw and processed stream types with cycle detection Supports raw and processed stream types with cycle detection
for processed streams that reference other streams. for processed streams that reference other streams.
""" """
def __init__(self, file_path: str): def __init__(self, file_path: str):
"""Initialize picture stream store. """Initialize picture source store.
Args: Args:
file_path: Path to streams JSON file file_path: Path to streams JSON file
""" """
self.file_path = Path(file_path) self.file_path = Path(file_path)
self._streams: Dict[str, PictureStream] = {} self._streams: Dict[str, PictureSource] = {}
self._load() self._load()
def _load(self) -> None: def _load(self) -> None:
@@ -38,27 +43,27 @@ class PictureStreamStore:
with open(self.file_path, "r", encoding="utf-8") as f: with open(self.file_path, "r", encoding="utf-8") as f:
data = json.load(f) data = json.load(f)
streams_data = data.get("picture_streams", {}) streams_data = data.get("picture_sources", {})
loaded = 0 loaded = 0
for stream_id, stream_dict in streams_data.items(): for stream_id, stream_dict in streams_data.items():
try: try:
stream = PictureStream.from_dict(stream_dict) stream = PictureSource.from_dict(stream_dict)
self._streams[stream_id] = stream self._streams[stream_id] = stream
loaded += 1 loaded += 1
except Exception as e: except Exception as e:
logger.error( logger.error(
f"Failed to load picture stream {stream_id}: {e}", f"Failed to load picture source {stream_id}: {e}",
exc_info=True, exc_info=True,
) )
if loaded > 0: if loaded > 0:
logger.info(f"Loaded {loaded} picture streams from storage") logger.info(f"Loaded {loaded} picture sources from storage")
except Exception as e: except Exception as e:
logger.error(f"Failed to load picture streams from {self.file_path}: {e}") logger.error(f"Failed to load picture sources from {self.file_path}: {e}")
raise raise
logger.info(f"Picture stream store initialized with {len(self._streams)} streams") logger.info(f"Picture source store initialized with {len(self._streams)} streams")
def _save(self) -> None: def _save(self) -> None:
"""Save all streams to file.""" """Save all streams to file."""
@@ -72,14 +77,14 @@ class PictureStreamStore:
data = { data = {
"version": "1.0.0", "version": "1.0.0",
"picture_streams": streams_dict, "picture_sources": streams_dict,
} }
with open(self.file_path, "w", encoding="utf-8") as f: with open(self.file_path, "w", encoding="utf-8") as f:
json.dump(data, f, indent=2, ensure_ascii=False) json.dump(data, f, indent=2, ensure_ascii=False)
except Exception as e: except Exception as e:
logger.error(f"Failed to save picture streams to {self.file_path}: {e}") logger.error(f"Failed to save picture sources to {self.file_path}: {e}")
raise raise
def _detect_cycle(self, source_stream_id: str, exclude_stream_id: Optional[str] = None) -> bool: def _detect_cycle(self, source_stream_id: str, exclude_stream_id: Optional[str] = None) -> bool:
@@ -105,24 +110,24 @@ class PictureStreamStore:
current_stream = self._streams.get(current_id) current_stream = self._streams.get(current_id)
if not current_stream: if not current_stream:
break break
if current_stream.stream_type != "processed": if not isinstance(current_stream, ProcessedPictureSource):
break break
current_id = current_stream.source_stream_id current_id = current_stream.source_stream_id
return False return False
def get_all_streams(self) -> List[PictureStream]: def get_all_streams(self) -> List[PictureSource]:
"""Get all picture streams.""" """Get all picture sources."""
return list(self._streams.values()) return list(self._streams.values())
def get_stream(self, stream_id: str) -> PictureStream: def get_stream(self, stream_id: str) -> PictureSource:
"""Get stream by ID. """Get stream by ID.
Raises: Raises:
ValueError: If stream not found ValueError: If stream not found
""" """
if stream_id not in self._streams: if stream_id not in self._streams:
raise ValueError(f"Picture stream not found: {stream_id}") raise ValueError(f"Picture source not found: {stream_id}")
return self._streams[stream_id] return self._streams[stream_id]
def create_stream( def create_stream(
@@ -136,8 +141,8 @@ class PictureStreamStore:
postprocessing_template_id: Optional[str] = None, postprocessing_template_id: Optional[str] = None,
image_source: Optional[str] = None, image_source: Optional[str] = None,
description: Optional[str] = None, description: Optional[str] = None,
) -> PictureStream: ) -> PictureSource:
"""Create a new picture stream. """Create a new picture source.
Args: Args:
name: Stream name name: Stream name
@@ -181,30 +186,40 @@ class PictureStreamStore:
# Check for duplicate name # Check for duplicate name
for stream in self._streams.values(): for stream in self._streams.values():
if stream.name == name: if stream.name == name:
raise ValueError(f"Picture stream with name '{name}' already exists") raise ValueError(f"Picture source with name '{name}' already exists")
stream_id = f"ps_{uuid.uuid4().hex[:8]}" stream_id = f"ps_{uuid.uuid4().hex[:8]}"
now = datetime.utcnow() now = datetime.utcnow()
stream = PictureStream( common = dict(
id=stream_id, id=stream_id, name=name, stream_type=stream_type,
name=name, created_at=now, updated_at=now, description=description,
stream_type=stream_type,
display_index=display_index,
capture_template_id=capture_template_id,
target_fps=target_fps,
source_stream_id=source_stream_id,
postprocessing_template_id=postprocessing_template_id,
image_source=image_source,
created_at=now,
updated_at=now,
description=description,
) )
stream: PictureSource
if stream_type == "raw":
stream = ScreenCapturePictureSource(
**common,
display_index=display_index, # type: ignore[arg-type]
capture_template_id=capture_template_id, # type: ignore[arg-type]
target_fps=target_fps, # type: ignore[arg-type]
)
elif stream_type == "processed":
stream = ProcessedPictureSource(
**common,
source_stream_id=source_stream_id, # type: ignore[arg-type]
postprocessing_template_id=postprocessing_template_id, # type: ignore[arg-type]
)
else:
stream = StaticImagePictureSource(
**common,
image_source=image_source, # type: ignore[arg-type]
)
self._streams[stream_id] = stream self._streams[stream_id] = stream
self._save() self._save()
logger.info(f"Created picture stream: {name} ({stream_id}, type={stream_type})") logger.info(f"Created picture source: {name} ({stream_id}, type={stream_type})")
return stream return stream
def update_stream( def update_stream(
@@ -218,19 +233,19 @@ class PictureStreamStore:
postprocessing_template_id: Optional[str] = None, postprocessing_template_id: Optional[str] = None,
image_source: Optional[str] = None, image_source: Optional[str] = None,
description: Optional[str] = None, description: Optional[str] = None,
) -> PictureStream: ) -> PictureSource:
"""Update an existing picture stream. """Update an existing picture source.
Raises: Raises:
ValueError: If stream not found, validation fails, or cycle detected ValueError: If stream not found, validation fails, or cycle detected
""" """
if stream_id not in self._streams: if stream_id not in self._streams:
raise ValueError(f"Picture stream not found: {stream_id}") raise ValueError(f"Picture source not found: {stream_id}")
stream = self._streams[stream_id] stream = self._streams[stream_id]
# If changing source_stream_id on a processed stream, check for cycles # If changing source_stream_id on a processed stream, check for cycles
if source_stream_id is not None and stream.stream_type == "processed": if source_stream_id is not None and isinstance(stream, ProcessedPictureSource):
if source_stream_id not in self._streams: if source_stream_id not in self._streams:
raise ValueError(f"Source stream not found: {source_stream_id}") raise ValueError(f"Source stream not found: {source_stream_id}")
if self._detect_cycle(source_stream_id, exclude_stream_id=stream_id): if self._detect_cycle(source_stream_id, exclude_stream_id=stream_id):
@@ -238,40 +253,44 @@ class PictureStreamStore:
if name is not None: if name is not None:
stream.name = name stream.name = name
if display_index is not None:
stream.display_index = display_index
if capture_template_id is not None:
stream.capture_template_id = capture_template_id
if target_fps is not None:
stream.target_fps = target_fps
if source_stream_id is not None:
stream.source_stream_id = source_stream_id
if postprocessing_template_id is not None:
stream.postprocessing_template_id = postprocessing_template_id
if image_source is not None:
stream.image_source = image_source
if description is not None: if description is not None:
stream.description = description stream.description = description
if isinstance(stream, ScreenCapturePictureSource):
if display_index is not None:
stream.display_index = display_index
if capture_template_id is not None:
stream.capture_template_id = capture_template_id
if target_fps is not None:
stream.target_fps = target_fps
elif isinstance(stream, ProcessedPictureSource):
if source_stream_id is not None:
stream.source_stream_id = source_stream_id
if postprocessing_template_id is not None:
stream.postprocessing_template_id = postprocessing_template_id
elif isinstance(stream, StaticImagePictureSource):
if image_source is not None:
stream.image_source = image_source
stream.updated_at = datetime.utcnow() stream.updated_at = datetime.utcnow()
self._save() self._save()
logger.info(f"Updated picture stream: {stream_id}") logger.info(f"Updated picture source: {stream_id}")
return stream return stream
def delete_stream(self, stream_id: str) -> None: def delete_stream(self, stream_id: str) -> None:
"""Delete a picture stream. """Delete a picture source.
Raises: Raises:
ValueError: If stream not found or is referenced by another stream ValueError: If stream not found or is referenced by another stream
""" """
if stream_id not in self._streams: if stream_id not in self._streams:
raise ValueError(f"Picture stream not found: {stream_id}") raise ValueError(f"Picture source not found: {stream_id}")
# Check if any other stream references this one as source # Check if any other stream references this one as source
for other_stream in self._streams.values(): for other_stream in self._streams.values():
if other_stream.source_stream_id == stream_id: if isinstance(other_stream, ProcessedPictureSource) and other_stream.source_stream_id == stream_id:
raise ValueError( raise ValueError(
f"Cannot delete stream '{self._streams[stream_id].name}': " f"Cannot delete stream '{self._streams[stream_id].name}': "
f"it is referenced by stream '{other_stream.name}'" f"it is referenced by stream '{other_stream.name}'"
@@ -280,7 +299,7 @@ class PictureStreamStore:
del self._streams[stream_id] del self._streams[stream_id]
self._save() self._save()
logger.info(f"Deleted picture stream: {stream_id}") logger.info(f"Deleted picture source: {stream_id}")
def is_referenced_by_device(self, stream_id: str, device_store) -> bool: def is_referenced_by_device(self, stream_id: str, device_store) -> bool:
"""Check if this stream is referenced by any device. """Check if this stream is referenced by any device.
@@ -293,7 +312,7 @@ class PictureStreamStore:
True if any device references this stream True if any device references this stream
""" """
for device in device_store.get_all_devices(): for device in device_store.get_all_devices():
if getattr(device, "picture_stream_id", None) == stream_id: if getattr(device, "picture_source_id", None) == stream_id:
return True return True
return False return False
@@ -308,7 +327,7 @@ class PictureStreamStore:
Returns: Returns:
Dict with: Dict with:
- raw_stream: The terminal PictureStream (raw or static_image) - raw_stream: The terminal PictureSource (raw or static_image)
- postprocessing_template_ids: List of PP template IDs (in chain order) - postprocessing_template_ids: List of PP template IDs (in chain order)
Raises: Raises:
@@ -325,7 +344,7 @@ class PictureStreamStore:
stream = self.get_stream(current_id) stream = self.get_stream(current_id)
if stream.stream_type != "processed": if not isinstance(stream, ProcessedPictureSource):
return { return {
"raw_stream": stream, "raw_stream": stream,
"postprocessing_template_ids": postprocessing_template_ids, "postprocessing_template_ids": postprocessing_template_ids,

View File

@@ -1,75 +0,0 @@
"""Picture stream data model."""
from dataclasses import dataclass
from datetime import datetime
from typing import Optional
@dataclass
class PictureStream:
"""Represents a picture stream configuration.
A picture stream is either:
- "raw": captures from a display using a capture engine template at a target FPS
- "processed": applies postprocessing to another picture stream
- "static_image": returns a static frame from a URL or local file path
"""
id: str
name: str
stream_type: str # "raw", "processed", or "static_image"
created_at: datetime
updated_at: datetime
# Raw stream fields (used when stream_type == "raw")
display_index: Optional[int] = None
capture_template_id: Optional[str] = None
target_fps: Optional[int] = None
# Processed stream fields (used when stream_type == "processed")
source_stream_id: Optional[str] = None
postprocessing_template_id: Optional[str] = None
# Static image fields (used when stream_type == "static_image")
image_source: Optional[str] = None
description: Optional[str] = None
def to_dict(self) -> dict:
"""Convert stream to dictionary."""
return {
"id": self.id,
"name": self.name,
"stream_type": self.stream_type,
"display_index": self.display_index,
"capture_template_id": self.capture_template_id,
"target_fps": self.target_fps,
"source_stream_id": self.source_stream_id,
"postprocessing_template_id": self.postprocessing_template_id,
"image_source": self.image_source,
"created_at": self.created_at.isoformat(),
"updated_at": self.updated_at.isoformat(),
"description": self.description,
}
@classmethod
def from_dict(cls, data: dict) -> "PictureStream":
"""Create stream from dictionary."""
return cls(
id=data["id"],
name=data["name"],
stream_type=data["stream_type"],
display_index=data.get("display_index"),
capture_template_id=data.get("capture_template_id"),
target_fps=data.get("target_fps"),
source_stream_id=data.get("source_stream_id"),
postprocessing_template_id=data.get("postprocessing_template_id"),
image_source=data.get("image_source"),
created_at=datetime.fromisoformat(data["created_at"])
if isinstance(data.get("created_at"), str)
else data.get("created_at", datetime.utcnow()),
updated_at=datetime.fromisoformat(data["updated_at"])
if isinstance(data.get("updated_at"), str)
else data.get("updated_at", datetime.utcnow()),
description=data.get("description"),
)

View File

@@ -31,25 +31,8 @@ class PostprocessingTemplate:
@classmethod @classmethod
def from_dict(cls, data: dict) -> "PostprocessingTemplate": def from_dict(cls, data: dict) -> "PostprocessingTemplate":
"""Create template from dictionary. """Create template from dictionary."""
filters = [FilterInstance.from_dict(f) for f in data.get("filters", [])]
Supports migration from legacy flat-field format (gamma/saturation/brightness)
to the new filters list format.
"""
if "filters" in data:
filters = [FilterInstance.from_dict(f) for f in data["filters"]]
else:
# Legacy migration: construct filters from flat fields
filters = []
brightness = data.get("brightness", 1.0)
if brightness != 1.0:
filters.append(FilterInstance("brightness", {"value": brightness}))
saturation = data.get("saturation", 1.0)
if saturation != 1.0:
filters.append(FilterInstance("saturation", {"value": saturation}))
gamma = data.get("gamma", 2.2)
if gamma != 2.2:
filters.append(FilterInstance("gamma", {"value": gamma}))
return cls( return cls(
id=data["id"], id=data["id"],

View File

@@ -8,6 +8,7 @@ from typing import Dict, List, Optional
from wled_controller.core.filters.filter_instance import FilterInstance from wled_controller.core.filters.filter_instance import FilterInstance
from wled_controller.core.filters.registry import FilterRegistry from wled_controller.core.filters.registry import FilterRegistry
from wled_controller.storage.picture_source import ProcessedPictureSource
from wled_controller.storage.postprocessing_template import PostprocessingTemplate from wled_controller.storage.postprocessing_template import PostprocessingTemplate
from wled_controller.utils import get_logger from wled_controller.utils import get_logger
@@ -209,7 +210,7 @@ class PostprocessingTemplateStore:
"""Delete a postprocessing template. """Delete a postprocessing template.
Raises: Raises:
ValueError: If template not found or is referenced by a picture stream ValueError: If template not found or is referenced by a picture source
""" """
if template_id not in self._templates: if template_id not in self._templates:
raise ValueError(f"Postprocessing template not found: {template_id}") raise ValueError(f"Postprocessing template not found: {template_id}")
@@ -219,17 +220,17 @@ class PostprocessingTemplateStore:
logger.info(f"Deleted postprocessing template: {template_id}") logger.info(f"Deleted postprocessing template: {template_id}")
def is_referenced_by(self, template_id: str, picture_stream_store) -> bool: def is_referenced_by(self, template_id: str, picture_source_store) -> bool:
"""Check if this template is referenced by any picture stream. """Check if this template is referenced by any picture source.
Args: Args:
template_id: Template ID to check template_id: Template ID to check
picture_stream_store: PictureStreamStore instance picture_source_store: PictureSourceStore instance
Returns: Returns:
True if any picture stream references this template True if any picture source references this template
""" """
for stream in picture_stream_store.get_all_streams(): for stream in picture_source_store.get_all_streams():
if stream.postprocessing_template_id == template_id: if isinstance(stream, ProcessedPictureSource) and stream.postprocessing_template_id == template_id:
return True return True
return False return False