Refactor capture engine architecture, rename PictureStream to PictureSource, and split API modules

- Separate CaptureEngine into stateless factory + stateful CaptureStream session
- Add LiveStream/LiveStreamManager for shared capture with reference counting
- Rename PictureStream to PictureSource across storage, API, and UI
- Remove legacy migration logic and unused compatibility code
- Split monolithic routes.py (1935 lines) into 5 focused route modules
- Split schemas.py (480 lines) into 7 schema modules with re-exports
- Extract dependency injection into dedicated dependencies.py

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-12 14:27:00 +03:00
parent b8389f080a
commit c3828e10fa
42 changed files with 4047 additions and 3797 deletions

View File

@@ -349,9 +349,6 @@ def create_default_calibration(led_count: int) -> CalibrationConfig:
def calibration_from_dict(data: dict) -> CalibrationConfig:
"""Create calibration configuration from dictionary.
Supports both new format (leds_top/right/bottom/left) and legacy format
(segments list) for backward compatibility.
Args:
data: Dictionary with calibration data

View File

@@ -2,14 +2,15 @@
from wled_controller.core.capture_engines.base import (
CaptureEngine,
CaptureStream,
DisplayInfo,
ScreenCapture,
)
from wled_controller.core.capture_engines.factory import EngineRegistry
from wled_controller.core.capture_engines.mss_engine import MSSEngine
from wled_controller.core.capture_engines.dxcam_engine import DXcamEngine
from wled_controller.core.capture_engines.bettercam_engine import BetterCamEngine
from wled_controller.core.capture_engines.wgc_engine import WGCEngine
from wled_controller.core.capture_engines.mss_engine import MSSEngine, MSSCaptureStream
from wled_controller.core.capture_engines.dxcam_engine import DXcamEngine, DXcamCaptureStream
from wled_controller.core.capture_engines.bettercam_engine import BetterCamEngine, BetterCamCaptureStream
from wled_controller.core.capture_engines.wgc_engine import WGCEngine, WGCCaptureStream
# Auto-register available engines
EngineRegistry.register(MSSEngine)
@@ -19,11 +20,16 @@ EngineRegistry.register(WGCEngine)
__all__ = [
"CaptureEngine",
"CaptureStream",
"DisplayInfo",
"ScreenCapture",
"EngineRegistry",
"MSSEngine",
"MSSCaptureStream",
"DXcamEngine",
"DXcamCaptureStream",
"BetterCamEngine",
"BetterCamCaptureStream",
"WGCEngine",
"WGCCaptureStream",
]

View File

@@ -31,31 +31,33 @@ class ScreenCapture:
display_index: int
class CaptureEngine(ABC):
"""Abstract base class for screen capture engines.
class CaptureStream(ABC):
"""Abstract base class for a display capture session.
All screen capture engines must implement this interface to be
compatible with the WLED Grab system.
A CaptureStream is a stateful session bound to a specific display.
It holds all display-specific resources and provides frame capture.
Created by CaptureEngine.create_stream().
Lifecycle:
stream = engine.create_stream(display_index, config)
stream.initialize()
frame = stream.capture_frame()
stream.cleanup()
Or via context manager:
with engine.create_stream(display_index, config) as stream:
frame = stream.capture_frame()
"""
ENGINE_TYPE: str = "base" # Override in subclasses
ENGINE_PRIORITY: int = 0 # Higher = preferred. Override in subclasses.
def __init__(self, config: Dict[str, Any]):
"""Initialize engine with configuration.
Args:
config: Engine-specific configuration dict
"""
def __init__(self, display_index: int, config: Dict[str, Any]):
self.display_index = display_index
self.config = config
self._initialized = False
@abstractmethod
def initialize(self) -> None:
"""Initialize the capture engine.
This method should prepare any resources needed for screen capture
(e.g., creating capture objects, allocating buffers).
"""Initialize capture resources for this display.
Raises:
RuntimeError: If initialization fails
@@ -64,17 +66,64 @@ class CaptureEngine(ABC):
@abstractmethod
def cleanup(self) -> None:
"""Cleanup engine resources.
This method should release any resources allocated during
initialization or capture operations.
"""
"""Release all capture resources for this display."""
pass
@abstractmethod
def get_available_displays(self) -> List[DisplayInfo]:
def capture_frame(self) -> Optional[ScreenCapture]:
"""Capture one frame from the bound display.
Returns:
ScreenCapture with image data (RGB), or None if screen unchanged.
Raises:
RuntimeError: If capture fails
"""
pass
def __enter__(self):
    """Context manager entry — initialize the stream and return it."""
    self.initialize()
    return self

def __exit__(self, exc_type, exc_val, exc_tb):
    """Context manager exit — always release capture resources.

    Returns None (falsy), so any exception raised inside the
    with-block propagates to the caller after cleanup.
    """
    self.cleanup()
class CaptureEngine(ABC):
"""Abstract base class for screen capture engines.
A CaptureEngine is a stateless factory that knows about a capture
technology. It can enumerate displays, check availability, and create
CaptureStream instances for specific displays.
All methods are classmethods — no instance creation needed.
"""
ENGINE_TYPE: str = "base"
ENGINE_PRIORITY: int = 0
@classmethod
@abstractmethod
def is_available(cls) -> bool:
"""Check if this engine is available on current system."""
pass
@classmethod
@abstractmethod
def get_default_config(cls) -> Dict[str, Any]:
"""Get default configuration for this engine."""
pass
@classmethod
@abstractmethod
def get_available_displays(cls) -> List[DisplayInfo]:
"""Get list of available displays.
This method works without prior initialization — implementations
create temporary resources as needed.
Returns:
List of DisplayInfo objects describing available displays
@@ -83,61 +132,17 @@ class CaptureEngine(ABC):
"""
pass
@classmethod
@abstractmethod
def capture_display(self, display_index: int) -> Optional[ScreenCapture]:
"""Capture the specified display.
def create_stream(cls, display_index: int, config: Dict[str, Any]) -> CaptureStream:
"""Create a capture stream for the specified display.
Args:
display_index: Index of display to capture (0-based)
config: Engine-specific configuration dict
Returns:
ScreenCapture object with image data as numpy array (RGB format),
or None if no new frame is available (screen unchanged).
Raises:
ValueError: If display_index is invalid
RuntimeError: If capture fails
Uninitialized CaptureStream. Caller must call initialize()
or use as context manager.
"""
pass
@classmethod
@abstractmethod
def is_available(cls) -> bool:
"""Check if this engine is available on current system.
Returns:
True if engine can be used on this platform
Examples:
>>> MSSEngine.is_available()
True # MSS is available on all platforms
>>> DXcamEngine.is_available()
True # On Windows 8.1+
False # On Linux/macOS
"""
pass
@classmethod
@abstractmethod
def get_default_config(cls) -> Dict[str, Any]:
"""Get default configuration for this engine.
Returns:
Default config dict with engine-specific options
Examples:
>>> MSSEngine.get_default_config()
{}
>>> DXcamEngine.get_default_config()
{'device_idx': 0, 'output_color': 'RGB', 'max_buffer_len': 64}
"""
pass
def __enter__(self):
"""Context manager entry - initialize engine."""
self.initialize()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Context manager exit - cleanup engine."""
self.cleanup()

View File

@@ -7,6 +7,7 @@ import numpy as np
from wled_controller.core.capture_engines.base import (
CaptureEngine,
CaptureStream,
DisplayInfo,
ScreenCapture,
)
@@ -15,32 +16,15 @@ from wled_controller.utils import get_logger
logger = get_logger(__name__)
class BetterCamEngine(CaptureEngine):
"""BetterCam-based screen capture engine.
class BetterCamCaptureStream(CaptureStream):
"""BetterCam capture stream for a specific display."""
Uses the bettercam library (a high-performance fork of DXCam) which leverages
DXGI Desktop Duplication API for ultra-fast screen capture on Windows.
Offers better performance than DXCam with multi-GPU support.
Requires: Windows 8.1+
"""
ENGINE_TYPE = "bettercam"
ENGINE_PRIORITY = 4
def __init__(self, config: Dict[str, Any]):
"""Initialize BetterCam engine."""
super().__init__(config)
def __init__(self, display_index: int, config: Dict[str, Any]):
super().__init__(display_index, config)
self._camera = None
self._bettercam = None
self._current_output = None
def initialize(self) -> None:
"""Initialize BetterCam capture.
Raises:
RuntimeError: If bettercam not installed or initialization fails
"""
try:
import bettercam
self._bettercam = bettercam
@@ -49,49 +33,24 @@ class BetterCamEngine(CaptureEngine):
"BetterCam not installed. Install with: pip install bettercam"
)
self._initialized = True
logger.info("BetterCam engine initialized")
def _ensure_camera(self, display_index: int) -> None:
"""Ensure camera is created for the requested display.
Creates or recreates the BetterCam camera if needed.
"""
if self._camera and self._current_output == display_index:
return
# Stop and release existing camera
if self._camera:
try:
if self._camera.is_capturing:
self._camera.stop()
except Exception:
pass
try:
self._camera.release()
except Exception:
pass
self._camera = None
# Clear global camera cache to avoid stale DXGI state
# Clear global camera cache for fresh DXGI state
try:
self._bettercam.__factory.clean_up()
except Exception:
pass
self._camera = self._bettercam.create(
output_idx=display_index,
output_idx=self.display_index,
output_color="RGB",
)
if not self._camera:
raise RuntimeError(f"Failed to create BetterCam camera for display {display_index}")
raise RuntimeError(f"Failed to create BetterCam camera for display {self.display_index}")
self._current_output = display_index
logger.info(f"BetterCam camera created (output={display_index})")
self._initialized = True
logger.info(f"BetterCam capture stream initialized (display={self.display_index})")
def cleanup(self) -> None:
"""Cleanup BetterCam resources."""
if self._camera:
try:
if self._camera.is_capturing:
@@ -104,94 +63,27 @@ class BetterCamEngine(CaptureEngine):
logger.error(f"Error releasing BetterCam camera: {e}")
self._camera = None
# Clear global cache so next create() gets fresh DXGI state
if self._bettercam:
try:
self._bettercam.__factory.clean_up()
except Exception:
pass
self._current_output = None
self._initialized = False
logger.info("BetterCam engine cleaned up")
logger.info(f"BetterCam capture stream cleaned up (display={self.display_index})")
def get_available_displays(self) -> List[DisplayInfo]:
"""Get list of available displays using BetterCam.
Returns:
List of DisplayInfo objects
Raises:
RuntimeError: If not initialized or detection fails
"""
if not self._initialized:
raise RuntimeError("Engine not initialized")
try:
displays = []
output_idx = self._current_output or 0
if self._camera and hasattr(self._camera, "width") and hasattr(self._camera, "height"):
display_info = DisplayInfo(
index=output_idx,
name=f"BetterCam Display {output_idx}",
width=self._camera.width,
height=self._camera.height,
x=0,
y=0,
is_primary=(output_idx == 0),
refresh_rate=60,
)
displays.append(display_info)
else:
display_info = DisplayInfo(
index=output_idx,
name=f"BetterCam Display {output_idx}",
width=1920,
height=1080,
x=0,
y=0,
is_primary=(output_idx == 0),
refresh_rate=60,
)
displays.append(display_info)
logger.debug(f"BetterCam detected {len(displays)} display(s)")
return displays
except Exception as e:
logger.error(f"Failed to detect displays with BetterCam: {e}")
raise RuntimeError(f"Failed to detect displays: {e}")
def capture_display(self, display_index: int) -> Optional[ScreenCapture]:
"""Capture display using BetterCam.
Args:
display_index: Index of display to capture (0-based).
Returns:
ScreenCapture object with image data, or None if screen unchanged.
Raises:
RuntimeError: If capture fails
"""
# Auto-initialize if not already initialized
def capture_frame(self) -> Optional[ScreenCapture]:
if not self._initialized:
self.initialize()
# Ensure camera is ready for the requested display
self._ensure_camera(display_index)
try:
# grab() uses AcquireNextFrame with timeout=0 (non-blocking).
# Returns None if screen content hasn't changed since last grab.
frame = self._camera.grab()
if frame is None:
return None
logger.debug(
f"BetterCam captured display {display_index}: "
f"BetterCam captured display {self.display_index}: "
f"{frame.shape[1]}x{frame.shape[0]}"
)
@@ -199,27 +91,32 @@ class BetterCamEngine(CaptureEngine):
image=frame,
width=frame.shape[1],
height=frame.shape[0],
display_index=display_index,
display_index=self.display_index,
)
except ValueError:
raise
except Exception as e:
logger.error(f"Failed to capture display {display_index} with BetterCam: {e}")
logger.error(f"Failed to capture display {self.display_index} with BetterCam: {e}")
raise RuntimeError(f"Screen capture failed: {e}")
class BetterCamEngine(CaptureEngine):
"""BetterCam-based screen capture engine.
Uses the bettercam library (a high-performance fork of DXCam) which leverages
DXGI Desktop Duplication API for ultra-fast screen capture on Windows.
Requires: Windows 8.1+
"""
ENGINE_TYPE = "bettercam"
ENGINE_PRIORITY = 4
@classmethod
def is_available(cls) -> bool:
"""Check if BetterCam is available.
BetterCam requires Windows 8.1+ and the bettercam package.
Returns:
True if bettercam is available on this system
"""
if sys.platform != "win32":
return False
try:
import bettercam
return True
@@ -228,9 +125,34 @@ class BetterCamEngine(CaptureEngine):
@classmethod
def get_default_config(cls) -> Dict[str, Any]:
"""Get default BetterCam configuration.
Returns:
Default config dict with BetterCam options
"""
return {}
@classmethod
def get_available_displays(cls) -> List[DisplayInfo]:
    """Enumerate displays using mss (no BetterCam camera is created).

    Works without prior initialization — a temporary mss context is
    opened and closed for the enumeration.

    Returns:
        List of DisplayInfo, one per physical monitor.

    Raises:
        RuntimeError: If mss is unavailable or enumeration fails.
    """
    try:
        import mss
        displays = []
        with mss.mss() as sct:
            # monitors[0] is the combined virtual screen; skip it so
            # indices line up with individual physical displays.
            for i, monitor in enumerate(sct.monitors[1:], start=0):
                displays.append(DisplayInfo(
                    index=i,
                    name=f"Display {i}",
                    width=monitor["width"],
                    height=monitor["height"],
                    x=monitor["left"],
                    y=monitor["top"],
                    # NOTE(review): assumes display 0 is primary — confirm.
                    is_primary=(i == 0),
                    # mss does not report refresh rate; 60 Hz is a default.
                    refresh_rate=60,
                ))
        logger.debug(f"BetterCam detected {len(displays)} display(s)")
        return displays
    except Exception as e:
        logger.error(f"Failed to detect displays with BetterCam: {e}")
        raise RuntimeError(f"Failed to detect displays: {e}")
@classmethod
def create_stream(cls, display_index: int, config: Dict[str, Any]) -> BetterCamCaptureStream:
    """Create an uninitialized BetterCam capture stream for *display_index*.

    Caller must call initialize() or use the stream as a context manager.
    """
    return BetterCamCaptureStream(display_index, config)

View File

@@ -7,6 +7,7 @@ import numpy as np
from wled_controller.core.capture_engines.base import (
CaptureEngine,
CaptureStream,
DisplayInfo,
ScreenCapture,
)
@@ -15,32 +16,15 @@ from wled_controller.utils import get_logger
logger = get_logger(__name__)
class DXcamEngine(CaptureEngine):
"""DXcam-based screen capture engine.
class DXcamCaptureStream(CaptureStream):
"""DXcam capture stream for a specific display."""
Uses the dxcam library which leverages DXGI Desktop Duplication API for
ultra-fast screen capture on Windows. Offers significantly better performance
than MSS and eliminates cursor flickering.
Requires: Windows 8.1+
"""
ENGINE_TYPE = "dxcam"
ENGINE_PRIORITY = 3
def __init__(self, config: Dict[str, Any]):
"""Initialize DXcam engine."""
super().__init__(config)
def __init__(self, display_index: int, config: Dict[str, Any]):
super().__init__(display_index, config)
self._camera = None
self._dxcam = None
self._current_output = None
def initialize(self) -> None:
"""Initialize DXcam capture.
Raises:
RuntimeError: If DXcam not installed or initialization fails
"""
try:
import dxcam
self._dxcam = dxcam
@@ -49,51 +33,24 @@ class DXcamEngine(CaptureEngine):
"DXcam not installed. Install with: pip install dxcam"
)
self._initialized = True
logger.info("DXcam engine initialized")
def _ensure_camera(self, display_index: int) -> None:
"""Ensure camera is created for the requested display.
Creates or recreates the DXcam camera if needed.
DXcam caches cameras globally per (device, output). We clear the
cache before creating to avoid stale DXGI state from prior requests.
"""
if self._camera and self._current_output == display_index:
return
# Stop and release existing camera
if self._camera:
try:
if self._camera.is_capturing:
self._camera.stop()
except Exception:
pass
try:
self._camera.release()
except Exception:
pass
self._camera = None
# Clear dxcam's global camera cache to avoid stale DXGI state
# Clear global camera cache for fresh DXGI state
try:
self._dxcam.__factory.clean_up()
except Exception:
pass
self._camera = self._dxcam.create(
output_idx=display_index,
output_idx=self.display_index,
output_color="RGB",
)
if not self._camera:
raise RuntimeError(f"Failed to create DXcam camera for display {display_index}")
raise RuntimeError(f"Failed to create DXcam camera for display {self.display_index}")
self._current_output = display_index
logger.info(f"DXcam camera created (output={display_index})")
self._initialized = True
logger.info(f"DXcam capture stream initialized (display={self.display_index})")
def cleanup(self) -> None:
"""Cleanup DXcam resources."""
if self._camera:
try:
if self._camera.is_capturing:
@@ -106,104 +63,27 @@ class DXcamEngine(CaptureEngine):
logger.error(f"Error releasing DXcam camera: {e}")
self._camera = None
# Clear dxcam's global cache so next create() gets fresh DXGI state
if self._dxcam:
try:
self._dxcam.__factory.clean_up()
except Exception:
pass
self._current_output = None
self._initialized = False
logger.info("DXcam engine cleaned up")
logger.info(f"DXcam capture stream cleaned up (display={self.display_index})")
def get_available_displays(self) -> List[DisplayInfo]:
"""Get list of available displays using DXcam.
Note: DXcam provides limited display enumeration. This method
returns basic information for the configured output.
Returns:
List of DisplayInfo objects
Raises:
RuntimeError: If not initialized or detection fails
"""
if not self._initialized:
raise RuntimeError("Engine not initialized")
try:
displays = []
# Get output information from DXcam
# Note: DXcam doesn't provide comprehensive display enumeration
# We report the single configured output
output_idx = self._current_output or 0
# DXcam camera has basic output info
if self._camera and hasattr(self._camera, "width") and hasattr(self._camera, "height"):
display_info = DisplayInfo(
index=output_idx,
name=f"DXcam Display {output_idx}",
width=self._camera.width,
height=self._camera.height,
x=0, # DXcam doesn't provide position info
y=0,
is_primary=(output_idx == 0),
refresh_rate=60, # DXcam doesn't report refresh rate
)
displays.append(display_info)
else:
# Fallback if camera doesn't have dimensions
display_info = DisplayInfo(
index=output_idx,
name=f"DXcam Display {output_idx}",
width=1920, # Reasonable default
height=1080,
x=0,
y=0,
is_primary=(output_idx == 0),
refresh_rate=60,
)
displays.append(display_info)
logger.debug(f"DXcam detected {len(displays)} display(s)")
return displays
except Exception as e:
logger.error(f"Failed to detect displays with DXcam: {e}")
raise RuntimeError(f"Failed to detect displays: {e}")
def capture_display(self, display_index: int) -> Optional[ScreenCapture]:
"""Capture display using DXcam.
Args:
display_index: Index of display to capture (0-based).
Returns:
ScreenCapture object with image data, or None if screen unchanged.
Raises:
RuntimeError: If capture fails
"""
# Auto-initialize if not already initialized
def capture_frame(self) -> Optional[ScreenCapture]:
if not self._initialized:
self.initialize()
# Ensure camera is ready for the requested display
self._ensure_camera(display_index)
try:
# grab() uses AcquireNextFrame with timeout=0 (non-blocking).
# Returns None if screen content hasn't changed since last grab.
frame = self._camera.grab()
if frame is None:
return None
# DXcam returns numpy array directly in configured color format
logger.debug(
f"DXcam captured display {display_index}: "
f"DXcam captured display {self.display_index}: "
f"{frame.shape[1]}x{frame.shape[0]}"
)
@@ -211,29 +91,32 @@ class DXcamEngine(CaptureEngine):
image=frame,
width=frame.shape[1],
height=frame.shape[0],
display_index=display_index,
display_index=self.display_index,
)
except ValueError:
raise
except Exception as e:
logger.error(f"Failed to capture display {display_index} with DXcam: {e}")
logger.error(f"Failed to capture display {self.display_index} with DXcam: {e}")
raise RuntimeError(f"Screen capture failed: {e}")
class DXcamEngine(CaptureEngine):
"""DXcam-based screen capture engine.
Uses the dxcam library which leverages DXGI Desktop Duplication API for
ultra-fast screen capture on Windows.
Requires: Windows 8.1+
"""
ENGINE_TYPE = "dxcam"
ENGINE_PRIORITY = 3
@classmethod
def is_available(cls) -> bool:
"""Check if DXcam is available.
DXcam requires Windows 8.1+ and the dxcam package.
Returns:
True if dxcam is available on this system
"""
# Check platform
if sys.platform != "win32":
return False
# Check if dxcam is installed
try:
import dxcam
return True
@@ -242,9 +125,34 @@ class DXcamEngine(CaptureEngine):
@classmethod
def get_default_config(cls) -> Dict[str, Any]:
"""Get default DXcam configuration.
Returns:
Default config dict with DXcam options
"""
return {}
@classmethod
def get_available_displays(cls) -> List[DisplayInfo]:
    """Enumerate displays using mss (no DXcam camera is created).

    Works without prior initialization — a temporary mss context is
    opened and closed for the enumeration.

    Returns:
        List of DisplayInfo, one per physical monitor.

    Raises:
        RuntimeError: If mss is unavailable or enumeration fails.
    """
    try:
        import mss
        displays = []
        with mss.mss() as sct:
            # monitors[0] is the combined virtual screen; skip it so
            # indices line up with individual physical displays.
            for i, monitor in enumerate(sct.monitors[1:], start=0):
                displays.append(DisplayInfo(
                    index=i,
                    name=f"Display {i}",
                    width=monitor["width"],
                    height=monitor["height"],
                    x=monitor["left"],
                    y=monitor["top"],
                    # NOTE(review): assumes display 0 is primary — confirm.
                    is_primary=(i == 0),
                    # mss does not report refresh rate; 60 Hz is a default.
                    refresh_rate=60,
                ))
        logger.debug(f"DXcam detected {len(displays)} display(s)")
        return displays
    except Exception as e:
        logger.error(f"Failed to detect displays with DXcam: {e}")
        raise RuntimeError(f"Failed to detect displays: {e}")
@classmethod
def create_stream(cls, display_index: int, config: Dict[str, Any]) -> DXcamCaptureStream:
    """Create an uninitialized DXcam capture stream for *display_index*.

    Caller must call initialize() or use the stream as a context manager.
    """
    return DXcamCaptureStream(display_index, config)

View File

@@ -2,7 +2,7 @@
from typing import Any, Dict, List, Optional, Type
from wled_controller.core.capture_engines.base import CaptureEngine
from wled_controller.core.capture_engines.base import CaptureEngine, CaptureStream
from wled_controller.utils import get_logger
logger = get_logger(__name__)
@@ -11,8 +11,8 @@ logger = get_logger(__name__)
class EngineRegistry:
"""Registry for available capture engines.
This class maintains a registry of all capture engine implementations
and provides factory methods for creating engine instances.
Maintains a registry of all capture engine implementations
and provides factory methods for creating capture streams.
"""
_engines: Dict[str, Type[CaptureEngine]] = {}
@@ -26,7 +26,6 @@ class EngineRegistry:
Raises:
ValueError: If engine_class is not a subclass of CaptureEngine
ValueError: If an engine with the same ENGINE_TYPE is already registered
"""
if not issubclass(engine_class, CaptureEngine):
raise ValueError(f"{engine_class} must be a subclass of CaptureEngine")
@@ -66,12 +65,7 @@ class EngineRegistry:
"""Get list of available engine types on this system.
Returns:
List of engine type identifiers that are available on the current platform
Examples:
>>> EngineRegistry.get_available_engines()
['mss'] # On Linux
['mss', 'dxcam', 'wgc'] # On Windows 10+
List of engine type identifiers that are available
"""
available = []
for engine_type, engine_class in cls._engines.items():
@@ -115,19 +109,27 @@ class EngineRegistry:
return cls._engines.copy()
@classmethod
def create_engine(cls, engine_type: str, config: Dict[str, Any]) -> CaptureEngine:
"""Create engine instance with configuration.
def create_stream(
cls,
engine_type: str,
display_index: int,
config: Dict[str, Any],
) -> CaptureStream:
"""Create a CaptureStream for the specified engine and display.
Looks up the engine class, validates availability, and creates
an uninitialized CaptureStream for the specified display.
Args:
engine_type: Engine type identifier
display_index: Display index for the stream
config: Engine-specific configuration
Returns:
Initialized engine instance
Uninitialized CaptureStream instance
Raises:
ValueError: If engine type not found or not available
RuntimeError: If engine initialization fails
"""
engine_class = cls.get_engine(engine_type)
@@ -137,18 +139,15 @@ class EngineRegistry:
)
try:
engine = engine_class(config)
logger.debug(f"Created engine instance: {engine_type}")
return engine
stream = engine_class.create_stream(display_index, config)
logger.debug(f"Created capture stream: {engine_type} (display={display_index})")
return stream
except Exception as e:
logger.error(f"Failed to create engine '{engine_type}': {e}")
raise RuntimeError(f"Failed to create engine '{engine_type}': {e}")
logger.error(f"Failed to create stream for engine '{engine_type}': {e}")
raise RuntimeError(f"Failed to create stream for engine '{engine_type}': {e}")
@classmethod
def clear_registry(cls):
"""Clear all registered engines.
This is primarily useful for testing.
"""
"""Clear all registered engines (for testing)."""
cls._engines.clear()
logger.debug("Cleared engine registry")

View File

@@ -1,6 +1,6 @@
"""MSS-based screen capture engine (cross-platform)."""
from typing import Any, Dict, List
from typing import Any, Dict, List, Optional
import mss
import numpy as np
@@ -8,6 +8,7 @@ from PIL import Image
from wled_controller.core.capture_engines.base import (
CaptureEngine,
CaptureStream,
DisplayInfo,
ScreenCapture,
)
@@ -16,126 +17,43 @@ from wled_controller.utils import get_logger, get_monitor_names, get_monitor_ref
logger = get_logger(__name__)
class MSSEngine(CaptureEngine):
"""MSS-based screen capture engine.
class MSSCaptureStream(CaptureStream):
"""MSS capture stream for a specific display."""
Uses the mss library for cross-platform screen capture support.
Works on Windows, macOS, and Linux.
Note: May experience cursor flickering on some systems.
"""
ENGINE_TYPE = "mss"
ENGINE_PRIORITY = 1
def __init__(self, config: Dict[str, Any]):
"""Initialize MSS engine.
Args:
config: Engine configuration (currently unused for MSS)
"""
super().__init__(config)
def __init__(self, display_index: int, config: Dict[str, Any]):
super().__init__(display_index, config)
self._sct = None
def initialize(self) -> None:
"""Initialize MSS capture context.
Raises:
RuntimeError: If MSS initialization fails
"""
try:
self._sct = mss.mss()
self._initialized = True
logger.info("MSS engine initialized")
logger.info(f"MSS capture stream initialized (display={self.display_index})")
except Exception as e:
raise RuntimeError(f"Failed to initialize MSS: {e}")
def cleanup(self) -> None:
"""Cleanup MSS resources."""
if self._sct:
self._sct.close()
self._sct = None
self._initialized = False
logger.info("MSS engine cleaned up")
logger.info(f"MSS capture stream cleaned up (display={self.display_index})")
def get_available_displays(self) -> List[DisplayInfo]:
"""Get list of available displays using MSS.
Returns:
List of DisplayInfo objects for each available monitor
Raises:
RuntimeError: If not initialized or display detection fails
"""
if not self._initialized:
raise RuntimeError("Engine not initialized")
try:
# Get friendly monitor names (Windows only, falls back to generic names)
monitor_names = get_monitor_names()
# Get monitor refresh rates (Windows only, falls back to 60Hz)
refresh_rates = get_monitor_refresh_rates()
displays = []
# Skip the first monitor (combined virtual screen on multi-monitor setups)
for idx, monitor in enumerate(self._sct.monitors[1:], start=0):
# Use friendly name from WMI if available, otherwise generic name
friendly_name = monitor_names.get(idx, f"Display {idx}")
# Use detected refresh rate or default to 60Hz
refresh_rate = refresh_rates.get(idx, 60)
display_info = DisplayInfo(
index=idx,
name=friendly_name,
width=monitor["width"],
height=monitor["height"],
x=monitor["left"],
y=monitor["top"],
is_primary=(idx == 0),
refresh_rate=refresh_rate,
)
displays.append(display_info)
logger.debug(f"MSS detected {len(displays)} display(s)")
return displays
except Exception as e:
logger.error(f"Failed to detect displays with MSS: {e}")
raise RuntimeError(f"Failed to detect displays: {e}")
def capture_display(self, display_index: int) -> ScreenCapture:
"""Capture display using MSS.
Args:
display_index: Index of display to capture (0-based)
Returns:
ScreenCapture object with image data
Raises:
ValueError: If display_index is invalid
RuntimeError: If capture fails
"""
# Auto-initialize if not already initialized
def capture_frame(self) -> Optional[ScreenCapture]:
if not self._initialized:
self.initialize()
try:
# mss monitors[0] is the combined screen, monitors[1+] are individual displays
monitor_index = display_index + 1
monitor_index = self.display_index + 1
if monitor_index >= len(self._sct.monitors):
raise ValueError(
f"Invalid display index {display_index}. "
f"Invalid display index {self.display_index}. "
f"Available displays: 0-{len(self._sct.monitors) - 2}"
)
monitor = self._sct.monitors[monitor_index]
# Capture screenshot
screenshot = self._sct.grab(monitor)
# Convert to numpy array (RGB)
@@ -143,31 +61,35 @@ class MSSEngine(CaptureEngine):
img_array = np.array(img)
logger.debug(
f"MSS captured display {display_index}: {monitor['width']}x{monitor['height']}"
f"MSS captured display {self.display_index}: {monitor['width']}x{monitor['height']}"
)
return ScreenCapture(
image=img_array,
width=monitor["width"],
height=monitor["height"],
display_index=display_index,
display_index=self.display_index,
)
except ValueError:
raise
except Exception as e:
logger.error(f"Failed to capture display {display_index} with MSS: {e}")
logger.error(f"Failed to capture display {self.display_index} with MSS: {e}")
raise RuntimeError(f"Screen capture failed: {e}")
class MSSEngine(CaptureEngine):
"""MSS-based screen capture engine.
Uses the mss library for cross-platform screen capture support.
Works on Windows, macOS, and Linux.
"""
ENGINE_TYPE = "mss"
ENGINE_PRIORITY = 1
@classmethod
def is_available(cls) -> bool:
"""Check if MSS is available.
MSS is cross-platform and should always be available.
Returns:
True if mss library is available
"""
try:
import mss
return True
@@ -176,11 +98,38 @@ class MSSEngine(CaptureEngine):
@classmethod
def get_default_config(cls) -> Dict[str, Any]:
"""Get default MSS configuration.
MSS has no configurable options.
Returns:
Empty dict (MSS has no configuration)
"""
return {}
@classmethod
def get_available_displays(cls) -> List[DisplayInfo]:
    """Enumerate displays with mss, with friendly names and refresh rates.

    Works without prior initialization — a temporary mss context is
    opened and closed for the enumeration.

    Returns:
        List of DisplayInfo, one per physical monitor.

    Raises:
        RuntimeError: If display detection fails.
    """
    try:
        # Friendly names and refresh rates come from Windows-only helpers;
        # on other platforms they fall back to generic names / 60 Hz.
        monitor_names = get_monitor_names()
        refresh_rates = get_monitor_refresh_rates()
        displays = []
        with mss.mss() as sct:
            # monitors[0] is the combined virtual screen; skip it so
            # indices line up with individual physical displays.
            for idx, monitor in enumerate(sct.monitors[1:], start=0):
                friendly_name = monitor_names.get(idx, f"Display {idx}")
                refresh_rate = refresh_rates.get(idx, 60)
                displays.append(DisplayInfo(
                    index=idx,
                    name=friendly_name,
                    width=monitor["width"],
                    height=monitor["height"],
                    x=monitor["left"],
                    y=monitor["top"],
                    # NOTE(review): assumes display 0 is primary — confirm.
                    is_primary=(idx == 0),
                    refresh_rate=refresh_rate,
                ))
        logger.debug(f"MSS detected {len(displays)} display(s)")
        return displays
    except Exception as e:
        logger.error(f"Failed to detect displays with MSS: {e}")
        raise RuntimeError(f"Failed to detect displays: {e}")
@classmethod
def create_stream(cls, display_index: int, config: Dict[str, Any]) -> MSSCaptureStream:
    """Create an uninitialized MSS capture stream for *display_index*.

    Caller must call initialize() or use the stream as a context manager.
    """
    return MSSCaptureStream(display_index, config)

View File

@@ -2,14 +2,14 @@
import gc
import sys
import time
import threading
from typing import Any, Dict, List
from typing import Any, Dict, List, Optional
import numpy as np
from wled_controller.core.capture_engines.base import (
CaptureEngine,
CaptureStream,
DisplayInfo,
ScreenCapture,
)
@@ -18,54 +18,20 @@ from wled_controller.utils import get_logger
logger = get_logger(__name__)
class WGCEngine(CaptureEngine):
"""Windows Graphics Capture engine.
class WGCCaptureStream(CaptureStream):
"""WGC capture stream for a specific display."""
Uses the windows-capture library which leverages Windows.Graphics.Capture API.
This is Microsoft's recommended modern screen capture API for Windows 10+.
Features:
- Cross-GPU support (works regardless of GPU routing)
- Hardware cursor exclusion (no cursor flickering)
- GPU-accelerated with direct texture sharing
- Modern, future-proof API
Requires: Windows 10 1803+
"""
ENGINE_TYPE = "wgc"
ENGINE_PRIORITY = 2
def __init__(self, config: Dict[str, Any]):
"""Initialize WGC engine.
Args:
config: Engine configuration
- capture_cursor (bool): Include cursor in capture (default: False)
- draw_border (bool): Draw border around capture (default: False)
Note: monitor_index is NOT in config - WGC maintains separate instances per monitor
to support simultaneous capture from multiple monitors.
"""
super().__init__(config)
def __init__(self, display_index: int, config: Dict[str, Any]):
super().__init__(display_index, config)
self._wgc = None
# Per-monitor capture instances: {monitor_index: (instance, control, frame, frame_event)}
self._monitor_captures = {}
self._capture_instance = None
self._capture_control = None
self._latest_frame = None
self._frame_event = threading.Event()
self._closed_event = threading.Event()
self._frame_lock = threading.Lock()
def initialize(self, monitor_index: int = 0) -> None:
"""Initialize WGC capture for a specific monitor.
Maintains separate capture instances per monitor to support simultaneous
capture from multiple monitors.
Args:
monitor_index: Monitor index to capture (0-based)
Raises:
RuntimeError: If windows-capture not installed or initialization fails
"""
# Import windows_capture if not already imported
def initialize(self) -> None:
if self._wgc is None:
try:
import windows_capture
@@ -75,249 +41,110 @@ class WGCEngine(CaptureEngine):
"windows-capture not installed. Install with: pip install windows-capture"
)
# Skip if already initialized for this monitor
if monitor_index in self._monitor_captures:
logger.debug(f"WGC already initialized for monitor {monitor_index}")
return
try:
capture_cursor = self.config.get("capture_cursor", False)
# Note: draw_border is not supported by WGC API on most platforms
# WGC uses 1-based monitor indexing (1, 2, 3...) while we use 0-based (0, 1, 2...)
wgc_monitor_index = monitor_index + 1
# WGC uses 1-based monitor indexing
wgc_monitor_index = self.display_index + 1
# Create per-monitor events and storage
frame_event = threading.Event()
closed_event = threading.Event()
latest_frame = None
# Create capture instance
# Note: draw_border parameter not supported on all platforms
capture_instance = self._wgc.WindowsCapture(
self._capture_instance = self._wgc.WindowsCapture(
cursor_capture=capture_cursor,
monitor_index=wgc_monitor_index,
)
# Define event handlers as local functions that capture monitor_index
def on_frame_arrived(frame, capture_control):
"""Called when a new frame is captured."""
nonlocal latest_frame
try:
logger.debug(f"WGC frame callback triggered for monitor {monitor_index}")
# Get frame buffer as numpy array
frame_buffer = frame.frame_buffer
width = frame.width
height = frame.height
# Reshape to image dimensions (height, width, channels)
# WGC provides BGRA format
# WGC provides BGRA format, convert to RGB
frame_array = frame_buffer.reshape((height, width, 4))
frame_rgb = frame_array[:, :, [2, 1, 0]]
# Convert BGRA to RGB
frame_rgb = frame_array[:, :, [2, 1, 0]] # Take BGR channels
# Store the latest frame for this monitor
with self._frame_lock:
if monitor_index in self._monitor_captures:
self._monitor_captures[monitor_index]['latest_frame'] = frame_rgb.copy()
self._monitor_captures[monitor_index]['frame_event'].set()
self._latest_frame = frame_rgb.copy()
self._frame_event.set()
except Exception as e:
logger.error(f"Error processing WGC frame for monitor {monitor_index}: {e}", exc_info=True)
logger.error(f"Error processing WGC frame: {e}", exc_info=True)
def on_closed():
"""Called when capture session is closed."""
logger.debug(f"WGC capture session closed for monitor {monitor_index}")
# Signal that the capture session has fully closed and resources are released
with self._frame_lock:
if monitor_index in self._monitor_captures:
self._monitor_captures[monitor_index]['closed_event'].set()
logger.debug(f"WGC capture session closed for display {self.display_index}")
self._closed_event.set()
# Set handlers directly as attributes
capture_instance.frame_handler = on_frame_arrived
capture_instance.closed_handler = on_closed
self._capture_instance.frame_handler = on_frame_arrived
self._capture_instance.closed_handler = on_closed
# Start capture using free-threaded mode (non-blocking)
# IMPORTANT: start_free_threaded() returns a CaptureControl object for cleanup
logger.debug(f"Starting WGC capture for monitor {monitor_index} (free-threaded mode)...")
capture_control = capture_instance.start_free_threaded()
logger.debug(f"Starting WGC capture for display {self.display_index} (free-threaded mode)...")
self._capture_control = self._capture_instance.start_free_threaded()
# Store all per-monitor data
self._monitor_captures[monitor_index] = {
'instance': capture_instance,
'control': capture_control,
'latest_frame': None,
'frame_event': frame_event,
'closed_event': closed_event,
}
# Wait for first frame
logger.debug(f"Waiting for first WGC frame from display {self.display_index}...")
frame_received = self._frame_event.wait(timeout=5.0)
# Wait for first frame to arrive (with timeout)
logger.debug(f"Waiting for first WGC frame from monitor {monitor_index}...")
frame_received = frame_event.wait(timeout=5.0)
if not frame_received or self._monitor_captures[monitor_index]['latest_frame'] is None:
# Cleanup on failure
with self._frame_lock:
if monitor_index in self._monitor_captures:
del self._monitor_captures[monitor_index]
if not frame_received or self._latest_frame is None:
self._cleanup_internal()
raise RuntimeError(
f"WGC capture started for monitor {monitor_index} but no frames received within 5 seconds. "
"This may indicate the capture session failed to start or "
"the display is not actively updating."
f"WGC capture started for display {self.display_index} but no frames received within 5 seconds."
)
self._initialized = True
logger.info(
f"WGC engine initialized (monitor={monitor_index}, "
f"WGC capture stream initialized (display={self.display_index}, "
f"cursor={capture_cursor})"
)
except RuntimeError:
raise
except Exception as e:
logger.error(f"Failed to initialize WGC for monitor {monitor_index}: {e}", exc_info=True)
raise RuntimeError(f"Failed to initialize WGC for monitor {monitor_index}: {e}")
logger.error(f"Failed to initialize WGC for display {self.display_index}: {e}", exc_info=True)
raise RuntimeError(f"Failed to initialize WGC for display {self.display_index}: {e}")
def _cleanup_internal(self) -> None:
    """Tear down the capture control and instance, then reset frame state."""
    control = self._capture_control
    if control:
        try:
            logger.debug(f"Stopping WGC capture thread for display {self.display_index}...")
            control.stop()
            # Blocks until the capture thread has actually finished.
            control.wait()
            logger.debug(f"WGC capture thread finished for display {self.display_index}")
        except Exception as e:
            logger.error(f"Error during WGC capture control cleanup: {e}", exc_info=True)
    self._capture_control = None
    if self._capture_instance:
        try:
            del self._capture_instance
        except Exception:
            pass
    self._capture_instance = None
    self._frame_event.clear()
    self._closed_event.clear()
    self._latest_frame = None
def cleanup(self) -> None:
"""Cleanup WGC resources for all monitors."""
# Proper cleanup for free-threaded captures:
# 1. Stop capture via CaptureControl.stop() (signals thread to stop)
# 2. Wait for thread to finish using CaptureControl.wait() (blocks until done)
# 3. Delete capture instance (releases COM objects)
# 4. Force garbage collection (ensures COM cleanup)
with self._frame_lock:
monitors_to_cleanup = list(self._monitor_captures.keys())
for monitor_index in monitors_to_cleanup:
logger.debug(f"Cleaning up WGC resources for monitor {monitor_index}...")
with self._frame_lock:
if monitor_index not in self._monitor_captures:
continue
monitor_data = self._monitor_captures[monitor_index]
# Stop and wait for capture thread
capture_control = monitor_data.get('control')
if capture_control:
try:
logger.debug(f"Stopping WGC capture thread for monitor {monitor_index}...")
capture_control.stop()
logger.debug(f"Waiting for WGC capture thread to finish (monitor {monitor_index})...")
# This will block until the capture thread actually finishes
capture_control.wait()
logger.debug(f"WGC capture thread finished successfully for monitor {monitor_index}")
except Exception as e:
logger.error(f"Error during WGC capture control cleanup for monitor {monitor_index}: {e}", exc_info=True)
# Delete capture instance
capture_instance = monitor_data.get('instance')
if capture_instance:
try:
logger.debug(f"Deleting WGC capture instance for monitor {monitor_index}...")
del capture_instance
logger.debug(f"WGC capture instance deleted for monitor {monitor_index}")
except Exception as e:
logger.error(f"Error deleting WGC capture instance for monitor {monitor_index}: {e}", exc_info=True)
# Clear events
frame_event = monitor_data.get('frame_event')
if frame_event:
frame_event.clear()
closed_event = monitor_data.get('closed_event')
if closed_event:
closed_event.clear()
# Remove from dictionary
with self._frame_lock:
if monitor_index in self._monitor_captures:
del self._monitor_captures[monitor_index]
logger.info(f"WGC engine cleaned up for monitor {monitor_index}")
self._cleanup_internal()
self._initialized = False
# Force garbage collection to release COM objects
logger.debug("Running garbage collection for COM cleanup...")
gc.collect()
logger.debug("Garbage collection completed")
logger.info(f"WGC capture stream cleaned up (display={self.display_index})")
def get_available_displays(self) -> List[DisplayInfo]:
"""Get list of available displays using MSS.
Note: WGC doesn't provide a direct API for enumerating monitors,
so we use MSS for display detection.
Returns:
List of DisplayInfo objects
Raises:
RuntimeError: If detection fails
"""
try:
import mss
with mss.mss() as sct:
displays = []
# Skip monitor 0 (all monitors combined)
for i, monitor in enumerate(sct.monitors[1:], start=0):
displays.append(
DisplayInfo(
index=i,
name=f"Monitor {i+1}",
width=monitor["width"],
height=monitor["height"],
x=monitor["left"],
y=monitor["top"],
is_primary=(i == 0),
refresh_rate=60,
)
)
logger.debug(f"WGC detected {len(displays)} display(s)")
return displays
except Exception as e:
logger.error(f"Failed to detect displays: {e}")
raise RuntimeError(f"Failed to detect displays: {e}")
def capture_display(self, display_index: int) -> ScreenCapture:
"""Capture display using WGC.
WGC dynamically initializes for the requested display if needed.
Supports simultaneous capture from multiple monitors.
Args:
display_index: Index of display to capture (0-based)
Returns:
ScreenCapture object with image data
Raises:
RuntimeError: If initialization or capture fails
"""
# Initialize for this monitor if not already initialized
self.initialize(display_index)
def capture_frame(self) -> Optional[ScreenCapture]:
if not self._initialized:
self.initialize()
try:
# Get the latest frame for this monitor
with self._frame_lock:
if display_index not in self._monitor_captures:
if self._latest_frame is None:
raise RuntimeError(
f"Monitor {display_index} not initialized. This should not happen."
f"No frame available yet for display {self.display_index}."
)
monitor_data = self._monitor_captures[display_index]
latest_frame = monitor_data.get('latest_frame')
if latest_frame is None:
raise RuntimeError(
f"No frame available yet for monitor {display_index}. "
"The capture may not have started or the screen hasn't updated. "
"Wait a moment and try again."
)
frame = latest_frame.copy()
frame = self._latest_frame.copy()
logger.debug(
f"WGC captured display {display_index}: "
f"WGC captured display {self.display_index}: "
f"{frame.shape[1]}x{frame.shape[0]}"
)
@@ -325,46 +152,51 @@ class WGCEngine(CaptureEngine):
image=frame,
width=frame.shape[1],
height=frame.shape[0],
display_index=display_index,
display_index=self.display_index,
)
except ValueError:
except RuntimeError:
raise
except Exception as e:
logger.error(f"Failed to capture display {display_index} with WGC: {e}")
logger.error(f"Failed to capture display {self.display_index} with WGC: {e}")
raise RuntimeError(f"Screen capture failed: {e}")
class WGCEngine(CaptureEngine):
"""Windows Graphics Capture engine.
Uses the windows-capture library which leverages Windows.Graphics.Capture API.
This is Microsoft's recommended modern screen capture API for Windows 10+.
Features:
- Cross-GPU support
- Hardware cursor exclusion
- GPU-accelerated with direct texture sharing
Requires: Windows 10 1803+
"""
ENGINE_TYPE = "wgc"
ENGINE_PRIORITY = 2
@classmethod
def is_available(cls) -> bool:
"""Check if WGC is available.
WGC requires Windows 10 1803+ and the windows-capture package.
Returns:
True if windows-capture is available on this system
"""
# Check platform
if sys.platform != "win32":
return False
# Check Windows version (Windows 10 1803 = version 10.0.17134)
try:
import platform
version = platform.version()
# Parse version string like "10.0.19045"
parts = version.split(".")
if len(parts) >= 3:
major = int(parts[0])
minor = int(parts[1])
build = int(parts[2])
# Check for Windows 10 1803+ (build 17134+)
if major < 10 or (major == 10 and minor == 0 and build < 17134):
return False
except Exception:
# If we can't parse version, assume it might work
pass
# Check if windows-capture is installed
try:
import windows_capture
return True
@@ -373,15 +205,37 @@ class WGCEngine(CaptureEngine):
@classmethod
def get_default_config(cls) -> Dict[str, Any]:
"""Get default WGC configuration.
Note: monitor_index is NOT in config - WGC dynamically initializes
for the requested monitor at capture time.
Returns:
Default config dict with WGC options
"""
return {
"capture_cursor": False, # Exclude cursor (hardware exclusion)
"draw_border": False, # Don't draw border around capture
"capture_cursor": False,
"draw_border": False,
}
@classmethod
def get_available_displays(cls) -> List[DisplayInfo]:
    """Enumerate displays via MSS (WGC itself has no enumeration API)."""
    try:
        import mss

        with mss.mss() as sct:
            # Entry 0 is the virtual combined desktop; real monitors follow it.
            displays = []
            for i, monitor in enumerate(sct.monitors[1:]):
                displays.append(DisplayInfo(
                    index=i,
                    name=f"Monitor {i+1}",
                    width=monitor["width"],
                    height=monitor["height"],
                    x=monitor["left"],
                    y=monitor["top"],
                    is_primary=(i == 0),
                    refresh_rate=60,
                ))
            logger.debug(f"WGC detected {len(displays)} display(s)")
            return displays
    except Exception as e:
        logger.error(f"Failed to detect displays: {e}")
        raise RuntimeError(f"Failed to detect displays: {e}")
@classmethod
def create_stream(cls, display_index: int, config: Dict[str, Any]) -> WGCCaptureStream:
    """Build a WGC capture stream bound to the given display."""
    return WGCCaptureStream(display_index, config)

View File

@@ -0,0 +1,240 @@
"""Runtime live stream abstractions for frame production.
LiveStream is the runtime counterpart of PictureSource (config/storage layer).
Each PictureSource type maps to a LiveStream implementation:
ScreenCapturePictureSource → ScreenCaptureLiveStream
ProcessedPictureSource → ProcessedLiveStream
StaticImagePictureSource → StaticImageLiveStream
LiveStreams are managed by LiveStreamManager which handles sharing and
reference counting — multiple devices using the same PictureSource
share a single LiveStream instance.
"""
import threading
import time
from abc import ABC, abstractmethod
from typing import List, Optional
import numpy as np
from wled_controller.core.capture_engines.base import CaptureStream, ScreenCapture
from wled_controller.core.filters import ImagePool, PostprocessingFilter
from wled_controller.utils import get_logger
logger = get_logger(__name__)
class LiveStream(ABC):
    """Interface for anything that yields frames at runtime.

    Implementations produce frames on their own schedule; callers poll
    get_latest_frame() to fetch the newest one without blocking.
    """

    @property
    @abstractmethod
    def target_fps(self) -> int:
        """The frame rate this stream aims to produce."""

    @property
    @abstractmethod
    def display_index(self) -> Optional[int]:
        """Index of the captured display; None when no display is involved."""

    @abstractmethod
    def start(self) -> None:
        """Begin producing frames. Invoked once when the stream is first needed."""

    @abstractmethod
    def stop(self) -> None:
        """Stop frame production and free any held resources."""

    @abstractmethod
    def get_latest_frame(self) -> Optional[ScreenCapture]:
        """Fetch the newest available frame.

        Returns:
            The latest ScreenCapture (RGB image data), or None if nothing
            has been produced yet.
        """
class ScreenCaptureLiveStream(LiveStream):
    """LiveStream fed by a CaptureStream running on its own worker thread.

    A background worker grabs frames at the configured FPS and stashes the
    newest one behind a lock; get_latest_frame() just returns that cached
    frame, never blocking.

    Keeping every engine call on a single dedicated thread also satisfies
    the thread-affinity requirements of capture backends (DXGI, GDI, WGC).
    """

    def __init__(self, capture_stream: CaptureStream, fps: int):
        self._capture_stream = capture_stream
        self._fps = fps
        self._latest_frame: Optional[ScreenCapture] = None
        self._frame_lock = threading.Lock()
        self._running = False
        self._thread: Optional[threading.Thread] = None

    @property
    def target_fps(self) -> int:
        return self._fps

    @property
    def display_index(self) -> Optional[int]:
        return self._capture_stream.display_index

    def start(self) -> None:
        """Initialize the capture stream and spawn the worker thread (idempotent)."""
        if self._running:
            return
        self._capture_stream.initialize()
        self._running = True
        worker = threading.Thread(
            target=self._capture_loop,
            name=f"live-capture-{self._capture_stream.display_index}",
            daemon=True,
        )
        self._thread = worker
        worker.start()
        logger.info(
            f"ScreenCaptureLiveStream started "
            f"(display={self._capture_stream.display_index}, fps={self._fps})"
        )

    def stop(self) -> None:
        """Stop the worker thread and release capture resources (idempotent)."""
        if not self._running:
            return
        self._running = False
        worker = self._thread
        if worker:
            worker.join(timeout=5.0)
            if worker.is_alive():
                logger.warning("Capture thread did not terminate within 5s")
            self._thread = None
        self._capture_stream.cleanup()
        self._latest_frame = None
        logger.info(
            f"ScreenCaptureLiveStream stopped "
            f"(display={self._capture_stream.display_index})"
        )

    def get_latest_frame(self) -> Optional[ScreenCapture]:
        """Return the most recently captured frame, or None before the first one."""
        with self._frame_lock:
            return self._latest_frame

    def _capture_loop(self) -> None:
        """Worker loop: capture at the target FPS and publish the latest frame."""
        interval = 1.0 / self._fps if self._fps > 0 else 1.0
        while self._running:
            started_at = time.time()
            try:
                frame = self._capture_stream.capture_frame()
                if frame is not None:
                    with self._frame_lock:
                        self._latest_frame = frame
            except Exception as e:
                logger.error(f"Capture error (display={self._capture_stream.display_index}): {e}")
            # Sleep out the remainder of the frame budget, if any.
            remaining = max(0, interval - (time.time() - started_at))
            if remaining > 0:
                time.sleep(remaining)
class ProcessedLiveStream(LiveStream):
    """LiveStream that runs a filter chain over another stream's frames.

    Pulls the newest frame from the wrapped source and pushes it through the
    configured postprocessing filters. Results are cached by source-frame
    identity, so an unchanged source frame never triggers recomputation.

    A lock around the filtering step keeps concurrent readers from doing
    the same work twice.
    """

    def __init__(
        self,
        source: LiveStream,
        filters: List[PostprocessingFilter],
    ):
        self._source = source
        self._filters = filters
        self._image_pool = ImagePool()
        self._process_lock = threading.Lock()
        self._cached_source_frame: Optional[ScreenCapture] = None
        self._cached_result: Optional[ScreenCapture] = None

    @property
    def target_fps(self) -> int:
        return self._source.target_fps

    @property
    def display_index(self) -> Optional[int]:
        return self._source.display_index

    def start(self) -> None:
        # Source lifecycle managed by LiveStreamManager
        pass

    def stop(self) -> None:
        # Source lifecycle managed by LiveStreamManager; just drop caches.
        self._cached_source_frame = None
        self._cached_result = None

    def get_latest_frame(self) -> Optional[ScreenCapture]:
        """Return the filtered version of the source's newest frame."""
        raw = self._source.get_latest_frame()
        if raw is None:
            return None
        with self._process_lock:
            # Identity cache: same source frame object => reuse previous result.
            if raw is self._cached_source_frame and self._cached_result is not None:
                return self._cached_result
            # Filters operate on a private copy of the source image.
            image = raw.image.copy()
            for filt in self._filters:
                output = filt.process_image(image, self._image_pool)
                if output is not None:
                    image = output
            processed = ScreenCapture(
                image=image,
                width=raw.width,
                height=raw.height,
                display_index=raw.display_index,
            )
            self._cached_source_frame = raw
            self._cached_result = processed
            return processed
class StaticImageLiveStream(LiveStream):
    """LiveStream that serves one fixed image forever."""

    def __init__(self, image: np.ndarray):
        self._image = image
        height, width = image.shape[:2]
        # display_index=-1 marks this frame as not coming from a display.
        self._frame = ScreenCapture(
            image=image, width=width, height=height, display_index=-1
        )

    @property
    def target_fps(self) -> int:
        # The image never changes, so one "frame" per second is plenty.
        return 1

    @property
    def display_index(self) -> Optional[int]:
        return None

    def start(self) -> None:
        pass

    def stop(self) -> None:
        pass

    def get_latest_frame(self) -> Optional[ScreenCapture]:
        return self._frame

View File

@@ -0,0 +1,272 @@
"""Shared live stream management with reference counting.
LiveStreamManager creates LiveStream instances from PictureSource configs
and shares them across multiple consumers (devices). When multiple devices
reference the same PictureSource, they share a single LiveStream instance.
Reference counting ensures streams are cleaned up when the last consumer
releases them.
"""
from dataclasses import dataclass
from typing import Dict, Optional
import httpx
import numpy as np
from wled_controller.core.capture_engines import EngineRegistry
from wled_controller.core.filters import FilterRegistry, PostprocessingFilter
from wled_controller.core.live_stream import (
LiveStream,
ProcessedLiveStream,
ScreenCaptureLiveStream,
StaticImageLiveStream,
)
from wled_controller.utils import get_logger
logger = get_logger(__name__)
@dataclass
class _LiveStreamEntry:
    """Bookkeeping record for one managed live stream."""

    live_stream: LiveStream
    ref_count: int
    # For ProcessedLiveStream entries: ID of the upstream source stream.
    # Releasing this entry recursively releases that source as well.
    source_stream_id: Optional[str] = None
class LiveStreamManager:
    """Manages shared LiveStream instances with reference counting.

    Multiple devices using the same PictureSource share a single LiveStream.
    Streams are created on first acquire and cleaned up when the last
    consumer releases.

    For ProcessedPictureSources, the source stream is recursively acquired,
    enabling sharing at every level of the stream chain.
    """

    def __init__(self, picture_source_store, capture_template_store=None, pp_template_store=None):
        """Initialize the live stream manager.

        Args:
            picture_source_store: PictureSourceStore for resolving stream configs
            capture_template_store: TemplateStore for resolving capture engine settings
            pp_template_store: PostprocessingTemplateStore for resolving filter chains
        """
        self._picture_source_store = picture_source_store
        self._capture_template_store = capture_template_store
        self._pp_template_store = pp_template_store
        self._streams: Dict[str, _LiveStreamEntry] = {}

    def acquire(self, picture_source_id: str) -> LiveStream:
        """Get or create a LiveStream for the given PictureSource config.

        If a LiveStream already exists for this picture_source_id, increments
        the reference count and returns the existing instance.

        Otherwise, creates a new LiveStream from the PictureSource config,
        starts it, and stores it with ref_count=1.

        Args:
            picture_source_id: ID of the PictureSource config

        Returns:
            LiveStream instance (shared if already exists)

        Raises:
            ValueError: If PictureSource not found or config invalid
            RuntimeError: If stream creation/start fails
        """
        if picture_source_id in self._streams:
            entry = self._streams[picture_source_id]
            entry.ref_count += 1
            logger.info(
                f"Reusing live stream for picture source {picture_source_id} "
                f"(ref_count={entry.ref_count})"
            )
            return entry.live_stream

        # Create new live stream from config
        live_stream, source_stream_id = self._create_live_stream(picture_source_id)
        try:
            live_stream.start()
        except Exception as e:
            # If start fails, release any source dependency we acquired
            if source_stream_id:
                self.release(source_stream_id)
            # Chain the original exception so the root cause stays visible.
            raise RuntimeError(
                f"Failed to start live stream for picture source {picture_source_id}: {e}"
            ) from e

        self._streams[picture_source_id] = _LiveStreamEntry(
            live_stream=live_stream,
            ref_count=1,
            source_stream_id=source_stream_id,
        )
        logger.info(f"Created live stream for picture source {picture_source_id}")
        return live_stream

    def release(self, picture_source_id: str) -> None:
        """Release a reference to a LiveStream.

        Decrements the reference count. When it reaches 0, stops the
        LiveStream and removes it from the registry.

        For ProcessedLiveStreams, recursively releases the source dependency.

        Args:
            picture_source_id: ID of the PictureSource to release
        """
        entry = self._streams.get(picture_source_id)
        if not entry:
            logger.warning(f"Attempted to release unknown live stream: {picture_source_id}")
            return

        entry.ref_count -= 1
        logger.debug(
            f"Released live stream {picture_source_id} (ref_count={entry.ref_count})"
        )

        if entry.ref_count <= 0:
            # Stop and remove
            try:
                entry.live_stream.stop()
            except Exception as e:
                logger.error(f"Error stopping live stream {picture_source_id}: {e}")
            source_stream_id = entry.source_stream_id
            del self._streams[picture_source_id]
            logger.info(f"Removed live stream for picture source {picture_source_id}")

            # Recursively release source dependency
            if source_stream_id:
                self.release(source_stream_id)

    def release_all(self) -> None:
        """Stop and remove all managed live streams. Safety net for shutdown."""
        stream_ids = list(self._streams.keys())
        for stream_id in stream_ids:
            entry = self._streams.get(stream_id)
            if entry:
                try:
                    entry.live_stream.stop()
                except Exception as e:
                    logger.error(f"Error stopping live stream {stream_id}: {e}")
        self._streams.clear()
        logger.info("Released all managed live streams")

    def get_active_stream_ids(self) -> list:
        """Get diagnostic info about currently active streams.

        Returns:
            List of dicts, each with "id" (stream ID) and "ref_count" keys.
        """
        return [
            {"id": sid, "ref_count": entry.ref_count}
            for sid, entry in self._streams.items()
        ]

    def _create_live_stream(self, picture_source_id: str) -> tuple:
        """Create a LiveStream from a PictureSource config.

        Returns:
            Tuple of (LiveStream, source_stream_id or None)
        """
        from wled_controller.storage.picture_source import (
            ProcessedPictureSource,
            ScreenCapturePictureSource,
            StaticImagePictureSource,
        )

        stream_config = self._picture_source_store.get_stream(picture_source_id)

        if isinstance(stream_config, ScreenCapturePictureSource):
            return self._create_screen_capture_live_stream(stream_config), None
        elif isinstance(stream_config, ProcessedPictureSource):
            return self._create_processed_live_stream(stream_config)
        elif isinstance(stream_config, StaticImagePictureSource):
            return self._create_static_image_live_stream(stream_config), None
        else:
            raise ValueError(f"Unknown picture source type: {type(stream_config)}")

    def _create_screen_capture_live_stream(self, config) -> ScreenCaptureLiveStream:
        """Create a ScreenCaptureLiveStream from a ScreenCapturePictureSource config."""
        # Resolve capture engine from template; fall back to MSS when unresolvable.
        engine_type = "mss"
        engine_config = {}
        if config.capture_template_id and self._capture_template_store:
            try:
                tpl = self._capture_template_store.get_template(config.capture_template_id)
                engine_type = tpl.engine_type
                engine_config = tpl.engine_config
            except ValueError:
                logger.warning(
                    f"Capture template {config.capture_template_id} not found, using MSS fallback"
                )

        capture_stream = EngineRegistry.create_stream(
            engine_type, config.display_index, engine_config
        )
        return ScreenCaptureLiveStream(capture_stream, config.target_fps)

    def _create_processed_live_stream(self, config) -> tuple:
        """Create a ProcessedLiveStream from a ProcessedPictureSource config.

        Returns:
            Tuple of (ProcessedLiveStream, source_stream_id)
        """
        # Recursively acquire source stream (with ref counting)
        source_stream_id = config.source_stream_id
        source_live = self.acquire(source_stream_id)

        # Resolve postprocessing filters; unknown filters are skipped, not fatal.
        filters = []
        if config.postprocessing_template_id and self._pp_template_store:
            try:
                pp = self._pp_template_store.get_template(config.postprocessing_template_id)
                for fi in pp.filters:
                    try:
                        filters.append(
                            FilterRegistry.create_instance(fi.filter_id, fi.options)
                        )
                    except ValueError as e:
                        logger.warning(f"Skipping unknown filter '{fi.filter_id}': {e}")
            except ValueError:
                logger.warning(
                    f"PP template {config.postprocessing_template_id} not found, no filters applied"
                )

        return ProcessedLiveStream(source_live, filters), source_stream_id

    def _create_static_image_live_stream(self, config) -> StaticImageLiveStream:
        """Create a StaticImageLiveStream from a StaticImagePictureSource config."""
        image = self._load_static_image(config.image_source)
        return StaticImageLiveStream(image)

    @staticmethod
    def _load_static_image(image_source: str) -> np.ndarray:
        """Load a static image from URL or file path, return as RGB numpy array."""
        from io import BytesIO
        from pathlib import Path

        from PIL import Image

        if image_source.startswith(("http://", "https://")):
            response = httpx.get(image_source, timeout=15.0, follow_redirects=True)
            response.raise_for_status()
            pil_image = Image.open(BytesIO(response.content))
        else:
            path = Path(image_source)
            if not path.exists():
                raise FileNotFoundError(f"Image file not found: {image_source}")
            pil_image = Image.open(path)

        pil_image = pil_image.convert("RGB")
        return np.array(pil_image)

View File

@@ -1,24 +1,22 @@
"""Processing manager for coordinating screen capture and WLED updates."""
import asyncio
import concurrent.futures
import time
from dataclasses import dataclass, field
from datetime import datetime
from typing import Dict, List, Optional, Tuple
import httpx
import numpy as np
from wled_controller.core.calibration import (
CalibrationConfig,
PixelMapper,
create_default_calibration,
)
import numpy as np
from wled_controller.core.capture_engines import CaptureEngine, EngineRegistry
from wled_controller.core.capture_engines.base import ScreenCapture
from wled_controller.core.filters import FilterInstance, FilterRegistry, ImagePool, PostprocessingFilter
from wled_controller.core.live_stream import LiveStream
from wled_controller.core.live_stream_manager import LiveStreamManager
from wled_controller.core.pixel_processor import smooth_colors
from wled_controller.core.screen_capture import extract_border_pixels
from wled_controller.core.wled_client import WLEDClient
@@ -93,11 +91,9 @@ class ProcessorState:
led_count: int
settings: ProcessingSettings
calibration: CalibrationConfig
capture_template_id: str = ""
picture_stream_id: str = ""
picture_source_id: str = ""
wled_client: Optional[WLEDClient] = None
pixel_mapper: Optional[PixelMapper] = None
capture_engine: Optional[CaptureEngine] = None
is_running: bool = False
task: Optional[asyncio.Task] = None
metrics: ProcessingMetrics = field(default_factory=ProcessingMetrics)
@@ -111,15 +107,8 @@ class ProcessorState:
resolved_target_fps: Optional[int] = None
resolved_engine_type: Optional[str] = None
resolved_engine_config: Optional[dict] = None
resolved_filters: Optional[List[FilterInstance]] = None
# Static image: cached frame for static_image streams (no engine needed)
static_image: Optional[np.ndarray] = None
image_pool: Optional[ImagePool] = None
filter_instances: Optional[List[PostprocessingFilter]] = None
# Dedicated single-thread executor for capture engine calls.
# Capture libraries (BetterCam, MSS, DXcam) use thread-local state,
# so all calls must run on the same thread.
capture_executor: Optional[concurrent.futures.ThreadPoolExecutor] = None
# LiveStream: runtime frame source (shared via LiveStreamManager)
live_stream: Optional[LiveStream] = None
# WLED state snapshot taken before streaming starts (to restore on stop)
wled_state_before: Optional[dict] = None
@@ -127,20 +116,23 @@ class ProcessorState:
class ProcessorManager:
"""Manages screen processing for multiple WLED devices."""
def __init__(self, picture_stream_store=None, capture_template_store=None, pp_template_store=None):
def __init__(self, picture_source_store=None, capture_template_store=None, pp_template_store=None):
"""Initialize processor manager.
Args:
picture_stream_store: PictureStreamStore instance (for stream resolution)
picture_source_store: PictureSourceStore instance (for stream resolution)
capture_template_store: TemplateStore instance (for engine lookup)
pp_template_store: PostprocessingTemplateStore instance (for PP settings)
"""
self._processors: Dict[str, ProcessorState] = {}
self._health_monitoring_active = False
self._http_client: Optional[httpx.AsyncClient] = None
self._picture_stream_store = picture_stream_store
self._picture_source_store = picture_source_store
self._capture_template_store = capture_template_store
self._pp_template_store = pp_template_store
self._live_stream_manager = LiveStreamManager(
picture_source_store, capture_template_store, pp_template_store
)
logger.info("Processor manager initialized")
async def _get_http_client(self) -> httpx.AsyncClient:
@@ -156,8 +148,7 @@ class ProcessorManager:
led_count: int,
settings: Optional[ProcessingSettings] = None,
calibration: Optional[CalibrationConfig] = None,
capture_template_id: str = "",
picture_stream_id: str = "",
picture_source_id: str = "",
):
"""Add a device for processing.
@@ -167,8 +158,7 @@ class ProcessorManager:
led_count: Number of LEDs
settings: Processing settings (uses defaults if None)
calibration: Calibration config (creates default if None)
capture_template_id: Legacy template ID for screen capture engine
picture_stream_id: Picture stream ID (preferred over capture_template_id)
picture_source_id: Picture source ID
"""
if device_id in self._processors:
raise ValueError(f"Device {device_id} already exists")
@@ -185,8 +175,7 @@ class ProcessorManager:
led_count=led_count,
settings=settings,
calibration=calibration,
capture_template_id=capture_template_id,
picture_stream_id=picture_stream_id,
picture_source_id=picture_source_id,
)
self._processors[device_id] = state
@@ -280,116 +269,56 @@ class ProcessorManager:
logger.info(f"Updated calibration for device {device_id}")
def _resolve_stream_settings(self, state: ProcessorState):
"""Resolve picture stream chain to populate resolved_* fields on state.
"""Resolve picture source chain to populate resolved_* metadata fields.
If device has a picture_stream_id and stores are available, resolves the
stream chain to get display_index, fps, engine type/config, and PP settings.
Otherwise falls back to legacy device settings.
Resolves metadata (display_index, fps, engine info) for status reporting.
Actual stream creation is handled by LiveStreamManager.
"""
if state.picture_stream_id and self._picture_stream_store:
try:
chain = self._picture_stream_store.resolve_stream_chain(state.picture_stream_id)
raw_stream = chain["raw_stream"]
pp_template_ids = chain["postprocessing_template_ids"]
if not state.picture_source_id or not self._picture_source_store:
raise ValueError(f"Device {state.device_id} has no picture source assigned")
if raw_stream.stream_type == "static_image":
# Static image stream: load image once, no engine needed
state.resolved_display_index = -1
state.resolved_target_fps = 1
state.resolved_engine_type = None
state.resolved_engine_config = None
state.static_image = self._load_static_image(raw_stream.image_source)
else:
# Raw capture stream
state.resolved_display_index = raw_stream.display_index
state.resolved_target_fps = raw_stream.target_fps
from wled_controller.storage.picture_source import ScreenCapturePictureSource, StaticImagePictureSource
# Resolve capture engine from raw stream's capture template
if raw_stream.capture_template_id and self._capture_template_store:
try:
tpl = self._capture_template_store.get_template(raw_stream.capture_template_id)
state.resolved_engine_type = tpl.engine_type
state.resolved_engine_config = tpl.engine_config
except ValueError:
logger.warning(f"Capture template {raw_stream.capture_template_id} not found, using MSS fallback")
state.resolved_engine_type = "mss"
state.resolved_engine_config = {}
chain = self._picture_source_store.resolve_stream_chain(state.picture_source_id)
raw_stream = chain["raw_stream"]
pp_template_ids = chain["postprocessing_template_ids"]
# Resolve postprocessing: use first PP template in chain
if pp_template_ids and self._pp_template_store:
try:
pp = self._pp_template_store.get_template(pp_template_ids[0])
state.resolved_filters = pp.filters
except ValueError:
logger.warning(f"PP template {pp_template_ids[0]} not found, using defaults")
if isinstance(raw_stream, StaticImagePictureSource):
state.resolved_display_index = -1
state.resolved_target_fps = 1
state.resolved_engine_type = None
state.resolved_engine_config = None
elif isinstance(raw_stream, ScreenCapturePictureSource):
state.resolved_display_index = raw_stream.display_index
state.resolved_target_fps = raw_stream.target_fps
logger.info(
f"Resolved stream chain for {state.device_id}: "
f"display={state.resolved_display_index}, fps={state.resolved_target_fps}, "
f"engine={state.resolved_engine_type}, pp_templates={len(pp_template_ids)}"
)
return
except ValueError as e:
logger.warning(f"Failed to resolve stream {state.picture_stream_id}: {e}, falling back to legacy settings")
if raw_stream.capture_template_id and self._capture_template_store:
try:
tpl = self._capture_template_store.get_template(raw_stream.capture_template_id)
state.resolved_engine_type = tpl.engine_type
state.resolved_engine_config = tpl.engine_config
except ValueError:
logger.warning(f"Capture template {raw_stream.capture_template_id} not found, using MSS fallback")
state.resolved_engine_type = "mss"
state.resolved_engine_config = {}
# Fallback: use legacy device settings (construct filters from flat fields)
state.resolved_display_index = state.settings.display_index
state.resolved_target_fps = state.settings.fps
legacy_filters = []
if state.settings.brightness != 1.0:
legacy_filters.append(FilterInstance("brightness", {"value": state.settings.brightness}))
if state.settings.saturation != 1.0:
legacy_filters.append(FilterInstance("saturation", {"value": state.settings.saturation}))
if state.settings.gamma != 1.0:
legacy_filters.append(FilterInstance("gamma", {"value": state.settings.gamma}))
state.resolved_filters = legacy_filters
# Resolve engine from legacy capture_template_id
if state.capture_template_id and self._capture_template_store:
try:
tpl = self._capture_template_store.get_template(state.capture_template_id)
state.resolved_engine_type = tpl.engine_type
state.resolved_engine_config = tpl.engine_config
except ValueError:
logger.warning(f"Capture template {state.capture_template_id} not found, using MSS fallback")
state.resolved_engine_type = "mss"
state.resolved_engine_config = {}
else:
state.resolved_engine_type = "mss"
state.resolved_engine_config = {}
@staticmethod
def _load_static_image(image_source: str) -> np.ndarray:
"""Load a static image from URL or file path, return as RGB numpy array."""
from io import BytesIO
from pathlib import Path
from PIL import Image
if image_source.startswith(("http://", "https://")):
response = httpx.get(image_source, timeout=15.0, follow_redirects=True)
response.raise_for_status()
pil_image = Image.open(BytesIO(response.content))
else:
path = Path(image_source)
if not path.exists():
raise FileNotFoundError(f"Image file not found: {image_source}")
pil_image = Image.open(path)
pil_image = pil_image.convert("RGB")
return np.array(pil_image)
logger.info(
f"Resolved stream metadata for {state.device_id}: "
f"display={state.resolved_display_index}, fps={state.resolved_target_fps}, "
f"engine={state.resolved_engine_type}, pp_templates={len(pp_template_ids)}"
)
async def start_processing(self, device_id: str):
"""Start screen processing for a device.
Resolves the picture stream chain (if assigned) to determine capture engine,
display, FPS, and postprocessing settings. Falls back to legacy device settings.
Resolves the picture source chain to determine capture engine,
display, FPS, and postprocessing settings.
Args:
device_id: Device identifier
Raises:
ValueError: If device not found
ValueError: If device not found or no picture source assigned
RuntimeError: If processing already running
"""
if device_id not in self._processors:
@@ -433,34 +362,25 @@ class ProcessorManager:
logger.error(f"Failed to connect to WLED device {device_id}: {e}")
raise RuntimeError(f"Failed to connect to WLED device: {e}")
# Initialize capture engine from resolved settings (skip for static_image)
if state.static_image is not None:
logger.info(f"Using static image for device {device_id} ({state.static_image.shape[1]}x{state.static_image.shape[0]})")
else:
try:
engine_type = state.resolved_engine_type or "mss"
engine_config = state.resolved_engine_config or {}
engine = EngineRegistry.create_engine(engine_type, engine_config)
# Create a dedicated single-thread executor for capture calls.
# Capture libraries use thread-local state (DXGI contexts, GDI DCs)
# so initialize + capture + cleanup must all run on the same thread.
state.capture_executor = concurrent.futures.ThreadPoolExecutor(
max_workers=1, thread_name_prefix=f"capture-{device_id}"
)
loop = asyncio.get_event_loop()
await loop.run_in_executor(state.capture_executor, engine.initialize)
state.capture_engine = engine
logger.info(f"Initialized capture engine for device {device_id}: {engine_type}")
except Exception as e:
logger.error(f"Failed to initialize capture engine for device {device_id}: {e}")
if state.capture_executor:
state.capture_executor.shutdown(wait=False)
state.capture_executor = None
if state.wled_client:
await state.wled_client.disconnect()
raise RuntimeError(f"Failed to initialize capture engine: {e}")
# Acquire live stream via LiveStreamManager (shared across devices)
try:
live_stream = await asyncio.to_thread(
self._live_stream_manager.acquire, state.picture_source_id
)
state.live_stream = live_stream
# Update resolved metadata from actual live stream
if live_stream.display_index is not None:
state.resolved_display_index = live_stream.display_index
state.resolved_target_fps = live_stream.target_fps
logger.info(
f"Acquired live stream for device {device_id} "
f"(picture_source={state.picture_source_id})"
)
except Exception as e:
logger.error(f"Failed to initialize live stream for device {device_id}: {e}")
if state.wled_client:
await state.wled_client.disconnect()
raise RuntimeError(f"Failed to initialize live stream: {e}")
# Initialize pixel mapper
state.pixel_mapper = PixelMapper(
@@ -526,39 +446,26 @@ class ProcessorManager:
await state.wled_client.close()
state.wled_client = None
# Cleanup capture engine on the same dedicated thread it was created on
if state.capture_engine:
if state.capture_executor:
loop = asyncio.get_event_loop()
try:
await loop.run_in_executor(
state.capture_executor, state.capture_engine.cleanup
)
except Exception as e:
logger.warning(f"Error cleaning up capture engine: {e}")
state.capture_executor.shutdown(wait=False)
state.capture_executor = None
else:
state.capture_engine.cleanup()
state.capture_engine = None
# Release cached static image
state.static_image = None
# Release live stream
if state.live_stream:
try:
self._live_stream_manager.release(state.picture_source_id)
except Exception as e:
logger.warning(f"Error releasing live stream: {e}")
state.live_stream = None
logger.info(f"Stopped processing for device {device_id}")
async def _processing_loop(self, device_id: str):
"""Main processing loop for a device.
Uses resolved_* fields from stream resolution for display, FPS,
and postprocessing. Falls back to device settings for LED projection
parameters (border_width, interpolation_mode) and WLED brightness.
Reads frames from the LiveStream (which handles capture and optional
PP filters via the picture source chain).
"""
state = self._processors[device_id]
settings = state.settings
# Use resolved values (populated by _resolve_stream_settings)
display_index = state.resolved_display_index or settings.display_index
target_fps = state.resolved_target_fps or settings.fps
smoothing = settings.smoothing
@@ -566,35 +473,13 @@ class ProcessorManager:
border_width = settings.border_width
wled_brightness = settings.brightness # WLED hardware brightness
# Instantiate filter objects once (not per-frame)
resolved_filters = state.resolved_filters or []
image_pool = ImagePool()
state.image_pool = image_pool
filter_objects = []
for fi in resolved_filters:
try:
filter_objects.append(FilterRegistry.create_instance(fi.filter_id, fi.options))
except ValueError as e:
logger.warning(f"Skipping unknown filter '{fi.filter_id}': {e}")
state.filter_instances = filter_objects
logger.info(
f"Processing loop started for {device_id} "
f"(display={display_index}, fps={target_fps}, filters={len(filter_objects)})"
f"(display={state.resolved_display_index}, fps={target_fps})"
)
frame_time = 1.0 / target_fps
fps_samples = []
loop = asyncio.get_event_loop()
capture_executor = state.capture_executor # dedicated single-thread executor
def _apply_filters(image):
"""Apply all postprocessing filters to the captured image."""
for f in filter_objects:
result = f.process_image(image, image_pool)
if result is not None:
image = result
return image
try:
while state.is_running:
@@ -606,18 +491,8 @@ class ProcessorManager:
continue
try:
# Get frame: static image or live capture
if state.static_image is not None:
h, w = state.static_image.shape[:2]
capture = ScreenCapture(
image=state.static_image.copy(), width=w, height=h, display_index=-1
)
else:
capture = await loop.run_in_executor(
capture_executor,
state.capture_engine.capture_display,
display_index
)
# Get frame from live stream (handles capture + PP filters)
capture = await asyncio.to_thread(state.live_stream.get_latest_frame)
# Skip processing if no new frame (screen unchanged)
if capture is None:
@@ -626,10 +501,6 @@ class ProcessorManager:
await asyncio.sleep(frame_time)
continue
# Apply postprocessing filters to the full captured image
if filter_objects:
capture.image = await asyncio.to_thread(_apply_filters, capture.image)
# Extract border pixels
border_pixels = await asyncio.to_thread(extract_border_pixels, capture, border_width)
@@ -861,6 +732,9 @@ class ProcessorManager:
except Exception as e:
logger.error(f"Error stopping device {device_id}: {e}")
# Safety net: release any remaining managed live streams
self._live_stream_manager.release_all()
# Close shared HTTP client
if self._http_client and not self._http_client.is_closed:
await self._http_client.aclose()