diff --git a/server/src/wled_controller/core/capture_engines/wgc_engine.py b/server/src/wled_controller/core/capture_engines/wgc_engine.py
index f4045f8..cdc0b87 100644
--- a/server/src/wled_controller/core/capture_engines/wgc_engine.py
+++ b/server/src/wled_controller/core/capture_engines/wgc_engine.py
@@ -59,11 +59,12 @@ class WGCCaptureStream(CaptureStream):
             height = frame.height
 
             # WGC provides BGRA format, convert to RGB
+            # Fancy indexing creates a new contiguous array — no .copy() needed
             frame_array = frame_buffer.reshape((height, width, 4))
             frame_rgb = frame_array[:, :, [2, 1, 0]]
 
             with self._frame_lock:
-                self._latest_frame = frame_rgb.copy()
+                self._latest_frame = frame_rgb
                 self._frame_event.set()
         except Exception as e:
             logger.error(f"Error processing WGC frame: {e}", exc_info=True)
@@ -141,7 +142,7 @@ class WGCCaptureStream(CaptureStream):
                 raise RuntimeError(
                     f"No frame available yet for display {self.display_index}."
                 )
-            frame = self._latest_frame.copy()
+            frame = self._latest_frame
 
         logger.debug(
             f"WGC captured display {self.display_index}: "
diff --git a/server/src/wled_controller/core/filters/builtin.py b/server/src/wled_controller/core/filters/builtin.py
index d0c2cd1..cbec04e 100644
--- a/server/src/wled_controller/core/filters/builtin.py
+++ b/server/src/wled_controller/core/filters/builtin.py
@@ -16,6 +16,12 @@ class BrightnessFilter(PostprocessingFilter):
     filter_id = "brightness"
     filter_name = "Brightness"
 
+    def __init__(self, options: Dict[str, Any]):
+        super().__init__(options)
+        value = self.options["value"]
+        lut = np.clip(np.arange(256, dtype=np.float32) * value, 0, 255)
+        self._lut = lut.astype(np.uint8)
+
     @classmethod
     def get_options_schema(cls) -> List[FilterOptionDef]:
         return [
@@ -31,14 +37,9 @@ class BrightnessFilter(PostprocessingFilter):
         ]
 
     def process_image(self, image: np.ndarray, image_pool: ImagePool) -> Optional[np.ndarray]:
-        value = self.options["value"]
-        if value == 1.0:
+        if self.options["value"] == 1.0:
             return None
-        # In-place float operation
-        arr = image.astype(np.float32)
-        arr *= value
-        np.clip(arr, 0, 255, out=arr)
-        np.copyto(image, arr.astype(np.uint8))
+        image[:] = self._lut[image]
         return None
 
 
@@ -49,6 +50,10 @@ class SaturationFilter(PostprocessingFilter):
     filter_id = "saturation"
     filter_name = "Saturation"
 
+    def __init__(self, options: Dict[str, Any]):
+        super().__init__(options)
+        self._float_buf: Optional[np.ndarray] = None
+
     @classmethod
     def get_options_schema(cls) -> List[FilterOptionDef]:
         return [
@@ -67,11 +72,17 @@ class SaturationFilter(PostprocessingFilter):
         value = self.options["value"]
         if value == 1.0:
             return None
-        arr = image.astype(np.float32) / 255.0
+        h, w, c = image.shape
+        if self._float_buf is None or self._float_buf.shape != (h, w, c):
+            self._float_buf = np.empty((h, w, c), dtype=np.float32)
+        arr = self._float_buf
+        np.copyto(arr, image)
+        arr *= (1.0 / 255.0)
         lum = np.dot(arr[..., :3], [0.299, 0.587, 0.114])[..., np.newaxis]
         arr[..., :3] = lum + (arr[..., :3] - lum) * value
-        np.clip(arr * 255.0, 0, 255, out=arr)
-        np.copyto(image, arr.astype(np.uint8))
+        np.clip(arr, 0, 1.0, out=arr)
+        arr *= 255.0
+        np.copyto(image, arr, casting='unsafe')
         return None
 
 
@@ -82,6 +93,13 @@ class GammaFilter(PostprocessingFilter):
     filter_id = "gamma"
     filter_name = "Gamma"
 
+    def __init__(self, options: Dict[str, Any]):
+        super().__init__(options)
+        value = self.options["value"]
+        lut = np.arange(256, dtype=np.float32) / 255.0
+        np.power(lut, 1.0 / value, out=lut)
+        self._lut = np.clip(lut * 255.0, 0, 255).astype(np.uint8)
+
     @classmethod
     def get_options_schema(cls) -> List[FilterOptionDef]:
         return [
@@ -97,13 +115,9 @@ class GammaFilter(PostprocessingFilter):
         ]
 
     def process_image(self, image: np.ndarray, image_pool: ImagePool) -> Optional[np.ndarray]:
-        value = self.options["value"]
-        if value == 1.0:
+        if self.options["value"] == 1.0:
             return None
-        arr = image.astype(np.float32) / 255.0
-        np.power(arr, 1.0 / value, out=arr)
-        np.clip(arr * 255.0, 0, 255, out=arr)
-        np.copyto(image, arr.astype(np.uint8))
+        image[:] = self._lut[image]
         return None
 
 
@@ -315,8 +329,14 @@ class FlipFilter(PostprocessingFilter):
     def process_image(self, image: np.ndarray, image_pool: ImagePool) -> Optional[np.ndarray]:
         h = self.options.get("horizontal", False)
         v = self.options.get("vertical", False)
-        if h:
-            image[:] = np.fliplr(image)
-        if v:
-            image[:] = np.flipud(image)
-        return None
+        if not h and not v:
+            return None
+        height, width, c = image.shape
+        result = image_pool.acquire(height, width, c)
+        if h and v:
+            np.copyto(result, image[::-1, ::-1])
+        elif h:
+            np.copyto(result, image[:, ::-1])
+        else:
+            np.copyto(result, image[::-1])
+        return result
diff --git a/server/src/wled_controller/core/live_stream.py b/server/src/wled_controller/core/live_stream.py
index 0eb11f3..e78f2eb 100644
--- a/server/src/wled_controller/core/live_stream.py
+++ b/server/src/wled_controller/core/live_stream.py
@@ -203,6 +203,12 @@ class ProcessedLiveStream(LiveStream):
     def _process_loop(self) -> None:
         """Background thread: poll source, apply filters, cache result."""
         cached_source_frame: Optional[ScreenCapture] = None
+        # Ring buffer: 3 slots guarantees consumer finished with oldest buffer.
+        # At most 2 frames are in flight (one in _latest_frame, one being
+        # processed by a consumer), so the 3rd slot is always safe to reuse.
+        _ring: List[Optional[np.ndarray]] = [None, None, None]
+        _ring_idx = 0
+
         while self._running:
             source_frame = self._source.get_latest_frame()
             if source_frame is None:
@@ -216,11 +222,25 @@ class ProcessedLiveStream(LiveStream):
                 cached_source_frame = source_frame
 
-            # Apply filters to a copy of the source image
-            image = source_frame.image.copy()
+            # Reuse ring buffer slot instead of allocating a new copy each frame
+            src = source_frame.image
+            h, w, c = src.shape
+            buf = _ring[_ring_idx]
+            if buf is None or buf.shape != (h, w, c):
+                buf = np.empty((h, w, c), dtype=np.uint8)
+                _ring[_ring_idx] = buf
+            _ring_idx = (_ring_idx + 1) % 3
+
+            np.copyto(buf, src)
+            image = buf
+
             for f in self._filters:
                 result = f.process_image(image, self._image_pool)
                 if result is not None:
+                    # Release intermediate filter output back to pool
+                    # (don't release the ring buffer itself)
+                    if image is not buf:
+                        self._image_pool.release(image)
                     image = result
 
             processed = ScreenCapture(