Optimize processing pipeline and fix multi-target crash

Performance optimizations across 5 phases:
- Saturation filter: float32 → int32 integer math (~2-3x faster)
- Frame interpolation: pre-allocated uint16 scratch buffers
- Color correction: single-pass cv2.LUT instead of 3 channel lookups
- DDP: numpy vectorized color reorder + pre-allocated RGBW buffer
- Calibration boundaries: vectorized with np.arange + np.maximum
- wled_client: vectorized pixel validation and HTTP pixel list
- _fit_to_device: cached linspace arrays (now per-instance)
- Diagnostic lists: bounded deque(maxlen=...) instead of unbounded list
- Health checks: adaptive intervals (10s streaming, 60s idle)
- Profile engine: poll interval 3s → 1s

Bug fixes:
- Fix deque slicing crash killing targets when multiple run in parallel
  (unlike list, deque does not support slice syntax such as [-1:] or [:5])
- Fix ambiguous-truth-value error ("truth value of an array is ambiguous")
  when a numpy array is passed to send_pixels() validation
- Persist fatal processing loop errors to metrics for API visibility
- Move _fit_to_device cache from class-level to instance-level to
  prevent cross-target cache thrashing

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-26 21:28:17 +03:00
parent fda040ae18
commit 6f5bda6d8f
9 changed files with 135 additions and 92 deletions

View File

@@ -264,15 +264,11 @@ class PixelMapper:
# Compute segment boundaries (matching get_edge_segments float stepping) # Compute segment boundaries (matching get_edge_segments float stepping)
step = edge_len / led_count step = edge_len / led_count
boundaries = np.empty(led_count + 1, dtype=np.int64) boundaries = (np.arange(led_count + 1, dtype=np.float64) * step).astype(np.int64)
for i in range(led_count + 1):
boundaries[i] = int(i * step)
# Ensure each segment has at least 1 pixel # Ensure each segment has at least 1 pixel
for i in range(led_count): boundaries[1:] = np.maximum(boundaries[1:], boundaries[:-1] + 1)
if boundaries[i + 1] <= boundaries[i]:
boundaries[i + 1] = boundaries[i] + 1
# Clamp all boundaries to edge_len (not just the last one) # Clamp all boundaries to edge_len (not just the last one)
boundaries = np.minimum(boundaries, edge_len) np.minimum(boundaries, edge_len, out=boundaries)
# Cumulative sum for O(1) range means — no per-LED Python numpy calls # Cumulative sum for O(1) range means — no per-LED Python numpy calls
cumsum = np.zeros((edge_len + 1, 3), dtype=np.float64) cumsum = np.zeros((edge_len + 1, 3), dtype=np.float64)

View File

@@ -52,6 +52,9 @@ class DDPClient:
self._protocol = None self._protocol = None
self._sequence = 0 self._sequence = 0
self._buses: List[BusConfig] = [] self._buses: List[BusConfig] = []
# Pre-allocated RGBW buffer (resized on demand)
self._rgbw_buf: Optional[np.ndarray] = None
self._rgbw_buf_n: int = 0
async def connect(self): async def connect(self):
"""Establish UDP connection.""" """Establish UDP connection."""
@@ -136,26 +139,23 @@ class DDPClient:
return header + rgb_data return header + rgb_data
def _reorder_pixels( def _reorder_pixels_numpy(self, pixel_array: np.ndarray) -> np.ndarray:
self, """Apply per-bus color order reordering using numpy fancy indexing.
pixels: List[Tuple[int, int, int]],
) -> List[Tuple[int, int, int]]:
"""Apply per-bus color order reordering.
WLED may not apply per-bus color order conversion for DDP data on WLED may not apply per-bus color order conversion for DDP data on
all buses (observed in multi-bus setups). We reorder pixel channels all buses (observed in multi-bus setups). We reorder pixel channels
here so the hardware receives the correct byte order directly. here so the hardware receives the correct byte order directly.
Args: Args:
pixels: List of (R, G, B) tuples in standard RGB order pixel_array: (N, 3) uint8 numpy array in RGB order
Returns: Returns:
List of reordered tuples matching each bus's hardware color order Reordered array (may be a view or copy depending on buses)
""" """
if not self._buses: if not self._buses:
return pixels return pixel_array
result = list(pixels) result = pixel_array.copy()
for bus in self._buses: for bus in self._buses:
order_map = COLOR_ORDER_MAP.get(bus.color_order) order_map = COLOR_ORDER_MAP.get(bus.color_order)
if not order_map or order_map == (0, 1, 2): if not order_map or order_map == (0, 1, 2):
@@ -163,10 +163,7 @@ class DDPClient:
start = bus.start start = bus.start
end = min(bus.start + bus.length, len(result)) end = min(bus.start + bus.length, len(result))
for i in range(start, end): result[start:end] = result[start:end][:, order_map]
r, g, b = result[i]
rgb = (r, g, b)
result[i] = (rgb[order_map[0]], rgb[order_map[1]], rgb[order_map[2]])
return result return result
@@ -197,8 +194,12 @@ class DDPClient:
bpp = 4 if self.rgbw else 3 # bytes per pixel bpp = 4 if self.rgbw else 3 # bytes per pixel
pixel_array = np.array(pixels, dtype=np.uint8) pixel_array = np.array(pixels, dtype=np.uint8)
if self.rgbw: if self.rgbw:
white = np.zeros((pixel_array.shape[0], 1), dtype=np.uint8) n = pixel_array.shape[0]
pixel_array = np.hstack((pixel_array, white)) if n != self._rgbw_buf_n:
self._rgbw_buf = np.zeros((n, 4), dtype=np.uint8)
self._rgbw_buf_n = n
self._rgbw_buf[:, :3] = pixel_array
pixel_array = self._rgbw_buf
pixel_bytes = pixel_array.tobytes() pixel_bytes = pixel_array.tobytes()
total_bytes = len(pixel_bytes) total_bytes = len(pixel_bytes)
@@ -256,10 +257,14 @@ class DDPClient:
if not self._transport: if not self._transport:
raise RuntimeError("DDP client not connected") raise RuntimeError("DDP client not connected")
# Handle RGBW: insert zero white channel column # Handle RGBW: copy RGB into pre-allocated (N, 4) buffer
if self.rgbw: if self.rgbw:
white = np.zeros((pixel_array.shape[0], 1), dtype=np.uint8) n = pixel_array.shape[0]
pixel_array = np.hstack((pixel_array, white)) if n != self._rgbw_buf_n:
self._rgbw_buf = np.zeros((n, 4), dtype=np.uint8)
self._rgbw_buf_n = n
self._rgbw_buf[:, :3] = pixel_array
pixel_array = self._rgbw_buf
pixel_bytes = pixel_array.tobytes() pixel_bytes = pixel_array.tobytes()

View File

@@ -333,18 +333,25 @@ class WLEDClient(LEDClient):
RuntimeError: If request fails RuntimeError: If request fails
""" """
# Validate inputs # Validate inputs
if not pixels: if isinstance(pixels, np.ndarray):
raise ValueError("Pixels list cannot be empty") if pixels.size == 0:
raise ValueError("Pixels array cannot be empty")
pixel_arr = pixels
else:
if not pixels:
raise ValueError("Pixels list cannot be empty")
pixel_arr = np.array(pixels, dtype=np.int16)
if not 0 <= brightness <= 255: if not 0 <= brightness <= 255:
raise ValueError(f"Brightness must be 0-255, got {brightness}") raise ValueError(f"Brightness must be 0-255, got {brightness}")
# Validate pixel values # Validate pixel values using vectorized bounds check
validated_pixels = [] if pixel_arr.dtype != np.uint8:
for i, (r, g, b) in enumerate(pixels): if np.any((pixel_arr < 0) | (pixel_arr > 255)):
if not (0 <= r <= 255 and 0 <= g <= 255 and 0 <= b <= 255): bad_mask = np.any((pixel_arr < 0) | (pixel_arr > 255), axis=1)
raise ValueError(f"Invalid RGB values at index {i}: ({r}, {g}, {b})") idx = int(np.argmax(bad_mask))
validated_pixels.append((int(r), int(g), int(b))) raise ValueError(f"Invalid RGB values at index {idx}: {tuple(pixel_arr[idx])}")
validated_pixels = pixel_arr.astype(np.uint8) if pixel_arr.dtype != np.uint8 else pixel_arr
# Use DDP protocol if enabled # Use DDP protocol if enabled
if self.use_ddp and self._ddp_client: if self.use_ddp and self._ddp_client:
@@ -354,33 +361,24 @@ class WLEDClient(LEDClient):
async def _send_pixels_ddp( async def _send_pixels_ddp(
self, self,
pixels: List[Tuple[int, int, int]], pixels: np.ndarray,
brightness: int = 255, brightness: int = 255,
) -> bool: ) -> bool:
"""Send pixels via DDP protocol. """Send pixels via DDP protocol.
Args: Args:
pixels: List of (R, G, B) tuples pixels: (N, 3) uint8 numpy array of RGB values
brightness: Global brightness (0-255) brightness: Global brightness (0-255)
Returns: Returns:
True if successful True if successful
""" """
try: try:
# Apply brightness to pixels
if brightness < 255: if brightness < 255:
brightness_factor = brightness / 255.0 pixels = (pixels.astype(np.uint16) * brightness >> 8).astype(np.uint8)
pixels = [
(
int(r * brightness_factor),
int(g * brightness_factor),
int(b * brightness_factor)
)
for r, g, b in pixels
]
logger.debug(f"Sending {len(pixels)} LEDs via DDP") logger.debug(f"Sending {len(pixels)} LEDs via DDP")
await self._ddp_client.send_pixels(pixels) self._ddp_client.send_pixels_numpy(pixels)
logger.debug(f"Successfully sent pixel colors via DDP") logger.debug(f"Successfully sent pixel colors via DDP")
return True return True
@@ -390,14 +388,14 @@ class WLEDClient(LEDClient):
async def _send_pixels_http( async def _send_pixels_http(
self, self,
pixels: List[Tuple[int, int, int]], pixels: np.ndarray,
brightness: int = 255, brightness: int = 255,
segment_id: int = 0, segment_id: int = 0,
) -> bool: ) -> bool:
"""Send pixels via HTTP JSON API. """Send pixels via HTTP JSON API.
Args: Args:
pixels: List of (R, G, B) tuples pixels: (N, 3) uint8 numpy array of RGB values
brightness: Global brightness (0-255) brightness: Global brightness (0-255)
segment_id: Segment ID to update segment_id: Segment ID to update
@@ -406,9 +404,8 @@ class WLEDClient(LEDClient):
""" """
try: try:
# Build indexed pixel array: [led_index, r, g, b, ...] # Build indexed pixel array: [led_index, r, g, b, ...]
indexed_pixels = [] indices = np.arange(len(pixels), dtype=np.int32).reshape(-1, 1)
for i, (r, g, b) in enumerate(pixels): indexed_pixels = np.hstack([indices, pixels.astype(np.int32)]).ravel().tolist()
indexed_pixels.extend([i, int(r), int(g), int(b)])
# Build WLED JSON state # Build WLED JSON state
payload = { payload = {

View File

@@ -3,6 +3,7 @@
import math import math
from typing import Any, Dict, List, Optional from typing import Any, Dict, List, Optional
import cv2
import numpy as np import numpy as np
from wled_controller.core.filters.base import FilterOptionDef, PostprocessingFilter from wled_controller.core.filters.base import FilterOptionDef, PostprocessingFilter
@@ -68,11 +69,12 @@ class ColorCorrectionFilter(PostprocessingFilter):
g_mult = (tg / _REF_G) * gg g_mult = (tg / _REF_G) * gg
b_mult = (tb / _REF_B) * bg b_mult = (tb / _REF_B) * bg
# Build per-channel LUTs # Build merged (256, 1, 3) LUT for single-pass cv2.LUT
src = np.arange(256, dtype=np.float32) src = np.arange(256, dtype=np.float32)
self._lut_r = np.clip(src * r_mult, 0, 255).astype(np.uint8) lut_r = np.clip(src * r_mult, 0, 255).astype(np.uint8)
self._lut_g = np.clip(src * g_mult, 0, 255).astype(np.uint8) lut_g = np.clip(src * g_mult, 0, 255).astype(np.uint8)
self._lut_b = np.clip(src * b_mult, 0, 255).astype(np.uint8) lut_b = np.clip(src * b_mult, 0, 255).astype(np.uint8)
self._lut = np.stack([lut_r, lut_g, lut_b], axis=-1).reshape(256, 1, 3)
self._is_neutral = (temp == 6500 and rg == 1.0 and gg == 1.0 and bg == 1.0) self._is_neutral = (temp == 6500 and rg == 1.0 and gg == 1.0 and bg == 1.0)
@@ -120,7 +122,5 @@ class ColorCorrectionFilter(PostprocessingFilter):
def process_image(self, image: np.ndarray, image_pool: ImagePool) -> Optional[np.ndarray]: def process_image(self, image: np.ndarray, image_pool: ImagePool) -> Optional[np.ndarray]:
if self._is_neutral: if self._is_neutral:
return None return None
image[:, :, 0] = self._lut_r[image[:, :, 0]] cv2.LUT(image, self._lut, dst=image)
image[:, :, 1] = self._lut_g[image[:, :, 1]]
image[:, :, 2] = self._lut_b[image[:, :, 2]]
return None return None

View File

@@ -43,6 +43,10 @@ class FrameInterpolationFilter(PostprocessingFilter):
self._time_a: float = 0.0 self._time_a: float = 0.0
self._time_b: float = 0.0 self._time_b: float = 0.0
self._sig_b: Optional[bytes] = None # 64-byte signature of frame_b input self._sig_b: Optional[bytes] = None # 64-byte signature of frame_b input
# Pre-allocated uint16 scratch buffers for blending
self._u16_a: Optional[np.ndarray] = None
self._u16_b: Optional[np.ndarray] = None
self._blend_shape: Optional[tuple] = None
@classmethod @classmethod
def get_options_schema(cls) -> List[FilterOptionDef]: def get_options_schema(cls) -> List[FilterOptionDef]:
@@ -80,10 +84,20 @@ class FrameInterpolationFilter(PostprocessingFilter):
# Blend: output = (1 - alpha)*A + alpha*B (integer fast path) # Blend: output = (1 - alpha)*A + alpha*B (integer fast path)
alpha_i = int(alpha * 256) alpha_i = int(alpha * 256)
h, w, c = image.shape h, w, c = image.shape
shape = (h, w, c)
# Resize scratch buffers on shape change
if self._blend_shape != shape:
self._u16_a = np.empty(shape, dtype=np.uint16)
self._u16_b = np.empty(shape, dtype=np.uint16)
self._blend_shape = shape
out = image_pool.acquire(h, w, c) out = image_pool.acquire(h, w, c)
blended = ( np.copyto(self._u16_a, self._frame_a, casting='unsafe')
(256 - alpha_i) * self._frame_a.astype(np.uint16) np.copyto(self._u16_b, image, casting='unsafe')
+ alpha_i * image.astype(np.uint16) self._u16_a *= (256 - alpha_i)
) >> 8 self._u16_b *= alpha_i
np.copyto(out, blended, casting="unsafe") self._u16_a += self._u16_b
self._u16_a >>= 8
np.copyto(out, self._u16_a, casting='unsafe')
return out return out

View File

@@ -11,14 +11,15 @@ from wled_controller.core.filters.registry import FilterRegistry
@FilterRegistry.register @FilterRegistry.register
class SaturationFilter(PostprocessingFilter): class SaturationFilter(PostprocessingFilter):
"""Adjusts color saturation via luminance blending.""" """Adjusts color saturation via luminance blending (integer math)."""
filter_id = "saturation" filter_id = "saturation"
filter_name = "Saturation" filter_name = "Saturation"
def __init__(self, options: Dict[str, Any]): def __init__(self, options: Dict[str, Any]):
super().__init__(options) super().__init__(options)
self._float_buf: Optional[np.ndarray] = None self._i32_buf: Optional[np.ndarray] = None
self._i32_gray: Optional[np.ndarray] = None
@classmethod @classmethod
def get_options_schema(cls) -> List[FilterOptionDef]: def get_options_schema(cls) -> List[FilterOptionDef]:
@@ -39,14 +40,22 @@ class SaturationFilter(PostprocessingFilter):
if value == 1.0: if value == 1.0:
return None return None
h, w, c = image.shape h, w, c = image.shape
if self._float_buf is None or self._float_buf.shape != (h, w, c): shape3 = (h, w, c)
self._float_buf = np.empty((h, w, c), dtype=np.float32) shape1 = (h, w, 1)
arr = self._float_buf if self._i32_buf is None or self._i32_buf.shape != shape3:
np.copyto(arr, image) self._i32_buf = np.empty(shape3, dtype=np.int32)
arr *= (1.0 / 255.0) self._i32_gray = np.empty(shape1, dtype=np.int32)
lum = np.dot(arr[..., :3], [0.299, 0.587, 0.114])[..., np.newaxis] i32 = self._i32_buf
arr[..., :3] = lum + (arr[..., :3] - lum) * value gray = self._i32_gray
np.clip(arr, 0, 1.0, out=arr) sat_i = int(value * 256)
arr *= 255.0 # Rec.601 luminance: (R*299 + G*587 + B*114) / 1000
np.copyto(image, arr, casting='unsafe') np.copyto(i32, image, casting='unsafe')
gray[:, :, 0] = (i32[:, :, 0] * 299 + i32[:, :, 1] * 587 + i32[:, :, 2] * 114) // 1000
# Blend: out = ((256 - sat) * gray + sat * color) >> 8
i32 *= sat_i
gray *= (256 - sat_i)
i32 += gray
i32 >>= 8
np.clip(i32, 0, 255, out=i32)
np.copyto(image, i32, casting='unsafe')
return None return None

View File

@@ -828,18 +828,31 @@ class ProcessorManager:
for p in self._processors.values() for p in self._processors.values()
) )
def _is_device_streaming(self, device_id: str) -> bool:
"""Check if any running processor targets this device."""
for proc in self._processors.values():
if getattr(proc, 'device_id', None) == device_id and proc.is_running:
return True
return False
async def _health_check_loop(self, device_id: str): async def _health_check_loop(self, device_id: str):
"""Background loop that periodically checks a device.""" """Background loop that periodically checks a device.
Uses adaptive intervals: 10s for actively streaming devices,
60s for idle devices, to balance responsiveness with overhead.
"""
state = self._devices.get(device_id) state = self._devices.get(device_id)
if not state: if not state:
return return
check_interval = DEFAULT_STATE_CHECK_INTERVAL ACTIVE_INTERVAL = 10 # streaming devices — faster detection
IDLE_INTERVAL = 60 # idle devices — less overhead
try: try:
while self._health_monitoring_active: while self._health_monitoring_active:
await self._check_device_health(device_id) await self._check_device_health(device_id)
await asyncio.sleep(check_interval) interval = ACTIVE_INTERVAL if self._is_device_streaming(device_id) else IDLE_INTERVAL
await asyncio.sleep(interval)
except asyncio.CancelledError: except asyncio.CancelledError:
pass pass
except Exception as e: except Exception as e:

View File

@@ -67,6 +67,11 @@ class WledTargetProcessor(TargetProcessor):
self._resolved_display_index: Optional[int] = None self._resolved_display_index: Optional[int] = None
# Fit-to-device linspace cache (per-instance to avoid cross-target thrash)
self._fit_cache_key: tuple = (0, 0)
self._fit_cache_src: Optional[np.ndarray] = None
self._fit_cache_dst: Optional[np.ndarray] = None
# LED preview WebSocket clients # LED preview WebSocket clients
self._preview_clients: list = [] self._preview_clients: list = []
self._last_preview_colors: np.ndarray | None = None self._last_preview_colors: np.ndarray | None = None
@@ -461,7 +466,7 @@ class WledTargetProcessor(TargetProcessor):
self._preview_clients.append(ws) self._preview_clients.append(ws)
# Send last known frame immediately so late joiners see current state # Send last known frame immediately so late joiners see current state
if self._last_preview_colors is not None: if self._last_preview_colors is not None:
data = bytes([self._last_preview_brightness]) + self._last_preview_colors.astype(np.uint8).tobytes() data = bytes([self._last_preview_brightness]) + self._last_preview_colors.tobytes()
asyncio.ensure_future(self._send_preview_to(ws, data)) asyncio.ensure_future(self._send_preview_to(ws, data))
@staticmethod @staticmethod
@@ -484,7 +489,7 @@ class WledTargetProcessor(TargetProcessor):
if not self._preview_clients: if not self._preview_clients:
return return
data = bytes([brightness]) + colors.astype(np.uint8).tobytes() data = bytes([brightness]) + colors.tobytes()
async def _send_safe(ws): async def _send_safe(ws):
try: try:
@@ -501,16 +506,18 @@ class WledTargetProcessor(TargetProcessor):
# ----- Private: processing loop ----- # ----- Private: processing loop -----
@staticmethod def _fit_to_device(self, colors: np.ndarray, device_led_count: int) -> np.ndarray:
def _fit_to_device(colors: np.ndarray, device_led_count: int) -> np.ndarray:
"""Resample colors to match the target LED count.""" """Resample colors to match the target LED count."""
n = len(colors) n = len(colors)
if n == device_led_count or device_led_count <= 0: if n == device_led_count or device_led_count <= 0:
return colors return colors
src_x = np.linspace(0, 1, n) key = (n, device_led_count)
dst_x = np.linspace(0, 1, device_led_count) if self._fit_cache_key != key:
self._fit_cache_src = np.linspace(0, 1, n)
self._fit_cache_dst = np.linspace(0, 1, device_led_count)
self._fit_cache_key = key
result = np.column_stack([ result = np.column_stack([
np.interp(dst_x, src_x, colors[:, ch]).astype(np.uint8) np.interp(self._fit_cache_dst, self._fit_cache_src, colors[:, ch]).astype(np.uint8)
for ch in range(colors.shape[1]) for ch in range(colors.shape[1])
]) ])
return result return result
@@ -568,9 +575,9 @@ class WledTargetProcessor(TargetProcessor):
# --- Timing diagnostics --- # --- Timing diagnostics ---
_diag_interval = 5.0 _diag_interval = 5.0
_diag_next_report = time.perf_counter() + _diag_interval _diag_next_report = time.perf_counter() + _diag_interval
_diag_sleep_jitters: list = [] _diag_sleep_jitters: collections.deque = collections.deque(maxlen=300)
_diag_slow_iters: list = [] _diag_slow_iters: collections.deque = collections.deque(maxlen=50)
_diag_iter_times: list = [] _diag_iter_times: collections.deque = collections.deque(maxlen=300)
_diag_device_info: Optional[DeviceInfo] = None _diag_device_info: Optional[DeviceInfo] = None
_diag_device_info_age = 0 _diag_device_info_age = 0
@@ -817,7 +824,7 @@ class WledTargetProcessor(TargetProcessor):
iter_ms = (iter_end - loop_start) * 1000 iter_ms = (iter_end - loop_start) * 1000
_diag_iter_times.append(iter_ms) _diag_iter_times.append(iter_ms)
if iter_ms > frame_time * 1500: if iter_ms > frame_time * 1500:
if "sleep_jitter" not in [s[1] for s in _diag_slow_iters[-1:]]: if not _diag_slow_iters or _diag_slow_iters[-1][1] != "sleep_jitter":
_diag_slow_iters.append((iter_ms, "slow_iter")) _diag_slow_iters.append((iter_ms, "slow_iter"))
# Periodic diagnostics report # Periodic diagnostics report
@@ -845,7 +852,7 @@ class WledTargetProcessor(TargetProcessor):
logger.warning( logger.warning(
f"[DIAG] {self._target_id} slow iterations: " f"[DIAG] {self._target_id} slow iterations: "
f"{len(_diag_slow_iters)} in last {_diag_interval}s — " f"{len(_diag_slow_iters)} in last {_diag_interval}s — "
f"{_diag_slow_iters[:5]}" f"{list(_diag_slow_iters)[:5]}"
) )
_diag_sleep_jitters.clear() _diag_sleep_jitters.clear()
_diag_slow_iters.clear() _diag_slow_iters.clear()
@@ -855,7 +862,9 @@ class WledTargetProcessor(TargetProcessor):
logger.info(f"Processing loop cancelled for target {self._target_id}") logger.info(f"Processing loop cancelled for target {self._target_id}")
raise raise
except Exception as e: except Exception as e:
logger.error(f"Fatal error in processing loop for target {self._target_id}: {e}") logger.error(f"Fatal error in processing loop for target {self._target_id}: {e}", exc_info=True)
self._metrics.last_error = f"FATAL: {e}"
self._metrics.errors_count += 1
self._is_running = False self._is_running = False
raise raise
finally: finally:

View File

@@ -15,7 +15,7 @@ logger = get_logger(__name__)
class ProfileEngine: class ProfileEngine:
"""Evaluates profile conditions and starts/stops targets accordingly.""" """Evaluates profile conditions and starts/stops targets accordingly."""
def __init__(self, profile_store: ProfileStore, processor_manager, poll_interval: float = 3.0): def __init__(self, profile_store: ProfileStore, processor_manager, poll_interval: float = 1.0):
self._store = profile_store self._store = profile_store
self._manager = processor_manager self._manager = processor_manager
self._poll_interval = poll_interval self._poll_interval = poll_interval