Add quiet hours, fix Telegram bugs, and improve cache performance
All checks were successful
Validate / Hassfest (push) Successful in 5s

- Add quiet hours support to queue notifications during configured time windows
- Fix UnboundLocalError when single-item document chunk exceeds max_asset_data_size
- Fix document-only multi-item chunks being silently dropped (missing skip guard)
- Fix notification queue entity lookup by storing entity_id in queued params
- Fix quiet hours using OS timezone instead of HA-configured timezone (dt_util.now)
- Fix chat_action schema rejecting empty string from "Disabled" selector
- Fix stale thumbhash cache entries not being removed on mismatch
- Fix translation descriptions for send_large_photos_as_documents
- Add batch async_set_many() to TelegramFileCache to reduce disk writes
- Add max-entries eviction (2000) for thumbhash cache to prevent unbounded growth
- Eliminate redundant _is_asset_id/get_asset_thumbhash lookups in media group loop

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-03-09 09:45:34 +03:00
parent dd7032b411
commit 678e8a6e62
8 changed files with 437 additions and 52 deletions

View File

@@ -4,16 +4,21 @@ from __future__ import annotations
import logging import logging
from dataclasses import dataclass from dataclasses import dataclass
from datetime import datetime
from homeassistant.config_entries import ConfigEntry, ConfigSubentry from homeassistant.config_entries import ConfigEntry, ConfigSubentry
from homeassistant.core import HomeAssistant from homeassistant.core import HomeAssistant
from homeassistant.helpers.event import async_track_time_change
from .const import ( from .const import (
CONF_ALBUM_ID, CONF_ALBUM_ID,
CONF_ALBUM_NAME, CONF_ALBUM_NAME,
CONF_API_KEY, CONF_API_KEY,
CONF_HUB_NAME, CONF_HUB_NAME,
CONF_IMMICH_URL, CONF_IMMICH_URL,
CONF_QUIET_HOURS_END,
CONF_QUIET_HOURS_START,
CONF_SCAN_INTERVAL, CONF_SCAN_INTERVAL,
CONF_TELEGRAM_CACHE_TTL, CONF_TELEGRAM_CACHE_TTL,
DEFAULT_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL,
@@ -22,7 +27,7 @@ from .const import (
PLATFORMS, PLATFORMS,
) )
from .coordinator import ImmichAlbumWatcherCoordinator from .coordinator import ImmichAlbumWatcherCoordinator
from .storage import ImmichAlbumStorage, TelegramFileCache from .storage import ImmichAlbumStorage, NotificationQueue, TelegramFileCache
_LOGGER = logging.getLogger(__name__) _LOGGER = logging.getLogger(__name__)
@@ -36,6 +41,8 @@ class ImmichHubData:
api_key: str api_key: str
scan_interval: int scan_interval: int
telegram_cache_ttl: int telegram_cache_ttl: int
quiet_hours_start: str
quiet_hours_end: str
@dataclass @dataclass
@@ -59,6 +66,8 @@ async def async_setup_entry(hass: HomeAssistant, entry: ImmichConfigEntry) -> bo
api_key = entry.data[CONF_API_KEY] api_key = entry.data[CONF_API_KEY]
scan_interval = entry.options.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL) scan_interval = entry.options.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
telegram_cache_ttl = entry.options.get(CONF_TELEGRAM_CACHE_TTL, DEFAULT_TELEGRAM_CACHE_TTL) telegram_cache_ttl = entry.options.get(CONF_TELEGRAM_CACHE_TTL, DEFAULT_TELEGRAM_CACHE_TTL)
quiet_hours_start = entry.options.get(CONF_QUIET_HOURS_START, "")
quiet_hours_end = entry.options.get(CONF_QUIET_HOURS_END, "")
# Store hub data # Store hub data
entry.runtime_data = ImmichHubData( entry.runtime_data = ImmichHubData(
@@ -67,6 +76,8 @@ async def async_setup_entry(hass: HomeAssistant, entry: ImmichConfigEntry) -> bo
api_key=api_key, api_key=api_key,
scan_interval=scan_interval, scan_interval=scan_interval,
telegram_cache_ttl=telegram_cache_ttl, telegram_cache_ttl=telegram_cache_ttl,
quiet_hours_start=quiet_hours_start,
quiet_hours_end=quiet_hours_end,
) )
# Create storage for persisting album state across restarts # Create storage for persisting album state across restarts
@@ -85,6 +96,10 @@ async def async_setup_entry(hass: HomeAssistant, entry: ImmichConfigEntry) -> bo
) )
await telegram_asset_cache.async_load() await telegram_asset_cache.async_load()
# Create notification queue for quiet hours
notification_queue = NotificationQueue(hass, entry.entry_id)
await notification_queue.async_load()
# Store hub reference # Store hub reference
hass.data[DOMAIN][entry.entry_id] = { hass.data[DOMAIN][entry.entry_id] = {
"hub": entry.runtime_data, "hub": entry.runtime_data,
@@ -92,6 +107,7 @@ async def async_setup_entry(hass: HomeAssistant, entry: ImmichConfigEntry) -> bo
"storage": storage, "storage": storage,
"telegram_cache": telegram_cache, "telegram_cache": telegram_cache,
"telegram_asset_cache": telegram_asset_cache, "telegram_asset_cache": telegram_asset_cache,
"notification_queue": notification_queue,
} }
# Track loaded subentries to detect changes # Track loaded subentries to detect changes
@@ -104,6 +120,13 @@ async def async_setup_entry(hass: HomeAssistant, entry: ImmichConfigEntry) -> bo
# Forward platform setup once - platforms will iterate through subentries # Forward platform setup once - platforms will iterate through subentries
await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS) await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
# Register quiet hours end timer
_register_quiet_hours_timer(hass, entry)
# Check if there are queued notifications from before restart (outside quiet hours)
if notification_queue.has_pending() and not _is_quiet_hours(quiet_hours_start, quiet_hours_end):
hass.async_create_task(_process_notification_queue(hass, entry))
# Register update listener for options and subentry changes # Register update listener for options and subentry changes
entry.async_on_unload(entry.add_update_listener(_async_update_listener)) entry.async_on_unload(entry.add_update_listener(_async_update_listener))
@@ -159,6 +182,118 @@ async def _async_setup_subentry_coordinator(
_LOGGER.info("Coordinator for album '%s' set up successfully", album_name) _LOGGER.info("Coordinator for album '%s' set up successfully", album_name)
def _is_quiet_hours(start_str: str, end_str: str, hass: HomeAssistant | None = None) -> bool:
    """Check if current time is within quiet hours.

    Empty or unparsable start/end strings disable quiet hours entirely.
    Windows that cross midnight (e.g. 22:00-08:00) are handled.
    """
    from datetime import time as dt_time
    from homeassistant.util import dt as dt_util

    if not (start_str and end_str):
        # Both bounds must be configured for quiet hours to apply.
        return False
    try:
        window_start = dt_time.fromisoformat(start_str)
        window_end = dt_time.fromisoformat(end_str)
        current = dt_util.now().time()
    except ValueError:
        # Malformed HH:MM value — treat quiet hours as disabled.
        return False
    if window_start <= window_end:
        # Window contained within a single day.
        return window_start <= current < window_end
    # Crosses midnight (e.g., 22:00 - 08:00)
    return current >= window_start or current < window_end
def _register_quiet_hours_timer(hass: HomeAssistant, entry: ImmichConfigEntry) -> None:
    """Register a timer to process the notification queue when quiet hours end.

    Cancels any timer previously registered for this entry, then — if both
    quiet-hours options are configured and the end time parses as HH:MM —
    schedules a daily callback at the configured end time that drains the
    pending notification queue.
    """
    entry_data = hass.data[DOMAIN][entry.entry_id]

    # Cancel existing timer if any (happens when options are updated and
    # this function is re-invoked by the update listener).
    # NOTE(review): the previous unsub also stays registered via
    # entry.async_on_unload below, so it may be invoked a second time at
    # unload — confirm the time-change remover tolerates double invocation.
    unsub = entry_data.pop("quiet_hours_unsub", None)
    if unsub:
        unsub()

    end_str = entry.options.get(CONF_QUIET_HOURS_END, "")
    start_str = entry.options.get(CONF_QUIET_HOURS_START, "")
    # Quiet hours are only active when both bounds are set; otherwise there
    # is nothing to schedule.
    if not end_str or not start_str:
        return

    try:
        from datetime import time as dt_time
        end_time = dt_time.fromisoformat(end_str)
    except ValueError:
        _LOGGER.warning("Invalid quiet hours end time: %s", end_str)
        return

    async def _on_quiet_hours_end(_now: datetime) -> None:
        """Handle quiet hours end — process queued notifications."""
        # entry_data is captured by closure; the queue object lives for the
        # lifetime of this config entry, so the lookup stays valid.
        queue: NotificationQueue = entry_data["notification_queue"]
        if queue.has_pending():
            _LOGGER.info("Quiet hours ended, processing queued notifications")
            await _process_notification_queue(hass, entry)

    # Fires once per day exactly at the configured end time (seconds pinned
    # to 0 so the listener matches a single wall-clock second).
    unsub = async_track_time_change(
        hass, _on_quiet_hours_end, hour=end_time.hour, minute=end_time.minute, second=0
    )
    entry_data["quiet_hours_unsub"] = unsub
    entry.async_on_unload(unsub)
    _LOGGER.debug("Registered quiet hours timer for %s", end_str)
async def _process_notification_queue(
    hass: HomeAssistant, entry: ImmichConfigEntry
) -> None:
    """Process all queued notifications via the HA service call.

    Replays each queued notification through the integration's
    ``send_telegram_notification`` service with ``ignore_quiet_hours=True``
    so the replay cannot be re-queued. Items are targeted at the entity_id
    stored with the queued call; a sensor entity from the registry is used
    only as a fallback for legacy items that lack one. The queue is cleared
    after the pass, even if individual sends failed (failures are logged,
    not retried, to avoid an infinite retry loop).
    """
    import asyncio
    from homeassistant.helpers import entity_registry as er

    entry_data = hass.data[DOMAIN].get(entry.entry_id)
    if not entry_data:
        return

    queue: NotificationQueue = entry_data["notification_queue"]
    items = queue.get_all()
    if not items:
        return

    # Find a fallback sensor entity for items that don't have entity_id stored.
    # Fix: previously the whole queue was abandoned when no sensor entity was
    # found, even though every item may carry its own stored entity_id.
    ent_reg = er.async_get(hass)
    fallback_entity_id = None
    for ent in er.async_entries_for_config_entry(ent_reg, entry.entry_id):
        if ent.domain == "sensor":
            fallback_entity_id = ent.entity_id
            break

    _LOGGER.info("Processing %d queued notifications", len(items))

    for i, item in enumerate(items):
        params = item.get("params", {})
        try:
            # Use stored entity_id from the original call, fall back to discovered one
            target_entity_id = params.pop("entity_id", None) or fallback_entity_id
            if not target_entity_id:
                # No stored target and no registry fallback — skip this item
                # instead of aborting the whole queue.
                _LOGGER.warning(
                    "No sensor entity found for queued notification %d/%d, skipping",
                    i + 1, len(items),
                )
                continue
            # Call the service with ignore_quiet_hours=True to prevent re-queuing
            await hass.services.async_call(
                DOMAIN,
                "send_telegram_notification",
                {**params, "ignore_quiet_hours": True},
                target={"entity_id": target_entity_id},
                blocking=True,
            )
        except Exception:
            _LOGGER.exception("Failed to send queued notification %d/%d", i + 1, len(items))

        # Small delay between notifications to avoid rate limiting
        if i < len(items) - 1:
            await asyncio.sleep(1)

    await queue.async_clear()
    _LOGGER.info("Processed %d queued notifications", len(items))
async def _async_update_listener( async def _async_update_listener(
hass: HomeAssistant, entry: ImmichConfigEntry hass: HomeAssistant, entry: ImmichConfigEntry
) -> None: ) -> None:
@@ -177,18 +312,26 @@ async def _async_update_listener(
await hass.config_entries.async_reload(entry.entry_id) await hass.config_entries.async_reload(entry.entry_id)
return return
# Handle options-only update (scan interval change) # Handle options-only update
new_interval = entry.options.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL) new_interval = entry.options.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
new_quiet_start = entry.options.get(CONF_QUIET_HOURS_START, "")
new_quiet_end = entry.options.get(CONF_QUIET_HOURS_END, "")
# Update hub data # Update hub data
entry.runtime_data.scan_interval = new_interval entry.runtime_data.scan_interval = new_interval
entry.runtime_data.quiet_hours_start = new_quiet_start
entry.runtime_data.quiet_hours_end = new_quiet_end
# Update all subentry coordinators # Update all subentry coordinators
subentries_data = entry_data["subentries"] subentries_data = entry_data["subentries"]
for subentry_data in subentries_data.values(): for subentry_data in subentries_data.values():
subentry_data.coordinator.update_scan_interval(new_interval) subentry_data.coordinator.update_scan_interval(new_interval)
_LOGGER.info("Updated scan interval to %d seconds", new_interval) # Re-register quiet hours timer
_register_quiet_hours_timer(hass, entry)
_LOGGER.info("Updated hub options (scan_interval=%d, quiet_hours=%s-%s)",
new_interval, new_quiet_start or "disabled", new_quiet_end or "disabled")
async def async_unload_entry(hass: HomeAssistant, entry: ImmichConfigEntry) -> bool: async def async_unload_entry(hass: HomeAssistant, entry: ImmichConfigEntry) -> bool:

View File

@@ -25,6 +25,8 @@ from .const import (
CONF_API_KEY, CONF_API_KEY,
CONF_HUB_NAME, CONF_HUB_NAME,
CONF_IMMICH_URL, CONF_IMMICH_URL,
CONF_QUIET_HOURS_END,
CONF_QUIET_HOURS_START,
CONF_SCAN_INTERVAL, CONF_SCAN_INTERVAL,
CONF_TELEGRAM_BOT_TOKEN, CONF_TELEGRAM_BOT_TOKEN,
CONF_TELEGRAM_CACHE_TTL, CONF_TELEGRAM_CACHE_TTL,
@@ -245,6 +247,26 @@ class ImmichAlbumWatcherOptionsFlow(OptionsFlow):
) -> ConfigFlowResult: ) -> ConfigFlowResult:
"""Manage the options.""" """Manage the options."""
if user_input is not None: if user_input is not None:
errors: dict[str, str] = {}
quiet_start = user_input.get(CONF_QUIET_HOURS_START, "")
quiet_end = user_input.get(CONF_QUIET_HOURS_END, "")
import re
time_pattern = re.compile(r"^(\d{2}:\d{2})?$")
if quiet_start and not time_pattern.match(quiet_start):
errors[CONF_QUIET_HOURS_START] = "invalid_time_format"
if quiet_end and not time_pattern.match(quiet_end):
errors[CONF_QUIET_HOURS_END] = "invalid_time_format"
if (quiet_start and not quiet_end) or (quiet_end and not quiet_start):
errors["base"] = "quiet_hours_incomplete"
if errors:
return self.async_show_form(
step_id="init",
data_schema=self._build_options_schema(),
errors=errors,
)
return self.async_create_entry( return self.async_create_entry(
title="", title="",
data={ data={
@@ -257,9 +279,18 @@ class ImmichAlbumWatcherOptionsFlow(OptionsFlow):
CONF_TELEGRAM_CACHE_TTL: user_input.get( CONF_TELEGRAM_CACHE_TTL: user_input.get(
CONF_TELEGRAM_CACHE_TTL, DEFAULT_TELEGRAM_CACHE_TTL CONF_TELEGRAM_CACHE_TTL, DEFAULT_TELEGRAM_CACHE_TTL
), ),
CONF_QUIET_HOURS_START: quiet_start,
CONF_QUIET_HOURS_END: quiet_end,
}, },
) )
return self.async_show_form(
step_id="init",
data_schema=self._build_options_schema(),
)
def _build_options_schema(self) -> vol.Schema:
"""Build the options form schema."""
current_interval = self._config_entry.options.get( current_interval = self._config_entry.options.get(
CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
) )
@@ -269,10 +300,14 @@ class ImmichAlbumWatcherOptionsFlow(OptionsFlow):
current_cache_ttl = self._config_entry.options.get( current_cache_ttl = self._config_entry.options.get(
CONF_TELEGRAM_CACHE_TTL, DEFAULT_TELEGRAM_CACHE_TTL CONF_TELEGRAM_CACHE_TTL, DEFAULT_TELEGRAM_CACHE_TTL
) )
current_quiet_start = self._config_entry.options.get(
CONF_QUIET_HOURS_START, ""
)
current_quiet_end = self._config_entry.options.get(
CONF_QUIET_HOURS_END, ""
)
return self.async_show_form( return vol.Schema(
step_id="init",
data_schema=vol.Schema(
{ {
vol.Required( vol.Required(
CONF_SCAN_INTERVAL, default=current_interval CONF_SCAN_INTERVAL, default=current_interval
@@ -283,8 +318,13 @@ class ImmichAlbumWatcherOptionsFlow(OptionsFlow):
vol.Optional( vol.Optional(
CONF_TELEGRAM_CACHE_TTL, default=current_cache_ttl CONF_TELEGRAM_CACHE_TTL, default=current_cache_ttl
): vol.All(vol.Coerce(int), vol.Range(min=1, max=168)), ): vol.All(vol.Coerce(int), vol.Range(min=1, max=168)),
vol.Optional(
CONF_QUIET_HOURS_START, default=current_quiet_start
): str,
vol.Optional(
CONF_QUIET_HOURS_END, default=current_quiet_end
): str,
} }
),
) )

View File

@@ -15,6 +15,8 @@ CONF_ALBUM_NAME: Final = "album_name"
CONF_SCAN_INTERVAL: Final = "scan_interval" CONF_SCAN_INTERVAL: Final = "scan_interval"
CONF_TELEGRAM_BOT_TOKEN: Final = "telegram_bot_token" CONF_TELEGRAM_BOT_TOKEN: Final = "telegram_bot_token"
CONF_TELEGRAM_CACHE_TTL: Final = "telegram_cache_ttl" CONF_TELEGRAM_CACHE_TTL: Final = "telegram_cache_ttl"
CONF_QUIET_HOURS_START: Final = "quiet_hours_start"
CONF_QUIET_HOURS_END: Final = "quiet_hours_end"
# Subentry type # Subentry type
SUBENTRY_TYPE_ALBUM: Final = "album" SUBENTRY_TYPE_ALBUM: Final = "album"

View File

@@ -41,6 +41,8 @@ from .const import (
CONF_ALBUM_ID, CONF_ALBUM_ID,
CONF_ALBUM_NAME, CONF_ALBUM_NAME,
CONF_HUB_NAME, CONF_HUB_NAME,
CONF_QUIET_HOURS_END,
CONF_QUIET_HOURS_START,
CONF_TELEGRAM_BOT_TOKEN, CONF_TELEGRAM_BOT_TOKEN,
DOMAIN, DOMAIN,
SERVICE_GET_ASSETS, SERVICE_GET_ASSETS,
@@ -48,7 +50,7 @@ from .const import (
SERVICE_SEND_TELEGRAM_NOTIFICATION, SERVICE_SEND_TELEGRAM_NOTIFICATION,
) )
from .coordinator import AlbumData, ImmichAlbumWatcherCoordinator from .coordinator import AlbumData, ImmichAlbumWatcherCoordinator
from .storage import TelegramFileCache from .storage import NotificationQueue, TelegramFileCache
_LOGGER = logging.getLogger(__name__) _LOGGER = logging.getLogger(__name__)
@@ -242,8 +244,9 @@ async def async_setup_entry(
), ),
vol.Optional("send_large_photos_as_documents", default=False): bool, vol.Optional("send_large_photos_as_documents", default=False): bool,
vol.Optional("chat_action", default="typing"): vol.Any( vol.Optional("chat_action", default="typing"): vol.Any(
None, vol.In(["typing", "upload_photo", "upload_video", "upload_document"]) None, vol.In(["", "typing", "upload_photo", "upload_video", "upload_document"])
), ),
vol.Optional("ignore_quiet_hours", default=False): bool,
}, },
"async_send_telegram_notification", "async_send_telegram_notification",
supports_response=SupportsResponse.OPTIONAL, supports_response=SupportsResponse.OPTIONAL,
@@ -334,6 +337,29 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
) )
return {"assets": assets} return {"assets": assets}
def _is_quiet_hours(self) -> bool:
"""Check if current time is within configured quiet hours."""
from datetime import time as dt_time
from homeassistant.util import dt as dt_util
start_str = self._entry.options.get(CONF_QUIET_HOURS_START, "")
end_str = self._entry.options.get(CONF_QUIET_HOURS_END, "")
if not start_str or not end_str:
return False
try:
now = dt_util.now().time()
start_time = dt_time.fromisoformat(start_str)
end_time = dt_time.fromisoformat(end_str)
except ValueError:
return False
if start_time <= end_time:
return start_time <= now < end_time
else:
# Crosses midnight (e.g., 22:00 - 08:00)
return now >= start_time or now < end_time
async def async_send_telegram_notification( async def async_send_telegram_notification(
self, self,
chat_id: str, chat_id: str,
@@ -349,6 +375,7 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
max_asset_data_size: int | None = None, max_asset_data_size: int | None = None,
send_large_photos_as_documents: bool = False, send_large_photos_as_documents: bool = False,
chat_action: str | None = "typing", chat_action: str | None = "typing",
ignore_quiet_hours: bool = False,
) -> ServiceResponse: ) -> ServiceResponse:
"""Send notification to Telegram. """Send notification to Telegram.
@@ -365,6 +392,26 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
If wait_for_response is False, the task will be executed in the background If wait_for_response is False, the task will be executed in the background
and the service will return immediately. and the service will return immediately.
""" """
# Check quiet hours — queue notification if active
if not ignore_quiet_hours and self._is_quiet_hours():
queue: NotificationQueue = self.hass.data[DOMAIN][self._entry.entry_id]["notification_queue"]
await queue.async_enqueue({
"entity_id": self.entity_id,
"chat_id": chat_id,
"assets": assets,
"bot_token": bot_token,
"caption": caption,
"reply_to_message_id": reply_to_message_id,
"disable_web_page_preview": disable_web_page_preview,
"parse_mode": parse_mode,
"max_group_size": max_group_size,
"chunk_delay": chunk_delay,
"max_asset_data_size": max_asset_data_size,
"send_large_photos_as_documents": send_large_photos_as_documents,
"chat_action": chat_action,
})
return {"success": True, "status": "queued_quiet_hours"}
# If non-blocking mode, create a background task and return immediately # If non-blocking mode, create a background task and return immediately
if not wait_for_response: if not wait_for_response:
self.hass.async_create_task( self.hass.async_create_task(
@@ -1214,6 +1261,7 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
# Only apply caption and reply_to to the first chunk # Only apply caption and reply_to to the first chunk
chunk_caption = caption if chunk_idx == 0 else None chunk_caption = caption if chunk_idx == 0 else None
chunk_reply_to = reply_to_message_id if chunk_idx == 0 else None chunk_reply_to = reply_to_message_id if chunk_idx == 0 else None
result = None
if media_type == "photo": if media_type == "photo":
_LOGGER.debug("Sending chunk %d/%d as single photo", chunk_idx + 1, len(chunks)) _LOGGER.debug("Sending chunk %d/%d as single photo", chunk_idx + 1, len(chunks))
@@ -1248,6 +1296,10 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
except aiohttp.ClientError as err: except aiohttp.ClientError as err:
return {"success": False, "error": f"Failed to download media: {err}", "failed_at_chunk": chunk_idx + 1} return {"success": False, "error": f"Failed to download media: {err}", "failed_at_chunk": chunk_idx + 1}
if result is None:
# Document was skipped (e.g., exceeded max_asset_data_size)
continue
if not result.get("success"): if not result.get("success"):
result["failed_at_chunk"] = chunk_idx + 1 result["failed_at_chunk"] = chunk_idx + 1
return result return result
@@ -1258,10 +1310,11 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
_LOGGER.debug("Sending chunk %d/%d as media group (%d items)", chunk_idx + 1, len(chunks), len(chunk)) _LOGGER.debug("Sending chunk %d/%d as media group (%d items)", chunk_idx + 1, len(chunks), len(chunk))
# Helper to get the appropriate cache for a cache key # Helper to get the appropriate cache for a cache key
def get_cache_for_key(key: str) -> TelegramFileCache | None: def get_cache_for_key(key: str, is_asset: bool | None = None) -> TelegramFileCache | None:
"""Return asset cache if key is a UUID, otherwise URL cache.""" """Return asset cache if key is a UUID, otherwise URL cache."""
is_asset_id = _is_asset_id(key) if is_asset is None:
return self.coordinator.telegram_asset_cache if is_asset_id else self.coordinator.telegram_cache is_asset = _is_asset_id(key)
return self.coordinator.telegram_asset_cache if is_asset else self.coordinator.telegram_cache
# Collect media items - either from cache (file_id) or by downloading # Collect media items - either from cache (file_id) or by downloading
# Each item: (type, media_ref, filename, cache_key, is_cached, content_type) # Each item: (type, media_ref, filename, cache_key, is_cached, content_type)
@@ -1322,8 +1375,9 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
continue continue
# Check cache first for photos/videos # Check cache first for photos/videos
item_cache = get_cache_for_key(item_cache_key) is_asset = _is_asset_id(item_cache_key)
item_thumbhash = self.coordinator.get_asset_thumbhash(item_cache_key) if _is_asset_id(item_cache_key) else None item_cache = get_cache_for_key(item_cache_key, is_asset)
item_thumbhash = self.coordinator.get_asset_thumbhash(item_cache_key) if is_asset else None
cached = item_cache.get(item_cache_key, thumbhash=item_thumbhash) if item_cache else None cached = item_cache.get(item_cache_key, thumbhash=item_thumbhash) if item_cache else None
if cached and cached.get("file_id"): if cached and cached.get("file_id"):
# Use cached file_id # Use cached file_id
@@ -1393,7 +1447,7 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
} }
# Skip this chunk if all files were filtered out # Skip this chunk if all files were filtered out
if not media_items and not oversized_photos: if not media_items and not oversized_photos and not documents_to_send:
_LOGGER.info("Chunk %d/%d: all %d media items skipped", _LOGGER.info("Chunk %d/%d: all %d media items skipped",
chunk_idx + 1, len(chunks), len(chunk)) chunk_idx + 1, len(chunks), len(chunk))
continue continue
@@ -1467,9 +1521,10 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
if sub_caption: if sub_caption:
first_caption_used = True first_caption_used = True
# Cache the uploaded file_id # Cache the uploaded file_id
sg_cache = get_cache_for_key(sg_ck) sg_is_asset = _is_asset_id(sg_ck)
sg_cache = get_cache_for_key(sg_ck, sg_is_asset)
if sg_cache: if sg_cache:
sg_thumbhash = self.coordinator.get_asset_thumbhash(sg_ck) if _is_asset_id(sg_ck) else None sg_thumbhash = self.coordinator.get_asset_thumbhash(sg_ck) if sg_is_asset else None
result_data = result.get("result", {}) result_data = result.get("result", {})
if sg_type == "photo": if sg_type == "photo":
photos = result_data.get("photo", []) photos = result_data.get("photo", [])
@@ -1551,7 +1606,8 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
# Build media JSON - use file_id for cached, attach:// for uploaded # Build media JSON - use file_id for cached, attach:// for uploaded
media_json = [] media_json = []
upload_idx = 0 upload_idx = 0
keys_to_cache: list[tuple[str, int, str]] = [] # (cache_key, result_idx, type) # (cache_key, result_idx, type, is_asset, thumbhash)
keys_to_cache: list[tuple[str, int, str, bool, str | None]] = []
for i, (media_type, media_ref, filename, item_cache_key, is_cached, item_content_type) in enumerate(sub_group_items): for i, (media_type, media_ref, filename, item_cache_key, is_cached, item_content_type) in enumerate(sub_group_items):
if is_cached: if is_cached:
@@ -1570,7 +1626,9 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
# Use provided content_type or default based on media type # Use provided content_type or default based on media type
content_type = item_content_type or ("image/jpeg" if media_type == "photo" else "video/mp4") content_type = item_content_type or ("image/jpeg" if media_type == "photo" else "video/mp4")
form.add_field(attach_name, media_ref, filename=filename, content_type=content_type) form.add_field(attach_name, media_ref, filename=filename, content_type=content_type)
keys_to_cache.append((item_cache_key, i, media_type)) ck_is_asset = _is_asset_id(item_cache_key)
ck_thumbhash = self.coordinator.get_asset_thumbhash(item_cache_key) if ck_is_asset else None
keys_to_cache.append((item_cache_key, i, media_type, ck_is_asset, ck_thumbhash))
upload_idx += 1 upload_idx += 1
if i == 0 and sub_caption and not first_caption_used: if i == 0 and sub_caption and not first_caption_used:
@@ -1597,22 +1655,31 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
if sub_caption: if sub_caption:
first_caption_used = True first_caption_used = True
# Cache the newly uploaded file_ids # Cache the newly uploaded file_ids (batched per cache instance)
if keys_to_cache: if keys_to_cache:
result_messages = result.get("result", []) result_messages = result.get("result", [])
for ck, result_idx, m_type in keys_to_cache: # Group entries by cache instance for batch writes
ck_cache = get_cache_for_key(ck) cache_batches: dict[int, tuple[TelegramFileCache, list[tuple[str, str, str, str | None]]]] = {}
ck_thumbhash = self.coordinator.get_asset_thumbhash(ck) if _is_asset_id(ck) else None for ck, result_idx, m_type, ck_is_asset, ck_thumbhash in keys_to_cache:
if result_idx < len(result_messages) and ck_cache: ck_cache = get_cache_for_key(ck, ck_is_asset)
if result_idx >= len(result_messages) or not ck_cache:
continue
msg = result_messages[result_idx] msg = result_messages[result_idx]
file_id = None
if m_type == "photo": if m_type == "photo":
photos = msg.get("photo", []) photos = msg.get("photo", [])
if photos: if photos:
await ck_cache.async_set(ck, photos[-1].get("file_id"), "photo", thumbhash=ck_thumbhash) file_id = photos[-1].get("file_id")
elif m_type == "video": elif m_type == "video":
video = msg.get("video", {}) video = msg.get("video", {})
if video.get("file_id"): file_id = video.get("file_id")
await ck_cache.async_set(ck, video["file_id"], "video", thumbhash=ck_thumbhash) if file_id:
cache_id = id(ck_cache)
if cache_id not in cache_batches:
cache_batches[cache_id] = (ck_cache, [])
cache_batches[cache_id][1].append((ck, file_id, m_type, ck_thumbhash))
for ck_cache, batch_entries in cache_batches.values():
await ck_cache.async_set_many(batch_entries)
else: else:
# Log detailed error for media group with total size info # Log detailed error for media group with total size info
uploaded_data = [m for m in sub_group_items if not m[4]] uploaded_data = [m for m in sub_group_items if not m[4]]

View File

@@ -256,3 +256,10 @@ send_telegram_notification:
value: "upload_document" value: "upload_document"
- label: "Disabled" - label: "Disabled"
value: "" value: ""
ignore_quiet_hours:
name: Ignore Quiet Hours
description: Send notification immediately even during quiet hours. By default, notifications are queued during quiet hours and sent when they end.
required: false
default: false
selector:
boolean:

View File

@@ -115,9 +115,26 @@ class TelegramFileCache:
mode, mode,
) )
# Maximum number of entries to keep in thumbhash mode to prevent unbounded growth
THUMBHASH_MAX_ENTRIES = 2000
async def _cleanup_expired(self) -> None: async def _cleanup_expired(self) -> None:
"""Remove expired cache entries (TTL mode only).""" """Remove expired cache entries (TTL mode) or trim old entries (thumbhash mode)."""
if self._use_thumbhash: if self._use_thumbhash:
files = self._data.get("files", {}) if self._data else {}
if len(files) > self.THUMBHASH_MAX_ENTRIES:
sorted_keys = sorted(
files, key=lambda k: files[k].get("cached_at", "")
)
keys_to_remove = sorted_keys[: len(files) - self.THUMBHASH_MAX_ENTRIES]
for key in keys_to_remove:
del files[key]
await self._store.async_save(self._data)
_LOGGER.debug(
"Trimmed thumbhash cache from %d to %d entries",
len(keys_to_remove) + self.THUMBHASH_MAX_ENTRIES,
self.THUMBHASH_MAX_ENTRIES,
)
return return
if not self._data or "files" not in self._data: if not self._data or "files" not in self._data:
@@ -164,9 +181,10 @@ class TelegramFileCache:
stored_thumbhash = entry.get("thumbhash") stored_thumbhash = entry.get("thumbhash")
if stored_thumbhash and stored_thumbhash != thumbhash: if stored_thumbhash and stored_thumbhash != thumbhash:
_LOGGER.debug( _LOGGER.debug(
"Cache miss for %s: thumbhash changed", "Cache miss for %s: thumbhash changed, removing stale entry",
key[:36], key[:36],
) )
del self._data["files"][key]
return None return None
# If no thumbhash provided (asset not in monitored album), # If no thumbhash provided (asset not in monitored album),
# return cached entry anyway — self-heals on Telegram rejection # return cached entry anyway — self-heals on Telegram rejection
@@ -210,7 +228,91 @@ class TelegramFileCache:
await self._store.async_save(self._data) await self._store.async_save(self._data)
_LOGGER.debug("Cached Telegram file_id for key (type: %s)", media_type) _LOGGER.debug("Cached Telegram file_id for key (type: %s)", media_type)
async def async_set_many(
self, entries: list[tuple[str, str, str, str | None]]
) -> None:
"""Store multiple file_ids in a single disk write.
Args:
entries: List of (key, file_id, media_type, thumbhash) tuples
"""
if not entries:
return
if self._data is None:
self._data = {"files": {}}
now_iso = datetime.now(timezone.utc).isoformat()
for key, file_id, media_type, thumbhash in entries:
entry_data: dict[str, Any] = {
"file_id": file_id,
"type": media_type,
"cached_at": now_iso,
}
if thumbhash is not None:
entry_data["thumbhash"] = thumbhash
self._data["files"][key] = entry_data
await self._store.async_save(self._data)
_LOGGER.debug("Batch cached %d Telegram file_ids", len(entries))
async def async_remove(self) -> None: async def async_remove(self) -> None:
"""Remove all cache data.""" """Remove all cache data."""
await self._store.async_remove() await self._store.async_remove()
self._data = None self._data = None
class NotificationQueue:
    """Persistent queue for notifications deferred during quiet hours.

    Each queued item stores the full original service-call parameters plus
    a UTC timestamp, so notifications can be replayed exactly as they were
    originally called once quiet hours end.
    """

    def __init__(self, hass: HomeAssistant, entry_id: str) -> None:
        """Initialize the notification queue."""
        self._store: Store[dict[str, Any]] = Store(
            hass, STORAGE_VERSION, f"{STORAGE_KEY_PREFIX}.notification_queue.{entry_id}"
        )
        # None until async_load() runs (or the first enqueue creates it).
        self._data: dict[str, Any] | None = None

    async def async_load(self) -> None:
        """Load queue data from storage."""
        stored = await self._store.async_load()
        self._data = stored if stored else {"queue": []}
        _LOGGER.debug(
            "Loaded notification queue with %d items",
            len(self._data.get("queue", [])),
        )

    async def async_enqueue(self, notification_params: dict[str, Any]) -> None:
        """Add a notification to the queue and persist it immediately."""
        if self._data is None:
            self._data = {"queue": []}
        record = {
            "params": notification_params,
            "queued_at": datetime.now(timezone.utc).isoformat(),
        }
        self._data["queue"].append(record)
        await self._store.async_save(self._data)
        _LOGGER.debug("Queued notification during quiet hours (total: %d)", len(self._data["queue"]))

    def get_all(self) -> list[dict[str, Any]]:
        """Return a shallow copy of all queued notifications."""
        return list(self._data.get("queue", [])) if self._data else []

    def has_pending(self) -> bool:
        """Return True when at least one notification is queued."""
        return bool(self._data and self._data.get("queue"))

    async def async_clear(self) -> None:
        """Drop all queued notifications and persist the empty queue."""
        if self._data:
            self._data["queue"] = []
            await self._store.async_save(self._data)

    async def async_remove(self) -> None:
        """Delete the backing store file entirely."""
        await self._store.async_remove()
        self._data = None

View File

@@ -113,6 +113,10 @@
} }
}, },
"options": { "options": {
"error": {
"invalid_time_format": "Invalid time format. Use HH:MM (e.g. 22:00)",
"quiet_hours_incomplete": "Both start and end times must be set for quiet hours"
},
"step": { "step": {
"init": { "init": {
"title": "Immich Album Watcher Options", "title": "Immich Album Watcher Options",
@@ -120,12 +124,16 @@
"data": { "data": {
"scan_interval": "Scan interval (seconds)", "scan_interval": "Scan interval (seconds)",
"telegram_bot_token": "Telegram Bot Token", "telegram_bot_token": "Telegram Bot Token",
"telegram_cache_ttl": "Telegram Cache TTL (hours)" "telegram_cache_ttl": "Telegram Cache TTL (hours)",
"quiet_hours_start": "Quiet Hours Start",
"quiet_hours_end": "Quiet Hours End"
}, },
"data_description": { "data_description": {
"scan_interval": "How often to check for album changes (10-3600 seconds)", "scan_interval": "How often to check for album changes (10-3600 seconds)",
"telegram_bot_token": "Bot token for sending notifications to Telegram", "telegram_bot_token": "Bot token for sending notifications to Telegram",
"telegram_cache_ttl": "How long to cache uploaded file IDs to avoid re-uploading (1-168 hours, default: 48)" "telegram_cache_ttl": "How long to cache uploaded file IDs to avoid re-uploading (1-168 hours, default: 48)",
"quiet_hours_start": "Start time for quiet hours (HH:MM format, e.g. 22:00). Notifications will be queued during this period. Leave empty to disable.",
"quiet_hours_end": "End time for quiet hours (HH:MM format, e.g. 08:00). Queued notifications will be sent at this time."
} }
} }
} }
@@ -243,11 +251,15 @@
}, },
"send_large_photos_as_documents": { "send_large_photos_as_documents": {
"name": "Send Large Photos As Documents", "name": "Send Large Photos As Documents",
"description": "How to handle photos exceeding Telegram's limits (10MB or 10000px dimension sum). If true, send as documents. If false, downsize to fit limits." "description": "How to handle photos exceeding Telegram's limits (10MB or 10000px dimension sum). If true, send as documents. If false, skip oversized photos."
}, },
"chat_action": { "chat_action": {
"name": "Chat Action", "name": "Chat Action",
"description": "Chat action to display while processing (typing, upload_photo, upload_video, upload_document). Set to empty to disable." "description": "Chat action to display while processing (typing, upload_photo, upload_video, upload_document). Set to empty to disable."
},
"ignore_quiet_hours": {
"name": "Ignore Quiet Hours",
"description": "Send notification immediately even during quiet hours. By default, notifications are queued during quiet hours and sent when they end."
} }
} }
} }

View File

@@ -113,6 +113,10 @@
} }
}, },
"options": { "options": {
"error": {
"invalid_time_format": "Неверный формат времени. Используйте ЧЧ:ММ (например 22:00)",
"quiet_hours_incomplete": "Необходимо указать и начало, и конец тихих часов"
},
"step": { "step": {
"init": { "init": {
"title": "Настройки Immich Album Watcher", "title": "Настройки Immich Album Watcher",
@@ -120,12 +124,16 @@
"data": { "data": {
"scan_interval": "Интервал сканирования (секунды)", "scan_interval": "Интервал сканирования (секунды)",
"telegram_bot_token": "Токен Telegram бота", "telegram_bot_token": "Токен Telegram бота",
"telegram_cache_ttl": "Время жизни кэша Telegram (часы)" "telegram_cache_ttl": "Время жизни кэша Telegram (часы)",
"quiet_hours_start": "Начало тихих часов",
"quiet_hours_end": "Конец тихих часов"
}, },
"data_description": { "data_description": {
"scan_interval": "Как часто проверять изменения в альбомах (10-3600 секунд)", "scan_interval": "Как часто проверять изменения в альбомах (10-3600 секунд)",
"telegram_bot_token": "Токен бота для отправки уведомлений в Telegram", "telegram_bot_token": "Токен бота для отправки уведомлений в Telegram",
"telegram_cache_ttl": "Сколько хранить ID загруженных файлов для повторной отправки без загрузки (1-168 часов, по умолчанию: 48)" "telegram_cache_ttl": "Сколько хранить ID загруженных файлов для повторной отправки без загрузки (1-168 часов, по умолчанию: 48)",
"quiet_hours_start": "Время начала тихих часов (формат ЧЧ:ММ, например 22:00). Уведомления будут поставлены в очередь. Оставьте пустым для отключения.",
"quiet_hours_end": "Время окончания тихих часов (формат ЧЧ:ММ, например 08:00). Уведомления из очереди будут отправлены в это время."
} }
} }
} }
@@ -243,11 +251,15 @@
}, },
"send_large_photos_as_documents": { "send_large_photos_as_documents": {
"name": "Большие фото как документы", "name": "Большие фото как документы",
"description": "Как обрабатывать фото, превышающие лимиты Telegram (10МБ или сумма размеров 10000пкс). Если true, отправлять как документы. Если false, уменьшать для соответствия лимитам." "description": "Как обрабатывать фото, превышающие лимиты Telegram (10МБ или сумма размеров 10000пкс). Если true, отправлять как документы. Если false, пропускать."
}, },
"chat_action": { "chat_action": {
"name": "Действие в чате", "name": "Действие в чате",
"description": "Действие для отображения во время обработки (typing, upload_photo, upload_video, upload_document). Оставьте пустым для отключения." "description": "Действие для отображения во время обработки (typing, upload_photo, upload_video, upload_document). Оставьте пустым для отключения."
},
"ignore_quiet_hours": {
"name": "Игнорировать тихие часы",
"description": "Отправить уведомление немедленно, даже во время тихих часов. По умолчанию уведомления ставятся в очередь и отправляются по окончании тихих часов."
} }
} }
} }