Add quiet hours, fix Telegram bugs, and improve cache performance
All checks were successful
Validate / Hassfest (push) Successful in 5s
- Add quiet hours support to queue notifications during configured time windows (window semantics illustrated below)
- Fix UnboundLocalError when single-item document chunk exceeds max_asset_data_size
- Fix document-only multi-item chunks being silently dropped (missing skip guard)
- Fix notification queue entity lookup by storing entity_id in queued params
- Fix quiet hours using OS timezone instead of HA-configured timezone (dt_util.now)
- Fix chat_action schema rejecting empty string from "Disabled" selector
- Fix stale thumbhash cache entries not being removed on mismatch
- Fix translation descriptions for send_large_photos_as_documents
- Add batch async_set_many() to TelegramFileCache to reduce disk writes (sketched below)
- Add max-entries eviction (2000) for thumbhash cache to prevent unbounded growth (sketched after the diff)
- Eliminate redundant _is_asset_id/get_asset_thumbhash lookups in media group loop

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
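The quiet-hours window check added in this commit supports windows that cross midnight. A minimal standalone sketch of the comparison logic with made-up times (the in_window helper and the example values are ours, not part of the diff; the real check is _is_quiet_hours below):

from datetime import time

def in_window(now: time, start: time, end: time) -> bool:
    """Mirror of the comparison in _is_quiet_hours: a same-day window when
    start <= end, otherwise the window wraps past midnight."""
    if start <= end:
        return start <= now < end
    return now >= start or now < end

# A 22:00-08:00 window: late night and early morning are quiet, midday is not.
assert in_window(time(23, 30), time(22, 0), time(8, 0))
assert in_window(time(7, 0), time(22, 0), time(8, 0))
assert not in_window(time(12, 0), time(22, 0), time(8, 0))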
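TelegramFileCache.async_set_many() lives in storage.py, which is not part of the diff shown here. A minimal sketch of what the batched write could look like, assuming the cache keeps an in-memory dict and persists through one save call (the _data and _async_save names are assumptions; the entry layout matches the call site in the last hunk):

    async def async_set_many(
        self, entries: list[tuple[str, str, str, str | None]]
    ) -> None:
        """Store many (key, file_id, media_type, thumbhash) entries with a single disk write."""
        for key, file_id, media_type, thumbhash in entries:
            self._data[key] = {
                "file_id": file_id,
                "type": media_type,
                "thumbhash": thumbhash,
            }
        # One write for the whole batch instead of one per async_set call.
        await self._async_save()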
@@ -41,6 +41,8 @@ from .const import (
     CONF_ALBUM_ID,
     CONF_ALBUM_NAME,
     CONF_HUB_NAME,
+    CONF_QUIET_HOURS_END,
+    CONF_QUIET_HOURS_START,
     CONF_TELEGRAM_BOT_TOKEN,
     DOMAIN,
     SERVICE_GET_ASSETS,
@@ -48,7 +50,7 @@ from .const import (
     SERVICE_SEND_TELEGRAM_NOTIFICATION,
 )
 from .coordinator import AlbumData, ImmichAlbumWatcherCoordinator
-from .storage import TelegramFileCache
+from .storage import NotificationQueue, TelegramFileCache
 
 _LOGGER = logging.getLogger(__name__)
 
@@ -242,8 +244,9 @@ async def async_setup_entry(
             ),
             vol.Optional("send_large_photos_as_documents", default=False): bool,
             vol.Optional("chat_action", default="typing"): vol.Any(
-                None, vol.In(["typing", "upload_photo", "upload_video", "upload_document"])
+                None, vol.In(["", "typing", "upload_photo", "upload_video", "upload_document"])
             ),
+            vol.Optional("ignore_quiet_hours", default=False): bool,
         },
         "async_send_telegram_notification",
         supports_response=SupportsResponse.OPTIONAL,
@@ -334,6 +337,29 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
         )
         return {"assets": assets}
 
+    def _is_quiet_hours(self) -> bool:
+        """Check if current time is within configured quiet hours."""
+        from datetime import time as dt_time
+        from homeassistant.util import dt as dt_util
+
+        start_str = self._entry.options.get(CONF_QUIET_HOURS_START, "")
+        end_str = self._entry.options.get(CONF_QUIET_HOURS_END, "")
+        if not start_str or not end_str:
+            return False
+
+        try:
+            now = dt_util.now().time()
+            start_time = dt_time.fromisoformat(start_str)
+            end_time = dt_time.fromisoformat(end_str)
+        except ValueError:
+            return False
+
+        if start_time <= end_time:
+            return start_time <= now < end_time
+        else:
+            # Crosses midnight (e.g., 22:00 - 08:00)
+            return now >= start_time or now < end_time
+
     async def async_send_telegram_notification(
         self,
         chat_id: str,
@@ -349,6 +375,7 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
         max_asset_data_size: int | None = None,
         send_large_photos_as_documents: bool = False,
         chat_action: str | None = "typing",
+        ignore_quiet_hours: bool = False,
     ) -> ServiceResponse:
         """Send notification to Telegram.
 
@@ -365,6 +392,26 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
         If wait_for_response is False, the task will be executed in the background
         and the service will return immediately.
         """
+        # Check quiet hours — queue notification if active
+        if not ignore_quiet_hours and self._is_quiet_hours():
+            queue: NotificationQueue = self.hass.data[DOMAIN][self._entry.entry_id]["notification_queue"]
+            await queue.async_enqueue({
+                "entity_id": self.entity_id,
+                "chat_id": chat_id,
+                "assets": assets,
+                "bot_token": bot_token,
+                "caption": caption,
+                "reply_to_message_id": reply_to_message_id,
+                "disable_web_page_preview": disable_web_page_preview,
+                "parse_mode": parse_mode,
+                "max_group_size": max_group_size,
+                "chunk_delay": chunk_delay,
+                "max_asset_data_size": max_asset_data_size,
+                "send_large_photos_as_documents": send_large_photos_as_documents,
+                "chat_action": chat_action,
+            })
+            return {"success": True, "status": "queued_quiet_hours"}
+
         # If non-blocking mode, create a background task and return immediately
         if not wait_for_response:
             self.hass.async_create_task(
@@ -1214,6 +1261,7 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
             # Only apply caption and reply_to to the first chunk
             chunk_caption = caption if chunk_idx == 0 else None
             chunk_reply_to = reply_to_message_id if chunk_idx == 0 else None
+            result = None
 
             if media_type == "photo":
                 _LOGGER.debug("Sending chunk %d/%d as single photo", chunk_idx + 1, len(chunks))
@@ -1248,6 +1296,10 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
             except aiohttp.ClientError as err:
                 return {"success": False, "error": f"Failed to download media: {err}", "failed_at_chunk": chunk_idx + 1}
 
+            if result is None:
+                # Document was skipped (e.g., exceeded max_asset_data_size)
+                continue
+
             if not result.get("success"):
                 result["failed_at_chunk"] = chunk_idx + 1
                 return result
@@ -1258,10 +1310,11 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
             _LOGGER.debug("Sending chunk %d/%d as media group (%d items)", chunk_idx + 1, len(chunks), len(chunk))
 
             # Helper to get the appropriate cache for a cache key
-            def get_cache_for_key(key: str) -> TelegramFileCache | None:
+            def get_cache_for_key(key: str, is_asset: bool | None = None) -> TelegramFileCache | None:
                 """Return asset cache if key is a UUID, otherwise URL cache."""
-                is_asset_id = _is_asset_id(key)
-                return self.coordinator.telegram_asset_cache if is_asset_id else self.coordinator.telegram_cache
+                if is_asset is None:
+                    is_asset = _is_asset_id(key)
+                return self.coordinator.telegram_asset_cache if is_asset else self.coordinator.telegram_cache
 
             # Collect media items - either from cache (file_id) or by downloading
             # Each item: (type, media_ref, filename, cache_key, is_cached, content_type)
@@ -1322,8 +1375,9 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
                     continue
 
                 # Check cache first for photos/videos
-                item_cache = get_cache_for_key(item_cache_key)
-                item_thumbhash = self.coordinator.get_asset_thumbhash(item_cache_key) if _is_asset_id(item_cache_key) else None
+                is_asset = _is_asset_id(item_cache_key)
+                item_cache = get_cache_for_key(item_cache_key, is_asset)
+                item_thumbhash = self.coordinator.get_asset_thumbhash(item_cache_key) if is_asset else None
                 cached = item_cache.get(item_cache_key, thumbhash=item_thumbhash) if item_cache else None
                 if cached and cached.get("file_id"):
                     # Use cached file_id
@@ -1393,7 +1447,7 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
             }
 
             # Skip this chunk if all files were filtered out
-            if not media_items and not oversized_photos:
+            if not media_items and not oversized_photos and not documents_to_send:
                 _LOGGER.info("Chunk %d/%d: all %d media items skipped",
                              chunk_idx + 1, len(chunks), len(chunk))
                 continue
@@ -1467,9 +1521,10 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
                         if sub_caption:
                             first_caption_used = True
                         # Cache the uploaded file_id
-                        sg_cache = get_cache_for_key(sg_ck)
+                        sg_is_asset = _is_asset_id(sg_ck)
+                        sg_cache = get_cache_for_key(sg_ck, sg_is_asset)
                         if sg_cache:
-                            sg_thumbhash = self.coordinator.get_asset_thumbhash(sg_ck) if _is_asset_id(sg_ck) else None
+                            sg_thumbhash = self.coordinator.get_asset_thumbhash(sg_ck) if sg_is_asset else None
                             result_data = result.get("result", {})
                             if sg_type == "photo":
                                 photos = result_data.get("photo", [])
@@ -1551,7 +1606,8 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
                     # Build media JSON - use file_id for cached, attach:// for uploaded
                     media_json = []
                     upload_idx = 0
-                    keys_to_cache: list[tuple[str, int, str]] = []  # (cache_key, result_idx, type)
+                    # (cache_key, result_idx, type, is_asset, thumbhash)
+                    keys_to_cache: list[tuple[str, int, str, bool, str | None]] = []
 
                     for i, (media_type, media_ref, filename, item_cache_key, is_cached, item_content_type) in enumerate(sub_group_items):
                         if is_cached:
@@ -1570,7 +1626,9 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
                             # Use provided content_type or default based on media type
                             content_type = item_content_type or ("image/jpeg" if media_type == "photo" else "video/mp4")
                             form.add_field(attach_name, media_ref, filename=filename, content_type=content_type)
-                            keys_to_cache.append((item_cache_key, i, media_type))
+                            ck_is_asset = _is_asset_id(item_cache_key)
+                            ck_thumbhash = self.coordinator.get_asset_thumbhash(item_cache_key) if ck_is_asset else None
+                            keys_to_cache.append((item_cache_key, i, media_type, ck_is_asset, ck_thumbhash))
                             upload_idx += 1
 
                         if i == 0 and sub_caption and not first_caption_used:
@@ -1597,22 +1655,31 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
                         if sub_caption:
                             first_caption_used = True
 
-                        # Cache the newly uploaded file_ids
+                        # Cache the newly uploaded file_ids (batched per cache instance)
                         if keys_to_cache:
                             result_messages = result.get("result", [])
-                            for ck, result_idx, m_type in keys_to_cache:
-                                ck_cache = get_cache_for_key(ck)
-                                ck_thumbhash = self.coordinator.get_asset_thumbhash(ck) if _is_asset_id(ck) else None
-                                if result_idx < len(result_messages) and ck_cache:
-                                    msg = result_messages[result_idx]
-                                    if m_type == "photo":
-                                        photos = msg.get("photo", [])
-                                        if photos:
-                                            await ck_cache.async_set(ck, photos[-1].get("file_id"), "photo", thumbhash=ck_thumbhash)
-                                    elif m_type == "video":
-                                        video = msg.get("video", {})
-                                        if video.get("file_id"):
-                                            await ck_cache.async_set(ck, video["file_id"], "video", thumbhash=ck_thumbhash)
+                            # Group entries by cache instance for batch writes
+                            cache_batches: dict[int, tuple[TelegramFileCache, list[tuple[str, str, str, str | None]]]] = {}
+                            for ck, result_idx, m_type, ck_is_asset, ck_thumbhash in keys_to_cache:
+                                ck_cache = get_cache_for_key(ck, ck_is_asset)
+                                if result_idx >= len(result_messages) or not ck_cache:
+                                    continue
+                                msg = result_messages[result_idx]
+                                file_id = None
+                                if m_type == "photo":
+                                    photos = msg.get("photo", [])
+                                    if photos:
+                                        file_id = photos[-1].get("file_id")
+                                elif m_type == "video":
+                                    video = msg.get("video", {})
+                                    file_id = video.get("file_id")
+                                if file_id:
+                                    cache_id = id(ck_cache)
+                                    if cache_id not in cache_batches:
+                                        cache_batches[cache_id] = (ck_cache, [])
+                                    cache_batches[cache_id][1].append((ck, file_id, m_type, ck_thumbhash))
+                            for ck_cache, batch_entries in cache_batches.values():
+                                await ck_cache.async_set_many(batch_entries)
                     else:
                         # Log detailed error for media group with total size info
                         uploaded_data = [m for m in sub_group_items if not m[4]]
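The 2000-entry cap on the thumbhash cache mentioned in the commit message is likewise implemented in storage.py and not shown in this diff. A sketch of the simplest insertion-order eviction that would satisfy it (the constant name and dict layout are illustrative assumptions):

MAX_THUMBHASH_ENTRIES = 2000  # cap named in the commit message

def evict_thumbhash_overflow(cache: dict[str, str]) -> None:
    """Drop the oldest entries once the cache grows past the cap.

    Python dicts preserve insertion order, so the first key is the oldest.
    """
    while len(cache) > MAX_THUMBHASH_ENTRIES:
        del cache[next(iter(cache))]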