Add Telegram file ID caching and reverse geocoding fields
All checks were successful
Validate / Hassfest (push) Successful in 3s

Implement caching for Telegram file_ids to avoid re-uploading the same media.
Cached IDs are reused for subsequent sends, improving performance significantly.
Added a configurable cache TTL option (1-168 hours, default 48).

Also added city, state, and country fields from Immich reverse geocoding
to asset data in events and get_assets service.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
2026-02-01 03:12:05 +03:00
parent 011f105823
commit c29fc2fbcf
11 changed files with 478 additions and 87 deletions

View File

@@ -3,6 +3,7 @@
## Version Management ## Version Management
Update the integration version in `custom_components/immich_album_watcher/manifest.json` only when changes are made to the **integration content** (files inside `custom_components/immich_album_watcher/`). Update the integration version in `custom_components/immich_album_watcher/manifest.json` only when changes are made to the **integration content** (files inside `custom_components/immich_album_watcher/`).
**IMPORTANT:** ALWAYS ask before bumping the version.
Do NOT bump version for: Do NOT bump version for:

View File

@@ -77,6 +77,7 @@ A Home Assistant custom integration that monitors [Immich](https://immich.app/)
| Albums | Albums to monitor | Required | | Albums | Albums to monitor | Required |
| Scan Interval | How often to check for changes (seconds) | 60 | | Scan Interval | How often to check for changes (seconds) | 60 |
| Telegram Bot Token | Bot token for sending media to Telegram (optional) | - | | Telegram Bot Token | Bot token for sending media to Telegram (optional) | - |
| Telegram Cache TTL | How long to cache uploaded file IDs (hours, 1-168) | 48 |
## Entities Created (per album) ## Entities Created (per album)
@@ -241,6 +242,8 @@ Send notifications to Telegram. Supports multiple formats:
The service downloads media from Immich and uploads it to Telegram, bypassing any CORS restrictions. Large lists of media are automatically split into multiple media groups based on the `max_group_size` parameter (default: 10 items per group). The service downloads media from Immich and uploads it to Telegram, bypassing any CORS restrictions. Large lists of media are automatically split into multiple media groups based on the `max_group_size` parameter (default: 10 items per group).
**File ID Caching:** When media is uploaded to Telegram, the service caches the returned `file_id`. Subsequent sends of the same media will use the cached `file_id` instead of re-uploading, significantly improving performance. The cache TTL is configurable in hub options (default: 48 hours, range: 1-168 hours). The cache is persistent across Home Assistant restarts and is stored per album.
**Examples:** **Examples:**
Text message: Text message:
@@ -420,6 +423,9 @@ Each item in the `added_assets` list contains the following fields:
| `rating` | User rating of the asset (1-5 stars, or `null` if not rated) | | `rating` | User rating of the asset (1-5 stars, or `null` if not rated) |
| `latitude` | GPS latitude coordinate (or `null` if no geolocation) | | `latitude` | GPS latitude coordinate (or `null` if no geolocation) |
| `longitude` | GPS longitude coordinate (or `null` if no geolocation) | | `longitude` | GPS longitude coordinate (or `null` if no geolocation) |
| `city` | City name from reverse geocoding (or `null` if unavailable) |
| `state` | State/region name from reverse geocoding (or `null` if unavailable) |
| `country` | Country name from reverse geocoding (or `null` if unavailable) |
| `url` | Public URL to view the asset (only present if album has a shared link) | | `url` | Public URL to view the asset (only present if album has a shared link) |
| `download_url` | Direct download URL for the original file (if shared link exists) | | `download_url` | Direct download URL for the original file (if shared link exists) |
| `playback_url` | Video playback URL (for VIDEO assets only, if shared link exists) | | `playback_url` | Video playback URL (for VIDEO assets only, if shared link exists) |

View File

@@ -15,12 +15,14 @@ from .const import (
CONF_HUB_NAME, CONF_HUB_NAME,
CONF_IMMICH_URL, CONF_IMMICH_URL,
CONF_SCAN_INTERVAL, CONF_SCAN_INTERVAL,
CONF_TELEGRAM_CACHE_TTL,
DEFAULT_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL,
DEFAULT_TELEGRAM_CACHE_TTL,
DOMAIN, DOMAIN,
PLATFORMS, PLATFORMS,
) )
from .coordinator import ImmichAlbumWatcherCoordinator from .coordinator import ImmichAlbumWatcherCoordinator
from .storage import ImmichAlbumStorage from .storage import ImmichAlbumStorage, TelegramFileCache
_LOGGER = logging.getLogger(__name__) _LOGGER = logging.getLogger(__name__)
@@ -33,6 +35,7 @@ class ImmichHubData:
url: str url: str
api_key: str api_key: str
scan_interval: int scan_interval: int
telegram_cache_ttl: int
@dataclass @dataclass
@@ -55,6 +58,7 @@ async def async_setup_entry(hass: HomeAssistant, entry: ImmichConfigEntry) -> bo
url = entry.data[CONF_IMMICH_URL] url = entry.data[CONF_IMMICH_URL]
api_key = entry.data[CONF_API_KEY] api_key = entry.data[CONF_API_KEY]
scan_interval = entry.options.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL) scan_interval = entry.options.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
telegram_cache_ttl = entry.options.get(CONF_TELEGRAM_CACHE_TTL, DEFAULT_TELEGRAM_CACHE_TTL)
# Store hub data # Store hub data
entry.runtime_data = ImmichHubData( entry.runtime_data = ImmichHubData(
@@ -62,6 +66,7 @@ async def async_setup_entry(hass: HomeAssistant, entry: ImmichConfigEntry) -> bo
url=url, url=url,
api_key=api_key, api_key=api_key,
scan_interval=scan_interval, scan_interval=scan_interval,
telegram_cache_ttl=telegram_cache_ttl,
) )
# Create storage for persisting album state across restarts # Create storage for persisting album state across restarts
@@ -107,6 +112,12 @@ async def _async_setup_subentry_coordinator(
_LOGGER.debug("Setting up coordinator for album: %s (%s)", album_name, album_id) _LOGGER.debug("Setting up coordinator for album: %s (%s)", album_name, album_id)
# Create and load Telegram file cache for this album
# TTL is in hours from config, convert to seconds
cache_ttl_seconds = hub_data.telegram_cache_ttl * 60 * 60
telegram_cache = TelegramFileCache(hass, album_id, ttl_seconds=cache_ttl_seconds)
await telegram_cache.async_load()
# Create coordinator for this album # Create coordinator for this album
coordinator = ImmichAlbumWatcherCoordinator( coordinator = ImmichAlbumWatcherCoordinator(
hass, hass,
@@ -117,6 +128,7 @@ async def _async_setup_subentry_coordinator(
scan_interval=hub_data.scan_interval, scan_interval=hub_data.scan_interval,
hub_name=hub_data.name, hub_name=hub_data.name,
storage=storage, storage=storage,
telegram_cache=telegram_cache,
) )
# Load persisted state before first refresh to detect changes during downtime # Load persisted state before first refresh to detect changes during downtime

View File

@@ -27,7 +27,9 @@ from .const import (
CONF_IMMICH_URL, CONF_IMMICH_URL,
CONF_SCAN_INTERVAL, CONF_SCAN_INTERVAL,
CONF_TELEGRAM_BOT_TOKEN, CONF_TELEGRAM_BOT_TOKEN,
CONF_TELEGRAM_CACHE_TTL,
DEFAULT_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL,
DEFAULT_TELEGRAM_CACHE_TTL,
DOMAIN, DOMAIN,
SUBENTRY_TYPE_ALBUM, SUBENTRY_TYPE_ALBUM,
) )
@@ -252,6 +254,9 @@ class ImmichAlbumWatcherOptionsFlow(OptionsFlow):
CONF_TELEGRAM_BOT_TOKEN: user_input.get( CONF_TELEGRAM_BOT_TOKEN: user_input.get(
CONF_TELEGRAM_BOT_TOKEN, "" CONF_TELEGRAM_BOT_TOKEN, ""
), ),
CONF_TELEGRAM_CACHE_TTL: user_input.get(
CONF_TELEGRAM_CACHE_TTL, DEFAULT_TELEGRAM_CACHE_TTL
),
}, },
) )
@@ -261,6 +266,9 @@ class ImmichAlbumWatcherOptionsFlow(OptionsFlow):
current_bot_token = self._config_entry.options.get( current_bot_token = self._config_entry.options.get(
CONF_TELEGRAM_BOT_TOKEN, "" CONF_TELEGRAM_BOT_TOKEN, ""
) )
current_cache_ttl = self._config_entry.options.get(
CONF_TELEGRAM_CACHE_TTL, DEFAULT_TELEGRAM_CACHE_TTL
)
return self.async_show_form( return self.async_show_form(
step_id="init", step_id="init",
@@ -272,6 +280,9 @@ class ImmichAlbumWatcherOptionsFlow(OptionsFlow):
vol.Optional( vol.Optional(
CONF_TELEGRAM_BOT_TOKEN, default=current_bot_token CONF_TELEGRAM_BOT_TOKEN, default=current_bot_token
): str, ): str,
vol.Optional(
CONF_TELEGRAM_CACHE_TTL, default=current_cache_ttl
): vol.All(vol.Coerce(int), vol.Range(min=1, max=168)),
} }
), ),
) )

View File

@@ -14,12 +14,14 @@ CONF_ALBUM_ID: Final = "album_id"
CONF_ALBUM_NAME: Final = "album_name" CONF_ALBUM_NAME: Final = "album_name"
CONF_SCAN_INTERVAL: Final = "scan_interval" CONF_SCAN_INTERVAL: Final = "scan_interval"
CONF_TELEGRAM_BOT_TOKEN: Final = "telegram_bot_token" CONF_TELEGRAM_BOT_TOKEN: Final = "telegram_bot_token"
CONF_TELEGRAM_CACHE_TTL: Final = "telegram_cache_ttl"
# Subentry type # Subentry type
SUBENTRY_TYPE_ALBUM: Final = "album" SUBENTRY_TYPE_ALBUM: Final = "album"
# Defaults # Defaults
DEFAULT_SCAN_INTERVAL: Final = 60 # seconds DEFAULT_SCAN_INTERVAL: Final = 60 # seconds
DEFAULT_TELEGRAM_CACHE_TTL: Final = 48 # hours
NEW_ASSETS_RESET_DELAY: Final = 300 # 5 minutes NEW_ASSETS_RESET_DELAY: Final = 300 # 5 minutes
DEFAULT_SHARE_PASSWORD: Final = "immich123" DEFAULT_SHARE_PASSWORD: Final = "immich123"
@@ -70,6 +72,9 @@ ATTR_ASSET_IS_FAVORITE: Final = "is_favorite"
ATTR_ASSET_RATING: Final = "rating" ATTR_ASSET_RATING: Final = "rating"
ATTR_ASSET_LATITUDE: Final = "latitude" ATTR_ASSET_LATITUDE: Final = "latitude"
ATTR_ASSET_LONGITUDE: Final = "longitude" ATTR_ASSET_LONGITUDE: Final = "longitude"
ATTR_ASSET_CITY: Final = "city"
ATTR_ASSET_STATE: Final = "state"
ATTR_ASSET_COUNTRY: Final = "country"
# Asset types # Asset types
ASSET_TYPE_IMAGE: Final = "IMAGE" ASSET_TYPE_IMAGE: Final = "IMAGE"

View File

@@ -8,7 +8,7 @@ from datetime import datetime, timedelta
from typing import TYPE_CHECKING, Any from typing import TYPE_CHECKING, Any
if TYPE_CHECKING: if TYPE_CHECKING:
from .storage import ImmichAlbumStorage from .storage import ImmichAlbumStorage, TelegramFileCache
import aiohttp import aiohttp
@@ -31,6 +31,9 @@ from .const import (
ATTR_ASSET_IS_FAVORITE, ATTR_ASSET_IS_FAVORITE,
ATTR_ASSET_LATITUDE, ATTR_ASSET_LATITUDE,
ATTR_ASSET_LONGITUDE, ATTR_ASSET_LONGITUDE,
ATTR_ASSET_CITY,
ATTR_ASSET_STATE,
ATTR_ASSET_COUNTRY,
ATTR_ASSET_OWNER, ATTR_ASSET_OWNER,
ATTR_ASSET_OWNER_ID, ATTR_ASSET_OWNER_ID,
ATTR_ASSET_PLAYBACK_URL, ATTR_ASSET_PLAYBACK_URL,
@@ -124,6 +127,9 @@ class AssetInfo:
rating: int | None = None rating: int | None = None
latitude: float | None = None latitude: float | None = None
longitude: float | None = None longitude: float | None = None
city: str | None = None
state: str | None = None
country: str | None = None
is_processed: bool = True # Whether asset is fully processed by Immich is_processed: bool = True # Whether asset is fully processed by Immich
@classmethod @classmethod
@@ -155,6 +161,11 @@ class AssetInfo:
latitude = exif_info.get("latitude") if exif_info else None latitude = exif_info.get("latitude") if exif_info else None
longitude = exif_info.get("longitude") if exif_info else None longitude = exif_info.get("longitude") if exif_info else None
# Get reverse geocoded location
city = exif_info.get("city") if exif_info else None
state = exif_info.get("state") if exif_info else None
country = exif_info.get("country") if exif_info else None
# Check if asset is fully processed by Immich # Check if asset is fully processed by Immich
asset_type = data.get("type", ASSET_TYPE_IMAGE) asset_type = data.get("type", ASSET_TYPE_IMAGE)
is_processed = cls._check_processing_status(data, asset_type) is_processed = cls._check_processing_status(data, asset_type)
@@ -172,6 +183,9 @@ class AssetInfo:
rating=rating, rating=rating,
latitude=latitude, latitude=latitude,
longitude=longitude, longitude=longitude,
city=city,
state=state,
country=country,
is_processed=is_processed, is_processed=is_processed,
) )
@@ -304,6 +318,7 @@ class ImmichAlbumWatcherCoordinator(DataUpdateCoordinator[AlbumData | None]):
scan_interval: int, scan_interval: int,
hub_name: str = "Immich", hub_name: str = "Immich",
storage: ImmichAlbumStorage | None = None, storage: ImmichAlbumStorage | None = None,
telegram_cache: TelegramFileCache | None = None,
) -> None: ) -> None:
"""Initialize the coordinator.""" """Initialize the coordinator."""
super().__init__( super().__init__(
@@ -323,6 +338,7 @@ class ImmichAlbumWatcherCoordinator(DataUpdateCoordinator[AlbumData | None]):
self._users_cache: dict[str, str] = {} # user_id -> name self._users_cache: dict[str, str] = {} # user_id -> name
self._shared_links: list[SharedLinkInfo] = [] self._shared_links: list[SharedLinkInfo] = []
self._storage = storage self._storage = storage
self._telegram_cache = telegram_cache
self._persisted_asset_ids: set[str] | None = None self._persisted_asset_ids: set[str] | None = None
@property @property
@@ -345,6 +361,11 @@ class ImmichAlbumWatcherCoordinator(DataUpdateCoordinator[AlbumData | None]):
"""Return the album name.""" """Return the album name."""
return self._album_name return self._album_name
@property
def telegram_cache(self) -> TelegramFileCache | None:
"""Return the Telegram file cache."""
return self._telegram_cache
def update_scan_interval(self, scan_interval: int) -> None: def update_scan_interval(self, scan_interval: int) -> None:
"""Update the scan interval.""" """Update the scan interval."""
self.update_interval = timedelta(seconds=scan_interval) self.update_interval = timedelta(seconds=scan_interval)
@@ -660,6 +681,9 @@ class ImmichAlbumWatcherCoordinator(DataUpdateCoordinator[AlbumData | None]):
ATTR_ASSET_RATING: asset.rating, ATTR_ASSET_RATING: asset.rating,
ATTR_ASSET_LATITUDE: asset.latitude, ATTR_ASSET_LATITUDE: asset.latitude,
ATTR_ASSET_LONGITUDE: asset.longitude, ATTR_ASSET_LONGITUDE: asset.longitude,
ATTR_ASSET_CITY: asset.city,
ATTR_ASSET_STATE: asset.state,
ATTR_ASSET_COUNTRY: asset.country,
} }
# Add thumbnail URL if requested # Add thumbnail URL if requested

View File

@@ -8,5 +8,5 @@
"iot_class": "cloud_polling", "iot_class": "cloud_polling",
"issue_tracker": "https://github.com/DolgolyovAlexei/haos-hacs-immich-album-watcher/issues", "issue_tracker": "https://github.com/DolgolyovAlexei/haos-hacs-immich-album-watcher/issues",
"requirements": [], "requirements": [],
"version": "2.3.0" "version": "2.6.0"
} }

View File

@@ -499,6 +499,41 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
if not url: if not url:
return {"success": False, "error": "Missing 'url' for photo"} return {"success": False, "error": "Missing 'url' for photo"}
# Check cache for file_id
cache = self.coordinator.telegram_cache
cached = cache.get(url) if cache else None
if cached and cached.get("file_id"):
# Use cached file_id - no download needed
file_id = cached["file_id"]
_LOGGER.debug("Using cached Telegram file_id for photo")
payload = {
"chat_id": chat_id,
"photo": file_id,
"parse_mode": parse_mode,
}
if caption:
payload["caption"] = caption
if reply_to_message_id:
payload["reply_to_message_id"] = reply_to_message_id
telegram_url = f"https://api.telegram.org/bot{token}/sendPhoto"
try:
async with session.post(telegram_url, json=payload) as response:
result = await response.json()
if response.status == 200 and result.get("ok"):
return {
"success": True,
"message_id": result.get("result", {}).get("message_id"),
"cached": True,
}
else:
# Cache might be stale, fall through to upload
_LOGGER.debug("Cached file_id failed, will re-upload: %s", result.get("description"))
except aiohttp.ClientError as err:
_LOGGER.debug("Cached file_id request failed: %s", err)
try: try:
# Download the photo # Download the photo
_LOGGER.debug("Downloading photo from %s", url[:80]) _LOGGER.debug("Downloading photo from %s", url[:80])
@@ -531,7 +566,7 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
_LOGGER.info("Photo %s, sending as document", reason) _LOGGER.info("Photo %s, sending as document", reason)
return await self._send_telegram_document( return await self._send_telegram_document(
session, token, chat_id, data, "photo.jpg", session, token, chat_id, data, "photo.jpg",
caption, reply_to_message_id, parse_mode caption, reply_to_message_id, parse_mode, url
) )
else: else:
# Skip oversized photo # Skip oversized photo
@@ -562,6 +597,14 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
result = await response.json() result = await response.json()
_LOGGER.debug("Telegram API response: status=%d, ok=%s", response.status, result.get("ok")) _LOGGER.debug("Telegram API response: status=%d, ok=%s", response.status, result.get("ok"))
if response.status == 200 and result.get("ok"): if response.status == 200 and result.get("ok"):
# Extract and cache file_id
photos = result.get("result", {}).get("photo", [])
if photos and cache:
# Use the largest photo's file_id
file_id = photos[-1].get("file_id")
if file_id:
await cache.async_set(url, file_id, "photo")
return { return {
"success": True, "success": True,
"message_id": result.get("result", {}).get("message_id"), "message_id": result.get("result", {}).get("message_id"),
@@ -601,6 +644,41 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
if not url: if not url:
return {"success": False, "error": "Missing 'url' for video"} return {"success": False, "error": "Missing 'url' for video"}
# Check cache for file_id
cache = self.coordinator.telegram_cache
cached = cache.get(url) if cache else None
if cached and cached.get("file_id"):
# Use cached file_id - no download needed
file_id = cached["file_id"]
_LOGGER.debug("Using cached Telegram file_id for video")
payload = {
"chat_id": chat_id,
"video": file_id,
"parse_mode": parse_mode,
}
if caption:
payload["caption"] = caption
if reply_to_message_id:
payload["reply_to_message_id"] = reply_to_message_id
telegram_url = f"https://api.telegram.org/bot{token}/sendVideo"
try:
async with session.post(telegram_url, json=payload) as response:
result = await response.json()
if response.status == 200 and result.get("ok"):
return {
"success": True,
"message_id": result.get("result", {}).get("message_id"),
"cached": True,
}
else:
# Cache might be stale, fall through to upload
_LOGGER.debug("Cached file_id failed, will re-upload: %s", result.get("description"))
except aiohttp.ClientError as err:
_LOGGER.debug("Cached file_id request failed: %s", err)
try: try:
# Download the video # Download the video
_LOGGER.debug("Downloading video from %s", url[:80]) _LOGGER.debug("Downloading video from %s", url[:80])
@@ -645,6 +723,13 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
result = await response.json() result = await response.json()
_LOGGER.debug("Telegram API response: status=%d, ok=%s", response.status, result.get("ok")) _LOGGER.debug("Telegram API response: status=%d, ok=%s", response.status, result.get("ok"))
if response.status == 200 and result.get("ok"): if response.status == 200 and result.get("ok"):
# Extract and cache file_id
video = result.get("result", {}).get("video", {})
if video and cache:
file_id = video.get("file_id")
if file_id:
await cache.async_set(url, file_id, "video")
return { return {
"success": True, "success": True,
"message_id": result.get("result", {}).get("message_id"), "message_id": result.get("result", {}).get("message_id"),
@@ -676,11 +761,46 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
caption: str | None = None, caption: str | None = None,
reply_to_message_id: int | None = None, reply_to_message_id: int | None = None,
parse_mode: str = "HTML", parse_mode: str = "HTML",
source_url: str | None = None,
) -> ServiceResponse: ) -> ServiceResponse:
"""Send a photo as a document to Telegram (for oversized photos).""" """Send a photo as a document to Telegram (for oversized photos)."""
import aiohttp import aiohttp
from aiohttp import FormData from aiohttp import FormData
# Check cache for file_id if source_url is provided
cache = self.coordinator.telegram_cache
if source_url:
cached = cache.get(source_url) if cache else None
if cached and cached.get("file_id") and cached.get("type") == "document":
# Use cached file_id
file_id = cached["file_id"]
_LOGGER.debug("Using cached Telegram file_id for document")
payload = {
"chat_id": chat_id,
"document": file_id,
"parse_mode": parse_mode,
}
if caption:
payload["caption"] = caption
if reply_to_message_id:
payload["reply_to_message_id"] = reply_to_message_id
telegram_url = f"https://api.telegram.org/bot{token}/sendDocument"
try:
async with session.post(telegram_url, json=payload) as response:
result = await response.json()
if response.status == 200 and result.get("ok"):
return {
"success": True,
"message_id": result.get("result", {}).get("message_id"),
"cached": True,
}
else:
_LOGGER.debug("Cached file_id failed, will re-upload: %s", result.get("description"))
except aiohttp.ClientError as err:
_LOGGER.debug("Cached file_id request failed: %s", err)
try: try:
# Build multipart form # Build multipart form
form = FormData() form = FormData()
@@ -702,6 +822,13 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
result = await response.json() result = await response.json()
_LOGGER.debug("Telegram API response: status=%d, ok=%s", response.status, result.get("ok")) _LOGGER.debug("Telegram API response: status=%d, ok=%s", response.status, result.get("ok"))
if response.status == 200 and result.get("ok"): if response.status == 200 and result.get("ok"):
# Extract and cache file_id
if source_url and cache:
document = result.get("result", {}).get("document", {})
file_id = document.get("file_id")
if file_id:
await cache.async_set(source_url, file_id, "document")
return { return {
"success": True, "success": True,
"message_id": result.get("result", {}).get("message_id"), "message_id": result.get("result", {}).get("message_id"),
@@ -794,9 +921,14 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
# Multi-item chunk: use sendMediaGroup # Multi-item chunk: use sendMediaGroup
_LOGGER.debug("Sending chunk %d/%d as media group (%d items)", chunk_idx + 1, len(chunks), len(chunk)) _LOGGER.debug("Sending chunk %d/%d as media group (%d items)", chunk_idx + 1, len(chunks), len(chunk))
# Download all media files for this chunk # Get cache reference
media_files: list[tuple[str, bytes, str]] = [] # (type, data, filename) cache = self.coordinator.telegram_cache
oversized_photos: list[tuple[bytes, str | None]] = [] # For send_large_photos_as_documents=true
# Collect media items - either from cache (file_id) or by downloading
# Each item: (type, media_ref, filename, url, is_cached)
# media_ref is either file_id (str) or data (bytes)
media_items: list[tuple[str, str | bytes, str, str, bool]] = []
oversized_photos: list[tuple[bytes, str | None, str]] = [] # (data, caption, url)
skipped_count = 0 skipped_count = 0
for i, item in enumerate(chunk): for i, item in enumerate(chunk):
@@ -815,6 +947,16 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
"error": f"Invalid type '{media_type}' in item {chunk_idx * max_group_size + i}. Must be 'photo' or 'video'.", "error": f"Invalid type '{media_type}' in item {chunk_idx * max_group_size + i}. Must be 'photo' or 'video'.",
} }
# Check cache first
cached = cache.get(url) if cache else None
if cached and cached.get("file_id"):
# Use cached file_id
ext = "jpg" if media_type == "photo" else "mp4"
filename = f"media_{chunk_idx * max_group_size + i}.{ext}"
media_items.append((media_type, cached["file_id"], filename, url, True))
_LOGGER.debug("Using cached file_id for media %d", chunk_idx * max_group_size + i)
continue
try: try:
_LOGGER.debug("Downloading media %d from %s", chunk_idx * max_group_size + i, url[:80]) _LOGGER.debug("Downloading media %d from %s", chunk_idx * max_group_size + i, url[:80])
async with session.get(url) as resp: async with session.get(url) as resp:
@@ -842,8 +984,8 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
if send_large_photos_as_documents: if send_large_photos_as_documents:
# Separate this photo to send as document later # Separate this photo to send as document later
# Caption only on first item of first chunk # Caption only on first item of first chunk
photo_caption = caption if chunk_idx == 0 and i == 0 and len(media_files) == 0 else None photo_caption = caption if chunk_idx == 0 and i == 0 and len(media_items) == 0 else None
oversized_photos.append((data, photo_caption)) oversized_photos.append((data, photo_caption, url))
_LOGGER.info("Photo %d %s, will send as document", i, reason) _LOGGER.info("Photo %d %s, will send as document", i, reason)
continue continue
else: else:
@@ -854,7 +996,7 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
ext = "jpg" if media_type == "photo" else "mp4" ext = "jpg" if media_type == "photo" else "mp4"
filename = f"media_{chunk_idx * max_group_size + i}.{ext}" filename = f"media_{chunk_idx * max_group_size + i}.{ext}"
media_files.append((media_type, data, filename)) media_items.append((media_type, data, filename, url, False))
except aiohttp.ClientError as err: except aiohttp.ClientError as err:
return { return {
"success": False, "success": False,
@@ -862,95 +1004,165 @@ class ImmichAlbumBaseSensor(CoordinatorEntity[ImmichAlbumWatcherCoordinator], Se
} }
# Skip this chunk if all files were filtered out # Skip this chunk if all files were filtered out
if not media_files and not oversized_photos: if not media_items and not oversized_photos:
_LOGGER.info("Chunk %d/%d: all %d media items skipped", _LOGGER.info("Chunk %d/%d: all %d media items skipped",
chunk_idx + 1, len(chunks), len(chunk)) chunk_idx + 1, len(chunks), len(chunk))
continue continue
# Send media group if we have normal-sized files # Send media group if we have normal-sized files
if media_files: if media_items:
# Build multipart form # Check if all items are cached (can use simple JSON payload)
form = FormData() all_cached = all(is_cached for _, _, _, _, is_cached in media_items)
form.add_field("chat_id", chat_id)
# Only use reply_to_message_id for the first chunk if all_cached:
if chunk_idx == 0 and reply_to_message_id: # All items cached - use simple JSON payload with file_ids
form.add_field("reply_to_message_id", str(reply_to_message_id)) _LOGGER.debug("All %d items cached, using file_ids", len(media_items))
media_json = []
for i, (media_type, file_id, _, _, _) in enumerate(media_items):
media_item_json: dict[str, Any] = {
"type": media_type,
"media": file_id,
}
if chunk_idx == 0 and i == 0 and caption and not oversized_photos:
media_item_json["caption"] = caption
media_item_json["parse_mode"] = parse_mode
media_json.append(media_item_json)
# Build media JSON with attach:// references payload = {
media_json = [] "chat_id": chat_id,
for i, (media_type, data, filename) in enumerate(media_files): "media": media_json,
attach_name = f"file{i}"
media_item: dict[str, Any] = {
"type": media_type,
"media": f"attach://{attach_name}",
} }
# Only add caption to the first item of the first chunk (if no oversized photos with caption) if chunk_idx == 0 and reply_to_message_id:
if chunk_idx == 0 and i == 0 and caption and not oversized_photos: payload["reply_to_message_id"] = reply_to_message_id
media_item["caption"] = caption
media_item["parse_mode"] = parse_mode
media_json.append(media_item)
content_type = "image/jpeg" if media_type == "photo" else "video/mp4" telegram_url = f"https://api.telegram.org/bot{token}/sendMediaGroup"
form.add_field(attach_name, data, filename=filename, content_type=content_type) try:
async with session.post(telegram_url, json=payload) as response:
result = await response.json()
if response.status == 200 and result.get("ok"):
chunk_message_ids = [
msg.get("message_id") for msg in result.get("result", [])
]
all_message_ids.extend(chunk_message_ids)
else:
# Cache might be stale - fall through to upload path
_LOGGER.debug("Cached file_ids failed, will re-upload: %s", result.get("description"))
all_cached = False # Force re-upload
except aiohttp.ClientError as err:
_LOGGER.debug("Cached file_ids request failed: %s", err)
all_cached = False
form.add_field("media", json.dumps(media_json)) if not all_cached:
# Build multipart form with mix of cached file_ids and uploaded data
form = FormData()
form.add_field("chat_id", chat_id)
# Send to Telegram # Only use reply_to_message_id for the first chunk
telegram_url = f"https://api.telegram.org/bot{token}/sendMediaGroup" if chunk_idx == 0 and reply_to_message_id:
form.add_field("reply_to_message_id", str(reply_to_message_id))
try: # Build media JSON - use file_id for cached, attach:// for uploaded
_LOGGER.debug("Uploading media group chunk %d/%d (%d files) to Telegram", media_json = []
chunk_idx + 1, len(chunks), len(media_files)) upload_idx = 0
async with session.post(telegram_url, data=form) as response: urls_to_cache: list[tuple[str, int, str]] = [] # (url, result_idx, type)
result = await response.json()
_LOGGER.debug("Telegram API response: status=%d, ok=%s", response.status, result.get("ok")) for i, (media_type, media_ref, filename, url, is_cached) in enumerate(media_items):
if response.status == 200 and result.get("ok"): if is_cached:
chunk_message_ids = [ # Use file_id directly
msg.get("message_id") for msg in result.get("result", []) media_item_json: dict[str, Any] = {
] "type": media_type,
all_message_ids.extend(chunk_message_ids) "media": media_ref, # file_id
else:
# Log detailed error for media group with total size info
total_size = sum(len(d) for _, d, _ in media_files)
_LOGGER.error(
"Telegram API error for chunk %d/%d: %s | Media count: %d | Total size: %d bytes (%.2f MB)",
chunk_idx + 1, len(chunks),
result.get("description", "Unknown Telegram error"),
len(media_files),
total_size,
total_size / (1024 * 1024)
)
# Log detailed diagnostics for the first photo in the group
for media_type, data, _ in media_files:
if media_type == "photo":
self._log_telegram_error(
error_code=result.get("error_code"),
description=result.get("description", "Unknown Telegram error"),
data=data,
media_type="photo",
)
break # Only log details for first photo
return {
"success": False,
"error": result.get("description", "Unknown Telegram error"),
"error_code": result.get("error_code"),
"failed_at_chunk": chunk_idx + 1,
} }
except aiohttp.ClientError as err: else:
_LOGGER.error("Telegram upload failed for chunk %d: %s", chunk_idx + 1, err) # Upload this file
return { attach_name = f"file{upload_idx}"
"success": False, media_item_json = {
"error": str(err), "type": media_type,
"failed_at_chunk": chunk_idx + 1, "media": f"attach://{attach_name}",
} }
content_type = "image/jpeg" if media_type == "photo" else "video/mp4"
form.add_field(attach_name, media_ref, filename=filename, content_type=content_type)
urls_to_cache.append((url, i, media_type))
upload_idx += 1
if chunk_idx == 0 and i == 0 and caption and not oversized_photos:
media_item_json["caption"] = caption
media_item_json["parse_mode"] = parse_mode
media_json.append(media_item_json)
form.add_field("media", json.dumps(media_json))
# Send to Telegram
telegram_url = f"https://api.telegram.org/bot{token}/sendMediaGroup"
try:
_LOGGER.debug("Uploading media group chunk %d/%d (%d files, %d cached) to Telegram",
chunk_idx + 1, len(chunks), len(media_items), len(media_items) - upload_idx)
async with session.post(telegram_url, data=form) as response:
result = await response.json()
_LOGGER.debug("Telegram API response: status=%d, ok=%s", response.status, result.get("ok"))
if response.status == 200 and result.get("ok"):
chunk_message_ids = [
msg.get("message_id") for msg in result.get("result", [])
]
all_message_ids.extend(chunk_message_ids)
# Cache the newly uploaded file_ids
if cache and urls_to_cache:
result_messages = result.get("result", [])
for url, result_idx, m_type in urls_to_cache:
if result_idx < len(result_messages):
msg = result_messages[result_idx]
if m_type == "photo":
photos = msg.get("photo", [])
if photos:
await cache.async_set(url, photos[-1].get("file_id"), "photo")
elif m_type == "video":
video = msg.get("video", {})
if video.get("file_id"):
await cache.async_set(url, video["file_id"], "video")
else:
# Log detailed error for media group with total size info
uploaded_data = [m for m in media_items if not m[4]]
total_size = sum(len(d) for _, d, _, _, _ in uploaded_data if isinstance(d, bytes))
_LOGGER.error(
"Telegram API error for chunk %d/%d: %s | Media count: %d | Uploaded size: %d bytes (%.2f MB)",
chunk_idx + 1, len(chunks),
result.get("description", "Unknown Telegram error"),
len(media_items),
total_size,
total_size / (1024 * 1024) if total_size else 0
)
# Log detailed diagnostics for the first photo in the group
for media_type, media_ref, _, _, is_cached in media_items:
if media_type == "photo" and not is_cached and isinstance(media_ref, bytes):
self._log_telegram_error(
error_code=result.get("error_code"),
description=result.get("description", "Unknown Telegram error"),
data=media_ref,
media_type="photo",
)
break # Only log details for first photo
return {
"success": False,
"error": result.get("description", "Unknown Telegram error"),
"error_code": result.get("error_code"),
"failed_at_chunk": chunk_idx + 1,
}
except aiohttp.ClientError as err:
_LOGGER.error("Telegram upload failed for chunk %d: %s", chunk_idx + 1, err)
return {
"success": False,
"error": str(err),
"failed_at_chunk": chunk_idx + 1,
}
# Send oversized photos as documents # Send oversized photos as documents
for i, (data, photo_caption) in enumerate(oversized_photos): for i, (data, photo_caption, photo_url) in enumerate(oversized_photos):
_LOGGER.debug("Sending oversized photo %d/%d as document", i + 1, len(oversized_photos)) _LOGGER.debug("Sending oversized photo %d/%d as document", i + 1, len(oversized_photos))
result = await self._send_telegram_document( result = await self._send_telegram_document(
session, token, chat_id, data, f"photo_{i}.jpg", session, token, chat_id, data, f"photo_{i}.jpg",
photo_caption, None, parse_mode photo_caption, None, parse_mode, photo_url
) )
if result.get("success"): if result.get("success"):
all_message_ids.append(result.get("message_id")) all_message_ids.append(result.get("message_id"))

View File

@@ -14,6 +14,9 @@ _LOGGER = logging.getLogger(__name__)
STORAGE_VERSION = 1 STORAGE_VERSION = 1
STORAGE_KEY_PREFIX = "immich_album_watcher" STORAGE_KEY_PREFIX = "immich_album_watcher"
# Default TTL for Telegram file_id cache (48 hours in seconds).
# User-configurable via the options flow ("Telegram Cache TTL", 1-168 hours).
DEFAULT_TELEGRAM_CACHE_TTL = 48 * 60 * 60
class ImmichAlbumStorage: class ImmichAlbumStorage:
"""Handles persistence of album state across restarts.""" """Handles persistence of album state across restarts."""
@@ -63,3 +66,116 @@ class ImmichAlbumStorage:
"""Remove all storage data.""" """Remove all storage data."""
await self._store.async_remove() await self._store.async_remove()
self._data = None self._data = None
class TelegramFileCache:
    """Cache for Telegram file_ids to avoid re-uploading media.

    When a file is uploaded to Telegram, it returns a file_id that can be reused
    to send the same file without re-uploading. This cache stores these file_ids
    keyed by the source URL, persisted via Home Assistant's Store helper.
    """

    def __init__(
        self,
        hass: HomeAssistant,
        album_id: str,
        ttl_seconds: int = DEFAULT_TELEGRAM_CACHE_TTL,
    ) -> None:
        """Initialize the Telegram file cache.

        Args:
            hass: Home Assistant instance
            album_id: Album ID for scoping the cache
            ttl_seconds: Time-to-live for cache entries in seconds (default: 48 hours)
        """
        self._store: Store[dict[str, Any]] = Store(
            hass, STORAGE_VERSION, f"{STORAGE_KEY_PREFIX}.telegram_cache.{album_id}"
        )
        # None until async_load() runs (or async_set() lazily initializes it).
        self._data: dict[str, Any] | None = None
        self._ttl_seconds = ttl_seconds

    def _is_expired(self, entry: dict[str, Any]) -> bool:
        """Return True if a cache entry is past its TTL or has a corrupt timestamp.

        Entries without a 'cached_at' timestamp are treated as non-expiring
        (age unknown, matches previous behavior). Entries whose timestamp
        cannot be parsed are treated as expired so they are purged instead of
        raising ValueError and breaking cache loads/lookups.
        """
        cached_at_str = entry.get("cached_at")
        if not cached_at_str:
            return False
        try:
            cached_at = datetime.fromisoformat(cached_at_str)
        except (ValueError, TypeError):
            # Corrupt value in storage: expire it rather than crash.
            return True
        age_seconds = (datetime.now(timezone.utc) - cached_at).total_seconds()
        return age_seconds > self._ttl_seconds

    async def async_load(self) -> None:
        """Load cache data from storage and drop expired entries."""
        self._data = await self._store.async_load() or {"files": {}}
        # Clean up expired entries on load
        await self._cleanup_expired()
        _LOGGER.debug(
            "Loaded Telegram file cache with %d entries",
            len(self._data.get("files", {})),
        )

    async def _cleanup_expired(self) -> None:
        """Remove expired cache entries, persisting only if something changed."""
        if not self._data or "files" not in self._data:
            return
        expired_keys = [
            url
            for url, entry in self._data["files"].items()
            if self._is_expired(entry)
        ]
        if expired_keys:
            for key in expired_keys:
                del self._data["files"][key]
            await self._store.async_save(self._data)
            _LOGGER.debug(
                "Cleaned up %d expired Telegram cache entries", len(expired_keys)
            )

    def get(self, url: str) -> dict[str, Any] | None:
        """Get cached file_id for a URL.

        Args:
            url: The source URL of the media

        Returns:
            Dict with 'file_id' and 'type' if cached and not expired, None otherwise
        """
        if not self._data or "files" not in self._data:
            return None
        entry = self._data["files"].get(url)
        if not entry or self._is_expired(entry):
            return None
        return {
            "file_id": entry.get("file_id"),
            "type": entry.get("type"),
        }

    async def async_set(self, url: str, file_id: str, media_type: str) -> None:
        """Store a file_id for a URL.

        Args:
            url: The source URL of the media
            file_id: The Telegram file_id
            media_type: The type of media ('photo', 'video', 'document')
        """
        if self._data is None:
            self._data = {"files": {}}
        self._data["files"][url] = {
            "file_id": file_id,
            "type": media_type,
            "cached_at": datetime.now(timezone.utc).isoformat(),
        }
        await self._store.async_save(self._data)
        # URL intentionally not logged (may embed an API key in query params).
        _LOGGER.debug("Cached Telegram file_id for URL (type: %s)", media_type)

    async def async_remove(self) -> None:
        """Remove all cache data."""
        await self._store.async_remove()
        self._data = None

View File

@@ -116,14 +116,16 @@
"step": { "step": {
"init": { "init": {
"title": "Immich Album Watcher Options", "title": "Immich Album Watcher Options",
"description": "Configure the polling interval for all albums.", "description": "Configure the polling interval and Telegram settings for all albums.",
"data": { "data": {
"scan_interval": "Scan interval (seconds)", "scan_interval": "Scan interval (seconds)",
"telegram_bot_token": "Telegram Bot Token" "telegram_bot_token": "Telegram Bot Token",
"telegram_cache_ttl": "Telegram Cache TTL (hours)"
}, },
"data_description": { "data_description": {
"scan_interval": "How often to check for album changes (10-3600 seconds)", "scan_interval": "How often to check for album changes (10-3600 seconds)",
"telegram_bot_token": "Bot token for sending notifications to Telegram" "telegram_bot_token": "Bot token for sending notifications to Telegram",
"telegram_cache_ttl": "How long to cache uploaded file IDs to avoid re-uploading (1-168 hours, default: 48)"
} }
} }
} }

View File

@@ -116,14 +116,16 @@
"step": { "step": {
"init": { "init": {
"title": "Настройки Immich Album Watcher", "title": "Настройки Immich Album Watcher",
"description": "Настройте интервал опроса для всех альбомов.", "description": "Настройте интервал опроса и параметры Telegram для всех альбомов.",
"data": { "data": {
"scan_interval": "Интервал сканирования (секунды)", "scan_interval": "Интервал сканирования (секунды)",
"telegram_bot_token": "Токен Telegram бота" "telegram_bot_token": "Токен Telegram бота",
"telegram_cache_ttl": "Время жизни кэша Telegram (часы)"
}, },
"data_description": { "data_description": {
"scan_interval": "Как часто проверять изменения в альбомах (10-3600 секунд)", "scan_interval": "Как часто проверять изменения в альбомах (10-3600 секунд)",
"telegram_bot_token": "Токен бота для отправки уведомлений в Telegram" "telegram_bot_token": "Токен бота для отправки уведомлений в Telegram",
"telegram_cache_ttl": "Сколько хранить ID загруженных файлов для повторной отправки без загрузки (1-168 часов, по умолчанию: 48)"
} }
} }
} }