Avoid full catalog fetch during refresh
This commit is contained in:
@@ -224,6 +224,75 @@ class DispatcharrDataUpdateCoordinator(DataUpdateCoordinator):
|
||||
self.stream_catalog_active_count = active_count
|
||||
self._last_catalog_refresh = datetime.now(timezone.utc)
|
||||
|
||||
def _cache_catalog_stream(self, stream: dict) -> None:
|
||||
"""Store one stream detail from /api/channels/streams/ in local caches."""
|
||||
if not isinstance(stream, dict):
|
||||
return
|
||||
|
||||
stream_id = stream.get("id")
|
||||
if isinstance(stream_id, int):
|
||||
self.stream_catalog_by_id[stream_id] = stream
|
||||
|
||||
name = stream.get("name")
|
||||
if not name:
|
||||
return
|
||||
|
||||
normalized = self._normalize_stream_name(name)
|
||||
if not normalized:
|
||||
return
|
||||
|
||||
existing = self.stream_catalog.get(normalized)
|
||||
if existing is None:
|
||||
self.stream_catalog[normalized] = stream
|
||||
return
|
||||
|
||||
new_viewers = stream.get("current_viewers") or 0
|
||||
existing_viewers = existing.get("current_viewers") or 0
|
||||
if isinstance(new_viewers, int) and new_viewers > existing_viewers:
|
||||
self.stream_catalog[normalized] = stream
|
||||
|
||||
def _refresh_catalog_stats(self) -> None:
|
||||
"""Refresh stream catalog counters from local cache."""
|
||||
active_count = 0
|
||||
for stream in self.stream_catalog_by_id.values():
|
||||
current_viewers = stream.get("current_viewers") or 0
|
||||
if isinstance(current_viewers, int) and current_viewers > 0:
|
||||
active_count += 1
|
||||
|
||||
self.stream_catalog_count = len(self.stream_catalog_by_id)
|
||||
self.stream_catalog_active_count = active_count
|
||||
self._last_catalog_refresh = datetime.now(timezone.utc)
|
||||
|
||||
async def _async_fetch_stream_details_for_active_ids(
|
||||
self, stream_ids: set[int]
|
||||
) -> None:
|
||||
"""Fetch catalog rows for active stream ids only.
|
||||
|
||||
Fetching the full catalog can be very large and may exceed Home Assistant
|
||||
coordinator refresh timeout. This keeps refreshes quick and reliable.
|
||||
"""
|
||||
missing_ids = [
|
||||
sid for sid in stream_ids if sid not in self.stream_catalog_by_id
|
||||
]
|
||||
if not missing_ids:
|
||||
return
|
||||
|
||||
for stream_id in missing_ids:
|
||||
try:
|
||||
payload = await self.api_request(
|
||||
"GET", f"/api/channels/streams/{stream_id}/"
|
||||
)
|
||||
except UpdateFailed as err:
|
||||
_LOGGER.debug(
|
||||
"Could not fetch stream detail for id=%s: %s", stream_id, err
|
||||
)
|
||||
continue
|
||||
|
||||
if isinstance(payload, dict):
|
||||
self._cache_catalog_stream(payload)
|
||||
|
||||
self._refresh_catalog_stats()
|
||||
|
||||
def _get_catalog_stream(self, stream_name: str) -> dict | None:
|
||||
"""Best-effort stream lookup from /api/channels/streams."""
|
||||
if not stream_name:
|
||||
@@ -321,17 +390,18 @@ class DispatcharrDataUpdateCoordinator(DataUpdateCoordinator):
|
||||
async def _async_update_data(self):
|
||||
"""Update data by fetching from authenticated endpoints."""
|
||||
now = datetime.now(timezone.utc)
|
||||
if (
|
||||
self._last_catalog_refresh is None
|
||||
or now - self._last_catalog_refresh > timedelta(minutes=10)
|
||||
):
|
||||
await self._async_fetch_stream_catalog()
|
||||
|
||||
status_data = await self._api_request("GET", f"{self.base_url}/proxy/ts/status")
|
||||
active_streams = status_data.get("channels", [])
|
||||
if not active_streams:
|
||||
return {}
|
||||
|
||||
active_stream_ids = {
|
||||
stream["stream_id"]
|
||||
for stream in active_streams
|
||||
if isinstance(stream.get("stream_id"), int)
|
||||
}
|
||||
await self._async_fetch_stream_details_for_active_ids(active_stream_ids)
|
||||
|
||||
xml_string = await self._api_request(
|
||||
"GET", f"{self.base_url}/output/epg", is_json=False
|
||||
)
|
||||
|
||||
Reference in New Issue
Block a user