feat(tap-uk-ees): fetch all historical releases, not just latest
All checks were successful
Build and Push Docker Images / Build Backend (FastAPI) (push) Successful in 32s
Build and Push Docker Images / Build Frontend (Next.js) (push) Successful in 1m9s
Build and Push Docker Images / Build Pipeline (Meltano + dbt + Airflow) (push) Successful in 1m42s
Build and Push Docker Images / Trigger Portainer Update (push) Successful in 0s

Add get_all_release_ids() to paginate /publications/{slug}/releases and
iterate over every release in get_records(). Add latest_only config flag
(default false) to restore single-release behaviour for daily runs.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Tudor Sitaru
2026-03-30 21:37:26 +01:00
parent f48faa1803
commit 9a1572ea20

View File

@@ -31,6 +31,24 @@ def get_content_release_id(publication_slug: str) -> str:
return resp.json()["id"]
def get_all_release_ids(publication_slug: str) -> list[str]:
    """Return all release IDs for a publication, newest first.

    Walks the paginated ``/publications/{slug}/releases`` endpoint one
    page at a time (20 items per page) until the ``totalPages`` value
    reported by the API is exhausted. HTTP errors are raised via
    ``raise_for_status``; an absent ``paging`` block is treated as a
    single-page response.
    """
    endpoint = f"{CONTENT_API_BASE}/publications/{publication_slug}/releases"
    release_ids: list[str] = []
    current_page = 1
    while True:
        response = requests.get(
            endpoint,
            params={"page": current_page, "pageSize": 20},
            timeout=TIMEOUT,
        )
        response.raise_for_status()
        payload = response.json()
        release_ids.extend(item["id"] for item in payload.get("results", []))
        total_pages = payload.get("paging", {}).get("totalPages", 1)
        if current_page >= total_pages:
            return release_ids
        current_page += 1
def download_release_zip(release_id: str) -> zipfile.ZipFile:
"""Download all data files for a release as a ZIP."""
url = f"{CONTENT_API_BASE}/releases/{release_id}/files"
@@ -58,11 +76,19 @@ class EESDatasetStream(Stream):
def get_records(self, context):
import pandas as pd
release_id = get_content_release_id(self._publication_slug)
latest_only = self.config.get("latest_only", False)
if latest_only:
release_ids = [get_content_release_id(self._publication_slug)]
else:
release_ids = get_all_release_ids(self._publication_slug)
self.logger.info(
"Downloading release %s for %s",
release_id,
self._publication_slug,
"Found %d release(s) for %s", len(release_ids), self._publication_slug
)
for release_id in release_ids:
self.logger.info(
"Downloading release %s for %s", release_id, self._publication_slug
)
zf = download_release_zip(release_id)
@@ -75,12 +101,13 @@ class EESDatasetStream(Stream):
break
if not target:
self.logger.error(
"File matching '%s' not found in ZIP. Available: %s",
self.logger.warning(
"File matching '%s' not found in release %s. Available: %s",
self._target_filename,
release_id,
[n for n in all_files if n.endswith(".csv")],
)
return
continue
self.logger.info("Reading %s from ZIP", target)
with zf.open(target) as f:
@@ -103,7 +130,7 @@ class EESDatasetStream(Stream):
if urn_col in df.columns:
df = df[df[urn_col].notna() & (df[urn_col] != "")]
self.logger.info("Emitting %d school-level rows", len(df))
self.logger.info("Emitting %d school-level rows from release %s", len(df), release_id)
for _, row in df.iterrows():
record = row.to_dict()
@@ -414,6 +441,12 @@ class TapUKEES(Tap):
config_jsonschema = th.PropertiesList(
th.Property("base_url", th.StringType, description="EES API base URL"),
th.Property(
"latest_only",
th.BooleanType,
description="Only fetch the latest release per publication (default: False — fetches all historical releases)",
default=False,
),
).to_dict()
def discover_streams(self):