"""Enrich dataset records with metadata extracted from legislation.gov.uk XML content.

Reads the JSON Lines data files referenced by a ``dataset_config.json`` file, parses each
record's ``xml_content`` field, and writes the enriched records back in place (unless
``--dry-run`` is given).
"""

import argparse
import copy
import json
import logging
import tempfile
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import Iterable, List, Tuple

LOGGER = logging.getLogger("configure")
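
# XML namespace prefixes used by legislation.gov.uk metadata documents.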
NS = {
    "leg": "http://www.legislation.gov.uk/namespaces/legislation",
    "ukm": "http://www.legislation.gov.uk/namespaces/metadata",
    "dc": "http://purl.org/dc/elements/1.1/",
    "dct": "http://purl.org/dc/terms/",
}


def enrich(record: dict) -> dict:
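    """Populate ``record`` with metadata parsed from its ``xml_content`` field.

    For example, an XML root carrying ``RestrictStartDate="2020-01-01"`` and
    ``RestrictExtent="E+W"`` yields ``coming_into_force_date == "2020-01-01"`` and
    ``extent == ["E", "W"]``.
    """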
    root = ET.fromstring(record["xml_content"])

    record["coming_into_force_date"] = root.get("RestrictStartDate")
    extent = root.get("RestrictExtent")
    record["extent"] = extent.split("+") if extent else None

    meta = root.find("ukm:Metadata", NS)
    if meta is not None:
        valid = meta.find("dct:valid", NS)
        record["publication_date"] = valid.text if valid is not None else None

    status = root.find(".//ukm:PrimaryMetadata/ukm:DocumentClassification/ukm:DocumentStatus", NS)
    record["status"] = status.get("Value") if status is not None else record.get("status")

    enactment = root.find(".//ukm:PrimaryMetadata/ukm:EnactmentDate", NS)
    record["enacted_date"] = enactment.get("Date") if enactment is not None else None

    return record


def _iter_data_files(config_path: Path, splits: Iterable[str] | None = None) -> List[Tuple[str, Path]]:
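    """Resolve (split, absolute path) pairs for every data file referenced by the dataset config."""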
    with config_path.open("r", encoding="utf-8") as handle:
        config = json.load(handle)

    data_files = config.get("data_files", {})
    if isinstance(data_files, (list, str)):
        # Accept a bare path or a list of paths as shorthand for a single "train" split.
        data_files = {"train": data_files}

    selected_splits = set(splits) if splits else set(data_files.keys())

    resolved: List[Tuple[str, Path]] = []
    for split, value in data_files.items():
        if split not in selected_splits:
            continue

        paths = value if isinstance(value, list) else [value]
        for rel_path in paths:
            # Paths in the config are relative to the config file itself.
            resolved.append((split, (config_path.parent / rel_path).resolve()))

    if not resolved:
        raise ValueError("No data files found for the requested splits")

    return resolved


def _configure_logging(verbose: bool) -> None:
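    """Attach a single stream handler to the module logger at the requested verbosity."""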
    level = logging.DEBUG if verbose else logging.INFO
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
    LOGGER.setLevel(level)
    LOGGER.handlers.clear()
    LOGGER.addHandler(handler)
    LOGGER.propagate = False


def _process_file(path: Path, *, dry_run: bool, strict: bool, limit: int | None) -> Tuple[int, int, int]:
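    """Enrich every JSON Lines record in ``path`` and return ``(total, changed, errors)`` counts."""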
    total = 0
    changed = 0
    errors = 0

    if limit is not None and not dry_run:
        raise ValueError("--limit can only be used together with --dry-run")

    if not path.exists():
        raise FileNotFoundError(f"Data file not found: {path}")

    LOGGER.info(
        "Processing %s (dry_run=%s, strict=%s, limit=%s)",
        path,
        dry_run,
        strict,
        limit,
    )

    tmp_file = None
    tmp_path = None

    if not dry_run:
        # Write enriched records to a sibling temporary file, then swap it in at the end.
        tmp_file = tempfile.NamedTemporaryFile("w", delete=False, dir=str(path.parent), encoding="utf-8")
        tmp_path = Path(tmp_file.name)

    try:
        with path.open("r", encoding="utf-8") as reader:
            for line in reader:
                line = line.rstrip("\n")
                if not line:
                    # Preserve blank lines so the rewritten file keeps its original layout.
                    if not dry_run and tmp_file is not None:
                        tmp_file.write("\n")
                    total += 1
                    if limit is not None and total >= limit:
                        break
                    continue

                record = json.loads(line)
                original = copy.deepcopy(record)

                try:
                    enriched = enrich(record)
                except ET.ParseError as exc:
                    errors += 1
                    if strict:
                        raise
                    enriched = original
                    LOGGER.warning("XML parse error in %s: %s", path, exc)

                if enriched != original:
                    changed += 1

                if LOGGER.isEnabledFor(logging.DEBUG):
                    LOGGER.debug(
                        "Processed record #%s (id=%s) – changed=%s",
                        total + 1,
                        record.get("id"),
                        enriched != original,
                    )

                if not dry_run and tmp_file is not None:
                    tmp_file.write(json.dumps(enriched))
                    tmp_file.write("\n")

                total += 1
                if limit is not None and total >= limit:
                    break
    finally:
        if tmp_file is not None:
            tmp_file.close()

    if not dry_run and tmp_path is not None:
        tmp_path.replace(path)

    return total, changed, errors


def main(argv: List[str] | None = None) -> int:
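    """Command-line entry point; returns the process exit status."""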
    parser = argparse.ArgumentParser(description="Enrich dataset records with metadata extracted from XML content")
    parser.add_argument(
        "--dataset-config",
        default="dataset_config.json",
        type=Path,
        help="Path to the dataset_config.json file (default: dataset_config.json)",
    )
    parser.add_argument(
        "--split",
        action="append",
        help="Specific dataset split(s) to process (default: all splits in the config)",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Run enrichment without writing changes back to disk",
    )
    parser.add_argument(
        "--limit",
        type=int,
        help="Only process the first N records (implies --dry-run)",
    )
    parser.add_argument(
        "--strict",
        action="store_true",
        help="Fail immediately on XML parse errors instead of skipping the affected records",
    )
    parser.add_argument(
        "--verbose",
        action="store_true",
        help="Enable verbose logging (includes per-file progress details)",
    )

    args = parser.parse_args(argv)

    if args.limit is not None:
        # --limit is a sampling aid, so it always implies a dry run.
        args.dry_run = True

    _configure_logging(args.verbose)

    config_path: Path = args.dataset_config.resolve()
    files = _iter_data_files(config_path, args.split)

    overall_total = 0
    overall_changed = 0
    overall_errors = 0

    for split, data_path in files:
        total, changed, errors = _process_file(data_path, dry_run=args.dry_run, strict=args.strict, limit=args.limit)
        overall_total += total
        overall_changed += changed
        overall_errors += errors
        LOGGER.info(
            "Processed %s record(s) for split '%s' in %s – %s updated, %s error(s)",
            total,
            split,
            data_path,
            changed,
            errors,
        )

    if args.dry_run:
        LOGGER.info("Dry run – no files were modified")

    if overall_errors:
        LOGGER.warning("Completed with %s XML error(s)", overall_errors)

    # XML errors only produce a failing exit code in --strict mode.
    return 1 if args.strict and overall_errors else 0


if __name__ == "__main__":
    raise SystemExit(main())
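
# Example invocation (the script filename shown here is illustrative):
#   python enrich_dataset.py --dataset-config dataset_config.json --split train --dry-run --limit 100 --verbose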