# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "beautifulsoup4==4.13.3",
#     "datasets>=3.0.0",
# ]
# ///
"""
Danske Taler API Downloader

This script downloads speeches/articles from the Danske Taler API:
https://www.dansketaler.dk/api/v1

It saves each speech in the following structure:

```
{
    "text": "...",
    "source": "danske-taler",
    "id": "danske-taler_0",
    "added": "2024-12-16",
    "created": "2024-01-01, 2024-01-01",
    "license": "cc0",
    "domain": "Spoken",
    "metadata": {
        "source-pretty": "Danske Taler"
    }
}
```
"""

import logging
import time
from datetime import date
from pathlib import Path
from typing import Any

import datasets
import pandas as pd
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm

logger = logging.getLogger(__name__)

# Configuration
API_BASE_URL = "https://www.dansketaler.dk/api/v1"


def get_all_speeches() -> list[dict[str, Any]]:
    # fetch the first page, mainly to learn the total number of pages
    url = f"{API_BASE_URL}/speeches"
    response = requests.get(url)
    response.raise_for_status()

    speeches = response.json()
    meta = speeches["meta"]
    total_pages = meta["total_pages"]

    # fetch all pages
    all_speeches = []
    for page in range(1, total_pages + 1):
        url = f"{API_BASE_URL}/speeches?page={page}"
        response = requests.get(url)
        response.raise_for_status()

        speeches = response.json()
        all_speeches.extend(speeches["speeches"])

    return all_speeches


def fetch_license_div(
    url: str, max_retries: int = 3, backoff_factor: float = 0.5
) -> str | None:
    """
    Fetches the license div from the page with retry logic.

    Args:
        url: The URL to fetch the license div from
        max_retries: Maximum number of retry attempts
        backoff_factor: Factor to determine exponential backoff time between retries

    Returns:
        The text content of the license div if found, None otherwise
    """
    retries = 0

    while retries <= max_retries:
        try:
            response = requests.get(url, timeout=10)
            response.raise_for_status()

            soup = BeautifulSoup(response.text, "html.parser")
            license_div = soup.find("div", class_="speech-copyright")

            return license_div.text if license_div else None

        except (requests.RequestException, AttributeError) as e:
            retries += 1

            if retries > max_retries:
                logger.info(
                    f"Failed to fetch license after {max_retries} attempts: {str(e)}"
                )
                return None

            # calculate wait time using exponential backoff
            wait_time = backoff_factor * (2 ** (retries - 1))
            logger.info(
                f"Attempt {retries} failed. Retrying in {wait_time:.2f} seconds..."
            )
            time.sleep(wait_time)

    return None


def convert_to_license(license_information: str | None) -> str | None:
    """Maps statements that the material is free of copyright to "cc0"; otherwise returns the license information unchanged."""
    if license_information and (
        ("Materialet er fri af ophavsret" in license_information)
        # also matches a misspelled variant of the phrase
        or ("Materialet er fri af ophvasret" in license_information)
        or ("Ophavsretten er bortfaldet" in license_information)
        or ("Manuskriptet er fri af ophavsret" in license_information)
        or ("Offentlig " == license_information)
    ):
        return "cc0"
    return license_information


def convert_to_row(speech_meta: dict[str, Any]) -> dict[str, Any]:
    speech_id = speech_meta["id"]

    date_of_speech = speech_meta["date"]["iso_date"]
    date_of_speech_start = f"{date_of_speech}"
    date_of_speech_end = f"{date_of_speech}"

    license_information = fetch_license_div(speech_meta["url"])

    row = {
        "text": speech_meta["transcription"],
        "source": "danske-taler",
        "id": f"danske-taler_{speech_id}",
        # current date
        "added": date.today().isoformat(),
        "created": f"{date_of_speech_start}, {date_of_speech_end}",
        "license_information": license_information,
        "domain": "Spoken",
        "metadata": {"source-pretty": "Danske Taler"},
    }

    return row


def download_speeches() -> pd.DataFrame:
    logger.info("Fetching all speeches from Danske Taler API")
    speeches = get_all_speeches()
    logger.info(f"Found {len(speeches)} speeches")

    rows = []
    for speech in tqdm(speeches):
        row = convert_to_row(speech)
        rows.append(row)

    logger.info(f"Saving {len(rows)} speeches to dataset")
    df = pd.DataFrame(rows)
    return df


def main():
    save_path = Path(__file__).parent / "danske-taler.parquet"
    save_path_all = Path(__file__).parent / "tmp" / "danske-taler-all.parquet"
    save_path_all.parent.mkdir(parents=False, exist_ok=True)

    if save_path_all.exists():
        logger.info(f"Loading dataset from {save_path_all}")
        df = pd.read_parquet(save_path_all)
    else:
        logger.info(f"Downloading speeches and saving to {save_path_all}")
        df = download_speeches()
        df.to_parquet(save_path_all)

    licenses = [convert_to_license(license) for license in df["license_information"]]
    df["license"] = licenses

    unique_licenses = set(df["license"].tolist())
    logger.info("Unique licenses:")
    for license in unique_licenses:
        logger.info(f"\t{license}")

    # remove documents without a cc0 license
    len_df = len(df)
    df = df[df["license"] == "cc0"]
    logger.info(f"Removed {len_df - len(df)} documents without a cc0 license")

    # remove duplicate ids
    len_df = len(df)
    df = df.drop_duplicates(subset=["id"])
    logger.info(f"Removed {len_df - len(df)} duplicate ids")

    # remove rows with empty text
    len_df = len(df)
    df = df[df["text"].str.strip() != ""]
    logger.info(f"Removed {len_df - len(df)} rows with empty text")

    # remove rows with duplicate text
    len_df = len(df)
    df = df.drop_duplicates(subset=["text"])
    logger.info(f"Removed {len_df - len(df)} rows with duplicate text")

    dataset = datasets.Dataset.from_pandas(df)

    assert len(set(dataset["id"])) == len(dataset), "IDs are not unique"
    assert len(set(dataset["text"])) == len(dataset), "Texts are not unique"
    assert len(set(dataset["license"])) == 1, "Multiple licenses found"

    # check for html tags in text
    assert not df["text"].str.contains("<[^>]*>").any(), "HTML tags found in text"

    dataset.to_parquet(save_path)


if __name__ == "__main__":
    log_path = Path(__file__).parent / "danske-taler.log"
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
        handlers=[
            logging.StreamHandler(),
            logging.FileHandler(log_path),
        ],
    )
    main()