# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "datasets",
#     "dynaword",
# ]
#
# [tool.uv.sources]
# dynaword = { git = "https://huggingface.co/datasets/danish-foundation-models/danish-dynaword" }
# ///
"""Script for downloading and processing the Grundtvig's Works dataset.

Note: To run this script, you need to set `GIT_LFS_SKIP_SMUDGE=1` to be able to
install dynaword:

```bash
GIT_LFS_SKIP_SMUDGE=1 uv run data/grundtvig/create.py
```
"""

import logging
from datetime import date
from pathlib import Path
from typing import Any, cast

from datasets import Dataset, load_dataset

from dynaword.process_dataset import (
    add_token_count,
    ensure_column_order,
    remove_duplicate_text,
    remove_empty_texts,
)

logger = logging.getLogger(__name__)

SOURCE = "grundtvig"


def reformat_samples(example: dict[str, Any]) -> dict[str, Any]:
    """Reformat a raw sample into the dynaword column schema.

    The sample ID encodes the year of creation as its first
    underscore-separated field.
    """
    year_of_creation = example["id"].split("_")[0]

    # Expand the creation year into a start/end date range in YYYY-MM-DD format
    start = f"{year_of_creation}-01-01"
    end = f"{year_of_creation}-12-31"

    return {
        "id": f"grundtvig_{example['id']}",
        "text": example["md"],
        "source": SOURCE,
        "added": date.today().strftime("%Y-%m-%d"),
        "created": f"{start}, {end}",
    }


def main():
    # Pin the source dataset to a specific revision for reproducibility
    dataset = load_dataset(
        "chcaa/grundtvigs-works",
        split="train",
        revision="945dd72c1e902632ed581d90c8ff1571ef211a63",
    )
    dataset = cast(Dataset, dataset)

    dataset = dataset.map(reformat_samples)

    dataset = remove_empty_texts(dataset)  # remove rows with empty text
    dataset = remove_duplicate_text(dataset)  # remove rows with duplicate text
    dataset = add_token_count(dataset)
    dataset = ensure_column_order(dataset)

    dataset.to_parquet(
        Path(__file__).parent / f"{SOURCE}.parquet",
    )


if __name__ == "__main__":
    log_path = Path(__file__).parent / f"{SOURCE}.log"
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
        handlers=[
            logging.StreamHandler(),
            logging.FileHandler(log_path),
        ],
    )
    main()