# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "datasets",
#     "dynaword",
# ]
#
# [tool.uv.sources]
# dynaword = { git = "https://huggingface.co/datasets/danish-foundation-models/danish-dynaword" }
# ///
"""Script for downloading and processing the dataset.

Note: To run this script, you need to set `GIT_LFS_SKIP_SMUDGE=1` to be able to install dynaword:

```bash
GIT_LFS_SKIP_SMUDGE=1 uv run data/enevaeldens_nyheder/create.py
```
"""

import logging
from datetime import date
from pathlib import Path
from typing import Any, cast

from datasets import Dataset, load_dataset
from dynaword.process_dataset import (
    add_token_count,
    ensure_column_order,
    remove_duplicate_text,
    remove_empty_texts,
)

logger = logging.getLogger(__name__)

SOURCE = "enevaeldens_nyheder"


def reformat_samples(example: dict[str, Any]) -> dict[str, Any]:
    creation_date = example["date"]
    # Use the source date (YYYY-MM-DD) as both start and end of the created interval
    start = creation_date
    end = creation_date

    return {
        "id": f"{SOURCE}_{example['id']}",
        "text": example["text"],
        "source": SOURCE,
        "added": date.today().strftime("%Y-%m-%d"),
        "created": f"{start}, {end}",
    }


def main():
    dataset = load_dataset(
        "JohanHeinsen/ENO",
        split="train",
        revision="009f45ef63a1a41705781840807eb620f380d17d",
    )
    dataset = cast(Dataset, dataset)

    logger.info("Removing 1-word texts")
    len_ds = len(dataset)
    dataset = dataset.filter(
        lambda x: len(x["text"].split()) >= 2
    )  # require at least 2 words in the text
    logger.info(f"Filtered {len_ds - len(dataset)} 1-word examples")

    logger.info("Filtering out texts with predicted word accuracy < 0.7")
    len_ds = len(dataset)  # reset the baseline so only this filter's removals are counted
    dataset = dataset.filter(lambda x: x["pwa"] >= 0.7)
    logger.info(f"Filtered {len_ds - len(dataset)} low-accuracy examples")

    dataset = dataset.map(reformat_samples)

    dataset = remove_empty_texts(dataset)  # remove rows with empty text
    dataset = remove_duplicate_text(dataset)  # remove rows with duplicate text
    dataset = add_token_count(dataset)
    dataset = ensure_column_order(dataset)

    dataset.to_parquet(
        Path(__file__).parent / f"{SOURCE}.parquet",
    )


if __name__ == "__main__":
    log_path = Path(__file__).parent / f"{SOURCE}.log"
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
        handlers=[
            logging.StreamHandler(),
            logging.FileHandler(log_path),
        ],
    )
    main()