Datasets: danish-foundation-models/danish-dynaword

Modalities: Image, Text
Formats: parquet
Languages: Danish
Libraries: Datasets, Dask
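
The grundtvig subset ships as a single parquet file produced by the `create.py` script below. As a minimal loading sketch (the in-repo path `data/grundtvig/grundtvig.parquet` is inferred from where the script writes its output and is an assumption), the file can be read with the `datasets` library:

```python
# Sketch: fetch the grundtvig parquet file from the dynaword dataset repo
# and load it with the Hugging Face `datasets` library.
# The in-repo filename is assumed from where create.py writes its output.
from huggingface_hub import hf_hub_download
from datasets import load_dataset

parquet_path = hf_hub_download(
    repo_id="danish-foundation-models/danish-dynaword",
    filename="data/grundtvig/grundtvig.parquet",
    repo_type="dataset",
)
ds = load_dataset("parquet", data_files=parquet_path, split="train")
print(ds)  # schema and number of rows
```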
# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "datasets",
#     "dynaword",
# ]
#
# [tool.uv.sources]
# dynaword = { git = "https://huggingface.co/datasets/danish-foundation-models/danish-dynaword" }
# ///

"""
Script for downloading and processing Grundtvig's works (chcaa/grundtvigs-works) into the grundtvig dataset

Note: To run this script, set `GIT_LFS_SKIP_SMUDGE=1` so that the dynaword dependency can be installed:

```bash
GIT_LFS_SKIP_SMUDGE=1 uv run data/grundtvig/create.py
```
"""

import logging
from datetime import date
from pathlib import Path
from typing import Any, cast

from datasets import Dataset, load_dataset

from dynaword.process_dataset import (
    add_token_count,
    ensure_column_order,
    remove_duplicate_text,
    remove_empty_texts,
)

logger = logging.getLogger(__name__)

SOURCE = "grundtvig"


def reformat_samples(example: dict[str, Any]) -> dict[str, Any]:
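    """Map a row from chcaa/grundtvigs-works onto the dynaword columns (id, text, source, added, created)."""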
    # The source id starts with the year of creation, followed by an underscore
    year_of_creation = example["id"].split("_")[0]
    # Express the creation date as a YYYY-MM-DD range spanning that year
    start = f"{year_of_creation}-01-01"
    end = f"{year_of_creation}-12-31"
    return {
        "id": f"grundtvig_{example['id']}",
        "text": example["md"],
        "source": SOURCE,
        "added": date.today().strftime("%Y-%m-%d"),
        "created": f"{start}, {end}",
    }


def main():
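    """Download Grundtvig's works, map them to the dynaword schema,
    clean the rows, and write grundtvig.parquet next to this script."""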
    dataset = load_dataset(
        "chcaa/grundtvigs-works",
        split="train",
        revision="945dd72c1e902632ed581d90c8ff1571ef211a63",
    )
    dataset = cast(Dataset, dataset)

    dataset = dataset.map(reformat_samples)

    dataset = remove_empty_texts(dataset)  # remove rows with empty text
    dataset = remove_duplicate_text(dataset)  # remove rows with duplicate text
    dataset = add_token_count(dataset)
    dataset = ensure_column_order(dataset)

    dataset.to_parquet(
        Path(__file__).parent / f"{SOURCE}.parquet",
    )


if __name__ == "__main__":
    log_path = Path(__file__).parent / f"{SOURCE}.log"
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
        handlers=[
            logging.StreamHandler(),
            logging.FileHandler(log_path),
        ],
    )
    main()
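
Running the script writes `grundtvig.parquet` next to `create.py` and logs to both the console and a `grundtvig.log` file in the same directory.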