Commit 566156e (unverified) · Parent: 65faa6e
Kenneth Enevoldsen committed

updates to datasheet
clean_and_deduplicate.py CHANGED
```diff
@@ -4,13 +4,13 @@ from typing import cast
 
 from datasets import Dataset, load_dataset
 
+from dynaword.datasheet import DataSheet
 from dynaword.process_dataset import (
-    add_token_count,
     ensure_column_order,
-    remove_duplicate_text,
-    remove_empty_texts,
+    # add_token_count,
+    # remove_duplicate_text,
+    # remove_empty_texts,
 )
-from src.tests.readme_parsing import read_frontmatter_and_body
 
 logger = logging.getLogger(__name__)
 
@@ -22,21 +22,19 @@ logging.basicConfig(
 root = Path(__file__).parent
 
 data_path = root / "data"
-frontmatter, _ = read_frontmatter_and_body(root / "README.md")
+sheet = DataSheet.load_from_path(root / "README.md")
 
-for dataset_cfg in frontmatter["configs"][1:]:
+for dataset_cfg in sheet.frontmatter["configs"][1:]:
     logger.info(f"Processing {dataset_cfg['config_name']}")
     _data_path = data_path / dataset_cfg["config_name"]
-    readme = data_path / dataset_cfg["config_name"] / f"{dataset_cfg['config_name']}.md"
-    frontmatter, _ = read_frontmatter_and_body(readme)
     ds = load_dataset(_data_path.as_posix(), split="train")
 
     ds = cast(Dataset, ds)
 
-    ds = ds.remove_columns(["license", "metadata"])
-    ds = add_token_count(ds)
-    ds = remove_empty_texts(ds)
-    ds = remove_duplicate_text(ds)
+    ds = ds.remove_columns(["license", "metadata", "domain"])
+    # ds = add_token_count(ds)
+    # ds = remove_empty_texts(ds)
+    # ds = remove_duplicate_text(ds)
     ds = ensure_column_order(ds)
 
     # save dataset
```
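The script now reads the per-dataset configs through `DataSheet` instead of the raw frontmatter parser. A minimal sketch of what the loop above consumes, assuming the usual HF-style `configs` list in the repository README (the example values are illustrative):

```py
from pathlib import Path

from dynaword.datasheet import DataSheet

sheet = DataSheet.load_from_path(Path("README.md"))
# sheet.frontmatter["configs"] is the HF dataset-config list, e.g. (illustrative):
# [{"config_name": "default", ...},
#  {"config_name": "botxt", "data_files": [{"split": "train", "path": "data/botxt/*.parquet"}]},
#  ...]
for dataset_cfg in sheet.frontmatter["configs"][1:]:  # [1:] skips the "default" config
    print(dataset_cfg["config_name"])
```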
data/botxt/botxt.md CHANGED
```diff
@@ -1,21 +1,21 @@
 ---
 pretty_name: Bornholmsk
 language:
-  - da
+- da
 license: cc0-1.0
 license_name: CC-0
 size_categories:
-  - 1-10k
+- 1-10k
 task_categories:
-  - text-generation
-  - fill-mask
+- text-generation
+- fill-mask
 task_ids:
-  - language-modeling
+- language-modeling
 domains:
-  - Dialect
-  - Web
+- Dialect
+- Web
 source_datasets:
-  - danish-foundation-models/danish-gigaword
+- danish-foundation-models/danish-gigaword
 ---
 
 # Dataset Card for Bornholmsk
```
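The card changes above are likely a re-serialization side effect: `DataSheet.frontmatter_as_str` (see `src/dynaword/datasheet.py` below) now dumps the frontmatter with `sort_keys=False`, so the hand-written key order survives a round trip. A quick sketch of the difference in PyYAML behavior (illustrative dict):

```py
import yaml

fm = {"pretty_name": "Bornholmsk", "language": ["da"], "license": "cc0-1.0"}
print(yaml.dump(fm, indent=2))
# keys sorted alphabetically: language, license, pretty_name
print(yaml.dump(fm, indent=2, sort_keys=False))
# insertion order preserved: pretty_name, language, license
```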
data/dannet/dannet.md CHANGED
```diff
@@ -1,20 +1,20 @@
 ---
 pretty_name: DanNet
 language:
-  - da
+- da
 license: other
 license_name: DanNet 1.0
 size_categories:
-  - 10k-100k
+- 10k-100k
 task_categories:
-  - text-generation
-  - fill-mask
+- text-generation
+- fill-mask
 task_ids:
-  - language-modeling
+- language-modeling
 source_datasets:
-  - danish-foundation-models/danish-gigaword
+- danish-foundation-models/danish-gigaword
 domains:
-  - Other
+- Other
 ---
 
 # Dataset Card for DanNet
@@ -32,6 +32,7 @@ A WordNet is a lexico-semantic network which show the meaning and the relation b
 
 <!-- START-DESC-STATS -->
 - **Language**: dan, dansk, Danish
+- **Domains**: dan, dansk, Danish
 - **Number of samples**: 47.60K
 - **Number of tokens (Llama 3)**: 1.48M
 - **Average document length (characters)**: 90.88
```
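The `47.60K` and `1.48M` figures in this stats block come from `human_readable_large_int` (added to `src/dynaword/datasheet.py` below). A worked check of the formatting:

```py
def human_readable_large_int(value: int) -> str:
    # copied from src/dynaword/datasheet.py for illustration
    thresholds = [(1_000_000_000, "B"), (1_000_000, "M"), (1_000, "K")]
    for threshold, label in thresholds:
        if value > threshold:
            return f"{value / threshold:.2f}{label}"
    return str(value)


assert human_readable_large_int(47_600) == "47.60K"    # number of samples
assert human_readable_large_int(1_480_000) == "1.48M"  # Llama 3 tokens
```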
src/dynaword/datasheet.py CHANGED
````diff
@@ -1,32 +1,126 @@
+import json
+import logging
+from datetime import datetime
+from enum import Enum
 from pathlib import Path
-from typing import Any, Literal, Self
+from textwrap import dedent
+from typing import Any, Literal, Self, cast
 
 import yaml
+from datasets import Dataset, load_dataset
 from pydantic import BaseModel
 
-from dynaword.paths import repo_path
+from dynaword.descriptive_stats import DescriptiveStatsOverview
+from dynaword.plots import create_descriptive_statistics_plots
+
+logger = logging.getLogger(__name__)
+
 
 LICENSE_HEADER = "## License Information"
 
 
+class DEFAULT_SECTION_TAGS(Enum):
+    desc_stats = "DESC-STATS"
+    sample = "SAMPLE"
+    dataset_plots = "DATASET PLOTS"
+    short_description = "SHORT DESCRIPTION"
+
+
+DATASET_PLOTS_template = """
+<p align="center">
+<img src="./images/dist_document_length.png" width="600" style="margin-right: 10px;" />
+</p>
+"""
+
+
+SAMPLE_template = """
+```py
+{sample}
+```
+
+### Data Fields
+
+An entry in the dataset consists of the following fields:
+
+- `text`(`str`): The content of the document.
+- `source` (`str`): The source of the document (see [Source Data](#source-data)).
+- `id` (`str`): A unique identifier for each document.
+- `added` (`str`): A date for when the document was added to this collection.
+- `created` (`str`): A date range for when the document was originally created.
+- `license` (`str`): The license of the document. The licenses vary according to the source.
+- `domain` (`str`): The domain of the source
+- `metadata/source-pretty` (`str`): The long form version of the short-form source name
+- `metadata/*`: Potentially additional metadata
+
+"""
+
+
+def human_readable_large_int(value: int) -> str:
+    thresholds = [
+        (1_000_000_000, "B"),
+        (1_000_000, "M"),
+        (1_000, "K"),
+    ]
+    for threshold, label in thresholds:
+        if value > threshold:
+            return f"{value/threshold:.2f}{label}"
+
+    return str(value)
+
+
+def create_sample_str(dataset: Dataset, max_str_len: int = 100):
+    sample = dataset[0]
+    for k in sample:
+        if isinstance(sample[k], str) and len(sample[k]) > max_str_len:
+            sample[k] = sample[k][:max_str_len] + "[...]"
+        if isinstance(sample[k], datetime):
+            sample[k] = str(sample[k])
+
+    json_sample = json.dumps(sample, indent=2, ensure_ascii=False)
+    sample_str = SAMPLE_template.format(sample=json_sample)
+
+    return sample_str
+
+
 class DataSheet(BaseModel):
     pretty_name: str
     license: Literal["cc0-1.0", "other", "cc-by-sa-4.0", "apache-2.0"]
+    license_name: str
     language: list[Literal["da"]]
+    domains: list[str]  # TODO: make literal
     path: Path
     frontmatter: dict[str, Any]
     body: str
 
+    @property
+    def short_description(self) -> str:
+        short_description = self.get_tag_content(
+            DEFAULT_SECTION_TAGS.short_description.value
+        )
+        if short_description.endswith("."):
+            short_description = short_description[:-1]
+        return short_description
+
     @property
     def license_information(self) -> str:
         return self.get_section_by_header(LICENSE_HEADER)
 
     @property
     def frontmatter_as_str(self) -> str:
-        return yaml.dump(self.frontmatter, indent=2)
+        return yaml.dump(self.frontmatter, indent=2, sort_keys=False)
 
     def to_str(self) -> str:
-        return f"---\n{self.frontmatter_as_str}---\n{self.body}"
+        return f"---\n{self.frontmatter_as_str.strip()}\n---\n\n{self.body.strip()}\n"
+
+    def get_dataset(self, **kwargs) -> Dataset:
+        ds_path = self.path.parent
+        ds = load_dataset(ds_path.as_posix(), split="train", **kwargs)
+        ds = cast(Dataset, ds)
+        return ds
+
+    def get_descriptive_stats(self) -> DescriptiveStatsOverview:
+        path = self.path.parent / "descriptive_stats.json"
+        return DescriptiveStatsOverview.from_disk(path)
 
     def get_section_indices_by_header(self, header: str) -> tuple[int, int]:
         level = header.split(" ")[0].count("#")
@@ -80,7 +174,57 @@ class DataSheet(BaseModel):
         tag_start = f"<!-- START-{tag} -->"
         return self.body[s + len(tag_start) : e].strip()
 
+    def add_descriptive_stats(
+        self, descriptive_stats: DescriptiveStatsOverview | None = None
+    ) -> str:
+        if descriptive_stats is None:
+            d_stats = DescriptiveStatsOverview.from_dataset(self.get_dataset())
+        else:
+            d_stats = descriptive_stats
+
+        if len(self.language) != 1 or self.language[0] != "da":
+            raise NotImplementedError(
+                "This script only handles the language code 'da'"
+            )
+        languages = "dan, dansk, Danish"
+
+        package = dedent(f"""
+        - **Language**: {languages}
+        - **Domains**: {self.domains}
+        - **Number of samples**: {human_readable_large_int(d_stats.number_of_samples)}
+        - **Number of tokens (Llama 3)**: {human_readable_large_int(d_stats.number_of_tokens)}
+        - **Average document length (characters)**: {d_stats.average_document_length:.2f}
+        """)
+
+        return self.replace_tag(
+            package=package,
+            tag=DEFAULT_SECTION_TAGS.desc_stats.value,
+        )
+
+    def add_dataset_plots(self, dataset: Dataset, create_plot: bool = True) -> str:
+        if create_plot:
+            create_descriptive_statistics_plots(
+                dataset=dataset, save_dir=self.path.parent
+            )
+        return self.replace_tag(
+            package=DATASET_PLOTS_template, tag=DEFAULT_SECTION_TAGS.dataset_plots.value
+        )
+
+    def add_sample_and_description(self, dataset: Dataset) -> str:
+        return self.replace_tag(
+            package=create_sample_str(dataset), tag=DEFAULT_SECTION_TAGS.sample.value
+        )
+
     def replace_tag(self, package: str, tag: str) -> str:
+        """Replace a tag in the datasheet body.
+
+        Args:
+            package: What you want to replace it with
+            tag: What tag you want to replace
+
+        Returns:
+            The entire body text
+        """
         tag_start = f"<!-- START-{tag} -->"
         tag_end = f"<!-- END-{tag} -->"
 
@@ -100,9 +244,10 @@ class DataSheet(BaseModel):
             content = f.read()
         if content.startswith("---"):
             end_idx = content.find("---", 3)
+            start_idx_body = end_idx + 3
             if end_idx != -1:
                 frontmatter = content[3:end_idx].strip()
-                return yaml.safe_load(frontmatter), content[end_idx:]
+                return yaml.safe_load(frontmatter), content[start_idx_body:]
         raise ValueError(f"No frontmatter found in file: {file_path}")
 
     @classmethod
@@ -114,10 +259,12 @@ class DataSheet(BaseModel):
             license=frontmatter["license"],
             language=frontmatter["language"],
             pretty_name=frontmatter["pretty_name"],
+            domains=frontmatter["domains"],
+            license_name=frontmatter["license_name"],
             path=readme_path,
         )
 
-    def write_to_path(self, readme_path: Path | None) -> None:
+    def write_to_path(self, readme_path: Path | None = None) -> None:
         if readme_path is None:
             readme_path = self.path
         with readme_path.open("w") as f:
@@ -125,7 +272,10 @@ class DataSheet(BaseModel):
 
 
 if __name__ == "__main__":
-    sheet = DataSheet.load_from_path(repo_path / "data" / "botxt" / "botxt.md")
-    sheet.frontmatter
+    from dynaword.paths import repo_path
+
+    sheet = DataSheet.load_from_path(repo_path / "data" / "dannet" / "dannet.md")
+    ds = sheet.get_dataset()
 
-    print(yaml.dump(sheet.frontmatter, indent=2))
+    sheet.body = sheet.add_descriptive_stats(descriptive_stats=None)
+    sheet.write_to_path()
````
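A minimal sketch of the new `DataSheet` API as exercised in the `__main__` block above: load a card, splice fresh content between a pair of `<!-- START-TAG -->`/`<!-- END-TAG -->` markers, and write the result back. The card path is illustrative, and the card must already contain the marker comments:

```py
from pathlib import Path

from dynaword.datasheet import DataSheet

sheet = DataSheet.load_from_path(Path("data/dannet/dannet.md"))
# replace_tag returns the full body with `package` spliced in between
# <!-- START-DESC-STATS --> and <!-- END-DESC-STATS -->
sheet.body = sheet.replace_tag(
    package="- **Language**: dan, dansk, Danish",
    tag="DESC-STATS",
)
sheet.write_to_path()  # defaults to the path the sheet was loaded from
```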
src/dynaword/descriptive_stats.py ADDED
```py
import json
import logging
from dataclasses import dataclass
from pathlib import Path
from typing import Self

from datasets import Dataset

from dynaword.git_utilities import (
    get_current_revision,
)

logger = logging.getLogger(__name__)


def calculate_average_document_length(
    dataset: Dataset, text_column: str = "text"
) -> float:
    texts = sum(len(t) for t in dataset[text_column])
    return texts / len(dataset)


@dataclass()
class DescriptiveStatsOverview:
    number_of_samples: int
    average_document_length: float
    number_of_tokens: int

    @classmethod
    def from_disk(cls, path: Path):
        with path.open("r") as f:
            data = json.load(f)
        if "revision" in data:
            data.pop("revision")
        obj = cls(**data)
        return obj

    def to_disk(self, path: Path):
        data = self.__dict__
        data["revision"] = get_current_revision()
        with path.with_suffix(".json").open("w") as f:
            json.dump(self.__dict__, f, indent=2)

    @classmethod
    def from_dataset(cls, dataset: Dataset) -> Self:
        return cls(
            number_of_samples=len(dataset),
            average_document_length=calculate_average_document_length(dataset),
            number_of_tokens=sum(dataset["token_count"]),
        )
```
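A toy usage example: `DescriptiveStatsOverview.from_dataset` only needs `text` and `token_count` columns, so it can be exercised on an in-memory `Dataset` (illustrative data; `to_disk` is skipped here because it shells out to git via `get_current_revision`):

```py
from datasets import Dataset

from dynaword.descriptive_stats import DescriptiveStatsOverview

ds = Dataset.from_dict({"text": ["hej verden", "goddag"], "token_count": [3, 2]})
stats = DescriptiveStatsOverview.from_dataset(ds)

print(stats.number_of_samples)        # 2
print(stats.number_of_tokens)         # 3 + 2 = 5
print(stats.average_document_length)  # (10 + 6) / 2 = 8.0 characters
```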
src/dynaword/plots.py ADDED
```py
import logging
from pathlib import Path

import pandas as pd
import plotnine as pn
from datasets import Dataset

logger = logging.getLogger(__name__)


def create_descriptive_statistics_plots(
    dataset: Dataset,
    save_dir: Path,
):
    logger.info("Creating descriptive statistics plot.")
    lengths = dataset["token_count"]
    df = pd.DataFrame({"lengths": lengths, "Source": dataset["source"]})

    plot = (
        pn.ggplot(df, pn.aes(x="lengths", y=pn.after_stat("count")))
        + pn.geom_histogram(bins=100)
        + pn.labs(
            x="Document Length (Tokens)",
            y="Count",
            title="Distribution of Document Lengths",
        )
        + pn.theme_minimal()
        + pn.facet_wrap("Source", scales="free", ncol=3)
    )

    img_path = save_dir / "images"
    img_path.mkdir(parents=False, exist_ok=True)
    pn.ggsave(
        plot,
        img_path / "dist_document_length.png",
        dpi=500,
        width=10,
        height=10,
        units="in",
        verbose=False,
    )
```
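A hedged usage sketch: the function expects `token_count` and `source` columns and writes `images/dist_document_length.png` under `save_dir`. The data below is illustrative, and `save_dir` must already exist since the images directory is created with `parents=False`:

```py
from pathlib import Path

from datasets import Dataset

from dynaword.plots import create_descriptive_statistics_plots

ds = Dataset.from_dict(
    {"token_count": [120, 340, 56, 980], "source": ["dannet"] * 4}
)
create_descriptive_statistics_plots(dataset=ds, save_dir=Path("data/dannet"))
```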
src/dynaword/tables.py ADDED
```py
from pathlib import Path

import pandas as pd

from dynaword.datasheet import DataSheet, human_readable_large_int
from dynaword.paths import repo_path

main_sheet = DataSheet.load_from_path(repo_path / "README.md")
frontmatter = main_sheet.frontmatter
_datasets = [
    cfg["config_name"]  # type: ignore
    for cfg in frontmatter["configs"]  # type: ignore
    if cfg["config_name"] != "default"  # type: ignore
]

DEFAULT_LICENSE_REFERENCES = """[CC-0]: https://creativecommons.org/publicdomain/zero/1.0/legalcode.en
[CC-BY-SA 4.0]: https://creativecommons.org/licenses/by-sa/4.0/deed.en
[Apache 2.0]: https://www.apache.org/licenses/LICENSE-2.0
"""


def create_license_references() -> str:
    license_references = DEFAULT_LICENSE_REFERENCES
    for dataset in _datasets:
        dataset_path = repo_path / "data" / dataset
        readme_path = dataset_path / f"{dataset_path.name}.md"

        sheet = DataSheet.load_from_path(readme_path)

        if sheet.license == "other":
            license_name = sheet.frontmatter["license_name"]
            license_references += f"[{license_name}]: ./data/{dataset_path.name}/{dataset_path.name}.md#license-information\n"

    return license_references


def create_dataset_readme_references():
    readme_references = ""

    for dataset in _datasets:
        dataset_path = repo_path / "data" / dataset

        readme_references += (
            f"[{dataset_path.name}]: data/{dataset_path.name}/{dataset_path.name}.md\n"
        )
    return readme_references


def create_overview_table(repo_path: Path = repo_path) -> pd.DataFrame:
    table = {
        "Source": [],
        "Description": [],
        "Domain": [],
        "N. Tokens": [],
        "License": [],
    }

    for dataset in _datasets:
        dataset_path = repo_path / "data" / dataset
        readme_path = dataset_path / f"{dataset_path.name}.md"

        sheet = DataSheet.load_from_path(readme_path)
        desc_stats = sheet.get_descriptive_stats()

        table["Source"] += [f"[{dataset_path.name}]"]
        table["License"] += [f"[{sheet.license_name}]"]
        table["Description"] += [sheet.short_description]
        table["Domain"] += [", ".join(sheet.domains)]  # keep all columns the same length
        table["N. Tokens"] += [desc_stats.number_of_tokens]

    # total
    table["Source"] += ["**Total**"]
    table["Domain"] += [""]
    table["License"] += [""]
    table["Description"] += [""]
    table["N. Tokens"] += [sum(table["N. Tokens"])]

    df = pd.DataFrame.from_dict(table)
    df = df.sort_values("N. Tokens")
    df["N. Tokens"] = df["N. Tokens"].apply(human_readable_large_int)

    return df


def create_overview_table_str(repo_path: Path = repo_path) -> str:
    main_table = create_overview_table(repo_path)
    readme_references = create_dataset_readme_references()
    license_references = create_license_references()
    package = f"{main_table.to_markdown(index=False)}\n\n{readme_references}\n\n{license_references}\n\n"
    return package
```
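This module is what `update_descriptive_statistics.py` now calls to rebuild the overview table in the root README. A sketch of the intended use (the row contents depend on the committed `descriptive_stats.json` files):

```py
from dynaword.tables import create_overview_table_str

# Markdown table followed by the [dataset] and [license] link references,
# ready to be spliced in via DataSheet.replace_tag(..., tag="MAIN TABLE").
print(create_overview_table_str())
```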
src/dynaword/update_descriptive_statistics.py CHANGED
````diff
@@ -10,195 +10,29 @@ Example use:
 import argparse
 import json
 import logging
-from dataclasses import dataclass
-from datetime import datetime
 from pathlib import Path
-from textwrap import dedent
-from typing import Self, cast
+from typing import cast
 
-import pandas as pd
-import plotnine as pn
 from datasets import Dataset, load_dataset
 
 from dynaword.datasheet import DataSheet
+from dynaword.descriptive_stats import DescriptiveStatsOverview
 from dynaword.git_utilities import (
     check_is_ancestor,
-    get_current_revision,
     get_latest_revision,
 )
 from dynaword.paths import repo_path
-from tests.readme_parsing import get_tag_content, read_frontmatter_and_body, replace_tag
-
-frontmatter, _ = read_frontmatter_and_body(repo_path / "README.md")
-_datasets = [
-    cfg["config_name"]
-    for cfg in frontmatter["configs"]
-    if cfg["config_name"] != "default"
-]
+from dynaword.tables import create_overview_table_str
 
 logger = logging.getLogger(__name__)
 
-
-def human_readable_large_int(value: int) -> str:
-    thresholds = [
-        (1_000_000_000, "B"),
-        (1_000_000, "M"),
-        (1_000, "K"),
-    ]
-    for threshold, label in thresholds:
-        if value > threshold:
-            return f"{value/threshold:.2f}{label}"
-
-    return str(value)
-
-
-def calculate_average_document_length(
-    dataset: Dataset, text_column: str = "text"
-) -> float:
-    texts = sum(len(t) for t in dataset[text_column])
-    return texts / len(dataset)
-
-
-@dataclass()
-class DescriptiveStatsOverview:
-    number_of_samples: int
-    average_document_length: float
-    number_of_tokens: int
-    language: str = "dan, dansk, Danish"
-
-    @classmethod
-    def from_dataset(cls, dataset: Dataset) -> Self:
-        return cls(
-            number_of_samples=len(dataset),
-            average_document_length=calculate_average_document_length(dataset),
-            number_of_tokens=sum(dataset["token_count"]),
-        )
-
-    def to_markdown(self) -> str:
-        format = dedent(f"""
-
-        - **Language**: {self.language}
-        - **Domains**: {self.language}
-        - **Number of samples**: {human_readable_large_int(self.number_of_samples)}
-        - **Number of tokens (Llama 3)**: {human_readable_large_int(self.number_of_tokens)}
-        - **Average document length (characters)**: {self.average_document_length:.2f}
-
-        """)
-        return format
-
-    def add_to_markdown(self, markdown: str | Path) -> str:
-        return replace_tag(
-            markdown=markdown, package=self.to_markdown(), tag="DESC-STATS"
-        )
-
-    def to_disk(
-        self, path: Path
-    ):  # TODO: instead write this to the yaml header (and revision should not be added here)
-        data = self.__dict__
-        data["revision"] = get_current_revision()
-        with path.with_suffix(".json").open("w") as f:
-            json.dump(self.__dict__, f, indent=2)
-
-    @classmethod
-    def from_disk(cls, path: Path):
-        with path.open("r") as f:
-            data = json.load(f)
-        if "revision" in data:
-            data.pop("revision")
-        obj = cls(**data)
-        return obj
-
-
-sample_template = """
-```py
-{sample}
-```
-
-### Data Fields
-
-An entry in the dataset consists of the following fields:
-
-- `text`(`str`): The content of the document.
-- `source` (`str`): The source of the document (see [Source Data](#source-data)).
-- `id` (`str`): An unique identifier for each document.
-- `added` (`str`): An date for when the document was added to this collection.
-- `created` (`str`): An date range for when the document was originally created.
-- `license` (`str`): The license of the document. The licenses vary according to the source.
-- `domain` (`str`): The domain of the source
-- `metadata/source-pretty` (`str`): The long form version of the short-form source name
-- `metadata/*`: Potentially additional metadata
-
-"""
-
-
-def add_sample(markdown_path: Path, dataset: Dataset, max_str_len: int = 100):
-    logger.info("Adding dataset sample to readme")
-    sample = dataset[0]
-    for k in sample:
-        if isinstance(sample[k], str) and len(sample[k]) > max_str_len:
-            sample[k] = sample[k][:max_str_len] + "[...]"
-        if isinstance(sample[k], datetime):
-            sample[k] = str(sample[k])
-
-    json_sample = json.dumps(sample, indent=2, ensure_ascii=False)
-    sample_str = sample_template.format(sample=json_sample)
-
-    replace_tag(markdown=markdown_path, package=sample_str, tag="SAMPLE")
-
-
-DATASET_PLOTS_template = """
-<p align="center">
-<img src="./images/dist_document_length.png" width="600" style="margin-right: 10px;" />
-</p>
-"""
-
-
-def add_descriptive_statistics_plots(
-    markdown_path: Path,
-    dataset: Dataset,
-):
-    logger.info("Adding descriptive statistics plot to readme.")
-    lengths = [len(s) for s in dataset["text"]]
-    df = pd.DataFrame({"lengths": lengths, "Source": dataset["source"]})
-
-    plot = (
-        pn.ggplot(df, pn.aes(x="lengths", y=pn.after_stat("count")))
-        + pn.geom_histogram(bins=100)
-        + pn.labs(
-            x="Document Length (Characters)",
-            y="Count",
-            title="Distribution of Document Lengths",
-        )
-        + pn.theme_minimal()
-        + pn.facet_wrap("Source", scales="free", ncol=3)
-    )
-
-    img_path = markdown_path.parent / "images"
-    img_path.mkdir(parents=False, exist_ok=True)
-    pn.ggsave(
-        plot,
-        img_path / "dist_document_length.png",
-        dpi=500,
-        width=10,
-        height=10,
-        units="in",
-        verbose=False,
-    )
-
-    replace_tag(
-        markdown=markdown_path, package=DATASET_PLOTS_template, tag="DATASET PLOTS"
-    )
-
-
-def add_desc_statitics(
-    markdown_path: Path,
-    dataset: Dataset,
-    desc_stats_path: Path,
-) -> None:
-    logger.info("Adding descriptive statistics to readme.")
-    desc_stats = DescriptiveStatsOverview.from_dataset(dataset)
-    desc_stats.to_disk(desc_stats_path)
-    desc_stats.add_to_markdown(markdown_path)
+main_sheet = DataSheet.load_from_path(repo_path / "README.md")
+frontmatter = main_sheet.frontmatter
+_datasets = [
+    cfg["config_name"]  # type: ignore
+    for cfg in frontmatter["configs"]  # type: ignore
+    if cfg["config_name"] != "default"  # type: ignore
+]
 
 
 def update_dataset(
@@ -216,6 +50,7 @@ def update_dataset(
 
     rev = get_latest_revision(dataset_path)
     desc_stats_path = dataset_path / "descriptive_stats.json"
+    markdown_path = dataset_path / readme_name
 
     if desc_stats_path.exists() and force is False:
         with desc_stats_path.open("r") as f:
@@ -229,20 +64,22 @@ def update_dataset(
             )
         return
 
-    readme_name = f"{dataset_name}.md" if readme_name is None else readme_name
-    markdown_path = dataset_path / readme_name
-
-    logger.info(f"Updating dataset: {dataset_name}")
-
+    logger.info(f"Computing descriptive stats for: {dataset_name}")
     ds = load_dataset(str(repo_path), dataset_name, split="train")
     ds = cast(Dataset, ds)
+    desc_stats = DescriptiveStatsOverview.from_dataset(ds)
+    desc_stats.to_disk(desc_stats_path)
 
-    add_desc_statitics(markdown_path, ds, desc_stats_path)
-    add_sample(markdown_path, ds)
-    add_descriptive_statistics_plots(markdown_path, ds)
+    logger.info(f"Updating datasheet for: {dataset_name}")
+    sheet = DataSheet.load_from_path(markdown_path)
+    sheet.body = sheet.add_descriptive_stats(descriptive_stats=desc_stats)
+    sheet.body = sheet.add_sample_and_description(ds)
+    sheet.body = sheet.add_dataset_plots(ds, create_plot=True)
 
+    logger.info("Updating Overview table")
     if dataset_name == "default":
-        update_main_table()
+        package = create_overview_table_str()
+        sheet.body = sheet.replace_tag(package=package, tag="MAIN TABLE")
 
 
 def create_parser():
@@ -277,74 +114,6 @@ def create_parser():
     return parser
 
 
-def create_main_table(repo_path: Path = repo_path) -> tuple[pd.DataFrame, str, str]:
-    table = {
-        "Source": [],
-        "Description": [],
-        "Domain": [],
-        "N. Tokens": [],
-        "License": [],
-    }
-    readme_references = ""
-    license_references = (
-        "[CC-0]: https://creativecommons.org/publicdomain/zero/1.0/legalcode.en\n"
-        + "[CC-BY-SA 4.0]: https://creativecommons.org/licenses/by-sa/4.0/deed.en\n"
-        + "[Apache 2.0]: https://www.apache.org/licenses/LICENSE-2.0\n"
-    )
-
-    for dataset in _datasets:
-        dataset_path = repo_path / "data" / dataset
-        readme_path = dataset_path / f"{dataset_path.name}.md"
-
-        sheet = DataSheet.load_from_path(readme_path)
-        frontmatter = sheet.frontmatter
-        body = sheet.body
-        desc_stats = DescriptiveStatsOverview.from_disk(
-            dataset_path / "descriptive_stats.json"
-        )
-
-        short_description = get_tag_content(body, tag="SHORT DESCRIPTION").strip()
-        if short_description.endswith("."):
-            short_description = short_description[:-1]
-        license, license_name = frontmatter["license"], frontmatter["license_name"]
-
-        table["Source"] += [f"[{dataset_path.name}]"]
-        readme_references += (
-            f"[{dataset_path.name}]: data/{dataset_path.name}/{dataset_path.name}.md\n"
-        )
-
-        table["License"] += [f"[{license_name}]"]
-        if license == "other":
-            license_references += f"[{license_name}]: ./data/{dataset_path.name}/{dataset_path.name}.md#license-information\n"
-        table["Description"] += [short_description]
-        table["N. Tokens"] += [desc_stats.number_of_tokens]
-
-    # total
-    table["Source"] += ["**Total**"]
-    table["Domain"] += [""]
-    table["License"] += [""]
-    table["Description"] += [""]
-    table["N. Tokens"] += [sum(table["N. Tokens"])]
-
-    df = pd.DataFrame.from_dict(table)
-    df = df.sort_values("N. Tokens")
-    df["N. Tokens"] = df["N. Tokens"].apply(human_readable_large_int)
-
-    return df, readme_references, license_references
-
-
-def update_main_table(repo_path: Path = repo_path) -> None:
-    logger.info("Updating MAIN TABLE")
-    main_table, readme_references, license_references = create_main_table(repo_path)
-    readme_path = repo_path / "README.md"
-    with readme_path.open("r") as f:
-        markdown = f.read()
-    package = f"{main_table.to_markdown(index=False)}\n\n{readme_references}\n\n{license_references}\n\n"
-    markdown = replace_tag(markdown, package=package, tag="MAIN TABLE")
-    with readme_path.open("w") as f:
-        f.write(markdown)
-
-
 def main(
     dataset: str | None = None,
     logging_level: int = 20,
````
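With the helpers moved out into `datasheet.py`, `descriptive_stats.py`, `plots.py`, and `tables.py`, the per-dataset flow reduces to one call. A sketch, assuming `update_dataset` accepts keyword arguments matching the names used in its body (`dataset_name`, `force`, `readme_name`):

```py
from dynaword.update_descriptive_statistics import update_dataset

# Recompute descriptive_stats.json and regenerate the datasheet sections
# (stats, sample, plots) for one dataset, even if the stats are up to date.
update_dataset(dataset_name="dannet", force=True)
```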
src/tests/conftest.py CHANGED
```diff
@@ -1,33 +1,14 @@
 from pathlib import Path
-from typing import Any
 
-import pytest
-import yaml
-
-from tests.readme_parsing import read_frontmatter_and_body
+from dynaword.datasheet import DataSheet
 
 root_path = Path(__file__).parent.parent.parent
-
 main_readme = root_path / "README.md"
 
-frontmatter, _ = read_frontmatter_and_body(main_readme)
+main_sheet = DataSheet.load_from_path(main_readme)
+
 DATASET_NAMES = [
     cfg["config_name"]
-    for cfg in frontmatter["configs"]
+    for cfg in main_sheet.frontmatter["configs"]
     if cfg["config_name"] != "default"
 ]
-
-
-@pytest.fixture()
-def repo_path() -> Path:
-    return root_path
-
-
-def readme_yaml_header(repo_path: Path) -> dict[str, Any]:
-    readme_path = repo_path / "README.md"
-
-    with readme_path.open("r") as f:
-        readme = f.read()
-
-    frontmatter = readme.split("---")[1]
-    return yaml.safe_load(frontmatter)
```
src/tests/test_dataset_schema.py CHANGED
```diff
@@ -1,9 +1,7 @@
 from pathlib import Path
-from typing import Literal
 
 import pytest
 from datasets import load_dataset
-from pydantic import BaseModel
 
 from dynaword.dataset_structure import SampleSchema
 from dynaword.datasheet import DataSheet
@@ -22,21 +20,14 @@ def test_sample_schema(repo_path: Path, dataset_name: str):
     SampleSchema(**sample)
 
 
-class FrontmatterSchema(BaseModel):
-    pretty_name: str
-    language: list[Literal["da"]]
-    license: Literal["cc0-1.0", "other", "cc-by-sa-4.0", "apache-2.0"]
-
-
 @pytest.mark.parametrize("dataset_name", DATASET_NAMES)
 def test_dataset_readme(repo_path: Path, dataset_name: str):
     """tests that the dataset frontmatter and markdown follows the correct format."""
 
     readme = repo_path / "data" / dataset_name / f"{dataset_name}.md"
 
-    ds_sheet = DataSheet.load_from_path(readme)
-    frontmatter = ds_sheet.frontmatter
-    frontmatter_validated = FrontmatterSchema(**frontmatter)
+    # ensure that it can be loaded as a datasheet
+    ds_sheet = DataSheet.load_from_path(readme)  # will fail if the format is not correct
 
     # ensure tags:
     body = ds_sheet.body
@@ -46,9 +37,7 @@ def test_dataset_readme(repo_path: Path, dataset_name: str):
 
     h2_headings = {line for line in body.splitlines() if line.startswith("## ")}
 
-    if (
-        frontmatter_validated.license == "other"
-    ):  # ensure description of underspecified licenses
+    if ds_sheet.license == "other":  # ensure description of underspecified licenses
         assert "## License Information" in h2_headings
 
     # required headings
```