Tasks: Text Generation
Sub-tasks: language-modeling
Languages: Danish
Formats: parquet
Size: 10M - 100M
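
The dataset is distributed as parquet and can be loaded with the Hugging Face datasets library. Below is a minimal loading sketch; the repository id and subset name are placeholders (the actual Hub id is not stated here), and streaming=True mirrors how the tests below read the data.

from datasets import load_dataset

# Placeholder ids -- replace with the dataset's actual Hub id (or a local path) and subset name.
ds = load_dataset("organisation/dataset-name", "subset-name", split="train", streaming=True)

# Streaming returns an iterable dataset; pull a single sample to inspect its fields.
sample = next(iter(ds))
print(sample.keys())

The test module below exercises the same load_dataset call for every dataset configuration and validates each configuration's sample schema, datasheet, and folder layout.
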
from pathlib import Path
from typing import Literal

import pytest
from datasets import load_dataset
from pydantic import BaseModel

from dynaword.dataset_structure import SampleSchema
from dynaword.datasheet import DataSheet

from .conftest import DATASET_NAMES


@pytest.mark.parametrize("dataset_name", DATASET_NAMES)
def test_sample_schema(repo_path: Path, dataset_name: str):
    """Ensure that the dataset samples follow the correct schema."""
    ds = load_dataset(
        str(repo_path.resolve()), dataset_name, split="train", streaming=True
    )
    sample = next(iter(ds))
    SampleSchema(**sample)  # will raise if the sample does not match the expected schema


class FrontmatterSchema(BaseModel):
    pretty_name: str
    language: list[Literal["da"]]
    license: Literal["cc0-1.0", "other", "cc-by-sa-4.0", "apache-2.0"]


@pytest.mark.parametrize("dataset_name", DATASET_NAMES)
def test_dataset_readme(repo_path: Path, dataset_name: str):
    """Tests that the dataset frontmatter and markdown follow the correct format."""
    readme = repo_path / "data" / dataset_name / f"{dataset_name}.md"

    ds_sheet = DataSheet.load_from_path(readme)
    frontmatter = ds_sheet.frontmatter
    frontmatter_validated = FrontmatterSchema(**frontmatter)

    # ensure that the expected tags are present in the body
    body = ds_sheet.body
    tags = ["SHORT DESCRIPTION", "DESC-STATS", "DATASET PLOTS", "SAMPLE"]
    for tag in tags:
        ds_sheet.get_tag_idx(tag)

    h2_headings = {line for line in body.splitlines() if line.startswith("## ")}

    # datasets with an underspecified ("other") license must document it explicitly
    if frontmatter_validated.license == "other":
        assert "## License Information" in h2_headings

    # required headings
    req_h2_headings = ["## Dataset Description", "## Additional Information"]
    for req_h2 in req_h2_headings:
        assert req_h2 in h2_headings


@pytest.mark.parametrize("dataset_name", DATASET_NAMES)
def test_dataset_folder_structure(repo_path: Path, dataset_name: str):
    """Tests that the dataset folder is structured as follows:

    dataset_name
    |- dataset_name.md
    |- dataset_name.parquet

    If there are any Python files, at least one of them must be called `create.py`,
    but there can be additional scripts.
    """
    path = repo_path / "data" / dataset_name

    assert (path / f"{path.name}.parquet").exists()
    assert (path / f"{path.name}.md").exists()

    # if the dataset ships Python scripts, one of them must be the creation script
    if any(p.name.endswith(".py") for p in path.glob("*")):
        assert (path / "create.py").exists()
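

The tests above assume a sibling conftest.py that provides the repo_path fixture and the DATASET_NAMES list used for parametrization. The sketch below is one possible minimal implementation; the location of the repository root and the directory-scanning logic are assumptions for illustration, not the repository's actual conftest.

# conftest.py -- illustrative sketch only; the real fixtures may differ.
from pathlib import Path

import pytest

# Assumption: the repository root sits one level above the directory holding the tests.
_REPO_ROOT = Path(__file__).resolve().parents[1]

# Assumption: every sub-directory of data/ containing "<name>/<name>.md" is a dataset configuration.
DATASET_NAMES = [
    p.name
    for p in sorted((_REPO_ROOT / "data").iterdir())
    if p.is_dir() and (p / f"{p.name}.md").exists()
]


@pytest.fixture()
def repo_path() -> Path:
    """Path to the repository root, as used by the dataset tests."""
    return _REPO_ROOT

Deriving DATASET_NAMES from the data/ folder keeps the parametrized tests in sync with the datasets actually present in the repository.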