"""AI-MO Olympiad Reference Dataset""" |
|
|
|
|
|
|
|
|
|
|
|
import re |
|
|
import json |
|
|
from pathlib import Path |
|
|
|
|
|
import datasets |
|
|
from huggingface_hub import HfApi |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
_CITATION = """""" |
|
|
|
|
|
|
|
|
|
|
|
_DESCRIPTION = """""" |
|
|
|
|
|
|
|
|
_HOMEPAGE = "" |
|
|
|
|
|
|
|
|
_LICENSE = "" |
|
|
|
|
|
|
|
|
class OlympiadReferenceDataset(datasets.GeneratorBasedBuilder):
    """Builder for the AI-MO olympiad reference problems and solutions."""

    VERSION = datasets.Version("0.0.1")

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._hfapi = HfApi()
        # Segmented JSONL shards live under ".../segmented/<name>.jsonl" in the repo.
        self.pattern = re.compile(r'.*/segmented/[^/]+\.jsonl$')

    def _info(self):
        features = datasets.Features(
            {
                "problem_type": datasets.Value("string"),
                "problem_label": datasets.Value("string"),
                "problem": datasets.Value("string"),
                "solution": datasets.Value("string"),
                "year": datasets.Value("int32"),
                "tier": datasets.Value("int32"),
                "resource_path": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Resolve shard paths relative to the dataset's base path
        # (note: _base_path is a private attribute of the download manager).
        data_root_path = Path(dl_manager._base_path)

        # List all files in the dataset repo and keep only the segmented JSONL shards.
        repo_files = self._hfapi.list_repo_files(repo_id="AI-MO/olympiads-ref", repo_type="dataset")
        seg_jsonl_files = [s for s in repo_files if self.pattern.match(s)]

        # Download (and extract, if needed) each shard, keeping its repo-relative
        # path alongside the local file path.
        data_files = [
            (sjf, dl_manager.extract(dl_manager.download(data_root_path / sjf)))
            for sjf in seg_jsonl_files
        ]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_files": data_files,
                    "split": "train",
                },
            )
        ]

    def _generate_examples(self, data_files, split):
        key = 0

        for resource_path, file in data_files:
            with open(file, "r", encoding="utf-8") as f:
                for line in f:
                    data = json.loads(line)
                    yield key, {
                        "problem_type": data.get("problem_type"),
                        # Fall back to "label" for shards that do not use the "problem_label" key.
                        "problem_label": data.get("problem_label") or data.get("label"),
                        "problem": data.get("problem"),
                        "solution": data.get("solution"),
                        "year": data.get("year"),
                        "tier": data.get("tier"),
                        "resource_path": resource_path,
                    }
                    key += 1
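

# Minimal usage sketch, not part of the loader itself. Assumptions: this script is
# the loading script of the "AI-MO/olympiads-ref" dataset repository on the
# Hugging Face Hub, your token has read access to it, and your `datasets` version
# requires `trust_remote_code=True` for script-based datasets.
if __name__ == "__main__":
    ds = datasets.load_dataset(
        "AI-MO/olympiads-ref",
        split="train",
        trust_remote_code=True,
    )
    print(ds)
    # Inspect one record to confirm the schema declared in _info().
    print(ds[0]["problem_label"], ds[0]["resource_path"])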