Create dl_dataset.py
dl_dataset.py
ADDED
import json
import os

import datasets
from beir.datasets.data_loader import GenericDataLoader

# ----------------------------------------
# This script downloads the BEIR-compatible GermanDPR dataset from "Hugging Face Datasets" to your local machine.
# Please see the dataset's description/README to learn more about how the dataset was created.
# If you want to use deepset/germandpr without any changes, use TYPE "original".
# If you want to reproduce PM-AI/bi-encoder_msmarco_bert-base_german, use TYPE "processed".
# ----------------------------------------


TYPE = "processed"  # or "original"
SPLIT = "train"  # or "test"
DOWNLOAD_DIR = "germandpr-beir-dataset"
DOWNLOAD_DIR = os.path.join(DOWNLOAD_DIR, f'{TYPE}/{SPLIT}')
DOWNLOAD_QREL_DIR = os.path.join(DOWNLOAD_DIR, "qrels")

os.makedirs(DOWNLOAD_QREL_DIR, exist_ok=True)

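# The loop below produces the standard BEIR directory layout (paths follow from
# the variables above; listed here for orientation):
#   germandpr-beir-dataset/<TYPE>/<SPLIT>/corpus.jsonl       one JSON object {"_id", "title", "text"} per passage
#   germandpr-beir-dataset/<TYPE>/<SPLIT>/queries.jsonl      one JSON object {"_id", "text"} per query
#   germandpr-beir-dataset/<TYPE>/<SPLIT>/qrels/<SPLIT>.tsv  one (query-id, corpus-id, score) row per judgment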
# for BEIR compatibility we need queries, corpus and qrels all together
# ensure to always load these three based on the same type (all "processed" or all "original")
for subset_name in ["queries", "corpus", "qrels"]:
    subset = datasets.load_dataset("PM-AI/germandpr-beir", f'{TYPE}-{subset_name}', split=SPLIT)
    if subset_name == "qrels":
        # qrels are written as a TSV file under qrels/<SPLIT>.tsv
        out_path = os.path.join(DOWNLOAD_QREL_DIR, f'{SPLIT}.tsv')
        subset.to_csv(out_path, sep="\t", index=False)
    else:
        # queries and corpus are written as JSONL; corpus entries additionally carry a title
        if subset_name == "queries":
            _row_to_json = lambda row: json.dumps({"_id": row["_id"], "text": row["text"]}, ensure_ascii=False)
        else:
            _row_to_json = lambda row: json.dumps({"_id": row["_id"], "title": row["title"], "text": row["text"]}, ensure_ascii=False)

        with open(os.path.join(DOWNLOAD_DIR, f'{subset_name}.jsonl'), "w", encoding="utf-8") as out_file:
            for row in subset:
                out_file.write(_row_to_json(row) + "\n")


# GenericDataLoader is part of BEIR. If everything is working correctly we can now load the dataset
corpus, queries, qrels = GenericDataLoader(data_folder=DOWNLOAD_DIR).load(SPLIT)
print(f'{SPLIT} corpus size: {len(corpus)}\n'
      f'{SPLIT} queries size: {len(queries)}\n'
      f'{SPLIT} qrels: {len(qrels)}\n')

print("--------------------------------------------------------------------------------------------------------------\n"
      "Now you can use the downloaded files with the BEIR framework\n"
      "Example: https://github.com/beir-cellar/beir/blob/v1.0.1/examples/retrieval/evaluation/dense/evaluate_sbert.py\n"
      "--------------------------------------------------------------------------------------------------------------")
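The evaluate_sbert.py example linked in the closing message shows the typical BEIR evaluation flow. As a minimal sketch, assuming the BEIR v1.0.1 API and using the PM-AI/bi-encoder_msmarco_bert-base_german model mentioned in the script's comments (any sentence-transformers checkpoint can be substituted), evaluating a downloaded split could look like this:

# Minimal evaluation sketch, assuming BEIR v1.0.1 and the "processed" test split
# as written by dl_dataset.py above; model name and batch size are illustrative.
from beir.datasets.data_loader import GenericDataLoader
from beir.retrieval import models
from beir.retrieval.evaluation import EvaluateRetrieval
from beir.retrieval.search.dense import DenseRetrievalExactSearch as DRES

# Load the files produced by dl_dataset.py
data_dir = "germandpr-beir-dataset/processed/test"
corpus, queries, qrels = GenericDataLoader(data_folder=data_dir).load("test")

# Dense retrieval with exact (brute-force) search over the corpus
model = DRES(models.SentenceBERT("PM-AI/bi-encoder_msmarco_bert-base_german"), batch_size=32)
retriever = EvaluateRetrieval(model, score_function="cos_sim")

results = retriever.retrieve(corpus, queries)
ndcg, _map, recall, precision = retriever.evaluate(qrels, results, retriever.k_values)
print(ndcg, recall)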