MrLight committed
Commit 21a2e1a · verified · 1 Parent(s): 5b250ef

Delete msmarco-passage-corpus.py

Files changed (1)
  1. msmarco-passage-corpus.py +0 -91
msmarco-passage-corpus.py DELETED
@@ -1,91 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """MsMarco Passage dataset."""
-
- import json
-
- import datasets
-
- _CITATION = """
- @misc{bajaj2018ms,
-        title={MS MARCO: A Human Generated MAchine Reading COmprehension Dataset},
-        author={Payal Bajaj and Daniel Campos and Nick Craswell and Li Deng and Jianfeng Gao and Xiaodong Liu
-        and Rangan Majumder and Andrew McNamara and Bhaskar Mitra and Tri Nguyen and Mir Rosenberg and Xia Song
-        and Alina Stoica and Saurabh Tiwary and Tong Wang},
-        year={2018},
-        eprint={1611.09268},
-        archivePrefix={arXiv},
-        primaryClass={cs.CL}
- }
- """
-
- _DESCRIPTION = "dataset load script for MSMARCO Passage Corpus"
-
- _DATASET_URLS = {
-     'train': "https://huggingface.co/datasets/Tevatron/msmarco-passage-corpus/resolve/main/corpus.jsonl.gz",
- }
-
-
- class MsMarcoPassageCorpus(datasets.GeneratorBasedBuilder):
-     VERSION = datasets.Version("0.0.1")
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(version=VERSION,
-                                description="MS MARCO passage Corpus"),
-     ]
-
-     def _info(self):
-         features = datasets.Features(
-             {'docid': datasets.Value('string'), 'title': datasets.Value('string'), 'text': datasets.Value('string')}
-         )
-
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=features,  # Here we define them above because they are different between the two configurations
-             supervised_keys=None,
-             # Homepage of the dataset for documentation
-             homepage="",
-             # License for the dataset if available
-             license="",
-             # Citation for the dataset
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         if self.config.data_files:
-             downloaded_files = self.config.data_files
-         else:
-             downloaded_files = dl_manager.download_and_extract(_DATASET_URLS)
-         splits = [
-             datasets.SplitGenerator(
-                 name=split,
-                 gen_kwargs={
-                     "files": [downloaded_files[split]] if isinstance(downloaded_files[split], str) else downloaded_files[split],
-                 },
-             ) for split in downloaded_files
-         ]
-         return splits
-
-     def _generate_examples(self, files):
-         """Yields examples."""
-         for filepath in files:
-             with open(filepath, encoding="utf-8") as f:
-                 for line in f:
-                     data = json.loads(line)
-                     yield data['docid'], data
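
With the loading script deleted, the dataset presumably falls back to the Hub's built-in JSON loader over the remaining corpus.jsonl.gz (and no longer requires running remote code). A minimal sketch of the equivalent load, assuming the data file and the docid/title/text fields from the deleted script are unchanged:

# Minimal sketch: load the corpus without the custom script, assuming
# corpus.jsonl.gz is still in the repo and keeps the docid/title/text fields.
from datasets import load_dataset

corpus = load_dataset("Tevatron/msmarco-passage-corpus", split="train")
print(corpus[0]["docid"], corpus[0]["title"])

Each example should match what _generate_examples yielded above, since both read the same JSON lines.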