Datasets:

Modalities:
Image
Text
Formats:
parquet
Languages:
Danish
ArXiv:
DOI:
Libraries:
Datasets
Dask
License:
kris927b committed on
Commit
2a9b93c
·
1 Parent(s): 12c0ca9

Improved token count

Browse files
Files changed (1) hide show
  1. src/dynaword/process_dataset.py +7 -8
src/dynaword/process_dataset.py CHANGED
@@ -17,14 +17,13 @@ logger = logging.getLogger(__name__)
17
  def _tokenize_function(
18
  examples: dict[str, Any], tokenizer: AutoTokenizer
19
  ) -> dict[str, Any]:
20
- token_count = [
21
- len(tokens)
22
- for tokens in tokenizer(examples[ColumnNames.text.value], padding=False)[ # type: ignore
23
- "input_ids"
24
- ]
25
- ]
26
- examples[ColumnNames.token_count.value] = token_count
27
- return examples
28
 
29
 
30
  def add_token_count(
 
17
  def _tokenize_function(
18
  examples: dict[str, Any], tokenizer: AutoTokenizer
19
  ) -> dict[str, Any]:
20
+ encodings = tokenizer(
21
+ examples["text"],
22
+ padding=False,
23
+ truncation=False,
24
+ return_length=True, # much faster, avoids storing all IDs
25
+ ) # type: ignore
26
+ return {"token_count": encodings["length"]}
 
27
 
28
 
29
  def add_token_count(