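"""Download and process the Nota text corpus into a single parquet file.

The script fetches the Nota text-only ZIP archive, merges the .txt files that
share a filename prefix into one document each, converts every merged document
into a sample with the columns listed in ``column_order`` below, de-duplicates
on the text field, and writes the result to ``nota.parquet`` next to this file.
"""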
from collections import defaultdict
from io import BytesIO
from pathlib import Path
from zipfile import ZipFile

import pandas as pd
import requests

URL = "https://sprogtek-ressources.digst.govcloud.dk/nota/Nota-txt_only.zip"
column_order = [
    "text",
    "source",
    "id",
    "added",
    "created",
    "license",
    "domain",
    "metadata",
]
def convert_sample(id: str, text: str) -> dict:
    # The combined filename is assumed to encode the year at characters 4-8;
    # the created span then covers that whole year.
    year = id[4:8]
    new_example = dict(
        text=text,
        id=id.split("_")[0],  # strip the "_combined.txt" suffix
        source="nota",
        domain="Readaloud",
        license="Creative Commons Legal Code\n\nCC0 1.0 Universal",
        added="2025-02-03",
        created=f"{year}-01-01, {year}-12-31",  # assuming v2018
        metadata={"source-pretty": "Nota lyd- og tekstdata"},
    )
    return new_example
def download_and_process_zip(url):
    response = requests.get(url)
    response.raise_for_status()  # ensure we got a valid response

    with ZipFile(BytesIO(response.content), "r") as z:
        file_groups = defaultdict(list)

        # Read all text files from the ZIP, grouped by filename prefix
        for file_name in z.namelist():
            if file_name.endswith(".txt"):  # process only text files
                prefix = file_name.split("/")[1].split("_")[0]
                with z.open(file_name) as f:
                    file_groups[prefix].append(f.read().decode("utf-8"))

        # Combine files with the same prefix
        combined_files = {
            f"{prefix}_combined.txt": "\n".join(contents)
            for prefix, contents in file_groups.items()
        }

    return combined_files  # dictionary with combined file names and contents
def main():
    combined_results = download_and_process_zip(URL)

    dataset = []
    for filename, content in combined_results.items():
        sample = convert_sample(filename, content)
        dataset.append(sample)

    df = pd.DataFrame(dataset)
    df = df.drop_duplicates(keep="first", subset=["text"])
    df = df[column_order]  # enforce the column order defined above (otherwise unused)

    save_path = Path(__file__).parent / "nota.parquet"
    df.to_parquet(save_path)


if __name__ == "__main__":
    main()
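
# A minimal usage sketch (an assumption, not part of the original script): the
# parquet written by main() can be read back with pandas or loaded through the
# Hugging Face `datasets` library, e.g.:
#
#     import pandas as pd
#     df = pd.read_parquet("nota.parquet")
#
#     from datasets import Dataset
#     ds = Dataset.from_parquet("nota.parquet")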