# /// script
# requires-python = ">=3.12"
# dependencies = [
# "datasets",
# "mediawiki-dump",
# "mwparserfromhell",
# "pandas",
# "requests",
# "tqdm",
# "transformers",
# "dynaword"
# ]
# [tool.uv.sources]
# dynaword = { git = "https://huggingface.co/datasets/danish-foundation-models/danish-dynaword", rev = "00e7f2aee7f7ad2da423419f77ecbb9c0536de0d" }
# ///
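"""Build the Danish Wikibooks subset of Danish Dynaword.

The pipeline below:
  1. downloads the latest dawikibooks pages-articles dump from dumps.wikimedia.org,
  2. converts each page's wikitext to plain text through a persistent Node.js
     wtf_wikipedia bridge (parser/wtf_bridge.js), falling back to
     mwparserfromhell when the bridge fails,
  3. drops redirects and empty pages, adds token counts, deduplicates, and
     writes the result to a snappy-compressed Parquet file.
"""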
import bz2
import datetime
import json
import logging
import os
import re
import subprocess
import sys
import threading
from mediawiki_dump.dumps import IteratorDump
from mediawiki_dump.reader import DumpReaderArticles
from mwparserfromhell import parse
import requests
from tqdm import tqdm
import pandas as pd
from datasets import Dataset
from transformers import AutoTokenizer
from concurrent.futures import ProcessPoolExecutor
from functools import partial
from typing import Tuple
from dynaword.process_dataset import (
    add_token_count,
    ensure_column_order,
    remove_duplicate_text,
    remove_empty_texts,
)
logger = logging.getLogger(__name__)
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
source = "wikibooks"


class WtfNodeBridge:
    """
    Persistent Node bridge to wtf_wikipedia.

    Call .parse(wikitext, lang=None) -> (text, is_redirect).
    Remember to call .close() when done.
    """

    def __init__(
        self, node_script_path: str = "parser/wtf_bridge.js", node_cmd: str = "node"
    ):
        self.proc = subprocess.Popen(
            [node_cmd, node_script_path],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            encoding="utf-8",
            bufsize=1,
        )
        self._lock = threading.Lock()

        # Background thread to log stderr (helpful for debugging).
        def _drain_stderr(p):
            try:
                for line in p.stderr:
                    logger.warning("wtf_node stderr: %s", line.rstrip())
            except Exception:
                pass

        t = threading.Thread(target=_drain_stderr, args=(self.proc,), daemon=True)
        t.start()

    def parse(self, wikitext: str, lang: str | None = None) -> Tuple[str, bool]:
        if self.proc.poll() is not None:
            raise RuntimeError("Node bridge process has exited")
        payload = {"wikitext": wikitext}
        if lang:
            payload["lang"] = lang
        line = json.dumps(payload, ensure_ascii=False)
        with self._lock:
            # Write and flush a single JSON line...
            try:
                self.proc.stdin.write(line + "\n")
                self.proc.stdin.flush()
            except BrokenPipeError as e:
                raise RuntimeError("Broken pipe writing to node bridge") from e
            # ...and read exactly one JSON line back.
            out_line = self.proc.stdout.readline()
        if not out_line:
            raise RuntimeError("No response from node bridge (it may have exited)")
        res = json.loads(out_line)
        if res.get("error"):
            # Choose to either raise or return empty text; here we raise.
            raise RuntimeError("Node bridge error: " + res["error"])
        return res.get("text", ""), bool(res.get("isRedirect", False))

    def close(self):
        try:
            if self.proc.stdin:
                self.proc.stdin.close()
        except Exception:
            pass
        try:
            self.proc.terminate()
            self.proc.wait(timeout=3)
        except Exception:
            try:
                self.proc.kill()
            except Exception:
                pass


def download_wiki_dump(url: str, file_path: str):
    """
    Downloads a file from a URL with a progress bar.

    Args:
        url (str): The URL of the file to download.
        file_path (str): The local path to save the file.
    """
    logger.info("Downloading %s to %s...", url, file_path)
    try:
        with requests.get(url, stream=True) as r:
            r.raise_for_status()
            total_size = int(r.headers.get("content-length", 0))
            block_size = 8192  # 8 KiB
            with (
                open(file_path, "wb") as f,
                tqdm(
                    total=total_size, unit="B", unit_scale=True, desc="Downloading"
                ) as pbar,
            ):
                for chunk in r.iter_content(chunk_size=block_size):
                    if chunk:  # filter out keep-alive chunks
                        f.write(chunk)
                        pbar.update(len(chunk))
        logger.info("Download complete.")
    except requests.exceptions.RequestException as e:
        logger.error("Error downloading file: %s", e)
        sys.exit(1)


def get_content(file_name: str):
    """Yield raw lines from the bz2-compressed dump, for IteratorDump."""
    with bz2.open(file_name, mode="r") as fp:
        yield from fp


def process_dump_to_parquet(bz2_file_path: str, parquet_file_path: str):
    dump = IteratorDump(iterator=get_content(file_name=bz2_file_path))
    pages = DumpReaderArticles().read(dump)

    articles = []
    today = datetime.datetime.now().strftime("%Y-%m-%d")

    bridge = WtfNodeBridge("parser/wtf_bridge.js")
    try:
        for page in tqdm(pages):
            try:
                plain_text, is_redirect = bridge.parse(page.content, lang="da")
            except Exception as exc:
                logger.warning(
                    "wtf parse failed for page %s: %s -- falling back to mwparserfromhell",
                    getattr(page, "title", "<unknown>"),
                    exc,
                )
                plain_text = parse(page.content).strip_code().strip()
                is_redirect = plain_text.startswith("REDIRECT")

            if is_redirect:
                continue

            # Additional cleanup: wtf_wikipedia's text() already removes most
            # markup, but image/thumbnail fragments can survive the parse.
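            # A line such as "thumb|right|250px|Billedtekst" (hypothetical
            # leftover) is matched and removed by the pattern below.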
            plain_text = re.sub(
                r"thumb(?:\|(?:left|right|center|\d+px)*)*\|[^\n]*", "", plain_text
            ).strip()

            if len(plain_text) == 0:
                logger.warning("Skipping empty article")
                continue

            date = datetime.datetime.strptime(
                page.timestamp, "%Y-%m-%dT%H:%M:%SZ"
            ).strftime("%Y-%m-%d")

            articles.append(
                {
                    "id": f"{source}_{page.page_id}",
                    "source": source,
                    "created": f"{date}, {date}",
                    "text": f"{page.title}\n{plain_text}",
                    "added": today,
                }
            )
    finally:
        bridge.close()

    df = pd.DataFrame(articles)
    ds = Dataset.from_pandas(df)
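
    # Shared Dynaword post-processing (from dynaword.process_dataset): compute
    # token counts, drop empty and duplicate texts, and normalize column order.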
    ds = add_token_count(ds)
    ds = remove_empty_texts(ds)
    ds = remove_duplicate_text(ds)
    ds = ensure_column_order(ds)

    ds.to_parquet(parquet_file_path, compression="snappy")


def main():
    """
    Main function to orchestrate the download and processing.
    """
    # --- Configuration ---
    # URL for the latest Danish Wikibooks pages-articles dump.
    WIKI_DUMP_URL = f"https://dumps.wikimedia.org/da{source}/latest/da{source}-latest-pages-articles.xml.bz2"

    # Local file paths
    DOWNLOADED_BZ2_FILE = f"tmp/da{source}-latest-pages-articles.xml.bz2"
    OUTPUT_PARQUET_FILE = f"{source}.parquet"

    # --- Execution ---
    # 1. Download the dump file.
    if not os.path.exists(DOWNLOADED_BZ2_FILE):
        os.makedirs(os.path.dirname(DOWNLOADED_BZ2_FILE), exist_ok=True)
        download_wiki_dump(WIKI_DUMP_URL, DOWNLOADED_BZ2_FILE)
    else:
        logger.info("File '%s' already exists. Skipping download.", DOWNLOADED_BZ2_FILE)

    # 2. Process the dump and save to Parquet.
    process_dump_to_parquet(DOWNLOADED_BZ2_FILE, OUTPUT_PARQUET_FILE)

    # 3. (Optional) Clean up the downloaded file.
    # If you want to keep the bz2 file, comment out the os.remove call below.
    logger.info("Cleaning up by removing '%s'...", DOWNLOADED_BZ2_FILE)
    os.remove(DOWNLOADED_BZ2_FILE)

    logger.info("Script finished successfully.")


if __name__ == "__main__":
    # Dependencies are declared in the inline script metadata at the top of
    # this file, so running it with `uv run` installs them automatically.
    main()