import os
import subprocess

import torch
from datasets import load_dataset
from jiwer import cer as jiwer_cer
from transformers import AutoModelForSpeechSeq2Seq, WhisperProcessor, pipeline


# Load the WenetSpeech evaluation sets (TEST_NET and TEST_MEETING).
ws_test_net = load_dataset("wenet-e2e/wenetspeech", "TEST_NET", split="test", streaming=False)
ws_test_meeting = load_dataset("wenet-e2e/wenetspeech", "TEST_MEETING", split="test", streaming=False)
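
# The audio column is assumed to already be decoded at 16 kHz, which is what
# Whisper's feature extractor expects. If a local copy of the data is stored at
# a different sampling rate, it could be resampled on the fly (optional sketch,
# not part of the original script):
#
#   from datasets import Audio
#   ws_test_net = ws_test_net.cast_column("audio", Audio(sampling_rate=16000))
#   ws_test_meeting = ws_test_meeting.cast_column("audio", Audio(sampling_rate=16000))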
# Use fp16 on GPU and fall back to fp32 on CPU.
device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
# Load the fine-tuned Whisper checkpoint.
model_id = "pengyizhou/whisper-wenetspeech-S"
model = AutoModelForSpeechSeq2Seq.from_pretrained(
    model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
)
model.to(device)

# Tokenizer and feature extractor used for preprocessing and decoding.
whisper_model = "openai/whisper-large-v3"
processor = WhisperProcessor.from_pretrained(whisper_model, language="chinese")
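
# Note: the tokenizer/feature extractor come from openai/whisper-large-v3 rather
# than from the fine-tuned checkpoint itself. This assumes the checkpoint was
# trained with the same preprocessing configuration (e.g. the same number of mel
# bins); if the checkpoint ships its own processor files, loading them directly
# would avoid any mismatch:
#
#   processor = WhisperProcessor.from_pretrained(model_id, language="chinese")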
asr = pipeline(
    "automatic-speech-recognition",
    model=model,
    tokenizer=processor.tokenizer,
    feature_extractor=processor.feature_extractor,
    torch_dtype=torch_dtype,
    chunk_length_s=30,
    batch_size=64,
    max_new_tokens=225,
    device=device,
    num_beams=1,
)
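
# Long-form decoding options for Whisper's temperature-fallback scheme: decoding
# starts at temperature 0.0 and retries at the higher temperatures whenever the
# compression-ratio or log-probability thresholds indicate a degenerate transcript.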
|
|
generate_kwargs = {
    "condition_on_prev_tokens": False,
    "compression_ratio_threshold": 1.35,
    "temperature": (0.0, 0.2, 0.4, 0.6, 0.8, 1.0),
    "logprob_threshold": -1.0,
    "language": "chinese",
}


def transcribe_batch(batch):
    # Run the ASR pipeline on a batch of decoded audio arrays and return the
    # lower-cased, stripped transcripts.
    inputs = [ex["array"] for ex in batch["audio"]]
    outputs = asr(inputs, generate_kwargs=generate_kwargs)
    preds = [out["text"].lower().strip() for out in outputs]
    return {"prediction": preds}
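
# A single example could also be transcribed directly as a quick sanity check
# (sketch; index 0 is an arbitrary choice):
#
#   sample = ws_test_net[0]["audio"]["array"]
#   print(asr(sample, generate_kwargs=generate_kwargs)["text"])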
result_net = ws_test_net.map(
    transcribe_batch,
    batched=True,
    batch_size=64,
    remove_columns=ws_test_net.column_names,
)

result_meet = ws_test_meeting.map(
    transcribe_batch,
    batched=True,
    batch_size=64,
    remove_columns=ws_test_meeting.column_names,
)
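
# For each test set, compute the character error rate with jiwer and write
# "utterance-id transcript" files (one utterance per line) for the references
# and the predictions, so they can also be scored with compute-wer.py below.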
for idx, (ds, result) in enumerate([(ws_test_net, result_net), (ws_test_meeting, result_meet)]):
    ids = list(ds["segment_id"])
    refs = [t.lower().strip() for t in ds["text"]]
    preds = list(result["prediction"])
    score_cer = jiwer_cer(refs, preds)

    if idx == 0:
        print(f"CER on WenetSpeech test_net: {score_cer * 100:.2f}%")

        with open("./wenet_net_finetune.pred", "w") as pred_results:
            for key, pred in zip(ids, preds):
                pred_results.write("{} {}\n".format(key, pred))

        with open("./wenet_net.ref", "w") as ref_results:
            for key, ref in zip(ids, refs):
                ref_results.write("{} {}\n".format(key, ref))

    if idx == 1:
        print(f"CER on WenetSpeech test_meeting: {score_cer * 100:.2f}%")

        with open("./wenet_meeting_finetune.pred", "w") as pred_results:
            for key, pred in zip(ids, preds):
                pred_results.write("{} {}\n".format(key, pred))

        with open("./wenet_meeting.ref", "w") as ref_results:
            for key, ref in zip(ids, refs):
                ref_results.write("{} {}\n".format(key, ref))


compute_wer_script = "./compute-wer.py"
if not os.path.exists(compute_wer_script):
    possible_locations = [
        "./compute-wer.py",
    ]
    for location in possible_locations:
        if os.path.exists(location):
            compute_wer_script = location
            break
    else:
        print(f"Warning: compute-wer.py not found. Tried: {[compute_wer_script] + possible_locations}")
        print("Skipping detailed WER analysis.")
        compute_wer_script = None
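
# compute-wer.py is assumed to be the WeNet-style scoring script, where
# --char=1 scores at the character level (i.e. CER for Chinese) and --v=1
# prints per-utterance alignments in addition to the overall error rate.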
if compute_wer_script:
    try:
        # Score both test sets with compute-wer.py, saving the detailed output.
        for ref_file, hyp_file, wer_file in [
            ("./wenet_net.ref", "./wenet_net_finetune.pred", "./wenet_net_finetune.wer"),
            ("./wenet_meeting.ref", "./wenet_meeting_finetune.pred", "./wenet_meeting_finetune.wer"),
        ]:
            cmd = [
                "python", compute_wer_script,
                "--char=1",
                "--v=1",
                ref_file,
                hyp_file,
            ]

            print(f"Running: {' '.join(cmd)} > {wer_file}")

            with open(wer_file, "w") as wer_output:
                subprocess.run(
                    cmd,
                    stdout=wer_output,
                    stderr=subprocess.PIPE,
                    text=True,
                    check=True,
                )

            print(f"CER analysis saved to {wer_file}")

            if os.path.exists(wer_file):
                print("\nFirst few lines of WER analysis:")
                with open(wer_file, "r") as f:
                    lines = f.readlines()
                for line in lines[:10]:
                    print(f"  {line.rstrip()}")
                if len(lines) > 10:
                    print(f"  ... ({len(lines) - 10} more lines)")

    except subprocess.CalledProcessError as e:
        print(f"Error running compute-wer.py: {e}")
        if e.stderr:
            print(f"Error details: {e.stderr}")
    except Exception as e:
        print(f"Unexpected error: {e}")

print("Inference and CER analysis completed!")