Fixed issue with test file 12052d3b8a19248565c63470ef4b3088 - changed ImDB loaders
DUDE_imdb_loader.py  +20 -29
CHANGED
@@ -77,9 +77,7 @@ def pdf_to_images(document_filepath, converter="PyPDF2"):
 def images_to_pagenames(images, document_filepath, page_image_dir):
     page_image_names = []
     for page_idx, page_image in enumerate(images):
-        page_image_name = document_filepath.replace("PDF", "images").replace(
-            ".pdf", f"_{page_idx}.jpg"
-        )
+        page_image_name = document_filepath.replace("PDF", "images").replace(".pdf", f"_{page_idx}.jpg")
         page_image_names.append(
             page_image_name.replace(page_image_dir, page_image_dir.split("/")[-1])
         )  # without dir
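For reference, a quick sketch of what the joined replace() chain produces. The path below is made up and only illustrates the PDF-to-images naming convention the loader assumes.

# Hypothetical input path, used only to illustrate the naming scheme.
document_filepath = "data/DUDE/PDF/val/0123abcd.pdf"
page_idx = 2
page_image_name = document_filepath.replace("PDF", "images").replace(".pdf", f"_{page_idx}.jpg")
print(page_image_name)  # data/DUDE/images/val/0123abcd_2.jpg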
@@ -172,7 +170,7 @@ def get_ocr_information(ocr_path, num_pages):

     ocr_pages = ocr_info[0]["DocumentMetadata"]["Pages"]

-    if num_pages != ocr_pages:
+    if num_pages != ocr_pages and num_pages != MAX_PAGES:  # MAX_PAGES is the limit for conversion
         raise AssertionError("Pages from images and OCR not matching, should go for pdf2image")

     page_ocr_tokens = [[] for page_ix in range(num_pages)]
@@ -181,11 +179,12 @@ def get_ocr_information(ocr_path, num_pages):
         for ocr_extraction in ocr_block["Blocks"]:
             if ocr_extraction["BlockType"] == "WORD":
                 text = ocr_extraction["Text"].lower()
-                bounding_box = parse_textract_bbox(
-                    ocr_extraction["Geometry"]["BoundingBox"]
-                ).tolist()
+                bounding_box = parse_textract_bbox(ocr_extraction["Geometry"]["BoundingBox"]).tolist()
                 page = ocr_extraction["Page"] - 1

+                if page >= num_pages:  # additional condition when MAX_PAGES vs. OCR pages
+                    break
+
                 page_ocr_tokens[page].append(text)
                 page_ocr_boxes[page].append(bounding_box)

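The two hunks above relax the page-count assertion when a document was capped at MAX_PAGES during conversion, and then skip OCR words that fall beyond the converted pages. For context, a minimal sketch of a Textract-style bounding-box parser; the real parse_textract_bbox in this repository may differ, and the sketch only assumes Textract's relative Left/Top/Width/Height geometry plus a numpy return value (which is why the diff calls .tolist()).

import numpy as np

def parse_textract_bbox_sketch(bbox):
    # Textract BoundingBox values are fractions of the page width/height.
    left, top = bbox["Left"], bbox["Top"]
    return np.array([left, top, left + bbox["Width"], top + bbox["Height"]])

box = parse_textract_bbox_sketch({"Left": 0.25, "Top": 0.5, "Width": 0.125, "Height": 0.0625})
print(box.tolist())  # [0.25, 0.5, 0.375, 0.5625]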
@@ -222,12 +221,14 @@ def format_answers(answers_list):
 def create_imdb_record_from_json(
     record, documents_metadata, documents_ocr_info, split, include_answers, include_variants=False
 ):
-
     docId = record["docId"].split("_")[0]
     try:
         num_pages, page_image_names = get_document_info(documents_metadata, docId)
         document_ocr_info = documents_ocr_info[docId]
     except Exception as e:
+        from pdb import set_trace
+
+        set_trace()
         print(
             "Missing: ",
             e,
@@ -240,17 +241,15 @@ def create_imdb_record_from_json(
     else:
         answers = None

-    if include_variants and record["answers_variants"] and not
+    if include_variants and record["answers_variants"] and not "list" in record["answer_type"]:
         answers += record["answers_variants"]

     page_image_dir = "/".join(record["document"].split("/")[:-2]).replace("PDF", "images")
-    if not page_image_names or any(
-        [not os.path.exists(os.path.join(page_image_dir, p)) for p in page_image_names]
-    ):
+    if not page_image_names or any([not os.path.exists(os.path.join(page_image_dir, p)) for p in page_image_names]):
         print(
             "Missing images: ",
             docId,
-            #[p for p in page_image_names if not os.path.exists(os.path.join(page_image_dir, p))],
+            # [p for p in page_image_names if not os.path.exists(os.path.join(page_image_dir, p))],
         )
         return {}

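A small self-contained sketch of the variants-merge condition added above; the record fields are illustrative stand-ins rather than the exact DUDE schema.

include_variants = True
record = {"answers_variants": ["12"], "answer_type": "abstractive"}
answers = ["twelve"]
if include_variants and record["answers_variants"] and not "list" in record["answer_type"]:
    answers += record["answers_variants"]
print(answers)  # ['twelve', '12']; for a "list" answer_type the variants would be left out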
@@ -308,9 +307,7 @@ def create_imdb_from_json(
 def parse_arguments():
     import argparse

-    parser = argparse.ArgumentParser(
-        description="Instantiate HuggingFace dataloader and convert to ImDB format"
-    )
+    parser = argparse.ArgumentParser(description="Instantiate HuggingFace dataloader and convert to ImDB format")

     parser.add_argument(
         "--redo-imdb-build",
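For orientation, a sketch of how the two CLI flags used by this script could be declared. Only --redo-imdb-build is visible in this diff; --no-include-variants is inferred from args.no_include_variants further down, so both definitions here are assumptions.

import argparse

def parse_arguments_sketch():
    parser = argparse.ArgumentParser(description="Instantiate HuggingFace dataloader and convert to ImDB format")
    parser.add_argument("--redo-imdb-build", action="store_true", default=False)
    parser.add_argument("--no-include-variants", action="store_true", default=False)
    # argparse exposes these as args.redo_imdb_build and args.no_include_variants.
    return parser.parse_args()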
@@ -379,17 +376,13 @@ if __name__ == "__main__":
     num_jobs = 6
     block_size = int(len(document_paths) / num_jobs) + 1
     print(f"{block_size} * {num_jobs} = {block_size*num_jobs} ({len(document_paths)})")
-    document_blocks = [
-        document_paths[block_size * i : block_size * i + block_size]
-        for i in range(num_jobs)
-    ]
+    document_blocks = [document_paths[block_size * i : block_size * i + block_size] for i in range(num_jobs)]
     print(
         "chunksize",
         len(set([docId for doc_block in document_blocks for docId in doc_block])),
     )
     parallel_results = Parallel(n_jobs=num_jobs)(
-        delayed(pdf_to_images_block)(document_blocks[i], "pdf2image")
-        for i in range(num_jobs)
+        delayed(pdf_to_images_block)(document_blocks[i], "pdf2image") for i in range(num_jobs)
     )

     for block_result in parallel_results:
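The chunk-and-parallelize pattern above is plain joblib. A runnable sketch with a dummy worker standing in for pdf_to_images_block, which this diff does not show in full:

from joblib import Parallel, delayed

def process_block(paths, converter):
    # Stand-in for pdf_to_images_block: pretend to convert and return per-document metadata.
    return {p: {"converter": converter} for p in paths}

document_paths = [f"doc_{i}.pdf" for i in range(20)]
num_jobs = 6
block_size = int(len(document_paths) / num_jobs) + 1
document_blocks = [document_paths[block_size * i : block_size * i + block_size] for i in range(num_jobs)]

parallel_results = Parallel(n_jobs=num_jobs)(
    delayed(process_block)(document_blocks[i], "pdf2image") for i in range(num_jobs)
)
merged = {k: v for block_result in parallel_results for k, v in block_result.items()}
print(len(merged))  # 20: every document lands in exactly one block

The +1 in block_size guarantees full coverage when the division is not exact; trailing blocks may simply be empty.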
@@ -413,9 +406,7 @@ if __name__ == "__main__":
     for i, document_filepath in enumerate(document_paths):
         docId = document_filepath.split("/")[-1].replace(".pdf", "")
         try:
-            ocr_tokens, ocr_boxes = get_ocr_information(
-                OCR_paths[i], documents_metadata[docId]["num_pages"]
-            )
+            ocr_tokens, ocr_boxes = get_ocr_information(OCR_paths[i], documents_metadata[docId]["num_pages"])
             documents_ocr_info[docId] = {"ocr_tokens": ocr_tokens, "ocr_boxes": ocr_boxes}
         except AssertionError as e:
             print(f"image2pages issue: {e}")
@@ -423,10 +414,10 @@ if __name__ == "__main__":
         except IndexError as e:
             print(f"pages issue: {e}")
             error_ocr.append(docId)
-        except FileNotFoundError:
+        except FileNotFoundError as e:
             print(f"FileNotFoundError issue: {e}")
             no_ocr.append(docId)
-        except KeyError:
+        except KeyError as e:
             print(f"Keyerror issue: {e}")
             error_ocr.append(docId)

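The two handler changes above matter because the f-strings reference e; a minimal illustration:

try:
    open("missing_ocr.json")
except FileNotFoundError as e:
    # Without "as e" the print below raises a NameError instead of reporting the problem;
    # Python 3 also deletes the bound name once an except block finishes.
    print(f"FileNotFoundError issue: {e}")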
@@ -437,14 +428,14 @@ if __name__ == "__main__":
         print(f"Loading from disk: {imdb_filename}")
         imdb = np.load(imdb_filename, allow_pickle=True)

-    else:
+    else:
         imdb = create_imdb_from_json(
             dataset[split],  # .select(split_indices),
             documents_metadata=documents_metadata,
             documents_ocr_info=documents_ocr_info,
             split=split,
             version="0.1",
-            include_answers=
+            include_answers=(not split == "test"),
             include_variants=(not args.no_include_variants),
         )
         np.save(imdb_filename, imdb)
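Finally, a sketch of the save/load round trip for the imdb object; the filename is illustrative. A list of records only survives np.save as an object array, which is why the load above passes allow_pickle=True.

import numpy as np

imdb = [{"split": "val", "version": "0.1"}, {"question": "what is the total?", "answers": ["12"]}]
np.save("imdb_example.npy", np.array(imdb, dtype=object))
loaded = np.load("imdb_example.npy", allow_pickle=True)
print(loaded[1]["answers"])  # ['12']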