Upload medical_prescription_dataset.py with huggingface_hub
Browse files
    	
        medical_prescription_dataset.py
    ADDED
    
    | @@ -0,0 +1,87 @@ | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | 
|  | |
"""Medical Prescription OCR Dataset"""

import json
import os

import datasets
from PIL import Image  # NOTE(review): appears unused in this file — confirm before removing

# Human-readable summary surfaced through DatasetInfo.description.
_DESCRIPTION = """
Medical Prescription OCR Dataset - A collection of synthetic handwritten medical prescriptions
with structured annotations for training OCR models.
"""

# BibTeX entry surfaced through DatasetInfo.citation.
_CITATION = """
@dataset{shrivastava2024medicalprescription,
  author = {Chinmay Shrivastava},
  title = {Medical Prescription OCR Dataset},
  year = {2024},
  publisher = {Hugging Face},
  url = {https://huggingface.co/datasets/chinmays18/medical-prescription-dataset}
}
"""
class MedicalPrescriptionDataset(datasets.GeneratorBasedBuilder):
    """Loading script for the Medical Prescription OCR dataset.

    Each example pairs a prescription image (a ``.png`` file) with a JSON
    annotation whose top-level ``"ground_truth"`` field holds the target
    transcription string.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Declare the dataset's features, description, and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "ground_truth": datasets.Value("string"),
                }
            ),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Define the train/validation/test splits.

        The image and annotation files live directly in the dataset
        repository, so no download step is performed.
        """
        # NOTE(review): relative paths assume the working directory is the
        # dataset root and dl_manager is unused — confirm this holds for
        # cached/streaming loads.
        split_layout = [
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "val"),
            (datasets.Split.TEST, "test"),
        ]
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "images_path": f"{subdir}/images",
                    "annotations_path": f"{subdir}/annotations",
                },
            )
            for split, subdir in split_layout
        ]

    def _generate_examples(self, images_path, annotations_path):
        """Yield ``(key, example)`` pairs for one split.

        Args:
            images_path: Directory containing the ``.png`` prescription images.
            annotations_path: Directory containing one ``<stem>.json`` file per
                image, each with a top-level ``"ground_truth"`` string.

        Raises:
            FileNotFoundError: If an image lacks its matching annotation file.
            KeyError: If an annotation has no ``"ground_truth"`` field.
        """
        # Sort so example keys are deterministic across runs/platforms.
        image_files = sorted(
            f for f in os.listdir(images_path) if f.endswith(".png")
        )

        for idx, image_file in enumerate(image_files):
            # Annotation shares the image's stem: foo.png -> foo.json.
            base_name = os.path.splitext(image_file)[0]
            annotation_path = os.path.join(annotations_path, f"{base_name}.json")

            # Explicit encoding: the platform default may not be UTF-8
            # (PEP 597), and JSON files are UTF-8 by convention.
            with open(annotation_path, "r", encoding="utf-8") as f:
                annotation = json.load(f)

            yield idx, {
                # A file path is a valid value for a datasets.Image() feature;
                # decoding is deferred to the datasets library.
                "image": os.path.join(images_path, image_file),
                "ground_truth": annotation["ground_truth"],
            }