prithivMLmods committed
Commit f7e894c · verified · 1 Parent(s): eda5d9c

upload notebooks (#2)

- upload notebooks (e1076ca16b59118caa8413f02a00a0c8998bfb9c)

Gliese-OCR-7B-Post1.0(4-bit)-reportlab/Gliese_OCR_7B_Post1_0(4_bit)_reportlab.ipynb ADDED
@@ -0,0 +1,401 @@
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "DgpubXociwNK"
+ },
+ "source": [
+ "## **Gliese-OCR-7B-Post1.0(4-bit)**"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "Nb3wNhothvX7"
+ },
+ "source": [
+ "The Gliese-OCR-7B-Post1.0 model is a fine-tuned version of Camel-Doc-OCR-062825, optimized for Document Retrieval, Content Extraction, and Analysis Recognition. Built on top of the Qwen2.5-VL architecture, this model enhances document comprehension capabilities with focused training on the Opendoc2-Analysis-Recognition dataset for superior document analysis and information extraction tasks.\n",
+ "\n",
+ "> This model shows significant improvements in LaTeX and Markdown rendering for OCR tasks.\n",
+ "\n",
+ "| Image1 | Image2 |\n",
+ "|--------|--------|\n",
+ "| ![Screenshot 2025-08-30 at 12-50-11 Gradio.png](https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/sZj3Gx32ICpm2lAVhmY_y.png) | ![Screenshot 2025-08-30 at 12-49-41 (anonymous) - output_426f8ad8-53ee-4609-9d55-6629ac37b055.pdf.png](https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/ywaoJWmkDgjbJXVR_hsZO.png) |\n",
+ "\n",
+ "*multimodal model & notebook by: [prithivMLmods](https://huggingface.co/prithivMLmods)*"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "Mk560Wx0j6PY"
+ },
+ "source": [
+ "### **Install packages**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "qTD_dNliNS5T"
+ },
+ "outputs": [],
+ "source": [
+ "%%capture\n",
+ "!pip install git+https://github.com/huggingface/transformers.git \\\n",
+ "    git+https://github.com/huggingface/accelerate.git \\\n",
+ "    git+https://github.com/huggingface/peft.git \\\n",
+ "    transformers-stream-generator huggingface_hub albumentations \\\n",
+ "    pyvips-binary qwen-vl-utils sentencepiece opencv-python docling-core \\\n",
+ "    python-docx torchvision safetensors matplotlib num2words\n",
+ "\n",
+ "!pip install xformers requests pymupdf hf_xet spaces pyvips pillow gradio \\\n",
+ "    einops torch fpdf timm av decord bitsandbytes reportlab\n",
+ "# Hold tight, this will take around 1-2 minutes."
+ ]
+ },
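+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "*(Optional)* A quick sanity check, assuming the installs above completed: it only confirms that the core libraries import cleanly and that a CUDA device is visible before the heavier model download in the next cell."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Optional sanity check: verify the core dependencies and the GPU.\n",
+ "import torch\n",
+ "import transformers\n",
+ "import bitsandbytes\n",
+ "import gradio\n",
+ "\n",
+ "print(\"torch:\", torch.__version__, \"| CUDA available:\", torch.cuda.is_available())\n",
+ "print(\"transformers:\", transformers.__version__)\n",
+ "print(\"bitsandbytes:\", bitsandbytes.__version__)\n",
+ "print(\"gradio:\", gradio.__version__)"
+ ]
+ },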
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "uiBblyf-kLmf"
+ },
+ "source": [
+ "### **Run Demo App**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "pgz93DfvNMfb"
+ },
+ "outputs": [],
+ "source": [
+ "import spaces\n",
+ "import json\n",
+ "import math\n",
+ "import os\n",
+ "import traceback\n",
+ "from io import BytesIO\n",
+ "from typing import Any, Dict, List, Optional, Tuple\n",
+ "import re\n",
+ "import time\n",
+ "from threading import Thread\n",
+ "import uuid\n",
+ "import tempfile\n",
+ "\n",
+ "import gradio as gr\n",
+ "import requests\n",
+ "import torch\n",
+ "from PIL import Image\n",
+ "import fitz\n",
+ "import numpy as np\n",
+ "\n",
+ "# --- Model Imports ---\n",
+ "from transformers import (\n",
+ "    Qwen2_5_VLForConditionalGeneration,\n",
+ "    AutoProcessor,\n",
+ "    TextIteratorStreamer,\n",
+ "    BitsAndBytesConfig,\n",
+ ")\n",
+ "\n",
+ "from reportlab.lib.pagesizes import A4\n",
+ "from reportlab.lib.styles import getSampleStyleSheet\n",
+ "from reportlab.platypus import SimpleDocTemplate, Image as RLImage, Paragraph, Spacer\n",
+ "from reportlab.lib.units import inch\n",
+ "\n",
+ "# --- Constants and Model Setup ---\n",
+ "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
+ "\n",
+ "print(\"CUDA_VISIBLE_DEVICES=\", os.environ.get(\"CUDA_VISIBLE_DEVICES\"))\n",
+ "print(\"torch.__version__ =\", torch.__version__)\n",
+ "print(\"torch.version.cuda =\", torch.version.cuda)\n",
+ "print(\"cuda available:\", torch.cuda.is_available())\n",
+ "print(\"cuda device count:\", torch.cuda.device_count())\n",
+ "if torch.cuda.is_available():\n",
+ "    print(\"current device:\", torch.cuda.current_device())\n",
+ "    print(\"device name:\", torch.cuda.get_device_name(torch.cuda.current_device()))\n",
+ "\n",
+ "print(\"Using device:\", device)\n",
+ "\n",
+ "\n",
+ "# --- Model Loading (Qwen2.5-VL) ---\n",
+ "\n",
+ "# Define model options\n",
+ "MODEL_OPTIONS = {\n",
+ "    \"Gliese-OCR-7B-Post1.0\": \"prithivMLmods/Gliese-OCR-7B-Post1.0\",\n",
+ "}\n",
+ "\n",
+ "# Define 4-bit quantization configuration\n",
+ "# This config will load the model in 4-bit to save VRAM.\n",
+ "quantization_config = BitsAndBytesConfig(\n",
+ "    load_in_4bit=True,\n",
+ "    bnb_4bit_compute_dtype=torch.float16,\n",
+ "    bnb_4bit_quant_type=\"nf4\",\n",
+ "    bnb_4bit_use_double_quant=True,\n",
+ ")\n",
+ "\n",
+ "# Preload models and processors into CUDA\n",
+ "models = {}\n",
+ "processors = {}\n",
+ "for name, model_id in MODEL_OPTIONS.items():\n",
+ "    print(f\"Loading {name}🤗. This will use 4-bit quantization to save VRAM.\")\n",
+ "    models[name] = Qwen2_5_VLForConditionalGeneration.from_pretrained(\n",
+ "        model_id,\n",
+ "        trust_remote_code=True,\n",
+ "        quantization_config=quantization_config,\n",
+ "        device_map=\"auto\"\n",
+ "    )\n",
+ "    processors[name] = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)\n",
+ "print(\"Model loaded successfully.\")\n",
+ "\n",
+ "\n",
+ "# --- PDF Generation and Preview Utility ---\n",
+ "def generate_and_preview_pdf(image: Image.Image, text_content: str, font_size: int, line_spacing: float, alignment: str, image_size: str):\n",
+ "    \"\"\"\n",
+ "    Generates a PDF, saves it, and then creates image previews of its pages.\n",
+ "    Returns the path to the PDF and a list of paths to the preview images.\n",
+ "    \"\"\"\n",
+ "    if image is None or not text_content or not text_content.strip():\n",
+ "        raise gr.Error(\"Cannot generate PDF. Image or text content is missing.\")\n",
+ "\n",
+ "    # --- 1. Generate the PDF ---\n",
+ "    temp_dir = tempfile.gettempdir()\n",
+ "    pdf_filename = os.path.join(temp_dir, f\"output_{uuid.uuid4()}.pdf\")\n",
+ "    doc = SimpleDocTemplate(\n",
+ "        pdf_filename,\n",
+ "        pagesize=A4,\n",
+ "        rightMargin=inch, leftMargin=inch,\n",
+ "        topMargin=inch, bottomMargin=inch\n",
+ "    )\n",
+ "    styles = getSampleStyleSheet()\n",
+ "    style_normal = styles[\"Normal\"]\n",
+ "    style_normal.fontSize = int(font_size)\n",
+ "    style_normal.leading = int(font_size) * line_spacing\n",
+ "    style_normal.alignment = {\"Left\": 0, \"Center\": 1, \"Right\": 2, \"Justified\": 4}[alignment]\n",
+ "\n",
+ "    story = []\n",
+ "\n",
+ "    img_buffer = BytesIO()\n",
+ "    image.save(img_buffer, format='PNG')\n",
+ "    img_buffer.seek(0)\n",
+ "\n",
+ "    page_width, _ = A4\n",
+ "    available_width = page_width - 2 * inch\n",
+ "    image_widths = {\n",
+ "        \"Small\": available_width * 0.3,\n",
+ "        \"Medium\": available_width * 0.6,\n",
+ "        \"Large\": available_width * 0.9,\n",
+ "    }\n",
+ "    img_width = image_widths[image_size]\n",
+ "    # Create a ReportLab Image object, scaled to preserve the aspect ratio\n",
+ "    img = RLImage(img_buffer, width=img_width, height=image.height * (img_width / image.width))\n",
+ "    story.append(img)\n",
+ "    story.append(Spacer(1, 12))\n",
+ "\n",
+ "    # Strip Markdown heading and emphasis markers before PDF layout\n",
+ "    cleaned_text = re.sub(r'#+\\s*', '', text_content).replace(\"*\", \"\")\n",
+ "    text_paragraphs = cleaned_text.split('\\n')\n",
+ "\n",
+ "    for para in text_paragraphs:\n",
+ "        if para.strip():\n",
+ "            story.append(Paragraph(para, style_normal))\n",
+ "\n",
+ "    doc.build(story)\n",
+ "\n",
+ "    # --- 2. Render PDF pages as images for preview ---\n",
+ "    preview_images = []\n",
+ "    try:\n",
+ "        pdf_doc = fitz.open(pdf_filename)\n",
+ "        for page_num in range(len(pdf_doc)):\n",
+ "            page = pdf_doc.load_page(page_num)\n",
+ "            pix = page.get_pixmap(dpi=150)\n",
+ "            preview_img_path = os.path.join(temp_dir, f\"preview_{uuid.uuid4()}_p{page_num}.png\")\n",
+ "            pix.save(preview_img_path)\n",
+ "            preview_images.append(preview_img_path)\n",
+ "        pdf_doc.close()\n",
+ "    except Exception as e:\n",
+ "        print(f\"Error generating PDF preview: {e}\")\n",
+ "\n",
+ "    return pdf_filename, preview_images\n",
+ "\n",
+ "\n",
+ "# --- Core Application Logic (Qwen2.5-VL with Streaming) ---\n",
+ "@spaces.GPU\n",
+ "def process_document(\n",
+ "    image: Image.Image,\n",
+ "    prompt_input: str,\n",
+ "    max_new_tokens: int,\n",
+ "    temperature: float,\n",
+ "    top_p: float,\n",
+ "    top_k: int,\n",
+ "    repetition_penalty: float\n",
+ "):\n",
+ "    \"\"\"\n",
+ "    Main function that handles model inference for the Qwen model with streaming.\n",
+ "    This function is a generator, yielding text as it is generated.\n",
+ "    \"\"\"\n",
+ "    if image is None:\n",
+ "        yield \"Please upload an image.\", \"Please upload an image.\"\n",
+ "        return\n",
+ "    if not prompt_input or not prompt_input.strip():\n",
+ "        yield \"Please enter a prompt.\", \"Please enter a prompt.\"\n",
+ "        return\n",
+ "\n",
+ "    model_name = \"Gliese-OCR-7B-Post1.0\"\n",
+ "    model = models[model_name]\n",
+ "    processor = processors[model_name]\n",
+ "\n",
+ "    messages = [\n",
+ "        {\n",
+ "            \"role\": \"user\",\n",
+ "            \"content\": [\n",
+ "                {\"type\": \"image\", \"image\": image},\n",
+ "                {\"type\": \"text\", \"text\": prompt_input},\n",
+ "            ],\n",
+ "        }\n",
+ "    ]\n",
+ "\n",
+ "    text = processor.apply_chat_template(\n",
+ "        messages, tokenize=False, add_generation_prompt=True\n",
+ "    )\n",
+ "    inputs = processor(\n",
+ "        text=[text],\n",
+ "        images=[image],\n",
+ "        padding=True,\n",
+ "        return_tensors=\"pt\",\n",
+ "    ).to(\"cuda\")\n",
+ "\n",
+ "    streamer = TextIteratorStreamer(\n",
+ "        processor.tokenizer, skip_prompt=True, skip_special_tokens=True\n",
+ "    )\n",
+ "\n",
+ "    generation_kwargs = dict(\n",
+ "        inputs,\n",
+ "        streamer=streamer,\n",
+ "        max_new_tokens=max_new_tokens,\n",
+ "        temperature=temperature,\n",
+ "        top_p=top_p,\n",
+ "        top_k=top_k,\n",
+ "        repetition_penalty=repetition_penalty,\n",
+ "        do_sample=True if temperature > 0 else False,\n",
+ "    )\n",
+ "\n",
+ "    thread = Thread(target=model.generate, kwargs=generation_kwargs)\n",
+ "    thread.start()\n",
+ "\n",
+ "    buffer = \"\"\n",
+ "    for new_text in streamer:\n",
+ "        buffer += new_text\n",
+ "        # Remove special tokens from the output stream\n",
+ "        clean_buffer = buffer.replace(\"<|im_end|>\", \"\").replace(\"<|endoftext|>\", \"\")\n",
+ "        yield clean_buffer, clean_buffer\n",
+ "\n",
+ "# --- Gradio UI Definition ---\n",
+ "def create_gradio_interface():\n",
+ "    \"\"\"Builds and returns the Gradio web interface.\"\"\"\n",
+ "    css = \"\"\"\n",
+ "    .main-container { max-width: 1400px; margin: 0 auto; }\n",
+ "    .process-button { border: none !important; color: white !important; font-weight: bold !important; background-color: blue !important;}\n",
+ "    .process-button:hover { background-color: darkblue !important; transform: translateY(-2px) !important; box-shadow: 0 4px 8px rgba(0,0,0,0.2) !important; }\n",
+ "    #gallery { min-height: 400px; }\n",
+ "    \"\"\"\n",
+ "    with gr.Blocks(theme=\"bethecloud/storj_theme\", css=css) as demo:\n",
+ "        gr.HTML(f\"\"\"\n",
+ "        <div class=\"title\" style=\"text-align: center\">\n",
+ "            <h1>Gliese-OCR-7B-Post1.0 📄</h1>\n",
+ "            <p style=\"font-size: 1.1em; color: #6b7280; margin-bottom: 0.6em;\">\n",
+ "                Image Content Extraction and Markdown Rendering\n",
+ "            </p>\n",
+ "        </div>\n",
+ "        \"\"\")\n",
+ "\n",
+ "        with gr.Row():\n",
+ "            # Left Column (Inputs)\n",
+ "            with gr.Column(scale=1):\n",
+ "                prompt_input = gr.Textbox(label=\"Query Input\", placeholder=\"✦︎ Enter the prompt.\", value=\"Precisely OCR the Image.\")\n",
+ "                image_input = gr.Image(label=\"Upload Image\", type=\"pil\", sources=['upload'])\n",
+ "\n",
+ "                with gr.Accordion(\"Advanced Settings\", open=False):\n",
+ "                    max_new_tokens = gr.Slider(minimum=64, maximum=2048, value=1024, step=32, label=\"Max New Tokens\")\n",
+ "                    temperature = gr.Slider(label=\"Temperature\", minimum=0.1, maximum=2.0, step=0.1, value=0.7)\n",
+ "                    top_p = gr.Slider(label=\"Top-p (nucleus sampling)\", minimum=0.05, maximum=1.0, step=0.05, value=0.9)\n",
+ "                    top_k = gr.Slider(label=\"Top-k\", minimum=1, maximum=100, step=1, value=50)\n",
+ "                    repetition_penalty = gr.Slider(label=\"Repetition penalty\", minimum=1.0, maximum=2.0, step=0.05, value=1.1)\n",
+ "\n",
+ "                with gr.Accordion(\"PDF Export Settings\", open=False):\n",
+ "                    font_size = gr.Dropdown(choices=[\"8\", \"10\", \"12\", \"14\", \"16\", \"18\"], value=\"12\", label=\"Font Size\")\n",
+ "                    line_spacing = gr.Dropdown(choices=[1.0, 1.15, 1.5, 2.0], value=1.15, label=\"Line Spacing\")\n",
+ "                    alignment = gr.Dropdown(choices=[\"Left\", \"Center\", \"Right\", \"Justified\"], value=\"Justified\", label=\"Text Alignment\")\n",
+ "                    image_size = gr.Dropdown(choices=[\"Small\", \"Medium\", \"Large\"], value=\"Medium\", label=\"Image Size in PDF\")\n",
+ "\n",
+ "                process_btn = gr.Button(\"🚀 Process Image\", variant=\"primary\", elem_classes=[\"process-button\"], size=\"lg\")\n",
+ "                clear_btn = gr.Button(\"🗑️ Clear All\", variant=\"secondary\")\n",
+ "\n",
+ "            # Right Column (Outputs)\n",
+ "            with gr.Column(scale=2):\n",
+ "                with gr.Tabs() as tabs:\n",
+ "                    with gr.Tab(\"📝 Extracted Content\"):\n",
+ "                        raw_output = gr.Textbox(label=\"Model Output\", interactive=False, lines=15, show_copy_button=True)\n",
+ "\n",
+ "                        gr.Markdown(\"[prithivMLmods🤗](https://huggingface.co/prithivMLmods)\")\n",
+ "\n",
+ "                    with gr.Tab(\"📰 Markdown Preview\"):\n",
+ "                        with gr.Accordion(\"(Result.md)\", open=True):\n",
+ "                            markdown_output = gr.Markdown()\n",
+ "\n",
+ "                    with gr.Tab(\"📋 PDF Preview\"):\n",
+ "                        generate_pdf_btn = gr.Button(\"📄 Generate PDF & Render\", variant=\"primary\")\n",
+ "                        pdf_output_file = gr.File(label=\"Download Generated PDF\", interactive=False)\n",
+ "                        pdf_preview_gallery = gr.Gallery(label=\"PDF Page Preview\", show_label=True, elem_id=\"gallery\", columns=2, object_fit=\"contain\", height=\"auto\")\n",
+ "\n",
+ "        # Event Handlers\n",
+ "        def clear_all_outputs():\n",
+ "            return None, \"\", \"Model output will appear here.\", \"\", None, None\n",
+ "\n",
+ "        # The .click() event streams the output from the generator function\n",
+ "        process_btn.click(\n",
+ "            fn=process_document,\n",
+ "            inputs=[image_input, prompt_input, max_new_tokens, temperature, top_p, top_k, repetition_penalty],\n",
+ "            outputs=[raw_output, markdown_output]\n",
+ "        )\n",
+ "\n",
+ "        generate_pdf_btn.click(\n",
+ "            fn=generate_and_preview_pdf,\n",
+ "            inputs=[image_input, raw_output, font_size, line_spacing, alignment, image_size],\n",
+ "            outputs=[pdf_output_file, pdf_preview_gallery]\n",
+ "        )\n",
+ "\n",
+ "        clear_btn.click(\n",
+ "            clear_all_outputs,\n",
+ "            outputs=[image_input, prompt_input, raw_output, markdown_output, pdf_output_file, pdf_preview_gallery]\n",
+ "        )\n",
+ "    return demo\n",
+ "\n",
+ "if __name__ == \"__main__\":\n",
+ "    demo = create_gradio_interface()\n",
+ "    # Use queue() for better handling of multiple users and streaming\n",
+ "    demo.queue(max_size=20).launch(share=True, show_error=True)"
+ ]
+ },
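+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "*(Optional)* A minimal single-image inference sketch, independent of the Gradio UI. It assumes the model-loading section of the cell above has already run (so `models` and `processors` exist) and that a test image named `sample.png` sits in the working directory (`sample.png` is a placeholder; substitute any document image)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Minimal one-shot OCR sketch (assumes `models`/`processors` from the cell\n",
+ "# above and a local test image; \"sample.png\" is a placeholder filename).\n",
+ "from PIL import Image\n",
+ "import torch\n",
+ "\n",
+ "sample = Image.open(\"sample.png\").convert(\"RGB\")\n",
+ "model = models[\"Gliese-OCR-7B-Post1.0\"]\n",
+ "processor = processors[\"Gliese-OCR-7B-Post1.0\"]\n",
+ "\n",
+ "messages = [{\n",
+ "    \"role\": \"user\",\n",
+ "    \"content\": [\n",
+ "        {\"type\": \"image\", \"image\": sample},\n",
+ "        {\"type\": \"text\", \"text\": \"Precisely OCR the Image.\"},\n",
+ "    ],\n",
+ "}]\n",
+ "chat = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
+ "inputs = processor(text=[chat], images=[sample], return_tensors=\"pt\").to(model.device)\n",
+ "\n",
+ "with torch.inference_mode():\n",
+ "    output_ids = model.generate(**inputs, max_new_tokens=512)\n",
+ "\n",
+ "# Decode only the newly generated tokens, skipping the echoed prompt.\n",
+ "result = processor.batch_decode(\n",
+ "    output_ids[:, inputs[\"input_ids\"].shape[1]:], skip_special_tokens=True\n",
+ ")[0]\n",
+ "print(result)"
+ ]
+ }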
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "gpuType": "T4",
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ },
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+ }