prithivMLmods committed
Commit 7c7fad0 · verified · 1 Parent(s): a9d4621

upload demo notebook

Gliese-OCR-7B-Post2.0-final(4bit)/Gliese_OCR_7B_Post2_0_final.ipynb ADDED
@@ -0,0 +1,299 @@
+ {
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": [],
+ "gpuType": "T4"
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "language_info": {
+ "name": "python"
+ },
+ "accelerator": "GPU"
+ },
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# **Gliese-OCR-7B-Post2.0-final**"
+ ],
+ "metadata": {
+ "id": "zeCQura_Ri5N"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "\n",
+ "The [Gliese-OCR-7B-Post2.0-final](https://huggingface.co/prithivMLmods/Gliese-OCR-7B-Post2.0-final) model is a refined and optimized version of Gliese-OCR-7B-Post1.0, built on the Qwen2.5-VL architecture. It is the final iteration in the Gliese-OCR series, offering improved efficiency, precision, and visualization capabilities for document OCR, visual analysis, and information extraction.\n",
+ "\n",
+ "Fine-tuned with extended document visualization data and OCR-focused objectives, this model delivers superior accuracy across a wide range of document types, including scanned PDFs, handwritten pages, structured forms, and analytical reports."
+ ],
+ "metadata": {
+ "id": "pTrk5nv-HAMV"
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "oXHcxUMZGah0"
+ },
+ "outputs": [],
+ "source": [
+ "%%capture\n",
+ "!pip install git+https://github.com/huggingface/accelerate.git \\\n",
+ "    git+https://github.com/huggingface/peft.git \\\n",
+ "    transformers-stream-generator huggingface_hub albumentations \\\n",
+ "    pyvips-binary qwen-vl-utils sentencepiece opencv-python docling-core \\\n",
+ "    python-docx torchvision safetensors matplotlib num2words\n",
+ "\n",
+ "!pip install transformers requests pymupdf hf_xet spaces pyvips pillow gradio \\\n",
+ "    einops torch fpdf timm av decord bitsandbytes\n",
+ "# Hold tight, this will take around 1-2 minutes."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### **Load Gliese-OCR-7B-Post2.0 with 4-bit quantization**"
+ ],
+ "metadata": {
+ "id": "5vGwuV-4HaJv"
+ }
+ },
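+ {
+ "cell_type": "markdown",
+ "source": [
+ "A rough back-of-envelope estimate (an approximation, not a measurement) of why 4-bit NF4 loading matters on the 16 GB T4 this notebook targets: a 7B-parameter model needs about 14 GB for weights alone in fp16 (2 bytes per weight), but only about 3.5 GB at 4 bits (0.5 bytes per weight), plus overhead for quantization constants, activations, and the KV cache."
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Approximate weight memory for a 7B-parameter model (weights only;\n",
+ "# ignores activations, KV cache, and quantization-constant overhead).\n",
+ "params = 7e9\n",
+ "bytes_fp16 = params * 2    # fp16: 2 bytes per weight\n",
+ "bytes_nf4 = params * 0.5   # NF4: 4 bits per weight\n",
+ "print(f\"fp16 weights: ~{bytes_fp16 / 1e9:.1f} GB\")\n",
+ "print(f\"NF4 weights:  ~{bytes_nf4 / 1e9:.1f} GB\")"
+ ],
+ "metadata": {},
+ "execution_count": null,
+ "outputs": []
+ },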
+ {
+ "cell_type": "code",
+ "source": [
+ "import os\n",
+ "import time\n",
+ "from threading import Thread\n",
+ "from typing import Iterable\n",
+ "\n",
+ "import gradio as gr\n",
+ "import spaces\n",
+ "import torch\n",
+ "from PIL import Image\n",
+ "\n",
+ "from transformers import (\n",
+ "    Qwen2_5_VLForConditionalGeneration,\n",
+ "    AutoProcessor,\n",
+ "    TextIteratorStreamer,\n",
+ "    BitsAndBytesConfig,\n",
+ ")\n",
+ "\n",
+ "from gradio.themes import Soft\n",
+ "from gradio.themes.utils import colors, fonts, sizes\n",
+ "\n",
+ "# --- Theme and CSS Setup ---\n",
+ "colors.steel_blue = colors.Color(\n",
+ "    name=\"steel_blue\",\n",
+ "    c50=\"#EBF3F8\",\n",
+ "    c100=\"#D3E5F0\",\n",
+ "    c200=\"#A8CCE1\",\n",
+ "    c300=\"#7DB3D2\",\n",
+ "    c400=\"#529AC3\",\n",
+ "    c500=\"#4682B4\",\n",
+ "    c600=\"#3E72A0\",\n",
+ "    c700=\"#36638C\",\n",
+ "    c800=\"#2E5378\",\n",
+ "    c900=\"#264364\",\n",
+ "    c950=\"#1E3450\",\n",
+ ")\n",
+ "\n",
+ "class SteelBlueTheme(Soft):\n",
+ "    def __init__(\n",
+ "        self,\n",
+ "        *,\n",
+ "        primary_hue: colors.Color | str = colors.gray,\n",
+ "        secondary_hue: colors.Color | str = colors.steel_blue,\n",
+ "        neutral_hue: colors.Color | str = colors.slate,\n",
+ "        text_size: sizes.Size | str = sizes.text_lg,\n",
+ "        font: fonts.Font | str | Iterable[fonts.Font | str] = (\n",
+ "            fonts.GoogleFont(\"Outfit\"), \"Arial\", \"sans-serif\",\n",
+ "        ),\n",
+ "        font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (\n",
+ "            fonts.GoogleFont(\"IBM Plex Mono\"), \"ui-monospace\", \"monospace\",\n",
+ "        ),\n",
+ "    ):\n",
+ "        super().__init__(\n",
+ "            primary_hue=primary_hue,\n",
+ "            secondary_hue=secondary_hue,\n",
+ "            neutral_hue=neutral_hue,\n",
+ "            text_size=text_size,\n",
+ "            font=font,\n",
+ "            font_mono=font_mono,\n",
+ "        )\n",
+ "        super().set(\n",
+ "            background_fill_primary=\"*primary_50\",\n",
+ "            background_fill_primary_dark=\"*primary_900\",\n",
+ "            body_background_fill=\"linear-gradient(135deg, *primary_200, *primary_100)\",\n",
+ "            body_background_fill_dark=\"linear-gradient(135deg, *primary_900, *primary_800)\",\n",
+ "            button_primary_text_color=\"white\",\n",
+ "            button_primary_text_color_hover=\"white\",\n",
+ "            button_primary_background_fill=\"linear-gradient(90deg, *secondary_500, *secondary_600)\",\n",
+ "            button_primary_background_fill_hover=\"linear-gradient(90deg, *secondary_600, *secondary_700)\",\n",
+ "            button_primary_background_fill_dark=\"linear-gradient(90deg, *secondary_600, *secondary_800)\",\n",
+ "            button_primary_background_fill_hover_dark=\"linear-gradient(90deg, *secondary_500, *secondary_500)\",\n",
+ "            button_secondary_text_color=\"black\",\n",
+ "            button_secondary_text_color_hover=\"white\",\n",
+ "            button_secondary_background_fill=\"linear-gradient(90deg, *primary_300, *primary_300)\",\n",
+ "            button_secondary_background_fill_hover=\"linear-gradient(90deg, *primary_400, *primary_400)\",\n",
+ "            button_secondary_background_fill_dark=\"linear-gradient(90deg, *primary_500, *primary_600)\",\n",
+ "            button_secondary_background_fill_hover_dark=\"linear-gradient(90deg, *primary_500, *primary_500)\",\n",
+ "            slider_color=\"*secondary_500\",\n",
+ "            slider_color_dark=\"*secondary_600\",\n",
+ "            block_title_text_weight=\"600\",\n",
+ "            block_border_width=\"3px\",\n",
+ "            block_shadow=\"*shadow_drop_lg\",\n",
+ "            button_primary_shadow=\"*shadow_drop_lg\",\n",
+ "            button_large_padding=\"11px\",\n",
+ "            color_accent_soft=\"*primary_100\",\n",
+ "            block_label_background_fill=\"*primary_200\",\n",
+ "        )\n",
+ "\n",
+ "steel_blue_theme = SteelBlueTheme()\n",
+ "\n",
+ "css = \"\"\"\n",
+ "#main-title h1 {\n",
+ "    font-size: 2.3em !important;\n",
+ "}\n",
+ "#output-title h2 {\n",
+ "    font-size: 2.1em !important;\n",
+ "}\n",
+ "\"\"\"\n",
+ "\n",
+ "# --- Model Configuration ---\n",
+ "MAX_MAX_NEW_TOKENS = 4096\n",
+ "DEFAULT_MAX_NEW_TOKENS = 1024\n",
+ "MAX_INPUT_TOKEN_LENGTH = int(os.getenv(\"MAX_INPUT_TOKEN_LENGTH\", \"4096\"))\n",
+ "\n",
+ "# Print basic CUDA diagnostics so quantized-loading failures are easier to debug.\n",
+ "print(\"CUDA_VISIBLE_DEVICES=\", os.environ.get(\"CUDA_VISIBLE_DEVICES\"))\n",
+ "print(\"torch.__version__ =\", torch.__version__)\n",
+ "print(\"torch.version.cuda =\", torch.version.cuda)\n",
+ "print(\"cuda available:\", torch.cuda.is_available())\n",
+ "print(\"cuda device count:\", torch.cuda.device_count())\n",
+ "if torch.cuda.is_available():\n",
+ "    print(\"current device:\", torch.cuda.current_device())\n",
+ "    print(\"device name:\", torch.cuda.get_device_name(torch.cuda.current_device()))\n",
+ "\n",
+ "# 4-bit quantization configuration: load the weights as NF4 to save VRAM,\n",
+ "# computing in float16, with double quantization of the quantization constants.\n",
+ "quantization_config = BitsAndBytesConfig(\n",
+ "    load_in_4bit=True,\n",
+ "    bnb_4bit_compute_dtype=torch.float16,\n",
+ "    bnb_4bit_quant_type=\"nf4\",\n",
+ "    bnb_4bit_use_double_quant=True,\n",
+ ")\n",
+ "\n",
+ "# Load Gliese-OCR-7B-Post2.0-final with 4-bit quantization\n",
+ "MODEL_ID_X = \"prithivMLmods/Gliese-OCR-7B-Post2.0-final\"\n",
+ "print(f\"Loading {MODEL_ID_X} 🤗 This will use 4-bit quantization to save VRAM.\")\n",
+ "\n",
+ "processor = AutoProcessor.from_pretrained(MODEL_ID_X, trust_remote_code=True)\n",
+ "model = Qwen2_5_VLForConditionalGeneration.from_pretrained(\n",
+ "    MODEL_ID_X,\n",
+ "    trust_remote_code=True,\n",
+ "    quantization_config=quantization_config,\n",
+ "    device_map=\"auto\",\n",
+ ").eval()\n",
+ "\n",
+ "\n",
+ "@spaces.GPU\n",
+ "def generate_image(text: str, image: Image.Image,\n",
+ "                   max_new_tokens: int, temperature: float, top_p: float,\n",
+ "                   top_k: int, repetition_penalty: float):\n",
+ "    \"\"\"\n",
+ "    Generates a response from the Gliese-OCR model for an image input.\n",
+ "    Yields raw text and Markdown-formatted text as they are produced.\n",
+ "    \"\"\"\n",
+ "    if image is None:\n",
+ "        yield \"Please upload an image.\", \"Please upload an image.\"\n",
+ "        return\n",
+ "\n",
+ "    messages = [{\n",
+ "        \"role\": \"user\",\n",
+ "        \"content\": [\n",
+ "            {\"type\": \"image\"},\n",
+ "            {\"type\": \"text\", \"text\": text},\n",
+ "        ]\n",
+ "    }]\n",
+ "    prompt_full = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
+ "\n",
+ "    inputs = processor(\n",
+ "        text=[prompt_full],\n",
+ "        images=[image],\n",
+ "        return_tensors=\"pt\",\n",
+ "        padding=True).to(model.device)\n",
+ "\n",
+ "    streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)\n",
+ "    generation_kwargs = {\n",
+ "        **inputs,\n",
+ "        \"streamer\": streamer,\n",
+ "        \"max_new_tokens\": max_new_tokens,\n",
+ "        \"do_sample\": True,\n",
+ "        \"temperature\": temperature,\n",
+ "        \"top_p\": top_p,\n",
+ "        \"top_k\": top_k,\n",
+ "        \"repetition_penalty\": repetition_penalty,\n",
+ "    }\n",
+ "    # Run generation in a background thread so tokens can be streamed as they arrive.\n",
+ "    thread = Thread(target=model.generate, kwargs=generation_kwargs)\n",
+ "    thread.start()\n",
+ "    buffer = \"\"\n",
+ "    for new_text in streamer:\n",
+ "        buffer += new_text\n",
+ "        buffer = buffer.replace(\"<|im_end|>\", \"\")\n",
+ "        time.sleep(0.01)\n",
+ "        yield buffer, buffer\n",
+ "\n",
+ "with gr.Blocks(css=css, theme=steel_blue_theme) as demo:\n",
+ "    gr.Markdown(\"# **Gliese-OCR-7B-Post2.0 (4-bit)**\", elem_id=\"main-title\")\n",
+ "    with gr.Row():\n",
+ "        with gr.Column(scale=2):\n",
+ "            image_query = gr.Textbox(label=\"Query Input\", placeholder=\"Enter your query here...\")\n",
+ "            image_upload = gr.Image(type=\"pil\", label=\"Upload Image\", height=290)\n",
+ "\n",
+ "            image_submit = gr.Button(\"Submit\", variant=\"primary\")\n",
+ "\n",
+ "            with gr.Accordion(\"Advanced options\", open=False):\n",
+ "                max_new_tokens = gr.Slider(label=\"Max new tokens\", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)\n",
+ "                temperature = gr.Slider(label=\"Temperature\", minimum=0.1, maximum=4.0, step=0.1, value=0.7)\n",
+ "                top_p = gr.Slider(label=\"Top-p (nucleus sampling)\", minimum=0.05, maximum=1.0, step=0.05, value=0.9)\n",
+ "                top_k = gr.Slider(label=\"Top-k\", minimum=1, maximum=1000, step=1, value=50)\n",
+ "                repetition_penalty = gr.Slider(label=\"Repetition penalty\", minimum=1.0, maximum=2.0, step=0.05, value=1.1)\n",
+ "\n",
+ "        with gr.Column(scale=3):\n",
+ "            gr.Markdown(\"## Output\", elem_id=\"output-title\")\n",
+ "            output = gr.Textbox(label=\"Raw Output Stream\", interactive=False, lines=11, show_copy_button=True)\n",
+ "            with gr.Accordion(\"Result.md\", open=False):\n",
+ "                markdown_output = gr.Markdown(label=\"Result.md\")\n",
+ "\n",
+ "    image_submit.click(\n",
+ "        fn=generate_image,\n",
+ "        inputs=[image_query, image_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],\n",
+ "        outputs=[output, markdown_output]\n",
+ "    )\n",
+ "\n",
+ "if __name__ == \"__main__\":\n",
+ "    demo.queue(max_size=50).launch(show_error=True)"
+ ],
+ "metadata": {
+ "id": "cbREUHUJGrzT"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
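+ {
+ "cell_type": "markdown",
+ "source": [
+ "### **Quick sanity check (no UI)**\n",
+ "\n",
+ "A minimal sketch that runs a single, non-streaming OCR query against the `model` and `processor` loaded above (interrupt the Gradio cell first). The image path `sample_page.png` is a placeholder; substitute any document image."
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Minimal single-shot inference sketch; assumes `model` and `processor` are loaded above.\n",
+ "# \"sample_page.png\" is a placeholder path; replace it with a real document image.\n",
+ "from PIL import Image\n",
+ "\n",
+ "image = Image.open(\"sample_page.png\").convert(\"RGB\")\n",
+ "messages = [{\n",
+ "    \"role\": \"user\",\n",
+ "    \"content\": [\n",
+ "        {\"type\": \"image\"},\n",
+ "        {\"type\": \"text\", \"text\": \"Perform OCR on the image and return the text in Markdown.\"},\n",
+ "    ]\n",
+ "}]\n",
+ "prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
+ "inputs = processor(text=[prompt], images=[image], return_tensors=\"pt\").to(model.device)\n",
+ "\n",
+ "with torch.inference_mode():\n",
+ "    output_ids = model.generate(**inputs, max_new_tokens=1024, do_sample=False)\n",
+ "\n",
+ "# Decode only the newly generated tokens, skipping the prompt.\n",
+ "response = processor.batch_decode(\n",
+ "    output_ids[:, inputs[\"input_ids\"].shape[1]:], skip_special_tokens=True\n",
+ ")[0]\n",
+ "print(response)"
+ ],
+ "metadata": {},
+ "execution_count": null,
+ "outputs": []
+ }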
+ ]
+ }