# Nanonets-OCR2-3B-AWQ-nvfp4
**Nanonets-OCR2-3B-AWQ-nvfp4** is an experimental quantized version of the Nanonets-OCR2-3B model. It has 3 billion parameters and ships with multiple tensor types (F32, BF16, F8_E4M3, and U8), optimized for efficient inference. The model is based on the Qwen/Qwen2.5-VL-3B-Instruct base model and fine-tuned on Nanonets-OCR2 data for advanced image-to-markdown OCR: recognizing LaTeX equations, complex tables, signatures, watermarks, checkboxes, and multilingual handwritten text, and emitting structured markdown with semantic tags suited to downstream processing by large language models.

Although experimental and not yet deployed by any inference provider, it supports image-text-to-text processing for complex document workflows involving content such as flowcharts and organizational charts, with applications in business, financial, and multilingual domains. This quantized variant is part of an ongoing effort to run this OCR technology efficiently on lighter hardware while preserving its extraction capabilities.
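For reference, here is a minimal non-streaming inference sketch using the standard Qwen2.5-VL classes from Transformers. The file name `document.png` and the prompt text are illustrative placeholders; the full Gradio demo below shows the streaming variant.

```python
# Minimal inference sketch (no Gradio). Assumes the quantized weights load
# with the standard Qwen2.5-VL classes; prompt and file name are illustrative.
import torch
from PIL import Image
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor

MODEL_ID = "prithivMLmods/Nanonets-OCR2-3B-AWQ-nvfp4"
processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    MODEL_ID, torch_dtype="auto", device_map="auto", trust_remote_code=True
).eval()

image = Image.open("document.png")  # hypothetical input file
messages = [{
    "role": "user",
    "content": [
        {"type": "image"},
        {"type": "text", "text": "Extract this document as structured markdown."},
    ],
}]
prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = processor(text=[prompt], images=[image], return_tensors="pt").to(model.device)

with torch.inference_mode():
    out = model.generate(**inputs, max_new_tokens=1024)

# Strip the prompt tokens before decoding so only the answer remains.
trimmed = out[:, inputs["input_ids"].shape[1]:]
print(processor.batch_decode(trimmed, skip_special_tokens=True)[0])
```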
## Quick Start with Transformers 🤗
### Install the required packages

```bash
pip install gradio torch torchvision transformers==4.57.1 accelerate matplotlib anyio compressed-tensors
```
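Optionally, run a quick sanity check of the environment before launching the demo (a convenience sketch, not part of the original instructions):

```python
# Verify the pinned transformers version and GPU availability.
import torch
import transformers

print("transformers:", transformers.__version__)  # expected 4.57.1 per the pin above
print("CUDA available:", torch.cuda.is_available())
```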
### Run Demo
```python
import os
import time
from threading import Thread

import gradio as gr
import torch
from PIL import Image
from transformers import (
    Qwen2_5_VLForConditionalGeneration,
    AutoProcessor,
    TextIteratorStreamer,
)

css = """
#main-title h1 {
    font-size: 2.3em !important;
}
#output-title h2 {
    font-size: 2.1em !important;
}
"""

MAX_MAX_NEW_TOKENS = 4096
DEFAULT_MAX_NEW_TOKENS = 1024
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

MODEL_ID = "prithivMLmods/Nanonets-OCR2-3B-AWQ-nvfp4"
print(f"Loading model: {MODEL_ID}")
processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    MODEL_ID,
    trust_remote_code=True,
    torch_dtype="auto",
).to(device).eval()
print("Model loaded successfully.")


def generate_image(text: str, image: Image.Image,
                   max_new_tokens: int, temperature: float, top_p: float,
                   top_k: int, repetition_penalty: float):
    """
    Generates responses using the Nanonets-OCR2-3B model for image input.
    Yields the accumulated raw text and its Markdown-rendered form.
    """
    if image is None:
        yield "Please upload an image.", "Please upload an image."
        return

    messages = [{
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": text},
        ],
    }]
    prompt_full = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = processor(
        text=[prompt_full],
        images=[image],
        return_tensors="pt",
        padding=True,
    ).to(device)

    # Stream decoded tokens from a background thread so the UI updates live.
    streamer = TextIteratorStreamer(
        processor.tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    generation_kwargs = {
        **inputs,
        "streamer": streamer,
        "max_new_tokens": max_new_tokens,
        "do_sample": True,
        "temperature": temperature,
        "top_p": top_p,
        "top_k": top_k,
        "repetition_penalty": repetition_penalty,
    }
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    buffer = ""
    for new_text in streamer:
        buffer += new_text
        time.sleep(0.01)
        yield buffer, buffer


with gr.Blocks(css=css) as demo:
    gr.Markdown("# **Nanonets-OCR2-3B-AWQ-nvfp4**", elem_id="main-title")
    with gr.Row():
        with gr.Column(scale=2):
            image_query = gr.Textbox(label="Query Input", placeholder="Enter your query here...")
            image_upload = gr.Image(type="pil", label="Upload Image", height=290)
            image_submit = gr.Button("Submit", variant="primary")
            with gr.Accordion("Advanced options", open=False):
                max_new_tokens = gr.Slider(label="Max new tokens", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)
                temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=4.0, step=0.1, value=0.7)
                top_p = gr.Slider(label="Top-p (nucleus sampling)", minimum=0.05, maximum=1.0, step=0.05, value=0.9)
                top_k = gr.Slider(label="Top-k", minimum=1, maximum=1000, step=1, value=50)
                repetition_penalty = gr.Slider(label="Repetition penalty", minimum=1.0, maximum=2.0, step=0.05, value=1.1)
        with gr.Column(scale=3):
            gr.Markdown("## Output", elem_id="output-title")
            output = gr.Textbox(label="Raw Output Stream", interactive=False, lines=15, show_copy_button=True)
            with gr.Accordion("(Result.md)", open=False):
                markdown_output = gr.Markdown(label="(Result.Md)")

    image_submit.click(
        fn=generate_image,
        inputs=[image_query, image_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],
        outputs=[output, markdown_output],
    )

if __name__ == "__main__":
    demo.queue(max_size=50).launch(debug=True)
```
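The `generate_image` generator can also be consumed outside Gradio. A minimal sketch, assuming the demo code above has already been executed and `invoice.png` stands in for a real document image:

```python
from PIL import Image

img = Image.open("invoice.png")  # hypothetical input file
raw = ""
for raw, _md in generate_image(
    "Extract this page as structured markdown.", img,
    max_new_tokens=512, temperature=0.7, top_p=0.9,
    top_k=50, repetition_penalty=1.1,
):
    pass  # each iteration yields the text accumulated so far
print(raw)  # final transcription once the stream finishes
```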
All restrictions and usage guidelines of the original Nanonets-OCR2-3B model apply to this quantized variant.
## Model and Resource Links
| Resource Type | Description | Link |
|---|---|---|
| Original Model Card | Official release of Nanonets-OCR2-3B by Nanonets | [nanonets/Nanonets-OCR2-3B](https://huggingface.co/nanonets/Nanonets-OCR2-3B) |
| Optimized Model (AWQ-nvfp4) | Quantized version optimized for efficient inference and deployment | [prithivMLmods/Nanonets-OCR2-3B-AWQ-nvfp4](https://huggingface.co/prithivMLmods/Nanonets-OCR2-3B-AWQ-nvfp4) |
| Demo Space | Interactive demo hosted on Hugging Face Spaces | Multimodal-OCR3 Demo |