From b58bcabc764e88d58a949dfdb8de7d7c17319503 Mon Sep 17 00:00:00 2001 From: Juergen Kunz Date: Mon, 19 Jan 2026 11:51:23 +0000 Subject: [PATCH] update --- readme.md | 233 ++++++++++++++++---- test/test.bankstatements.nanonets.ts | 226 +++++++++++++------ test/test.invoices.nanonets.ts | 318 +++++++++++++++++---------- 3 files changed, 559 insertions(+), 218 deletions(-) diff --git a/readme.md b/readme.md index 1dc3c41..6c59a7a 100644 --- a/readme.md +++ b/readme.md @@ -1,19 +1,27 @@ # @host.today/ht-docker-ai 🚀 -Production-ready Docker images for state-of-the-art AI Vision-Language Models. Run powerful multimodal AI locally with GPU acceleration or CPU fallback—no cloud API keys required. +Production-ready Docker images for state-of-the-art AI Vision-Language Models. Run powerful multimodal AI locally with GPU acceleration or CPU fallback—**no cloud API keys required**. + +> 🔥 **Four VLMs, one registry.** From lightweight document OCR to GPT-4o-level vision understanding—pick the right tool for your task. ## Issue Reporting and Security For reporting bugs, issues, or security vulnerabilities, please visit [community.foss.global/](https://community.foss.global/). This is the central community hub for all issue reporting. Developers who sign and comply with our contribution agreement and go through identification can also get a [code.foss.global/](https://code.foss.global/) account to submit Pull Requests directly. +--- + ## 🎯 What's Included -| Model | Parameters | Best For | API | -|-------|-----------|----------|-----| -| **MiniCPM-V 4.5** | 8B | General vision understanding, image analysis, multi-image | Ollama-compatible | -| **PaddleOCR-VL** | 0.9B | Document parsing, table extraction, OCR | OpenAI-compatible | +| Model | Parameters | Best For | API | Port | +|-------|-----------|----------|-----|------| +| **MiniCPM-V 4.5** | 8B | General vision understanding, multi-image analysis | Ollama-compatible | 11434 | +| **PaddleOCR-VL** | 0.9B | Document parsing, table extraction, structured OCR | OpenAI-compatible | 8000 | +| **Nanonets-OCR-s** | ~4B | Document OCR with semantic markdown output | OpenAI-compatible | 8000 | +| **Qwen3-VL-30B** | 30B (A3B) | Advanced visual agents, code generation from images | Ollama-compatible | 11434 | -## 📦 Available Images +--- + +## 📦 Quick Reference: All Available Images ``` code.foss.global/host.today/ht-docker-ai: @@ -25,12 +33,14 @@ code.foss.global/host.today/ht-docker-ai: | `minicpm45v-cpu` | MiniCPM-V 4.5 | CPU only (8GB+ RAM) | 11434 | | `paddleocr-vl` / `paddleocr-vl-gpu` | PaddleOCR-VL | NVIDIA GPU | 8000 | | `paddleocr-vl-cpu` | PaddleOCR-VL | CPU only | 8000 | +| `nanonets-ocr` | Nanonets-OCR-s | NVIDIA GPU (8-10GB VRAM) | 8000 | +| `qwen3vl` | Qwen3-VL-30B-A3B | NVIDIA GPU (~20GB VRAM) | 11434 | --- ## 🖼️ MiniCPM-V 4.5 -A GPT-4o level multimodal LLM from OpenBMB—handles image understanding, OCR, multi-image analysis, and visual reasoning across 30+ languages. +A GPT-4o level multimodal LLM from OpenBMB—handles image understanding, OCR, multi-image analysis, and visual reasoning across **30+ languages**. ### Quick Start @@ -95,7 +105,7 @@ curl http://localhost:11434/api/chat -d '{ ## 📄 PaddleOCR-VL -A specialized 0.9B Vision-Language Model optimized for document parsing. Native support for tables, formulas, charts, and text extraction in 109 languages. +A specialized **0.9B Vision-Language Model** optimized for document parsing. Native support for tables, formulas, charts, and text extraction in **109 languages**. 
### Quick Start
@@ -185,8 +195,121 @@ PaddleOCR-VL accepts images in multiple formats:

 ---

+## 🔍 Nanonets-OCR-s
+
+A **Qwen2.5-VL-3B** model fine-tuned specifically for document OCR. Outputs structured markdown with semantic HTML tags—perfect for preserving document structure.
+
+### Key Features
+
+- 📝 **Semantic output:** Tables → HTML, equations → LaTeX, watermarks/page numbers → tagged
+- 🌍 **Multilingual:** Inherits Qwen's broad language support
+- ⚡ **Efficient:** ~8-10GB VRAM, runs great on consumer GPUs
+- 🔌 **OpenAI-compatible:** Drop-in replacement for existing pipelines
+
+### Quick Start
+
+```bash
+docker run -d \
+  --name nanonets \
+  --gpus all \
+  -p 8000:8000 \
+  -v hf-cache:/root/.cache/huggingface \
+  code.foss.global/host.today/ht-docker-ai:nanonets-ocr
+```
+
+### API Usage
+
+```bash
+curl http://localhost:8000/v1/chat/completions \
+  -H "Content-Type: application/json" \
+  -d '{
+    "model": "nanonets/Nanonets-OCR-s",
+    "messages": [{
+      "role": "user",
+      "content": [
+        {"type": "image_url", "image_url": {"url": "data:image/png;base64,<BASE64_IMAGE>"}},
+        {"type": "text", "text": "Extract the text from the above document as if you were reading it naturally. Return the tables in html format. Return the equations in LaTeX representation."}
+      ]
+    }],
+    "temperature": 0.0,
+    "max_tokens": 4096
+  }'
+```
+
+### Output Format
+
+Nanonets-OCR-s returns markdown with semantic tags:
+
+| Element | Output Format |
+|---------|---------------|
+| Tables | `<table>...</table>` (HTML) |
+| Equations | `$...$` (LaTeX) |
+| Images | `<img>description</img>` |
+| Watermarks | `<watermark>OFFICIAL COPY</watermark>` |
+| Page numbers | `<page_number>14</page_number>` |
+
+### Performance
+
+| Metric | Value |
+|--------|-------|
+| Speed | 3–8 seconds per page |
+| VRAM | ~8-10GB |
+
+---
+
+## 🧠 Qwen3-VL-30B-A3B
+
+The **most powerful** Qwen vision model—30B parameters with 3B active (MoE architecture). Handles complex visual reasoning, code generation from screenshots, and visual agent capabilities.
+
+### Key Features
+
+- 🚀 **256K context** (expandable to 1M tokens!)
+- 🤖 **Visual agent capabilities** — can plan and execute multi-step tasks
+- 💻 **Code generation from images** — screenshot → working code
+- 🎯 **State-of-the-art** visual reasoning
+
+### Quick Start
+
+```bash
+docker run -d \
+  --name qwen3vl \
+  --gpus all \
+  -p 11434:11434 \
+  -v ollama-data:/root/.ollama \
+  code.foss.global/host.today/ht-docker-ai:qwen3vl
+```
+
+Then pull the model (one-time, ~20GB):
+```bash
+docker exec qwen3vl ollama pull qwen3-vl:30b-a3b
+```
+
+### API Usage
+
+```bash
+curl http://localhost:11434/api/chat -d '{
+  "model": "qwen3-vl:30b-a3b",
+  "messages": [{
+    "role": "user",
+    "content": "Analyze this screenshot and write the code to recreate this UI",
+    "images": ["<BASE64_IMAGE>"]
+  }]
+}'
+```
+
+### Hardware Requirements
+
+| Requirement | Value |
+|-------------|-------|
+| VRAM | ~20GB (Q4_K_M quantization) |
+| Context | 256K tokens default |
+
+---
+
 ## 🐳 Docker Compose

+Run multiple VLMs together for maximum flexibility:
+
 ```yaml
 version: '3.8'
 services:
@@ -206,7 +329,7 @@ services:
             capabilities: [gpu]
     restart: unless-stopped

-  # Document parsing / OCR
+  # Document parsing / OCR (table specialist)
   paddleocr:
     image: code.foss.global/host.today/ht-docker-ai:paddleocr-vl
     ports:
@@ -222,6 +345,22 @@ services:
             capabilities: [gpu]
     restart: unless-stopped

+  # Document OCR with semantic output
+  nanonets:
+    image: code.foss.global/host.today/ht-docker-ai:nanonets-ocr
+    ports:
+      - "8001:8000"
+    volumes:
+      - hf-cache:/root/.cache/huggingface
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              count: 1
+              capabilities: [gpu]
+    restart: unless-stopped
+
 volumes:
   ollama-data:
   hf-cache:
@@ -231,7 +370,7 @@ volumes:

 ## ⚙️ Environment Variables

-### MiniCPM-V 4.5
+### MiniCPM-V 4.5 & Qwen3-VL (Ollama-based)

 | Variable | Default | Description |
 |----------|---------|-------------|
@@ -239,13 +378,47 @@ volumes:
 | `OLLAMA_HOST` | `0.0.0.0` | API bind address |
 | `OLLAMA_ORIGINS` | `*` | Allowed CORS origins |

-### PaddleOCR-VL
+### PaddleOCR-VL & Nanonets-OCR (vLLM-based)

 | Variable | Default | Description |
 |----------|---------|-------------|
-| `MODEL_NAME` | `PaddlePaddle/PaddleOCR-VL` | HuggingFace model ID |
-| `SERVER_HOST` | `0.0.0.0` | API bind address |
-| `SERVER_PORT` | `8000` | API port |
+| `MODEL_NAME` | Model-specific | HuggingFace model ID |
+| `HOST` | `0.0.0.0` | API bind address |
+| `PORT` | `8000` | API port |
+| `MAX_MODEL_LEN` | `8192` | Maximum sequence length |
+| `GPU_MEMORY_UTILIZATION` | `0.9` | GPU memory usage (0-1) |
+
+---
+
+## 🏗️ Architecture Notes
+
+### Dual-VLM Consensus Strategy
+
+For production document extraction, consider using multiple models together:
+
+1. **Pass 1:** MiniCPM-V visual extraction (images → JSON)
+2. **Pass 2:** PaddleOCR-VL table recognition (images → markdown → JSON)
+3. **Consensus:** If results match → Done (fast path)
+4. **Pass 3+:** Additional visual passes if needed
+
+This dual-VLM approach catches extraction errors that single models miss.
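+
+As a rough sketch of the consensus step: compare the key fields from two passes and only pay for an extra pass on disagreement. The helper below is modeled on the `invoicesMatch` check from this repo's test suite; the `IInvoice` fields and the 0.02 rounding tolerance shown here are illustrative, not a fixed API.
+
+```typescript
+// Illustrative invoice shape; adjust fields to whatever your extraction returns.
+interface IInvoice {
+  invoice_number: string;
+  invoice_date: string; // e.g. "2024-03-31"
+  total_amount: number;
+}
+
+// Two independent extractions "agree" when the key fields match;
+// amounts are compared with a small tolerance to absorb rounding noise.
+function invoicesMatch(a: IInvoice, b: IInvoice): boolean {
+  return (
+    a.invoice_number.toLowerCase() === b.invoice_number.toLowerCase() &&
+    a.invoice_date === b.invoice_date &&
+    Math.abs(a.total_amount - b.total_amount) < 0.02
+  );
+}
+
+// Pass 1 and Pass 2 come from different models (e.g. MiniCPM-V and PaddleOCR-VL);
+// the additional visual pass only runs when the first two disagree.
+async function extractWithConsensus(
+  pass1: () => Promise<IInvoice>,
+  pass2: () => Promise<IInvoice>,
+  extraPass: () => Promise<IInvoice>,
+): Promise<IInvoice> {
+  const [a, b] = await Promise.all([pass1(), pass2()]);
+  if (invoicesMatch(a, b)) return a; // fast path: models agree
+  return extraPass();                // disagreement: run another pass
+}
+```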
+ +### Why Multi-Model Works + +- **Different architectures:** Independent models cross-validate each other +- **Specialized strengths:** PaddleOCR-VL excels at tables; MiniCPM-V handles general vision +- **Native processing:** All VLMs see original images—no intermediate structure loss + +### Model Selection Guide + +| Task | Recommended Model | +|------|-------------------| +| General image understanding | MiniCPM-V 4.5 | +| Table extraction from documents | PaddleOCR-VL | +| Document OCR with structure preservation | Nanonets-OCR-s | +| Complex visual reasoning / code generation | Qwen3-VL-30B | +| Multi-image analysis | MiniCPM-V 4.5 | +| Visual agent tasks | Qwen3-VL-30B | --- @@ -265,37 +438,16 @@ cd ht-docker-ai --- -## 🏗️ Architecture Notes - -### Dual-VLM Consensus Strategy - -For production document extraction, consider using both models together: - -1. **Pass 1:** MiniCPM-V visual extraction (images → JSON) -2. **Pass 2:** PaddleOCR-VL table recognition (images → markdown → JSON) -3. **Consensus:** If results match → Done (fast path) -4. **Pass 3+:** Additional visual passes if needed - -This dual-VLM approach catches extraction errors that single models miss. - -### Why This Works - -- **Different architectures:** Two independent models cross-validate each other -- **Specialized strengths:** PaddleOCR-VL excels at tables; MiniCPM-V handles general vision -- **Native processing:** Both VLMs see original images—no intermediate HTML/structure loss - ---- - ## 🔍 Troubleshooting ### Model download hangs ```bash docker logs -f ``` -Model downloads can take several minutes (~5GB for MiniCPM-V). +Model downloads can take several minutes (~5GB for MiniCPM-V, ~20GB for Qwen3-VL). ### Out of memory -- **GPU:** Use the CPU variant or upgrade VRAM +- **GPU:** Use a lighter model variant or upgrade VRAM - **CPU:** Increase container memory: `--memory=16g` ### API not responding @@ -315,6 +467,13 @@ sudo nvidia-ctk runtime configure --runtime=docker sudo systemctl restart docker ``` +### GPU Memory Contention (Multi-Model) + +When running multiple VLMs on a single GPU: +- vLLM and Ollama both need significant GPU memory +- **Single GPU:** Run services sequentially (stop one before starting another) +- **Multi-GPU:** Assign each service to a different GPU via `CUDA_VISIBLE_DEVICES` + --- ## License and Legal Information diff --git a/test/test.bankstatements.nanonets.ts b/test/test.bankstatements.nanonets.ts index 0a945af..f4b4acc 100644 --- a/test/test.bankstatements.nanonets.ts +++ b/test/test.bankstatements.nanonets.ts @@ -28,12 +28,19 @@ interface ITransaction { amount: number; } +interface IImageData { + base64: string; + width: number; + height: number; + pageNum: number; +} + interface ITestCase { name: string; pdfPath: string; jsonPath: string; markdownPath?: string; - images?: string[]; + images?: IImageData[]; } // Nanonets-specific prompt for document OCR to markdown @@ -50,12 +57,48 @@ const JSON_EXTRACTION_PROMPT = `Extract ALL transactions from this bank statemen STATEMENT: `; +// Constants for smart batching +const MAX_VISUAL_TOKENS = 28000; // ~32K context minus prompt/output headroom +const PATCH_SIZE = 14; // Qwen2.5-VL uses 14x14 patches + /** - * Convert PDF to PNG images using ImageMagick + * Estimate visual tokens for an image based on dimensions */ -function convertPdfToImages(pdfPath: string): string[] { +function estimateVisualTokens(width: number, height: number): number { + return Math.ceil((width * height) / (PATCH_SIZE * PATCH_SIZE)); +} + +/** + * Batch images to 
fit within context window + */ +function batchImages(images: IImageData[]): IImageData[][] { + const batches: IImageData[][] = []; + let currentBatch: IImageData[] = []; + let currentTokens = 0; + + for (const img of images) { + const imgTokens = estimateVisualTokens(img.width, img.height); + + if (currentTokens + imgTokens > MAX_VISUAL_TOKENS && currentBatch.length > 0) { + batches.push(currentBatch); + currentBatch = [img]; + currentTokens = imgTokens; + } else { + currentBatch.push(img); + currentTokens += imgTokens; + } + } + if (currentBatch.length > 0) batches.push(currentBatch); + + return batches; +} + +/** + * Convert PDF to JPEG images using ImageMagick with dimension tracking + */ +function convertPdfToImages(pdfPath: string): IImageData[] { const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-')); - const outputPattern = path.join(tempDir, 'page-%d.png'); + const outputPattern = path.join(tempDir, 'page-%d.jpg'); try { execSync( @@ -63,13 +106,24 @@ function convertPdfToImages(pdfPath: string): string[] { { stdio: 'pipe' } ); - const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.png')).sort(); - const images: string[] = []; + const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.jpg')).sort(); + const images: IImageData[] = []; - for (const file of files) { + for (let i = 0; i < files.length; i++) { + const file = files[i]; const imagePath = path.join(tempDir, file); const imageData = fs.readFileSync(imagePath); - images.push(imageData.toString('base64')); + + // Get image dimensions using identify command + const dimensions = execSync(`identify -format "%w %h" "${imagePath}"`, { encoding: 'utf-8' }).trim(); + const [width, height] = dimensions.split(' ').map(Number); + + images.push({ + base64: imageData.toString('base64'), + width, + height, + pageNum: i + 1, + }); } return images; @@ -79,10 +133,28 @@ function convertPdfToImages(pdfPath: string): string[] { } /** - * Convert a single page to markdown using Nanonets-OCR-s + * Convert a batch of pages to markdown using Nanonets-OCR-s */ -async function convertPageToMarkdown(image: string, pageNum: number): Promise { +async function convertBatchToMarkdown(batch: IImageData[]): Promise { const startTime = Date.now(); + const pageNums = batch.map(img => img.pageNum).join(', '); + + // Build content array with all images first, then the prompt + const content: Array<{ type: string; image_url?: { url: string }; text?: string }> = []; + + for (const img of batch) { + content.push({ + type: 'image_url', + image_url: { url: `data:image/jpeg;base64,${img.base64}` }, + }); + } + + // Add prompt with page separator instruction if multiple pages + const promptText = batch.length > 1 + ? 
`${NANONETS_OCR_PROMPT}\n\nPlease clearly separate each page's content with "--- PAGE N ---" markers, where N is the page number starting from ${batch[0].pageNum}.` + : NANONETS_OCR_PROMPT; + + content.push({ type: 'text', text: promptText }); const response = await fetch(`${NANONETS_URL}/chat/completions`, { method: 'POST', @@ -94,12 +166,9 @@ async function convertPageToMarkdown(image: string, pageNum: number): Promise { - console.log(` [${docName}] Converting ${images.length} page(s)...`); +async function convertDocumentToMarkdown(images: IImageData[], docName: string): Promise { + const batches = batchImages(images); + console.log(` [${docName}] Processing ${images.length} page(s) in ${batches.length} batch(es)...`); - const markdownPages: string[] = []; + const markdownParts: string[] = []; - for (let i = 0; i < images.length; i++) { - const markdown = await convertPageToMarkdown(images[i], i + 1); - markdownPages.push(`--- PAGE ${i + 1} ---\n${markdown}`); + for (let i = 0; i < batches.length; i++) { + const batch = batches[i]; + const batchTokens = batch.reduce((sum, img) => sum + estimateVisualTokens(img.width, img.height), 0); + console.log(` Batch ${i + 1}: ${batch.length} page(s), ~${batchTokens} tokens`); + const markdown = await convertBatchToMarkdown(batch); + markdownParts.push(markdown); } - const fullMarkdown = markdownPages.join('\n\n'); + const fullMarkdown = markdownParts.join('\n\n'); console.log(` [${docName}] Complete: ${fullMarkdown.length} chars total`); return fullMarkdown; } @@ -161,25 +240,6 @@ async function ensureExtractionModel(): Promise { const models = data.models || []; if (models.some((m: { name: string }) => m.name === EXTRACTION_MODEL)) { console.log(` [Ollama] Model available: ${EXTRACTION_MODEL}`); - - // Warmup: send a simple request to ensure model is loaded - console.log(` [Ollama] Warming up model...`); - const warmupResponse = await fetch(`${OLLAMA_URL}/api/chat`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - model: EXTRACTION_MODEL, - messages: [{ role: 'user', content: 'Return: [{"test": 1}]' }], - stream: false, - }), - signal: AbortSignal.timeout(120000), - }); - - if (warmupResponse.ok) { - const warmupData = await warmupResponse.json(); - console.log(` [Ollama] Warmup complete (${warmupData.message?.content?.length || 0} chars)`); - } - return true; } } @@ -201,22 +261,24 @@ async function ensureExtractionModel(): Promise { * Extract transactions from markdown using GPT-OSS 20B (streaming) */ async function extractTransactionsFromMarkdown(markdown: string, queryId: string): Promise { - console.log(` [${queryId}] Sending to ${EXTRACTION_MODEL}...`); - console.log(` [${queryId}] Markdown length: ${markdown.length}`); const startTime = Date.now(); - const fullPrompt = JSON_EXTRACTION_PROMPT + markdown; - console.log(` [${queryId}] Prompt preview: ${fullPrompt.substring(0, 200)}...`); + + // Log exact prompt + console.log(`\n [${queryId}] ===== PROMPT =====`); + console.log(fullPrompt); + console.log(` [${queryId}] ===== END PROMPT (${fullPrompt.length} chars) =====\n`); const response = await fetch(`${OLLAMA_URL}/api/chat`, { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ model: EXTRACTION_MODEL, - messages: [{ - role: 'user', - content: fullPrompt, - }], + messages: [ + { role: 'user', content: 'Hi there, how are you?' }, + { role: 'assistant', content: 'Good, how can I help you today?' 
}, + { role: 'user', content: fullPrompt }, + ], stream: true, }), signal: AbortSignal.timeout(600000), // 10 minute timeout @@ -228,35 +290,59 @@ async function extractTransactionsFromMarkdown(markdown: string, queryId: string throw new Error(`Ollama API error: ${response.status}`); } - // Stream the response and log to console + // Stream the response let content = ''; + let thinkingContent = ''; + let thinkingStarted = false; + let outputStarted = false; const reader = response.body!.getReader(); const decoder = new TextDecoder(); - process.stdout.write(` [${queryId}] `); + try { + while (true) { + const { done, value } = await reader.read(); + if (done) break; - while (true) { - const { done, value } = await reader.read(); - if (done) break; + const chunk = decoder.decode(value, { stream: true }); - const chunk = decoder.decode(value, { stream: true }); - // Each line is a JSON object - for (const line of chunk.split('\n').filter(l => l.trim())) { - try { - const json = JSON.parse(line); - const token = json.message?.content || ''; - if (token) { - process.stdout.write(token); - content += token; + // Each line is a JSON object + for (const line of chunk.split('\n').filter(l => l.trim())) { + try { + const json = JSON.parse(line); + + // Stream thinking tokens + const thinking = json.message?.thinking || ''; + if (thinking) { + if (!thinkingStarted) { + process.stdout.write(` [${queryId}] THINKING: `); + thinkingStarted = true; + } + process.stdout.write(thinking); + thinkingContent += thinking; + } + + // Stream content tokens + const token = json.message?.content || ''; + if (token) { + if (!outputStarted) { + if (thinkingStarted) process.stdout.write('\n'); + process.stdout.write(` [${queryId}] OUTPUT: `); + outputStarted = true; + } + process.stdout.write(token); + content += token; + } + } catch { + // Ignore parse errors for partial chunks } - } catch { - // Ignore parse errors for partial chunks } } + } finally { + if (thinkingStarted || outputStarted) process.stdout.write('\n'); } const elapsed = ((Date.now() - startTime) / 1000).toFixed(1); - console.log(`\n [${queryId}] Done: ${content.length} chars (${elapsed}s)`); + console.log(` [${queryId}] Done: ${thinkingContent.length} thinking chars, ${content.length} output chars (${elapsed}s)`); return parseJsonResponse(content, queryId); } diff --git a/test/test.invoices.nanonets.ts b/test/test.invoices.nanonets.ts index aba6b35..f9f7fdd 100644 --- a/test/test.invoices.nanonets.ts +++ b/test/test.invoices.nanonets.ts @@ -1,8 +1,8 @@ /** - * Invoice extraction using Nanonets-OCR-s + Qwen3 (sequential two-stage pipeline) + * Invoice extraction using Nanonets-OCR-s + GPT-OSS 20B (sequential two-stage pipeline) * * Stage 1: Nanonets-OCR-s converts ALL document pages to markdown (stop after completion) - * Stage 2: Qwen3 extracts structured JSON from saved markdown (after Nanonets stops) + * Stage 2: GPT-OSS 20B extracts structured JSON from saved markdown (after Nanonets stops) * * This approach avoids GPU contention by running services sequentially. 
*/ @@ -17,7 +17,7 @@ const NANONETS_URL = 'http://localhost:8000/v1'; const NANONETS_MODEL = 'nanonets/Nanonets-OCR-s'; const OLLAMA_URL = 'http://localhost:11434'; -const QWEN_MODEL = 'qwen3:8b'; +const EXTRACTION_MODEL = 'gpt-oss:20b'; // Temp directory for storing markdown between stages const TEMP_MD_DIR = path.join(os.tmpdir(), 'nanonets-invoices-markdown'); @@ -32,6 +32,13 @@ interface IInvoice { total_amount: number; } +interface IImageData { + base64: string; + width: number; + height: number; + pageNum: number; +} + interface ITestCase { name: string; pdfPath: string; @@ -47,7 +54,7 @@ If there is an image in the document and image caption is not present, add a sma Watermarks should be wrapped in brackets. Ex: OFFICIAL COPY. Page numbers should be wrapped in brackets. Ex: 14.`; -// JSON extraction prompt for Qwen3 +// JSON extraction prompt for GPT-OSS 20B const JSON_EXTRACTION_PROMPT = `You are an invoice data extractor. Below is an invoice document converted to text/markdown. Extract the key invoice fields as JSON. IMPORTANT RULES: @@ -73,12 +80,48 @@ Return ONLY this JSON format, no explanation: INVOICE TEXT: `; +// Constants for smart batching +const MAX_VISUAL_TOKENS = 28000; // ~32K context minus prompt/output headroom +const PATCH_SIZE = 14; // Qwen2.5-VL uses 14x14 patches + /** - * Convert PDF to PNG images + * Estimate visual tokens for an image based on dimensions */ -function convertPdfToImages(pdfPath: string): string[] { +function estimateVisualTokens(width: number, height: number): number { + return Math.ceil((width * height) / (PATCH_SIZE * PATCH_SIZE)); +} + +/** + * Batch images to fit within context window + */ +function batchImages(images: IImageData[]): IImageData[][] { + const batches: IImageData[][] = []; + let currentBatch: IImageData[] = []; + let currentTokens = 0; + + for (const img of images) { + const imgTokens = estimateVisualTokens(img.width, img.height); + + if (currentTokens + imgTokens > MAX_VISUAL_TOKENS && currentBatch.length > 0) { + batches.push(currentBatch); + currentBatch = [img]; + currentTokens = imgTokens; + } else { + currentBatch.push(img); + currentTokens += imgTokens; + } + } + if (currentBatch.length > 0) batches.push(currentBatch); + + return batches; +} + +/** + * Convert PDF to JPEG images using ImageMagick with dimension tracking + */ +function convertPdfToImages(pdfPath: string): IImageData[] { const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-')); - const outputPattern = path.join(tempDir, 'page-%d.png'); + const outputPattern = path.join(tempDir, 'page-%d.jpg'); try { execSync( @@ -86,13 +129,24 @@ function convertPdfToImages(pdfPath: string): string[] { { stdio: 'pipe' } ); - const files = fs.readdirSync(tempDir).filter((f) => f.endsWith('.png')).sort(); - const images: string[] = []; + const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.jpg')).sort(); + const images: IImageData[] = []; - for (const file of files) { + for (let i = 0; i < files.length; i++) { + const file = files[i]; const imagePath = path.join(tempDir, file); const imageData = fs.readFileSync(imagePath); - images.push(imageData.toString('base64')); + + // Get image dimensions using identify command + const dimensions = execSync(`identify -format "%w %h" "${imagePath}"`, { encoding: 'utf-8' }).trim(); + const [width, height] = dimensions.split(' ').map(Number); + + images.push({ + base64: imageData.toString('base64'), + width, + height, + pageNum: i + 1, + }); } return images; @@ -102,10 +156,28 @@ function 
convertPdfToImages(pdfPath: string): string[] { } /** - * Convert a single page to markdown using Nanonets-OCR-s + * Convert a batch of pages to markdown using Nanonets-OCR-s */ -async function convertPageToMarkdown(image: string, pageNum: number): Promise { +async function convertBatchToMarkdown(batch: IImageData[]): Promise { const startTime = Date.now(); + const pageNums = batch.map(img => img.pageNum).join(', '); + + // Build content array with all images first, then the prompt + const content: Array<{ type: string; image_url?: { url: string }; text?: string }> = []; + + for (const img of batch) { + content.push({ + type: 'image_url', + image_url: { url: `data:image/jpeg;base64,${img.base64}` }, + }); + } + + // Add prompt with page separator instruction if multiple pages + const promptText = batch.length > 1 + ? `${NANONETS_OCR_PROMPT}\n\nPlease clearly separate each page's content with "--- PAGE N ---" markers, where N is the page number starting from ${batch[0].pageNum}.` + : NANONETS_OCR_PROMPT; + + content.push({ type: 'text', text: promptText }); const response = await fetch(`${NANONETS_URL}/chat/completions`, { method: 'POST', @@ -117,12 +189,9 @@ async function convertPageToMarkdown(image: string, pageNum: number): Promise { - console.log(` [${docName}] Converting ${images.length} page(s)...`); +async function convertDocumentToMarkdown(images: IImageData[], docName: string): Promise { + const batches = batchImages(images); + console.log(` [${docName}] Processing ${images.length} page(s) in ${batches.length} batch(es)...`); - const markdownPages: string[] = []; + const markdownParts: string[] = []; - for (let i = 0; i < images.length; i++) { - const markdown = await convertPageToMarkdown(images[i], i + 1); - markdownPages.push(`--- PAGE ${i + 1} ---\n${markdown}`); + for (let i = 0; i < batches.length; i++) { + const batch = batches[i]; + const batchTokens = batch.reduce((sum, img) => sum + estimateVisualTokens(img.width, img.height), 0); + console.log(` Batch ${i + 1}: ${batch.length} page(s), ~${batchTokens} tokens`); + const markdown = await convertBatchToMarkdown(batch); + markdownParts.push(markdown); } - const fullMarkdown = markdownPages.join('\n\n'); + const fullMarkdown = markdownParts.join('\n\n'); console.log(` [${docName}] Complete: ${fullMarkdown.length} chars total`); return fullMarkdown; } @@ -173,16 +252,16 @@ function stopNanonets(): void { } /** - * Ensure Qwen3 model is available + * Ensure GPT-OSS 20B model is available */ -async function ensureQwen3(): Promise { +async function ensureExtractionModel(): Promise { try { const response = await fetch(`${OLLAMA_URL}/api/tags`); if (response.ok) { const data = await response.json(); const models = data.models || []; - if (models.some((m: { name: string }) => m.name === QWEN_MODEL)) { - console.log(` [Ollama] Model available: ${QWEN_MODEL}`); + if (models.some((m: { name: string }) => m.name === EXTRACTION_MODEL)) { + console.log(` [Ollama] Model available: ${EXTRACTION_MODEL}`); return true; } } @@ -190,11 +269,11 @@ async function ensureQwen3(): Promise { return false; } - console.log(` [Ollama] Pulling ${QWEN_MODEL}...`); + console.log(` [Ollama] Pulling ${EXTRACTION_MODEL}...`); const pullResponse = await fetch(`${OLLAMA_URL}/api/pull`, { method: 'POST', headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ name: QWEN_MODEL, stream: false }), + body: JSON.stringify({ name: EXTRACTION_MODEL, stream: false }), }); return pullResponse.ok; @@ -303,97 +382,114 @@ function 
parseJsonToInvoice(response: string): IInvoice | null { } /** - * Extract invoice from markdown using Qwen3 + * Extract invoice from markdown using GPT-OSS 20B (streaming) */ async function extractInvoiceFromMarkdown(markdown: string, queryId: string): Promise { - console.log(` [${queryId}] Sending to ${QWEN_MODEL}...`); const startTime = Date.now(); + const fullPrompt = JSON_EXTRACTION_PROMPT + markdown; + + // Log exact prompt + console.log(`\n [${queryId}] ===== PROMPT =====`); + console.log(fullPrompt); + console.log(` [${queryId}] ===== END PROMPT (${fullPrompt.length} chars) =====\n`); const response = await fetch(`${OLLAMA_URL}/api/chat`, { method: 'POST', headers: { 'Content-Type': 'application/json' }, - signal: AbortSignal.timeout(600000), // 10 minute timeout for large documents body: JSON.stringify({ - model: QWEN_MODEL, - messages: [{ - role: 'user', - content: JSON_EXTRACTION_PROMPT + markdown, - }], - stream: false, - options: { - num_predict: 2000, - temperature: 0.1, - }, + model: EXTRACTION_MODEL, + messages: [ + { role: 'user', content: 'Hi there, how are you?' }, + { role: 'assistant', content: 'Good, how can I help you today?' }, + { role: 'user', content: fullPrompt }, + ], + stream: true, }), + signal: AbortSignal.timeout(600000), // 10 minute timeout for large documents }); - const elapsed = ((Date.now() - startTime) / 1000).toFixed(1); - if (!response.ok) { + const elapsed = ((Date.now() - startTime) / 1000).toFixed(1); console.log(` [${queryId}] ERROR: ${response.status} (${elapsed}s)`); throw new Error(`Ollama API error: ${response.status}`); } - const data = await response.json(); - const content = (data.message?.content || '').trim(); - console.log(` [${queryId}] Response: ${content.length} chars (${elapsed}s)`); + // Stream the response + let content = ''; + let thinkingContent = ''; + let thinkingStarted = false; + let outputStarted = false; + const reader = response.body!.getReader(); + const decoder = new TextDecoder(); + + try { + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + const chunk = decoder.decode(value, { stream: true }); + + // Each line is a JSON object + for (const line of chunk.split('\n').filter(l => l.trim())) { + try { + const json = JSON.parse(line); + + // Stream thinking tokens + const thinking = json.message?.thinking || ''; + if (thinking) { + if (!thinkingStarted) { + process.stdout.write(` [${queryId}] THINKING: `); + thinkingStarted = true; + } + process.stdout.write(thinking); + thinkingContent += thinking; + } + + // Stream content tokens + const token = json.message?.content || ''; + if (token) { + if (!outputStarted) { + if (thinkingStarted) process.stdout.write('\n'); + process.stdout.write(` [${queryId}] OUTPUT: `); + outputStarted = true; + } + process.stdout.write(token); + content += token; + } + } catch { + // Ignore parse errors for partial chunks + } + } + } + } finally { + if (thinkingStarted || outputStarted) process.stdout.write('\n'); + } + + const elapsed = ((Date.now() - startTime) / 1000).toFixed(1); + console.log(` [${queryId}] Done: ${thinkingContent.length} thinking chars, ${content.length} output chars (${elapsed}s)`); return parseJsonToInvoice(content); } /** - * Compare two invoices for consensus + * Extract invoice (single pass - GPT-OSS is more reliable) */ -function invoicesMatch(a: IInvoice, b: IInvoice): boolean { - const numMatch = a.invoice_number.toLowerCase() === b.invoice_number.toLowerCase(); - const dateMatch = a.invoice_date === b.invoice_date; - const 
totalMatch = Math.abs(a.total_amount - b.total_amount) < 0.02; - return numMatch && dateMatch && totalMatch; -} - -/** - * Extract with consensus - */ -async function extractWithConsensus(markdown: string, docName: string): Promise { - const MAX_ATTEMPTS = 3; - - for (let attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) { - console.log(` [${docName}] Attempt ${attempt}/${MAX_ATTEMPTS}`); - - const inv1 = await extractInvoiceFromMarkdown(markdown, `${docName}-A${attempt}Q1`); - const inv2 = await extractInvoiceFromMarkdown(markdown, `${docName}-A${attempt}Q2`); - - if (!inv1 || !inv2) { - console.log(` [${docName}] Parsing failed, retrying...`); - continue; - } - - console.log(` [${docName}] Q1: ${inv1.invoice_number} | ${inv1.invoice_date} | ${inv1.total_amount}`); - console.log(` [${docName}] Q2: ${inv2.invoice_number} | ${inv2.invoice_date} | ${inv2.total_amount}`); - - if (invoicesMatch(inv1, inv2)) { - console.log(` [${docName}] CONSENSUS`); - return inv2; - } - console.log(` [${docName}] No consensus`); +async function extractInvoice(markdown: string, docName: string): Promise { + console.log(` [${docName}] Extracting...`); + const invoice = await extractInvoiceFromMarkdown(markdown, docName); + if (!invoice) { + return { + invoice_number: '', + invoice_date: '', + vendor_name: '', + currency: 'EUR', + net_amount: 0, + vat_amount: 0, + total_amount: 0, + }; } - - // Fallback - const fallback = await extractInvoiceFromMarkdown(markdown, `${docName}-FALLBACK`); - if (fallback) { - console.log(` [${docName}] FALLBACK: ${fallback.invoice_number} | ${fallback.invoice_date} | ${fallback.total_amount}`); - return fallback; - } - - return { - invoice_number: '', - invoice_date: '', - vendor_name: '', - currency: 'EUR', - net_amount: 0, - vat_amount: 0, - total_amount: 0, - }; + console.log(` [${docName}] Extracted: ${invoice.invoice_number}`); + return invoice; } /** @@ -520,16 +616,16 @@ tap.test('Stage 1: Stop Nanonets', async () => { expect(isContainerRunning('nanonets-test')).toBeFalse(); }); -// -------- STAGE 2: Extraction with Qwen3 -------- +// -------- STAGE 2: Extraction with GPT-OSS 20B -------- -tap.test('Stage 2: Setup Ollama + Qwen3', async () => { - console.log('\n========== STAGE 2: Qwen3 Extraction ==========\n'); +tap.test('Stage 2: Setup Ollama + GPT-OSS 20B', async () => { + console.log('\n========== STAGE 2: GPT-OSS 20B Extraction ==========\n'); const ollamaOk = await ensureMiniCpm(); expect(ollamaOk).toBeTrue(); - const qwenOk = await ensureQwen3(); - expect(qwenOk).toBeTrue(); + const extractionOk = await ensureExtractionModel(); + expect(extractionOk).toBeTrue(); }); let passedCount = 0; @@ -551,7 +647,7 @@ for (const tc of testCases) { const markdown = fs.readFileSync(mdPath, 'utf-8'); console.log(` Markdown: ${markdown.length} chars`); - const extracted = await extractWithConsensus(markdown, tc.name); + const extracted = await extractInvoice(markdown, tc.name); const elapsedMs = Date.now() - startTime; processingTimes.push(elapsedMs); @@ -580,10 +676,10 @@ tap.test('Summary', async () => { const avgTimeSec = processingTimes.length > 0 ? 
totalTimeMs / processingTimes.length / 1000 : 0; console.log(`\n========================================`); - console.log(` Invoice Summary (Nanonets + Qwen3)`); + console.log(` Invoice Summary (Nanonets + GPT-OSS 20B)`); console.log(`========================================`); console.log(` Stage 1: Nanonets-OCR-s (doc -> md)`); - console.log(` Stage 2: Qwen3 8B (md -> JSON)`); + console.log(` Stage 2: GPT-OSS 20B (md -> JSON)`); console.log(` Passed: ${passedCount}/${totalInvoices}`); console.log(` Failed: ${failedCount}/${totalInvoices}`); console.log(` Accuracy: ${accuracy.toFixed(1)}%`);