2 Commits

| SHA1 | Message | Date |
|------|---------|------|
| b316d98f24 | v1.8.0 | 2026-01-18 00:11:17 +00:00 |
| f0d88fcbe0 | feat(paddleocr-vl): add structured HTML output and table parsing for PaddleOCR-VL, update API, tests, and README | 2026-01-18 00:11:17 +00:00 |

CI checks for b316d98f24 (some checks failed):
- Docker (tags) / security (push): successful in 31s
- Docker (tags) / test (push): failing after 41s
- Docker (tags) / release (push): skipped
- Docker (tags) / metadata (push): skipped
5 changed files with 487 additions and 83 deletions


@@ -1,5 +1,14 @@
# Changelog
## 2026-01-18 - 1.8.0 - feat(paddleocr-vl)
add structured HTML output and table parsing for PaddleOCR-VL, update API, tests, and README
- Add result_to_html(), parse_markdown_table(), and parse_paddleocr_table() to emit semantic HTML and convert OCR/markdown tables to proper <table> elements
- Enhance result_to_markdown() with positional/type hints (header/footer/title/table/figure) to improve downstream LLM processing
- Expose 'html' in supported formats and handle output_format='html' in parse endpoints and CLI flow
- Update tests to request HTML output and extract invoice fields from structured HTML (test/test.invoices.paddleocr-vl.ts)
- Refresh README with usage, new images/tags, architecture notes, and troubleshooting for the updated pipeline
## 2026-01-17 - 1.7.1 - fix(docker)
standardize Dockerfile and entrypoint filenames; add GPU-specific Dockerfiles and update build and test references


@@ -10,6 +10,7 @@ Provides REST API for document parsing using:
import os
import io
import re
import base64
import logging
import tempfile
@@ -261,23 +262,210 @@ def process_document(image: Image.Image) -> dict:
def result_to_markdown(result: dict) -> str:
"""Convert result to Markdown format"""
"""Convert result to Markdown format with structural hints for LLM processing.
Adds positional and type-based formatting to help downstream LLMs
understand document structure:
- Tables are marked with **[TABLE]** prefix
- Header zone content (top 15%) is bolded
- Footer zone content (bottom 15%) is separated with horizontal rule
- Titles are formatted as # headers
- Figures/charts are marked with *[Figure: ...]*
"""
lines = []
image_height = result.get("image_size", [0, 1000])[1]
for block in result.get("blocks", []):
block_type = block.get("type", "text")
content = block.get("content", "")
block_type = block.get("type", "text").lower()
content = block.get("content", "").strip()
bbox = block.get("bbox", [])
if "table" in block_type.lower():
lines.append(f"\n{content}\n")
elif "formula" in block_type.lower():
if not content:
continue
# Determine position zone (top 15%, middle, bottom 15%)
y_pos = bbox[1] if bbox and len(bbox) > 1 else 0
y_end = bbox[3] if bbox and len(bbox) > 3 else y_pos
is_header_zone = y_pos < image_height * 0.15
is_footer_zone = y_end > image_height * 0.85
# Format based on type and position
if "table" in block_type:
lines.append(f"\n**[TABLE]**\n{content}\n")
elif "title" in block_type:
lines.append(f"# {content}")
elif "formula" in block_type or "math" in block_type:
lines.append(f"\n$$\n{content}\n$$\n")
elif "figure" in block_type or "chart" in block_type:
lines.append(f"*[Figure: {content}]*")
elif is_header_zone:
lines.append(f"**{content}**")
elif is_footer_zone:
lines.append(f"---\n{content}")
else:
lines.append(content)
return "\n\n".join(lines)
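As an editorial illustration, here is a minimal sketch of how `result_to_markdown()` behaves, using a hand-built result dict whose shape mirrors the fields read above (all sample values are invented):

```python
# Hypothetical input; the keys (image_size, blocks, type, content, bbox)
# mirror what result_to_markdown() reads above.
sample = {
    "image_size": [800, 1000],
    "blocks": [
        {"type": "title", "content": "Invoice #42", "bbox": [40, 30, 400, 60]},
        {"type": "text", "content": "ACME Corp", "bbox": [40, 80, 200, 100]},
        {"type": "table", "bbox": [40, 400, 700, 600],
         "content": "| Item | Price |\n|---|---|\n| Widget | 9.99 |"},
        {"type": "text", "content": "Thank you!", "bbox": [40, 900, 300, 950]},
    ],
}
print(result_to_markdown(sample))
# -> "# Invoice #42"            (title block)
# -> "**ACME Corp**"            (header zone: y=80 is within the top 15% of 1000)
# -> "**[TABLE]**" + the table  (table block)
# -> "---\nThank you!"          (footer zone: y_end=950 is past 85% of 1000)
```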
def parse_markdown_table(content: str) -> str:
"""Convert table content to HTML table.
Handles:
- PaddleOCR-VL format: <fcel>cell<lcel>cell<nl> (detected by <fcel> tags)
- Pipe-delimited tables: | Header | Header |
- Separator rows: |---|---|
- Returns HTML <table> structure
"""
content_stripped = content.strip()
# Check for PaddleOCR-VL table format (<fcel>, <lcel>, <ecel>, <nl>)
if '<fcel>' in content_stripped or '<nl>' in content_stripped:
return parse_paddleocr_table(content_stripped)
lines = content_stripped.split('\n')
if not lines:
return f'<pre>{content}</pre>'
# Check if it looks like a markdown table
if not any('|' in line for line in lines):
return f'<pre>{content}</pre>'
html_rows = []
is_header = True
for line in lines:
line = line.strip()
if not line or '|' not in line:
continue
# Skip separator rows (|---|---|)
if re.match(r'^[\|\s\-:]+$', line):
is_header = False
continue
# Parse cells
cells = [c.strip() for c in line.strip('|').split('|')]  # trim edge pipes, keep interior empty cells
if is_header:
row = '<tr>' + ''.join(f'<th>{c}</th>' for c in cells) + '</tr>'
html_rows.append(f'<thead>{row}</thead>')
is_header = False
else:
row = '<tr>' + ''.join(f'<td>{c}</td>' for c in cells) + '</tr>'
html_rows.append(row)
if html_rows:
# Wrap body rows in tbody
header = html_rows[0] if '<thead>' in html_rows[0] else ''
body_rows = [r for r in html_rows if '<thead>' not in r]
body = f'<tbody>{"".join(body_rows)}</tbody>' if body_rows else ''
return f'<table>{header}{body}</table>'
return f'<pre>{content}</pre>'
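A quick sketch of the round trip for the pipe-delimited path (sample table invented):

```python
md = "| Name | Qty |\n|------|-----|\n| Bolt | 12 |"
print(parse_markdown_table(md))
# -> <table><thead><tr><th>Name</th><th>Qty</th></tr></thead>
#           <tbody><tr><td>Bolt</td><td>12</td></tr></tbody></table>
```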
def parse_paddleocr_table(content: str) -> str:
"""Convert PaddleOCR-VL table format to HTML table.
PaddleOCR-VL uses:
- <fcel> = first cell in a row
- <lcel> = subsequent cells
- <ecel> = empty cell
- <nl> = row separator (newline)
Example input:
<fcel>Header1<lcel>Header2<nl><fcel>Value1<lcel>Value2<nl>
"""
# Split into rows by <nl>
rows_raw = re.split(r'<nl>', content)
html_rows = []
is_first_row = True
for row_content in rows_raw:
row_content = row_content.strip()
if not row_content:
continue
# Extract cells: split by <fcel>, <lcel>, or <ecel>
# Each cell is the text between these markers
cells = []
# Pattern to match cell markers and capture content
# Content is everything between markers
parts = re.split(r'<fcel>|<lcel>|<ecel>', row_content)
for part in parts:
part = part.strip()
if part:
cells.append(part)
if not cells:
continue
# First row is header
if is_first_row:
row_html = '<tr>' + ''.join(f'<th>{c}</th>' for c in cells) + '</tr>'
html_rows.append(f'<thead>{row_html}</thead>')
is_first_row = False
else:
row_html = '<tr>' + ''.join(f'<td>{c}</td>' for c in cells) + '</tr>'
html_rows.append(row_html)
if html_rows:
header = html_rows[0] if '<thead>' in html_rows[0] else ''
body_rows = [r for r in html_rows if '<thead>' not in r]
body = f'<tbody>{"".join(body_rows)}</tbody>' if body_rows else ''
return f'<table>{header}{body}</table>'
return f'<pre>{content}</pre>'
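And the same for the PaddleOCR-VL marker format, reusing the example from the docstring:

```python
raw = "<fcel>Header1<lcel>Header2<nl><fcel>Value1<lcel>Value2<nl>"
print(parse_paddleocr_table(raw))
# -> <table><thead><tr><th>Header1</th><th>Header2</th></tr></thead>
#           <tbody><tr><td>Value1</td><td>Value2</td></tr></tbody></table>
```

Note that `<ecel>` (empty) cells are dropped by the `if part:` filter above, so sparse rows can lose column alignment; emitting an empty `<td></td>` for them would be a possible follow-up.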
def result_to_html(result: dict) -> str:
"""Convert result to semantic HTML for optimal LLM processing.
Uses semantic HTML5 tags with position metadata as data-* attributes.
Markdown tables are converted to proper HTML <table> tags for
unambiguous parsing by downstream LLMs.
"""
parts = []
image_height = result.get("image_size", [0, 1000])[1]
parts.append('<!DOCTYPE html><html><body>')
for block in result.get("blocks", []):
block_type = block.get("type", "text").lower()
content = block.get("content", "").strip()
bbox = block.get("bbox", [])
if not content:
continue
# Position metadata
y_pos = bbox[1] / image_height if bbox and len(bbox) > 1 else 0
data_attrs = f'data-type="{block_type}" data-y="{y_pos:.2f}"'
# Format based on type
if "table" in block_type:
table_html = parse_markdown_table(content)
parts.append(f'<section {data_attrs} class="table-region">{table_html}</section>')
elif "title" in block_type:
parts.append(f'<h1 {data_attrs}>{content}</h1>')
elif "formula" in block_type or "math" in block_type:
parts.append(f'<div {data_attrs} class="formula"><code>{content}</code></div>')
elif "figure" in block_type or "chart" in block_type:
parts.append(f'<figure {data_attrs}><figcaption>{content}</figcaption></figure>')
elif y_pos < 0.15:
parts.append(f'<header {data_attrs}><strong>{content}</strong></header>')
elif y_pos > 0.85:
parts.append(f'<footer {data_attrs}>{content}</footer>')
else:
parts.append(f'<p {data_attrs}>{content}</p>')
parts.append('</body></html>')
return '\n'.join(parts)
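Putting the pieces together, the HTML output for the hypothetical `sample` dict from the earlier sketch would look roughly like this:

```python
print(result_to_html(sample))
# <!DOCTYPE html><html><body>
# <h1 data-type="title" data-y="0.03">Invoice #42</h1>
# <header data-type="text" data-y="0.08"><strong>ACME Corp</strong></header>
# <section data-type="table" data-y="0.40" class="table-region"><table>...</table></section>
# <footer data-type="text" data-y="0.90">Thank you!</footer>
# </body></html>
```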
# Request/Response models
class ParseRequest(BaseModel):
image: str # base64 encoded image
@@ -331,7 +519,7 @@ async def health_check():
async def supported_formats():
"""List supported output formats"""
return {
"output_formats": ["json", "markdown"],
"output_formats": ["json", "markdown", "html"],
"image_formats": ["PNG", "JPEG", "WebP", "BMP", "GIF", "TIFF"],
"capabilities": [
"Layout detection (PP-DocLayoutV2)",
@@ -356,6 +544,9 @@ async def parse_document_endpoint(request: ParseRequest):
if request.output_format == "markdown":
markdown = result_to_markdown(result)
output = {"markdown": markdown}
elif request.output_format == "html":
html = result_to_html(result)
output = {"html": html}
else:
output = result
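For reference, a minimal Python client for the updated endpoint; the `/parse` path and the request/response fields are taken from the test file further below, while the file name and port are assumptions based on the README:

```python
import base64
import requests

with open("invoice.png", "rb") as f:  # hypothetical sample document
    image_b64 = base64.b64encode(f.read()).decode()

resp = requests.post(
    "http://localhost:8000/parse",
    json={"image": image_b64, "output_format": "html"},
    timeout=120,
)
resp.raise_for_status()
html = resp.json()["result"]["html"]  # mirrors the test's data.result?.html
```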
@@ -408,6 +599,8 @@ async def chat_completions(request: dict):
if output_format == "markdown":
content = result_to_markdown(result)
elif output_format == "html":
content = result_to_html(result)
else:
content = json.dumps(result, ensure_ascii=False, indent=2)


@@ -1,6 +1,6 @@
{
"name": "@host.today/ht-docker-ai",
"version": "1.7.1",
"version": "1.8.0",
"type": "module",
"private": false,
"description": "Docker images for AI vision-language models including MiniCPM-V 4.5",

readme.md

@@ -1,23 +1,40 @@
# @host.today/ht-docker-ai
# @host.today/ht-docker-ai 🚀
Docker images for AI vision-language models, starting with MiniCPM-V 4.5.
Production-ready Docker images for state-of-the-art AI Vision-Language Models. Run powerful multimodal AI locally with GPU acceleration or CPU fallback—no cloud API keys required.
## Overview
## Issue Reporting and Security
This project provides ready-to-use Docker containers for running state-of-the-art AI vision-language models. Built on Ollama for simplified model management and a consistent REST API.
For reporting bugs, issues, or security vulnerabilities, please visit [community.foss.global/](https://community.foss.global/). This is the central community hub for all issue reporting. Developers who sign and comply with our contribution agreement and go through identification can also get a [code.foss.global/](https://code.foss.global/) account to submit Pull Requests directly.
## Available Images
## 🎯 What's Included
| Tag | Description | Requirements |
|-----|-------------|--------------|
| `minicpm45v` | MiniCPM-V 4.5 with GPU support | NVIDIA GPU, 9-18GB VRAM |
| `minicpm45v-cpu` | MiniCPM-V 4.5 CPU-only | 8GB+ RAM |
| `latest` | Alias for `minicpm45v` | NVIDIA GPU |
| Model | Parameters | Best For | API |
|-------|-----------|----------|-----|
| **MiniCPM-V 4.5** | 8B | General vision understanding, image analysis, multi-image | Ollama-compatible |
| **PaddleOCR-VL** | 0.9B | Document parsing, table extraction, OCR | OpenAI-compatible |
## Quick Start
## 📦 Available Images
### GPU (Recommended)
```
code.foss.global/host.today/ht-docker-ai:<tag>
```
| Tag | Model | Hardware | Port |
|-----|-------|----------|------|
| `minicpm45v` / `latest` | MiniCPM-V 4.5 | NVIDIA GPU (9-18GB VRAM) | 11434 |
| `minicpm45v-cpu` | MiniCPM-V 4.5 | CPU only (8GB+ RAM) | 11434 |
| `paddleocr-vl` / `paddleocr-vl-gpu` | PaddleOCR-VL | NVIDIA GPU | 8000 |
| `paddleocr-vl-cpu` | PaddleOCR-VL | CPU only | 8000 |
---
## 🖼️ MiniCPM-V 4.5
A GPT-4o level multimodal LLM from OpenBMB—handles image understanding, OCR, multi-image analysis, and visual reasoning across 30+ languages.
### Quick Start
**GPU (Recommended):**
```bash
docker run -d \
--name minicpm \
@@ -27,8 +44,7 @@ docker run -d \
code.foss.global/host.today/ht-docker-ai:minicpm45v
```
### CPU Only
**CPU Only:**
```bash
docker run -d \
--name minicpm \
@@ -37,18 +53,16 @@ docker run -d \
code.foss.global/host.today/ht-docker-ai:minicpm45v-cpu
```
## API Usage
> 💡 **Pro tip:** Mount the volume to persist downloaded models (~5GB). Without it, models re-download on every container start.
The container exposes the Ollama API on port 11434.
### List Available Models
### API Examples
**List models:**
```bash
curl http://localhost:11434/api/tags
```
### Generate Text from Image
**Analyze an image:**
```bash
curl http://localhost:11434/api/generate -d '{
"model": "minicpm-v",
@@ -57,60 +71,128 @@ curl http://localhost:11434/api/generate -d '{
}'
```
### Chat with Vision
**Chat with vision:**
```bash
curl http://localhost:11434/api/chat -d '{
"model": "minicpm-v",
"messages": [
{
"messages": [{
"role": "user",
"content": "Describe this image in detail",
"images": ["<base64-encoded-image>"]
}
]
}]
}'
```
## Environment Variables
### Hardware Requirements
| Variable | Default | Description |
|----------|---------|-------------|
| `MODEL_NAME` | `minicpm-v` | Model to pull on startup |
| `OLLAMA_HOST` | `0.0.0.0` | Host address for API |
| `OLLAMA_ORIGINS` | `*` | Allowed CORS origins |
| Variant | VRAM/RAM | Notes |
|---------|----------|-------|
| GPU (int4 quantized) | 9GB VRAM | Recommended for most use cases |
| GPU (full precision) | 18GB VRAM | Maximum quality |
| CPU (GGUF) | 8GB+ RAM | Slower but accessible |
## Hardware Requirements
---
### GPU Variant (`minicpm45v`)
## 📄 PaddleOCR-VL
- NVIDIA GPU with CUDA support
- Minimum 9GB VRAM (int4 quantized)
- Recommended 18GB VRAM (full precision)
- NVIDIA Container Toolkit installed
A specialized 0.9B Vision-Language Model optimized for document parsing. Native support for tables, formulas, charts, and text extraction in 109 languages.
### CPU Variant (`minicpm45v-cpu`)
### Quick Start
- Minimum 8GB RAM
- Recommended 16GB+ RAM for better performance
- No GPU required
**GPU:**
```bash
docker run -d \
--name paddleocr \
--gpus all \
-p 8000:8000 \
-v hf-cache:/root/.cache/huggingface \
code.foss.global/host.today/ht-docker-ai:paddleocr-vl
```
## Model Information
**CPU:**
```bash
docker run -d \
--name paddleocr \
-p 8000:8000 \
-v hf-cache:/root/.cache/huggingface \
code.foss.global/host.today/ht-docker-ai:paddleocr-vl-cpu
```
**MiniCPM-V 4.5** is a GPT-4o level multimodal large language model developed by OpenBMB.
### OpenAI-Compatible API
- **Parameters**: 8B (Qwen3-8B + SigLIP2-400M)
- **Capabilities**: Image understanding, OCR, multi-image analysis
- **Languages**: 30+ languages including English, Chinese, French, Spanish
PaddleOCR-VL exposes a fully OpenAI-compatible `/v1/chat/completions` endpoint:
## Docker Compose Example
```bash
curl http://localhost:8000/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{
"model": "paddleocr-vl",
"messages": [{
"role": "user",
"content": [
{"type": "image_url", "image_url": {"url": "data:image/png;base64,<base64>"}},
{"type": "text", "text": "Table Recognition:"}
]
}],
"max_tokens": 8192
}'
```
### Task Prompts
| Prompt | Output | Use Case |
|--------|--------|----------|
| `OCR:` | Plain text | General text extraction |
| `Table Recognition:` | Markdown table | Invoices, bank statements, spreadsheets |
| `Formula Recognition:` | LaTeX | Math equations, scientific notation |
| `Chart Recognition:` | Description | Graphs and visualizations |
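Because the endpoint is OpenAI-compatible, the official `openai` Python SDK can be pointed at it as well; a sketch (the image file is hypothetical, and the API key is a placeholder on the assumption that the local server does not check it):

```python
import base64
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="unused")

with open("statement.png", "rb") as f:  # hypothetical document image
    b64 = base64.b64encode(f.read()).decode()

resp = client.chat.completions.create(
    model="paddleocr-vl",
    max_tokens=8192,
    messages=[{
        "role": "user",
        "content": [
            {"type": "image_url",
             "image_url": {"url": f"data:image/png;base64,{b64}"}},
            {"type": "text", "text": "Table Recognition:"},
        ],
    }],
)
print(resp.choices[0].message.content)  # Markdown table, per the prompt table above
```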
### API Endpoints
| Endpoint | Method | Description |
|----------|--------|-------------|
| `/health` | GET | Health check with model/device info |
| `/formats` | GET | Supported image formats and input methods |
| `/v1/models` | GET | List available models |
| `/v1/chat/completions` | POST | OpenAI-compatible chat completions |
| `/ocr` | POST | Legacy OCR endpoint |
### Image Input Methods
PaddleOCR-VL accepts images in multiple formats:
```javascript
// Base64 data URL
"data:image/png;base64,iVBORw0KGgo..."
// HTTP URL
"https://example.com/document.png"
// Raw base64
"iVBORw0KGgo..."
```
**Supported formats:** PNG, JPEG, WebP, BMP, GIF, TIFF
**Optimal resolution:** 1080p–2K. Images are automatically scaled for best results.
### Performance
| Mode | Speed per Page |
|------|----------------|
| GPU (CUDA) | 2–5 seconds |
| CPU | 30–60 seconds |
---
## 🐳 Docker Compose
```yaml
version: '3.8'
services:
# General vision tasks
minicpm:
image: code.foss.global/host.today/ht-docker-ai:minicpm45v
container_name: minicpm
ports:
- "11434:11434"
volumes:
@@ -124,11 +206,50 @@ services:
capabilities: [gpu]
restart: unless-stopped
# Document parsing / OCR
paddleocr:
image: code.foss.global/host.today/ht-docker-ai:paddleocr-vl
ports:
- "8000:8000"
volumes:
- hf-cache:/root/.cache/huggingface
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities: [gpu]
restart: unless-stopped
volumes:
ollama-data:
hf-cache:
```
## Building Locally
---
## ⚙️ Environment Variables
### MiniCPM-V 4.5
| Variable | Default | Description |
|----------|---------|-------------|
| `MODEL_NAME` | `minicpm-v` | Ollama model to pull on startup |
| `OLLAMA_HOST` | `0.0.0.0` | API bind address |
| `OLLAMA_ORIGINS` | `*` | Allowed CORS origins |
### PaddleOCR-VL
| Variable | Default | Description |
|----------|---------|-------------|
| `MODEL_NAME` | `PaddlePaddle/PaddleOCR-VL` | HuggingFace model ID |
| `SERVER_HOST` | `0.0.0.0` | API bind address |
| `SERVER_PORT` | `8000` | API port |
---
## 🔧 Building from Source
```bash
# Clone the repository
@@ -142,6 +263,77 @@ cd ht-docker-ai
./test-images.sh
```
## License
---
MIT - Task Venture Capital GmbH
## 🏗️ Architecture Notes
### Dual-VLM Consensus Strategy
For production document extraction, consider using both models together:
1. **Pass 1:** MiniCPM-V visual extraction (images → JSON)
2. **Pass 2:** PaddleOCR-VL table recognition (images → markdown → JSON)
3. **Consensus:** If results match → Done (fast path)
4. **Pass 3+:** Additional visual passes if needed
This dual-VLM approach catches extraction errors that single models miss.
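A minimal sketch of that loop, with `extract_minicpm` and `extract_paddleocr` as hypothetical stand-ins for calls to the two containers' APIs, and "last pass wins" as one possible fallback policy:

```python
def extract_with_consensus(image_b64: str, max_passes: int = 4) -> dict:
    """Accept a result as soon as two independent passes agree."""
    candidates = [
        extract_minicpm(image_b64),    # Pass 1: visual extraction -> JSON
        extract_paddleocr(image_b64),  # Pass 2: table recognition -> JSON
    ]
    while True:
        for i, a in enumerate(candidates):
            if any(a == b for b in candidates[i + 1:]):
                return a               # consensus reached: fast path
        if len(candidates) >= max_passes:
            return candidates[-1]      # no consensus; fall back to the last pass
        candidates.append(extract_minicpm(image_b64))  # Pass 3+: extra visual pass
```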
### Why This Works
- **Different architectures:** Two independent models cross-validate each other
- **Specialized strengths:** PaddleOCR-VL excels at tables; MiniCPM-V handles general vision
- **Native processing:** Both VLMs see original images—no intermediate HTML/structure loss
---
## 🔍 Troubleshooting
### Model download hangs
```bash
docker logs -f <container-name>
```
Model downloads can take several minutes (~5GB for MiniCPM-V).
### Out of memory
- **GPU:** Use the CPU variant or upgrade VRAM
- **CPU:** Increase container memory: `--memory=16g`
### API not responding
1. Check container health: `docker ps`
2. Review logs: `docker logs <container>`
3. Verify port: `curl localhost:11434/api/tags` or `curl localhost:8000/health`
### Enable NVIDIA GPU support on host
```bash
# Install NVIDIA Container Toolkit
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg
curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \
sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit
sudo nvidia-ctk runtime configure --runtime=docker
sudo systemctl restart docker
```
---
## License and Legal Information
This repository contains open-source code licensed under the MIT License. A copy of the license can be found in the [LICENSE](./LICENSE) file.
**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.
### Trademarks
This project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH or third parties, and are not included within the scope of the MIT license granted herein.
Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines or the guidelines of the respective third-party owners, and any usage must be approved in writing. Third-party trademarks used herein are the property of their respective owners and used only in a descriptive manner, e.g. for an implementation of an API or similar.
### Company Information
Task Venture Capital GmbH
Registered at District Court Bremen HRB 35230 HB, Germany
For any legal inquiries or further information, please contact us via email at hello@task.vc.
By using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.


@@ -4,11 +4,13 @@
* This tests the complete PaddleOCR-VL pipeline:
* 1. PP-DocLayoutV2 for layout detection
* 2. PaddleOCR-VL for recognition
* 3. Structured Markdown output
* 4. MiniCPM extracts invoice fields from structured Markdown
* 3. Structured HTML output (semantic tags with proper tables)
* 4. Qwen2.5 extracts invoice fields from structured HTML
*
* The structured Markdown has proper tables and formatting,
* making it much easier for MiniCPM to extract invoice data.
* HTML output is used instead of Markdown because:
* - <table> tags are unambiguous (no parser variations)
* - LLMs are heavily trained on web/HTML data
* - Semantic tags (header, footer, section) provide clear structure
*/
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
@@ -61,7 +63,7 @@ function convertPdfToImages(pdfPath: string): string[] {
}
/**
* Parse document using PaddleOCR-VL Full Pipeline (returns structured Markdown)
* Parse document using PaddleOCR-VL Full Pipeline (returns structured HTML)
*/
async function parseDocument(imageBase64: string): Promise<string> {
const response = await fetch(`${PADDLEOCR_VL_URL}/parse`, {
@@ -69,7 +71,7 @@ async function parseDocument(imageBase64: string): Promise<string> {
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
image: imageBase64,
output_format: 'markdown',
output_format: 'html',
}),
});
@@ -84,18 +86,25 @@ async function parseDocument(imageBase64: string): Promise<string> {
throw new Error(`PaddleOCR-VL error: ${data.error}`);
}
return data.result?.markdown || '';
return data.result?.html || '';
}
/**
* Extract invoice fields from structured Markdown using Qwen2.5 (text-only model)
* Extract invoice fields from structured HTML using Qwen2.5 (text-only model)
*/
async function extractInvoiceFromMarkdown(markdown: string): Promise<IInvoice> {
// Truncate if too long
const truncated = markdown.length > 12000 ? markdown.slice(0, 12000) : markdown;
console.log(` [Extract] Processing ${truncated.length} chars of Markdown`);
async function extractInvoiceFromHtml(html: string): Promise<IInvoice> {
// Truncate if too long (HTML is more valuable per byte, allow more)
const truncated = html.length > 16000 ? html.slice(0, 16000) : html;
console.log(` [Extract] Processing ${truncated.length} chars of HTML`);
const prompt = `You are an invoice data extractor. Extract the following fields from this OCR text and return ONLY a valid JSON object.
const prompt = `You are an invoice data extractor. Extract the following fields from this HTML document (OCR output with semantic structure) and return ONLY a valid JSON object.
The HTML uses semantic tags:
- <table> with <thead>/<tbody> for structured tables (invoice line items, totals)
- <header> for document header (company info, invoice number)
- <footer> for document footer (payment terms, legal text)
- <section class="table-region"> for table regions
- data-type and data-y attributes indicate block type and vertical position
Required fields:
- invoice_number: The invoice/receipt/document number
@@ -115,8 +124,9 @@ Rules:
- Use 0 for missing numeric fields
- Convert dates to YYYY-MM-DD format (e.g., "28-JAN-2022" becomes "2022-01-28")
- Extract numbers without currency symbols
- Look for totals in <table> sections, especially rows with "Total", "Amount Due", "Grand Total"
OCR Text:
HTML Document:
${truncated}
JSON:`;
@@ -195,12 +205,12 @@ JSON:`;
* Single extraction pass: Parse with PaddleOCR-VL Full, extract with Qwen2.5 (text-only)
*/
async function extractOnce(images: string[], passNum: number): Promise<IInvoice> {
// Parse document with full pipeline (PaddleOCR-VL)
const markdown = await parseDocument(images[0]);
console.log(` [Parse] Got ${markdown.split('\n').length} lines of Markdown`);
// Parse document with full pipeline (PaddleOCR-VL) -> returns HTML
const html = await parseDocument(images[0]);
console.log(` [Parse] Got ${html.split('\n').length} lines of HTML`);
// Extract invoice fields from Markdown using text-only model (no images)
return extractInvoiceFromMarkdown(markdown);
// Extract invoice fields from HTML using text-only model (no images)
return extractInvoiceFromHtml(html);
}
/**
@@ -438,7 +448,7 @@ tap.test('summary', async () => {
console.log(`\n======================================================`);
console.log(` Invoice Extraction Summary (PaddleOCR-VL Full)`);
console.log(`======================================================`);
console.log(` Method: PaddleOCR-VL Full Pipeline -> Qwen2.5 (text-only)`);
console.log(` Method: PaddleOCR-VL Full Pipeline (HTML) -> Qwen2.5 (text-only)`);
console.log(` Passed: ${passedCount}/${totalInvoices}`);
console.log(` Failed: ${failedCount}/${totalInvoices}`);
console.log(` Accuracy: ${accuracy.toFixed(1)}%`);