Compare commits

3 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 235aa1352b | |
| | 08728ada4d | |
| | b58bcabc76 | |
@@ -1,21 +1,22 @@
-# Nanonets-OCR-s Vision Language Model
-# Based on Qwen2.5-VL-3B, fine-tuned for document OCR
-# ~8-10GB VRAM, outputs structured markdown with semantic tags
+# Nanonets-OCR2-3B Vision Language Model
+# Based on Qwen2.5-VL-3B, fine-tuned for document OCR (Oct 2025 release)
+# Improvements over OCR-s: better semantic tagging, LaTeX equations, flowcharts
+# ~12-16GB VRAM with 30K context, outputs structured markdown with semantic tags
 #
-# Build: docker build -f Dockerfile_nanonets_ocr -t nanonets-ocr .
+# Build: docker build -f Dockerfile_nanonets_vllm_gpu_VRAM10GB -t nanonets-ocr .
 # Run: docker run --gpus all -p 8000:8000 -v ht-huggingface-cache:/root/.cache/huggingface nanonets-ocr
 
 FROM vllm/vllm-openai:latest
 
 LABEL maintainer="Task Venture Capital GmbH <hello@task.vc>"
-LABEL description="Nanonets-OCR-s - Document OCR optimized Vision Language Model"
+LABEL description="Nanonets-OCR2-3B - Document OCR optimized Vision Language Model"
 LABEL org.opencontainers.image.source="https://code.foss.global/host.today/ht-docker-ai"
 
 # Environment configuration
-ENV MODEL_NAME="nanonets/Nanonets-OCR-s"
+ENV MODEL_NAME="nanonets/Nanonets-OCR2-3B"
 ENV HOST="0.0.0.0"
 ENV PORT="8000"
-ENV MAX_MODEL_LEN="8192"
+ENV MAX_MODEL_LEN="30000"
 ENV GPU_MEMORY_UTILIZATION="0.9"
 
 # Expose OpenAI-compatible API port
@@ -25,9 +26,9 @@ EXPOSE 8000
 HEALTHCHECK --interval=30s --timeout=10s --start-period=120s --retries=5 \
   CMD curl -f http://localhost:8000/health || exit 1
 
-# Start vLLM server with Nanonets-OCR-s model
-CMD ["--model", "nanonets/Nanonets-OCR-s", \
+# Start vLLM server with Nanonets-OCR2-3B model
+CMD ["--model", "nanonets/Nanonets-OCR2-3B", \
      "--trust-remote-code", \
-     "--max-model-len", "8192", \
+     "--max-model-len", "30000", \
      "--host", "0.0.0.0", \
      "--port", "8000"]
@@ -13,46 +13,38 @@ NC='\033[0m' # No Color
 
 echo -e "${BLUE}Building ht-docker-ai images...${NC}"
 
-# Build GPU variant
+# Build MiniCPM-V 4.5 GPU variant
 echo -e "${GREEN}Building MiniCPM-V 4.5 GPU variant...${NC}"
 docker build \
-  -f Dockerfile_minicpm45v_gpu \
+  -f Dockerfile_minicpm45v_ollama_gpu_VRAM9GB \
   -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v \
-  -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v-gpu \
   -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:latest \
   .
 
-# Build CPU variant
-echo -e "${GREEN}Building MiniCPM-V 4.5 CPU variant...${NC}"
+# Build Qwen3-VL GPU variant
+echo -e "${GREEN}Building Qwen3-VL-30B-A3B GPU variant...${NC}"
 docker build \
-  -f Dockerfile_minicpm45v_cpu \
-  -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v-cpu \
+  -f Dockerfile_qwen3vl_ollama_gpu_VRAM20GB \
+  -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:qwen3vl \
   .
 
-# Build PaddleOCR-VL GPU variant
-echo -e "${GREEN}Building PaddleOCR-VL GPU variant...${NC}"
+# Build Nanonets-OCR GPU variant
+echo -e "${GREEN}Building Nanonets-OCR-s GPU variant...${NC}"
 docker build \
-  -f Dockerfile_paddleocr_vl_gpu \
-  -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl \
-  -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl-gpu \
-  .
-
-# Build PaddleOCR-VL CPU variant
-echo -e "${GREEN}Building PaddleOCR-VL CPU variant...${NC}"
-docker build \
-  -f Dockerfile_paddleocr_vl_cpu \
-  -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl-cpu \
+  -f Dockerfile_nanonets_vllm_gpu_VRAM10GB \
+  -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:nanonets-ocr \
   .
 
 echo -e "${GREEN}All images built successfully!${NC}"
 echo ""
 echo "Available images:"
-echo " MiniCPM-V 4.5:"
-echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v (GPU)"
-echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v-cpu (CPU)"
-echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:latest (GPU)"
+echo " MiniCPM-V 4.5 (Ollama, ~9GB VRAM):"
+echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v"
+echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:latest"
 echo ""
-echo " PaddleOCR-VL (Vision-Language Model):"
-echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl (GPU/vLLM)"
-echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl-gpu (GPU/vLLM)"
-echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl-cpu (CPU)"
+echo " Qwen3-VL-30B-A3B (Ollama, ~20GB VRAM):"
+echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:qwen3vl"
 echo ""
+echo " Nanonets-OCR-s (vLLM, ~10GB VRAM):"
+echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:nanonets-ocr"
changelog.md (11 lines changed)
@@ -1,5 +1,16 @@
 # Changelog
 
+## 2026-01-19 - 1.14.0 - feat(docker-images)
+add vLLM-based Nanonets-OCR2-3B image, Qwen3-VL Ollama image and refactor build/docs/tests to use new runtime/layout
+
+- Add new Dockerfiles for Nanonets (Dockerfile_nanonets_vllm_gpu_VRAM10GB), Qwen3 (Dockerfile_qwen3vl_ollama_gpu_VRAM20GB) and a clarified MiniCPM Ollama variant (Dockerfile_minicpm45v_ollama_gpu_VRAM9GB); remove older, redundant Dockerfiles.
+- Update build-images.sh to build the new image tags (minicpm45v, qwen3vl, nanonets-ocr) and adjust messaging/targets accordingly.
+- Documentation overhaul: readme.md and readme.hints.md updated to reflect vLLM vs Ollama runtimes, corrected ports/VRAM estimates, volume recommendations, and API endpoint details.
+- Tests updated to target the new model ID (nanonets/Nanonets-OCR2-3B), to process one page per batch, and to include a 10-minute AbortSignal timeout for OCR requests.
+- Added focused extraction test suites (test/test.invoices.extraction.ts and test/test.invoices.failed.ts) for faster iteration and debugging of invoice extraction.
+- Bump devDependencies: @git.zone/tsrun -> ^2.0.1 and @git.zone/tstest -> ^3.1.5.
+- Misc: test helper references and docker compose/test port mapping fixed (nanonets uses 8000), and various README sections cleaned and reorganized.
+
 ## 2026-01-18 - 1.13.2 - fix(tests)
 stabilize OCR extraction tests and manage GPU containers
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@host.today/ht-docker-ai",
|
||||
"version": "1.13.2",
|
||||
"version": "1.14.0",
|
||||
"type": "module",
|
||||
"private": false,
|
||||
"description": "Docker images for AI vision-language models including MiniCPM-V 4.5",
|
||||
@@ -13,8 +13,8 @@
|
||||
"test": "tstest test/ --verbose"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@git.zone/tsrun": "^1.3.3",
|
||||
"@git.zone/tstest": "^1.0.90"
|
||||
"@git.zone/tsrun": "^2.0.1",
|
||||
"@git.zone/tstest": "^3.1.5"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
|
||||
pnpm-lock.yaml (generated, 883 lines changed; diff suppressed because it is too large)

readme.hints.md (284 lines changed)
@@ -2,12 +2,18 @@
 
 ## Architecture
 
-This project uses **Ollama** as the runtime framework for serving AI models. This provides:
+This project uses **Ollama** and **vLLM** as runtime frameworks for serving AI models:
+
+### Ollama-based Images (MiniCPM-V, Qwen3-VL)
 - Automatic model download and caching
 - Unified REST API (compatible with OpenAI format)
 - Built-in quantization support
-- GPU/CPU auto-detection
+- GPU auto-detection
+
+### vLLM-based Images (Nanonets-OCR)
+- High-performance inference server
+- OpenAI-compatible API
+- Optimized for VLM workloads
 
 ## Model Details
 
@@ -24,18 +30,24 @@ This project uses **Ollama** as the runtime framework for serving AI models. Thi
 |------|---------------|
 | Full precision (bf16) | 18GB |
 | int4 quantized | 9GB |
 | GGUF (CPU) | 8GB RAM |
 
 ## Container Startup Flow
 
+### Ollama-based containers
 1. `docker-entrypoint.sh` starts Ollama server in background
 2. Waits for server to be ready
 3. Checks if model already exists in volume
 4. Pulls model if not present
 5. Keeps container running
 
+### vLLM-based containers
+1. vLLM server starts with model auto-download
+2. Health check endpoint available at `/health`
+3. OpenAI-compatible API at `/v1/chat/completions`
 
 ## Volume Persistence
 
+### Ollama volumes
 Mount `/root/.ollama` to persist downloaded models:
 
 ```bash
@@ -44,9 +56,16 @@ Mount `/root/.ollama` to persist downloaded models:
 
 Without this volume, the model will be re-downloaded on each container start (~5GB download).
 
+### vLLM/HuggingFace volumes
+Mount `/root/.cache/huggingface` for model caching:
+
+```bash
+-v hf-cache:/root/.cache/huggingface
+```
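For reference, a complete run command using this mount (it mirrors the Nanonets quick start in readme.md):

```bash
docker run -d --gpus all -p 8000:8000 \
  -v hf-cache:/root/.cache/huggingface \
  code.foss.global/host.today/ht-docker-ai:nanonets-ocr
```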
 ## API Endpoints
 
-All endpoints follow the Ollama API specification:
+### Ollama API (MiniCPM-V, Qwen3-VL)
 
 | Endpoint | Method | Description |
 |----------|--------|-------------|
@@ -56,192 +75,23 @@ All endpoints follow the Ollama API specification:
 | `/api/pull` | POST | Pull a model |
 | `/api/show` | POST | Show model info |
 
-## GPU Detection
-
-The GPU variant uses Ollama's automatic GPU detection. For CPU-only mode, we set:
-
-```dockerfile
-ENV CUDA_VISIBLE_DEVICES=""
-```
-
-This forces Ollama to use CPU inference even if GPU is available.
+### vLLM API (Nanonets-OCR)
+
+| Endpoint | Method | Description |
+|----------|--------|-------------|
+| `/health` | GET | Health check |
+| `/v1/models` | GET | List available models |
+| `/v1/chat/completions` | POST | OpenAI-compatible chat completions |
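Once the container is up with the default port mapping, these endpoints can be exercised directly:

```bash
# Liveness probe, same endpoint the Docker healthcheck uses
curl -f http://localhost:8000/health

# Confirm the model is loaded and see its served model ID
curl http://localhost:8000/v1/models
```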
 ## Health Checks
 
-Both variants include Docker health checks:
+All containers include Docker health checks:
 
 ```dockerfile
 HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
   CMD curl -f http://localhost:11434/api/tags || exit 1
 ```
 
-CPU variant has longer `start-period` (120s) due to slower startup.
-
-## PaddleOCR-VL (Recommended)
-
-### Overview
-
-PaddleOCR-VL is a 0.9B parameter Vision-Language Model specifically optimized for document parsing. It replaces the older PP-Structure approach with native VLM understanding.
-
-**Key advantages over PP-Structure:**
-- Native table understanding (no HTML parsing needed)
-- 109 language support
-- Better handling of complex multi-row tables
-- Structured Markdown/JSON output
-
-### Docker Images
-
-| Tag | Description |
-|-----|-------------|
-| `paddleocr-vl` | GPU variant using vLLM (recommended) |
-| `paddleocr-vl-cpu` | CPU variant using transformers |
-
-### API Endpoints (OpenAI-compatible)
-
-| Endpoint | Method | Description |
-|----------|--------|-------------|
-| `/health` | GET | Health check with model info |
-| `/v1/models` | GET | List available models |
-| `/v1/chat/completions` | POST | OpenAI-compatible chat completions |
-| `/ocr` | POST | Legacy OCR endpoint |
-
-### Request/Response Format
-
-**POST /v1/chat/completions (OpenAI-compatible)**
-```json
-{
-  "model": "paddleocr-vl",
-  "messages": [
-    {
-      "role": "user",
-      "content": [
-        {"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}},
-        {"type": "text", "text": "Table Recognition:"}
-      ]
-    }
-  ],
-  "temperature": 0.0,
-  "max_tokens": 8192
-}
-```
-
-**Task Prompts:**
-- `"OCR:"` - Text recognition
-- `"Table Recognition:"` - Table extraction (returns markdown)
-- `"Formula Recognition:"` - Formula extraction
-- `"Chart Recognition:"` - Chart extraction
-
-**Response**
-```json
-{
-  "id": "chatcmpl-...",
-  "object": "chat.completion",
-  "choices": [
-    {
-      "index": 0,
-      "message": {
-        "role": "assistant",
-        "content": "| Date | Description | Amount |\n|---|---|---|\n| 2021-06-01 | GITLAB INC | -119.96 |"
-      },
-      "finish_reason": "stop"
-    }
-  ]
-}
-```
-
-### Environment Variables
-
-| Variable | Default | Description |
-|----------|---------|-------------|
-| `MODEL_NAME` | `PaddlePaddle/PaddleOCR-VL` | Model to load |
-| `HOST` | `0.0.0.0` | Server host |
-| `PORT` | `8000` | Server port |
-| `MAX_BATCHED_TOKENS` | `16384` | vLLM max batch tokens |
-| `GPU_MEMORY_UTILIZATION` | `0.9` | GPU memory usage (0-1) |
-
-### Performance
-
-- **GPU (vLLM)**: ~2-5 seconds per page
-- **CPU**: ~30-60 seconds per page
-
----
-
-## Adding New Models
-
-To add a new model variant:
-
-1. Create `Dockerfile_<modelname>`
-2. Set `MODEL_NAME` environment variable
-3. Update `build-images.sh` with new build target
-4. Add documentation to `readme.md`
-
-## Troubleshooting
-
-### Model download hangs
-
-Check container logs:
-```bash
-docker logs -f <container-name>
-```
-
-The model download is ~5GB and may take several minutes.
-
-### Out of memory
-
-- GPU: Use int4 quantized version or add more VRAM
-- CPU: Increase container memory limit: `--memory=16g`
-
-### API not responding
-
-1. Check if container is healthy: `docker ps`
-2. Check logs for errors: `docker logs <container>`
-3. Verify port mapping: `curl localhost:11434/api/tags`
-
-## CI/CD Integration
-
-Build and push using npmci:
-
-```bash
-npmci docker login
-npmci docker build
-npmci docker push code.foss.global
-```
-
-## Multi-Pass Extraction Strategy
-
-The bank statement extraction uses a dual-VLM consensus approach:
-
-### Architecture: Dual-VLM Consensus
-
-| VLM | Model | Purpose |
-|-----|-------|---------|
-| **MiniCPM-V 4.5** | 8B params | Primary visual extraction |
-| **PaddleOCR-VL** | 0.9B params | Table-specialized extraction |
-
-### Extraction Strategy
-
-1. **Pass 1**: MiniCPM-V visual extraction (images → JSON)
-2. **Pass 2**: PaddleOCR-VL table recognition (images → markdown → JSON)
-3. **Consensus**: If Pass 1 == Pass 2 → Done (fast path)
-4. **Pass 3+**: MiniCPM-V visual if no consensus
-
-### Why Dual-VLM Works
-
-- **Different architectures**: Two independent models cross-check each other
-- **Specialized strengths**: PaddleOCR-VL optimized for tables, MiniCPM-V for general vision
-- **No structure loss**: Both VLMs see the original images directly
-- **Fast consensus**: Most documents complete in 2 passes when VLMs agree
-
-### Comparison vs Old PP-Structure Approach
-
-| Approach | Bank Statement Result | Issue |
-|----------|----------------------|-------|
-| MiniCPM-V Visual | 28 transactions ✓ | - |
-| PP-Structure HTML + Visual | 13 transactions ✗ | HTML merged rows incorrectly |
-| PaddleOCR-VL Table | 28 transactions ✓ | Native table understanding |
-
-**Key insight**: PP-Structure's HTML output loses structure for complex tables. PaddleOCR-VL's native VLM approach maintains table integrity.
-
----
-
 ## Nanonets-OCR-s
 
@@ -254,7 +104,7 @@ Nanonets-OCR-s is a Qwen2.5-VL-3B model fine-tuned specifically for document OCR
 - Based on Qwen2.5-VL-3B (~4B parameters)
 - Fine-tuned for document OCR
 - Outputs markdown with semantic HTML tags
-- ~8-10GB VRAM (fits comfortably in 16GB)
+- ~10GB VRAM
 
 ### Docker Images
 
@@ -305,7 +155,7 @@ Page numbers should be wrapped in brackets. Ex: <page_number>14</page_number>.
 ### Performance
 
 - **GPU (vLLM)**: ~3-8 seconds per page
-- **VRAM usage**: ~8-10GB
+- **VRAM usage**: ~10GB
 
 ### Two-Stage Pipeline (Nanonets + Qwen3)
 
@@ -332,6 +182,76 @@ docker start minicpm-test
 
 ---
 
+## Multi-Pass Extraction Strategy
+
+The bank statement extraction uses a dual-VLM consensus approach:
+
+### Architecture: Dual-VLM Consensus
+
+| VLM | Model | Purpose |
+|-----|-------|---------|
+| **MiniCPM-V 4.5** | 8B params | Primary visual extraction |
+| **Nanonets-OCR-s** | ~4B params | Document OCR with semantic output |
+
+### Extraction Strategy
+
+1. **Pass 1**: MiniCPM-V visual extraction (images → JSON)
+2. **Pass 2**: Nanonets-OCR semantic extraction (images → markdown → JSON)
+3. **Consensus**: If Pass 1 == Pass 2 → Done (fast path)
+4. **Pass 3+**: MiniCPM-V visual if no consensus
+
+### Why Dual-VLM Works
+
+- **Different architectures**: Two independent models cross-check each other
+- **Specialized strengths**: Nanonets-OCR-s optimized for document structure, MiniCPM-V for general vision
+- **No structure loss**: Both VLMs see the original images directly
+- **Fast consensus**: Most documents complete in 2 passes when VLMs agree (see the sketch below)
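A minimal sketch of that consensus loop, assuming two hypothetical wrapper scripts `extract_minicpm` and `extract_nanonets` that each print normalized transaction JSON for a page image:

```bash
# Hypothetical wrappers: each prints normalized JSON for the given page image
pass1=$(extract_minicpm page.png)    # Pass 1: MiniCPM-V visual extraction
pass2=$(extract_nanonets page.png)   # Pass 2: Nanonets OCR -> markdown -> JSON

# jq -S sorts keys so formatting differences don't break the comparison
if [ "$(jq -S . <<<"$pass1")" = "$(jq -S . <<<"$pass2")" ]; then
  echo "$pass1"                      # Consensus reached: fast path
else
  extract_minicpm page.png           # Pass 3+: additional visual passes
fi
```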
+---
+
+## Adding New Models
+
+To add a new model variant:
+
+1. Create `Dockerfile_<modelname>_<runtime>_<hardware>_VRAM<size>`
+2. Set `MODEL_NAME` environment variable
+3. Update `build-images.sh` with new build target
+4. Add documentation to `readme.md`
+
+## Troubleshooting
+
+### Model download hangs
+
+Check container logs:
+```bash
+docker logs -f <container-name>
+```
+
+The model download is ~5GB and may take several minutes.
+
+### Out of memory
+
+- GPU: Use a lighter model variant or upgrade VRAM
+- Add more GPU memory: Consider multi-GPU setup
+
+### API not responding
+
+1. Check if container is healthy: `docker ps`
+2. Check logs for errors: `docker logs <container>`
+3. Verify port mapping: `curl localhost:11434/api/tags`
+
+## CI/CD Integration
+
+Build and push using npmci:
+
+```bash
+npmci docker login
+npmci docker build
+npmci docker push code.foss.global
+```
+
 ---
 
 ## Related Resources
 
 - [Ollama Documentation](https://ollama.ai/docs)
readme.md (271 lines changed)
@@ -1,40 +1,45 @@
 # @host.today/ht-docker-ai 🚀
 
-Production-ready Docker images for state-of-the-art AI Vision-Language Models. Run powerful multimodal AI locally with GPU acceleration or CPU fallback—no cloud API keys required.
+Production-ready Docker images for state-of-the-art AI Vision-Language Models. Run powerful multimodal AI locally with GPU acceleration—**no cloud API keys required**.
+
+> 🔥 **Three VLMs, one registry.** From lightweight document OCR to GPT-4o-level vision understanding—pick the right tool for your task.
 
 ## Issue Reporting and Security
 
 For reporting bugs, issues, or security vulnerabilities, please visit [community.foss.global/](https://community.foss.global/). This is the central community hub for all issue reporting. Developers who sign and comply with our contribution agreement and go through identification can also get a [code.foss.global/](https://code.foss.global/) account to submit Pull Requests directly.
 
 ---
 
 ## 🎯 What's Included
 
-| Model | Parameters | Best For | API |
-|-------|-----------|----------|-----|
-| **MiniCPM-V 4.5** | 8B | General vision understanding, image analysis, multi-image | Ollama-compatible |
-| **PaddleOCR-VL** | 0.9B | Document parsing, table extraction, OCR | OpenAI-compatible |
+| Model | Parameters | Best For | API | Port | VRAM |
+|-------|-----------|----------|-----|------|------|
+| **MiniCPM-V 4.5** | 8B | General vision understanding, multi-image analysis | Ollama-compatible | 11434 | ~9GB |
+| **Nanonets-OCR-s** | ~4B | Document OCR with semantic markdown output | OpenAI-compatible | 8000 | ~10GB |
+| **Qwen3-VL-30B** | 30B (A3B) | Advanced visual agents, code generation from images | Ollama-compatible | 11434 | ~20GB |
 
-## 📦 Available Images
+---
+
+## 📦 Quick Reference: All Available Images
 
 ```
 code.foss.global/host.today/ht-docker-ai:<tag>
 ```
 
-| Tag | Model | Hardware | Port |
-|-----|-------|----------|------|
-| `minicpm45v` / `latest` | MiniCPM-V 4.5 | NVIDIA GPU (9-18GB VRAM) | 11434 |
-| `minicpm45v-cpu` | MiniCPM-V 4.5 | CPU only (8GB+ RAM) | 11434 |
-| `paddleocr-vl` / `paddleocr-vl-gpu` | PaddleOCR-VL | NVIDIA GPU | 8000 |
-| `paddleocr-vl-cpu` | PaddleOCR-VL | CPU only | 8000 |
+| Tag | Model | Runtime | Port | VRAM |
+|-----|-------|---------|------|------|
+| `minicpm45v` / `latest` | MiniCPM-V 4.5 | Ollama | 11434 | ~9GB |
+| `nanonets-ocr` | Nanonets-OCR-s | vLLM | 8000 | ~10GB |
+| `qwen3vl` | Qwen3-VL-30B-A3B | Ollama | 11434 | ~20GB |
 
 ---
 
 ## 🖼️ MiniCPM-V 4.5
 
-A GPT-4o level multimodal LLM from OpenBMB—handles image understanding, OCR, multi-image analysis, and visual reasoning across 30+ languages.
+A GPT-4o level multimodal LLM from OpenBMB—handles image understanding, OCR, multi-image analysis, and visual reasoning across **30+ languages**.
 
 ### Quick Start
 
 **GPU (Recommended):**
 ```bash
 docker run -d \
   --name minicpm \
@@ -44,15 +49,6 @@ docker run -d \
   code.foss.global/host.today/ht-docker-ai:minicpm45v
 ```
 
-**CPU Only:**
-```bash
-docker run -d \
-  --name minicpm \
-  -p 11434:11434 \
-  -v ollama-data:/root/.ollama \
-  code.foss.global/host.today/ht-docker-ai:minicpm45v-cpu
-```
-
 > 💡 **Pro tip:** Mount the volume to persist downloaded models (~5GB). Without it, models re-download on every container start.
 
 ### API Examples
@@ -85,108 +81,128 @@ curl http://localhost:11434/api/chat -d '{
 
 ### Hardware Requirements
 
-| Variant | VRAM/RAM | Notes |
-|---------|----------|-------|
-| GPU (int4 quantized) | 9GB VRAM | Recommended for most use cases |
-| GPU (full precision) | 18GB VRAM | Maximum quality |
-| CPU (GGUF) | 8GB+ RAM | Slower but accessible |
+| Mode | VRAM Required |
+|------|---------------|
+| int4 quantized | 9GB |
+| Full precision (bf16) | 18GB |
 ---
 
-## 📄 PaddleOCR-VL
+## 🔍 Nanonets-OCR-s
 
-A specialized 0.9B Vision-Language Model optimized for document parsing. Native support for tables, formulas, charts, and text extraction in 109 languages.
+A **Qwen2.5-VL-3B** model fine-tuned specifically for document OCR. Outputs structured markdown with semantic HTML tags—perfect for preserving document structure.
+
+### Key Features
+
+- 📝 **Semantic output:** Tables → HTML, equations → LaTeX, watermarks/page numbers → tagged
+- 🌍 **Multilingual:** Inherits Qwen's broad language support
+- ⚡ **Efficient:** ~10GB VRAM, runs great on consumer GPUs
+- 🔌 **OpenAI-compatible:** Drop-in replacement for existing pipelines
 
 ### Quick Start
 
-**GPU:**
 ```bash
 docker run -d \
-  --name paddleocr \
+  --name nanonets \
   --gpus all \
   -p 8000:8000 \
   -v hf-cache:/root/.cache/huggingface \
-  code.foss.global/host.today/ht-docker-ai:paddleocr-vl
-```
-
-**CPU:**
-```bash
-docker run -d \
-  --name paddleocr \
-  -p 8000:8000 \
-  -v hf-cache:/root/.cache/huggingface \
-  code.foss.global/host.today/ht-docker-ai:paddleocr-vl-cpu
+  code.foss.global/host.today/ht-docker-ai:nanonets-ocr
 ```
 
-### OpenAI-Compatible API
-
-PaddleOCR-VL exposes a fully OpenAI-compatible `/v1/chat/completions` endpoint:
+### API Usage
 
 ```bash
 curl http://localhost:8000/v1/chat/completions \
   -H "Content-Type: application/json" \
   -d '{
-    "model": "paddleocr-vl",
+    "model": "nanonets/Nanonets-OCR-s",
     "messages": [{
       "role": "user",
       "content": [
         {"type": "image_url", "image_url": {"url": "data:image/png;base64,<base64>"}},
-        {"type": "text", "text": "Table Recognition:"}
+        {"type": "text", "text": "Extract the text from the above document as if you were reading it naturally. Return the tables in html format. Return the equations in LaTeX representation."}
       ]
     }],
-    "max_tokens": 8192
+    "temperature": 0.0,
+    "max_tokens": 4096
   }'
 ```
 
-### Task Prompts
+### Output Format
 
-| Prompt | Output | Use Case |
-|--------|--------|----------|
-| `OCR:` | Plain text | General text extraction |
-| `Table Recognition:` | Markdown table | Invoices, bank statements, spreadsheets |
-| `Formula Recognition:` | LaTeX | Math equations, scientific notation |
-| `Chart Recognition:` | Description | Graphs and visualizations |
+Nanonets-OCR-s returns markdown with semantic tags:
 
-### API Endpoints
-
-| Endpoint | Method | Description |
-|----------|--------|-------------|
-| `/health` | GET | Health check with model/device info |
-| `/formats` | GET | Supported image formats and input methods |
-| `/v1/models` | GET | List available models |
-| `/v1/chat/completions` | POST | OpenAI-compatible chat completions |
-| `/ocr` | POST | Legacy OCR endpoint |
-
-### Image Input Methods
-
-PaddleOCR-VL accepts images in multiple formats:
-
-```javascript
-// Base64 data URL
-"data:image/png;base64,iVBORw0KGgo..."
-
-// HTTP URL
-"https://example.com/document.png"
-
-// Raw base64
-"iVBORw0KGgo..."
-```
-
-**Supported formats:** PNG, JPEG, WebP, BMP, GIF, TIFF
-
-**Optimal resolution:** 1080p–2K. Images are automatically scaled for best results.
+| Element | Output Format |
+|---------|---------------|
+| Tables | `<table>...</table>` (HTML) |
+| Equations | `$...$` (LaTeX) |
+| Images | `<img>description</img>` |
+| Watermarks | `<watermark>OFFICIAL COPY</watermark>` |
+| Page numbers | `<page_number>14</page_number>` |
 
 ### Performance
 
-| Mode | Speed per Page |
-|------|----------------|
-| GPU (CUDA) | 2–5 seconds |
-| CPU | 30–60 seconds |
+| Metric | Value |
+|--------|-------|
+| Speed | 3–8 seconds per page |
+| VRAM | ~10GB |
 ---
 
+## 🧠 Qwen3-VL-30B-A3B
+
+The **most powerful** Qwen vision model—30B parameters with 3B active (MoE architecture). Handles complex visual reasoning, code generation from screenshots, and visual agent capabilities.
+
+### Key Features
+
+- 🚀 **256K context** (expandable to 1M tokens!)
+- 🤖 **Visual agent capabilities** — can plan and execute multi-step tasks
+- 💻 **Code generation from images** — screenshot → working code
+- 🎯 **State-of-the-art** visual reasoning
+
+### Quick Start
+
+```bash
+docker run -d \
+  --name qwen3vl \
+  --gpus all \
+  -p 11434:11434 \
+  -v ollama-data:/root/.ollama \
+  code.foss.global/host.today/ht-docker-ai:qwen3vl
+```
+
+Then pull the model (one-time, ~20GB):
+```bash
+docker exec qwen3vl ollama pull qwen3-vl:30b-a3b
+```
+
+### API Usage
+
+```bash
+curl http://localhost:11434/api/chat -d '{
+  "model": "qwen3-vl:30b-a3b",
+  "messages": [{
+    "role": "user",
+    "content": "Analyze this screenshot and write the code to recreate this UI",
+    "images": ["<base64-encoded-image>"]
+  }]
+}'
+```
+
+### Hardware Requirements
+
+| Requirement | Value |
+|-------------|-------|
+| VRAM | ~20GB (Q4_K_M quantization) |
+| Context | 256K tokens default |
+
+---
+
 ## 🐳 Docker Compose
 
 Run multiple VLMs together for maximum flexibility:
 
 ```yaml
 version: '3.8'
 services:
@@ -206,9 +222,9 @@ services:
           capabilities: [gpu]
     restart: unless-stopped
 
-  # Document parsing / OCR
-  paddleocr:
-    image: code.foss.global/host.today/ht-docker-ai:paddleocr-vl
+  # Document OCR with semantic output
+  nanonets:
+    image: code.foss.global/host.today/ht-docker-ai:nanonets-ocr
     ports:
       - "8000:8000"
     volumes:
@@ -231,7 +247,7 @@ volumes:
 
 ## ⚙️ Environment Variables
 
-### MiniCPM-V 4.5
+### MiniCPM-V 4.5 & Qwen3-VL (Ollama-based)
 
 | Variable | Default | Description |
 |----------|---------|-------------|
@@ -239,13 +255,46 @@ volumes:
 | `OLLAMA_HOST` | `0.0.0.0` | API bind address |
 | `OLLAMA_ORIGINS` | `*` | Allowed CORS origins |
 
-### PaddleOCR-VL
+### Nanonets-OCR (vLLM-based)
 
 | Variable | Default | Description |
 |----------|---------|-------------|
-| `MODEL_NAME` | `PaddlePaddle/PaddleOCR-VL` | HuggingFace model ID |
-| `SERVER_HOST` | `0.0.0.0` | API bind address |
-| `SERVER_PORT` | `8000` | API port |
+| `MODEL_NAME` | `nanonets/Nanonets-OCR-s` | HuggingFace model ID |
+| `HOST` | `0.0.0.0` | API bind address |
+| `PORT` | `8000` | API port |
+| `MAX_MODEL_LEN` | `8192` | Maximum sequence length |
 | `GPU_MEMORY_UTILIZATION` | `0.9` | GPU memory usage (0-1) |
 
+---
+
+## 🏗️ Architecture Notes
+
+### Dual-VLM Consensus Strategy
+
+For production document extraction, consider using multiple models together:
+
+1. **Pass 1:** MiniCPM-V visual extraction (images → JSON)
+2. **Pass 2:** Nanonets-OCR semantic extraction (images → markdown → JSON)
+3. **Consensus:** If results match → Done (fast path)
+4. **Pass 3+:** Additional visual passes if needed
+
+This dual-VLM approach catches extraction errors that single models miss.
+
+### Why Multi-Model Works
+
+- **Different architectures:** Independent models cross-validate each other
+- **Specialized strengths:** Nanonets-OCR-s excels at document structure; MiniCPM-V handles general vision
+- **Native processing:** All VLMs see original images—no intermediate structure loss
+
+### Model Selection Guide
+
+| Task | Recommended Model |
+|------|-------------------|
+| General image understanding | MiniCPM-V 4.5 |
+| Document OCR with structure preservation | Nanonets-OCR-s |
+| Complex visual reasoning / code generation | Qwen3-VL-30B |
+| Multi-image analysis | MiniCPM-V 4.5 |
+| Visual agent tasks | Qwen3-VL-30B |
 
 ---
 
@@ -265,37 +314,16 @@ cd ht-docker-ai
 
 ---
 
-## 🏗️ Architecture Notes
-
-### Dual-VLM Consensus Strategy
-
-For production document extraction, consider using both models together:
-
-1. **Pass 1:** MiniCPM-V visual extraction (images → JSON)
-2. **Pass 2:** PaddleOCR-VL table recognition (images → markdown → JSON)
-3. **Consensus:** If results match → Done (fast path)
-4. **Pass 3+:** Additional visual passes if needed
-
-This dual-VLM approach catches extraction errors that single models miss.
-
-### Why This Works
-
-- **Different architectures:** Two independent models cross-validate each other
-- **Specialized strengths:** PaddleOCR-VL excels at tables; MiniCPM-V handles general vision
-- **Native processing:** Both VLMs see original images—no intermediate HTML/structure loss
-
----
-
 ## 🔍 Troubleshooting
 
 ### Model download hangs
 ```bash
 docker logs -f <container-name>
 ```
-Model downloads can take several minutes (~5GB for MiniCPM-V).
+Model downloads can take several minutes (~5GB for MiniCPM-V, ~20GB for Qwen3-VL).
 
 ### Out of memory
-- **GPU:** Use the CPU variant or upgrade VRAM
+- **GPU:** Use a lighter model variant or upgrade VRAM
 - **CPU:** Increase container memory: `--memory=16g`
 
 ### API not responding
@@ -315,6 +343,13 @@ sudo nvidia-ctk runtime configure --runtime=docker
 sudo systemctl restart docker
 ```
 
+### GPU Memory Contention (Multi-Model)
+
+When running multiple VLMs on a single GPU:
+- vLLM and Ollama both need significant GPU memory
+- **Single GPU:** Run services sequentially (stop one before starting another)
+- **Multi-GPU:** Assign each service to a different GPU via `CUDA_VISIBLE_DEVICES` (see the sketch below)
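For the multi-GPU case, a sketch of pinning each runtime to its own device using Docker's `--gpus` device selection (device indices are illustrative; this constrains what the container sees, equivalent to setting `CUDA_VISIBLE_DEVICES` inside it):

```bash
# Ollama-based MiniCPM-V on GPU 0, vLLM-based Nanonets-OCR on GPU 1
docker run -d --name minicpm --gpus '"device=0"' -p 11434:11434 \
  -v ollama-data:/root/.ollama \
  code.foss.global/host.today/ht-docker-ai:minicpm45v
docker run -d --name nanonets --gpus '"device=1"' -p 8000:8000 \
  -v hf-cache:/root/.cache/huggingface \
  code.foss.global/host.today/ht-docker-ai:nanonets-ocr
```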
 ---
 
 ## License and Legal Information
@@ -32,10 +32,10 @@ export const IMAGES = {
     healthTimeout: 120000,
   } as IImageConfig,
 
-  // Nanonets-OCR-s - Document OCR optimized VLM (Qwen2.5-VL-3B fine-tuned)
+  // Nanonets-OCR2-3B - Document OCR optimized VLM (Qwen2.5-VL-3B fine-tuned, Oct 2025)
   nanonetsOcr: {
     name: 'nanonets-ocr',
-    dockerfile: 'Dockerfile_nanonets_ocr',
+    dockerfile: 'Dockerfile_nanonets_vllm_gpu_VRAM10GB',
     buildContext: '.',
     containerName: 'nanonets-test',
     ports: ['8000:8000'],
@@ -340,12 +340,12 @@ export async function ensureQwen3Vl(): Promise<boolean> {
 }
 
 /**
- * Ensure Nanonets-OCR-s service is running (via vLLM)
- * Document OCR optimized VLM based on Qwen2.5-VL-3B
+ * Ensure Nanonets-OCR2-3B service is running (via vLLM)
+ * Document OCR optimized VLM based on Qwen2.5-VL-3B (Oct 2025 release)
  */
 export async function ensureNanonetsOcr(): Promise<boolean> {
   if (!isGpuAvailable()) {
-    console.log('[Docker] WARNING: Nanonets-OCR-s requires GPU, but none detected');
+    console.log('[Docker] WARNING: Nanonets-OCR2-3B requires GPU, but none detected');
   }
   return ensureService(IMAGES.nanonetsOcr);
 }
@@ -1,7 +1,7 @@
 /**
- * Bank statement extraction using Nanonets-OCR-s + GPT-OSS 20B (sequential two-stage pipeline)
+ * Bank statement extraction using Nanonets-OCR2-3B + GPT-OSS 20B (sequential two-stage pipeline)
  *
- * Stage 1: Nanonets-OCR-s converts ALL document pages to markdown (stop after completion)
+ * Stage 1: Nanonets-OCR2-3B converts ALL document pages to markdown (stop after completion)
  * Stage 2: GPT-OSS 20B extracts structured JSON from saved markdown (after Nanonets stops)
  *
  * This approach avoids GPU contention by running services sequentially.
@@ -14,7 +14,7 @@ import * as os from 'os';
 import { ensureNanonetsOcr, ensureMiniCpm, removeContainer, isContainerRunning } from './helpers/docker.js';
 
 const NANONETS_URL = 'http://localhost:8000/v1';
-const NANONETS_MODEL = 'nanonets/Nanonets-OCR-s';
+const NANONETS_MODEL = 'nanonets/Nanonets-OCR2-3B';
 
 const OLLAMA_URL = 'http://localhost:11434';
 const EXTRACTION_MODEL = 'gpt-oss:20b';
@@ -28,12 +28,19 @@ interface ITransaction {
   amount: number;
 }
 
+interface IImageData {
+  base64: string;
+  width: number;
+  height: number;
+  pageNum: number;
+}
+
 interface ITestCase {
   name: string;
   pdfPath: string;
   jsonPath: string;
   markdownPath?: string;
-  images?: string[];
+  images?: IImageData[];
 }
 
 // Nanonets-specific prompt for document OCR to markdown
@@ -50,12 +57,31 @@ const JSON_EXTRACTION_PROMPT = `Extract ALL transactions from this bank statemen
 STATEMENT:
 `;
 
+// Constants for smart batching
+const MAX_VISUAL_TOKENS = 28000; // ~32K context minus prompt/output headroom
+const PATCH_SIZE = 14; // Qwen2.5-VL uses 14x14 patches
+
 /**
- * Convert PDF to PNG images using ImageMagick
+ * Estimate visual tokens for an image based on dimensions
  */
-function convertPdfToImages(pdfPath: string): string[] {
+function estimateVisualTokens(width: number, height: number): number {
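+  // Worked example (illustrative): a 1240x1754 px page (A4 at 150 DPI) needs ceil(1240 * 1754 / (14 * 14)) = 11097 visual tokens, well under MAX_VISUAL_TOKENS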
+  return Math.ceil((width * height) / (PATCH_SIZE * PATCH_SIZE));
+}
+
+/**
+ * Process images one page at a time for reliability
+ */
+function batchImages(images: IImageData[]): IImageData[][] {
+  // One page per batch for reliable processing
+  return images.map(img => [img]);
+}
+
+/**
+ * Convert PDF to JPEG images using ImageMagick with dimension tracking
+ */
+function convertPdfToImages(pdfPath: string): IImageData[] {
   const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
-  const outputPattern = path.join(tempDir, 'page-%d.png');
+  const outputPattern = path.join(tempDir, 'page-%d.jpg');
 
   try {
     execSync(
@@ -63,13 +89,24 @@ function convertPdfToImages(pdfPath: string): string[] {
       { stdio: 'pipe' }
     );
 
-    const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.png')).sort();
-    const images: string[] = [];
+    const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.jpg')).sort();
+    const images: IImageData[] = [];
 
-    for (const file of files) {
+    for (let i = 0; i < files.length; i++) {
+      const file = files[i];
       const imagePath = path.join(tempDir, file);
       const imageData = fs.readFileSync(imagePath);
-      images.push(imageData.toString('base64'));
+
+      // Get image dimensions using identify command
+      const dimensions = execSync(`identify -format "%w %h" "${imagePath}"`, { encoding: 'utf-8' }).trim();
+      const [width, height] = dimensions.split(' ').map(Number);
+
+      images.push({
+        base64: imageData.toString('base64'),
+        width,
+        height,
+        pageNum: i + 1,
+      });
     }
 
     return images;
@@ -79,10 +116,28 @@ function convertPdfToImages(pdfPath: string): string[] {
 }
 
 /**
- * Convert a single page to markdown using Nanonets-OCR-s
+ * Convert a batch of pages to markdown using Nanonets-OCR-s
  */
-async function convertPageToMarkdown(image: string, pageNum: number): Promise<string> {
+async function convertBatchToMarkdown(batch: IImageData[]): Promise<string> {
   const startTime = Date.now();
+  const pageNums = batch.map(img => img.pageNum).join(', ');
+
+  // Build content array with all images first, then the prompt
+  const content: Array<{ type: string; image_url?: { url: string }; text?: string }> = [];
+
+  for (const img of batch) {
+    content.push({
+      type: 'image_url',
+      image_url: { url: `data:image/jpeg;base64,${img.base64}` },
+    });
+  }
+
+  // Add prompt with page separator instruction if multiple pages
+  const promptText = batch.length > 1
+    ? `${NANONETS_OCR_PROMPT}\n\nPlease clearly separate each page's content with "--- PAGE N ---" markers, where N is the page number starting from ${batch[0].pageNum}.`
+    : NANONETS_OCR_PROMPT;
+
+  content.push({ type: 'text', text: promptText });
 
   const response = await fetch(`${NANONETS_URL}/chat/completions`, {
     method: 'POST',
@@ -94,14 +149,12 @@ async function convertPageToMarkdown(image: string, pageNum: number): Promise<st
       model: NANONETS_MODEL,
       messages: [{
         role: 'user',
-        content: [
-          { type: 'image_url', image_url: { url: `data:image/png;base64,${image}` }},
-          { type: 'text', text: NANONETS_OCR_PROMPT },
-        ],
+        content,
       }],
-      max_tokens: 4096,
+      max_tokens: 4096 * batch.length, // Scale output tokens with batch size
       temperature: 0.0,
     }),
+    signal: AbortSignal.timeout(600000), // 10 minute timeout for OCR
   });
 
   const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
@@ -112,25 +165,35 @@ async function convertPageToMarkdown(image: string, pageNum: number): Promise<st
   }
 
   const data = await response.json();
-  const content = (data.choices?.[0]?.message?.content || '').trim();
-  console.log(`    Page ${pageNum}: ${content.length} chars (${elapsed}s)`);
-  return content;
+  let responseContent = (data.choices?.[0]?.message?.content || '').trim();
+
+  // For single-page batches, add page marker if not present
+  if (batch.length === 1 && !responseContent.includes('--- PAGE')) {
+    responseContent = `--- PAGE ${batch[0].pageNum} ---\n${responseContent}`;
+  }
+
+  console.log(`    Pages [${pageNums}]: ${responseContent.length} chars (${elapsed}s)`);
+  return responseContent;
 }
 
 /**
- * Convert all pages of a document to markdown
+ * Convert all pages of a document to markdown using smart batching
  */
-async function convertDocumentToMarkdown(images: string[], docName: string): Promise<string> {
-  console.log(`  [${docName}] Converting ${images.length} page(s)...`);
+async function convertDocumentToMarkdown(images: IImageData[], docName: string): Promise<string> {
+  const batches = batchImages(images);
+  console.log(`  [${docName}] Processing ${images.length} page(s) in ${batches.length} batch(es)...`);
 
-  const markdownPages: string[] = [];
+  const markdownParts: string[] = [];
 
-  for (let i = 0; i < images.length; i++) {
-    const markdown = await convertPageToMarkdown(images[i], i + 1);
-    markdownPages.push(`--- PAGE ${i + 1} ---\n${markdown}`);
+  for (let i = 0; i < batches.length; i++) {
+    const batch = batches[i];
+    const batchTokens = batch.reduce((sum, img) => sum + estimateVisualTokens(img.width, img.height), 0);
+    console.log(`    Batch ${i + 1}: ${batch.length} page(s), ~${batchTokens} tokens`);
+    const markdown = await convertBatchToMarkdown(batch);
+    markdownParts.push(markdown);
   }
 
-  const fullMarkdown = markdownPages.join('\n\n');
+  const fullMarkdown = markdownParts.join('\n\n');
   console.log(`  [${docName}] Complete: ${fullMarkdown.length} chars total`);
   return fullMarkdown;
 }
@@ -161,25 +224,6 @@ async function ensureExtractionModel(): Promise<boolean> {
       const models = data.models || [];
       if (models.some((m: { name: string }) => m.name === EXTRACTION_MODEL)) {
         console.log(`  [Ollama] Model available: ${EXTRACTION_MODEL}`);
-
-        // Warmup: send a simple request to ensure model is loaded
-        console.log(`  [Ollama] Warming up model...`);
-        const warmupResponse = await fetch(`${OLLAMA_URL}/api/chat`, {
-          method: 'POST',
-          headers: { 'Content-Type': 'application/json' },
-          body: JSON.stringify({
-            model: EXTRACTION_MODEL,
-            messages: [{ role: 'user', content: 'Return: [{"test": 1}]' }],
-            stream: false,
-          }),
-          signal: AbortSignal.timeout(120000),
-        });
-
-        if (warmupResponse.ok) {
-          const warmupData = await warmupResponse.json();
-          console.log(`  [Ollama] Warmup complete (${warmupData.message?.content?.length || 0} chars)`);
-        }
-
         return true;
       }
     }
@@ -201,22 +245,24 @@
  * Extract transactions from markdown using GPT-OSS 20B (streaming)
  */
 async function extractTransactionsFromMarkdown(markdown: string, queryId: string): Promise<ITransaction[]> {
-  console.log(`  [${queryId}] Sending to ${EXTRACTION_MODEL}...`);
+  console.log(`  [${queryId}] Markdown length: ${markdown.length}`);
   const startTime = Date.now();
 
   const fullPrompt = JSON_EXTRACTION_PROMPT + markdown;
-  console.log(`  [${queryId}] Prompt preview: ${fullPrompt.substring(0, 200)}...`);
+
+  // Log exact prompt
+  console.log(`\n  [${queryId}] ===== PROMPT =====`);
+  console.log(fullPrompt);
+  console.log(`  [${queryId}] ===== END PROMPT (${fullPrompt.length} chars) =====\n`);
 
   const response = await fetch(`${OLLAMA_URL}/api/chat`, {
     method: 'POST',
     headers: { 'Content-Type': 'application/json' },
     body: JSON.stringify({
       model: EXTRACTION_MODEL,
-      messages: [{
-        role: 'user',
-        content: fullPrompt,
-      }],
+      messages: [
+        { role: 'user', content: 'Hi there, how are you?' },
+        { role: 'assistant', content: 'Good, how can I help you today?' },
+        { role: 'user', content: fullPrompt },
+      ],
       stream: true,
     }),
     signal: AbortSignal.timeout(600000), // 10 minute timeout
@@ -228,24 +274,45 @@
     throw new Error(`Ollama API error: ${response.status}`);
   }
 
-  // Stream the response and log to console
+  // Stream the response
   let content = '';
+  let thinkingContent = '';
+  let thinkingStarted = false;
+  let outputStarted = false;
   const reader = response.body!.getReader();
   const decoder = new TextDecoder();
 
-  process.stdout.write(`  [${queryId}] `);
-
+  try {
     while (true) {
       const { done, value } = await reader.read();
       if (done) break;
 
       const chunk = decoder.decode(value, { stream: true });
 
       // Each line is a JSON object
       for (const line of chunk.split('\n').filter(l => l.trim())) {
         try {
           const json = JSON.parse(line);
 
+          // Stream thinking tokens
+          const thinking = json.message?.thinking || '';
+          if (thinking) {
+            if (!thinkingStarted) {
+              process.stdout.write(`  [${queryId}] THINKING: `);
+              thinkingStarted = true;
+            }
+            process.stdout.write(thinking);
+            thinkingContent += thinking;
+          }
+
+          // Stream content tokens
           const token = json.message?.content || '';
           if (token) {
+            if (!outputStarted) {
+              if (thinkingStarted) process.stdout.write('\n');
+              process.stdout.write(`  [${queryId}] OUTPUT: `);
+              outputStarted = true;
+            }
             process.stdout.write(token);
             content += token;
           }
@@ -254,9 +321,12 @@
         }
       }
     }
+  } finally {
+    if (thinkingStarted || outputStarted) process.stdout.write('\n');
+  }
 
   const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
-  console.log(`\n  [${queryId}] Done: ${content.length} chars (${elapsed}s)`);
+  console.log(`  [${queryId}] Done: ${thinkingContent.length} thinking chars, ${content.length} output chars (${elapsed}s)`);
 
   return parseJsonResponse(content, queryId);
 }
test/test.invoices.extraction.ts (new file, 436 lines)
@@ -0,0 +1,436 @@
|
||||
/**
|
||||
* Invoice extraction tuning - uses pre-generated markdown files
|
||||
*
|
||||
* Skips OCR stage, only runs GPT-OSS extraction on existing .debug.md files.
|
||||
* Use this to quickly iterate on extraction prompts and logic.
|
||||
*
|
||||
* Run with: tstest test/test.invoices.extraction.ts --verbose
|
||||
*/
|
||||
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||
import * as fs from 'fs';
|
||||
import * as path from 'path';
|
||||
import { ensureMiniCpm } from './helpers/docker.js';
|
||||
|
||||
const OLLAMA_URL = 'http://localhost:11434';
|
||||
const EXTRACTION_MODEL = 'gpt-oss:20b';
|
||||
|
||||
// Test these specific invoices (must have .debug.md files)
|
||||
const TEST_INVOICES = [
|
||||
'consensus_2021-09',
|
||||
'hetzner_2022-04',
|
||||
'qonto_2021-08',
|
||||
'qonto_2021-09',
|
||||
];
|
||||
|
||||
interface IInvoice {
|
||||
invoice_number: string;
|
||||
invoice_date: string;
|
||||
vendor_name: string;
|
||||
currency: string;
|
||||
net_amount: number;
|
||||
vat_amount: number;
|
||||
total_amount: number;
|
||||
}
|
||||
|
||||
interface ITestCase {
|
||||
name: string;
|
||||
markdownPath: string;
|
||||
jsonPath: string;
|
||||
}
|
||||
|
||||
// JSON extraction prompt for GPT-OSS 20B (sent AFTER the invoice text is provided)
|
||||
const JSON_EXTRACTION_PROMPT = `Extract key fields from the invoice. Return ONLY valid JSON.
|
||||
|
||||
WHERE TO FIND DATA:
|
||||
- invoice_number, invoice_date, vendor_name: Look in the HEADER section at the TOP of PAGE 1 (near "Invoice no.", "Invoice date:", "Rechnungsnummer")
|
||||
- net_amount, vat_amount, total_amount: Look in the SUMMARY section at the BOTTOM (look for "Total", "Amount due", "Gesamtbetrag")
|
||||
|
||||
RULES:
|
||||
1. invoice_number: Extract ONLY the value (e.g., "R0015632540"), NOT the label "Invoice no."
|
||||
2. invoice_date: Convert to YYYY-MM-DD format (e.g., "14/04/2022" → "2022-04-14")
|
||||
3. vendor_name: The company issuing the invoice
|
||||
4. currency: EUR, USD, or GBP
|
||||
5. net_amount: Total before tax
|
||||
6. vat_amount: Tax amount
|
||||
7. total_amount: Final total with tax
|
||||
|
||||
JSON only:
|
||||
{"invoice_number":"X","invoice_date":"YYYY-MM-DD","vendor_name":"X","currency":"EUR","net_amount":0,"vat_amount":0,"total_amount":0}`;
|
||||
|
||||
/**
|
||||
* Ensure GPT-OSS 20B model is available
|
||||
*/
|
||||
async function ensureExtractionModel(): Promise<boolean> {
|
||||
try {
|
||||
const response = await fetch(`${OLLAMA_URL}/api/tags`);
|
||||
if (response.ok) {
|
||||
const data = await response.json();
|
||||
const models = data.models || [];
|
||||
if (models.some((m: { name: string }) => m.name === EXTRACTION_MODEL)) {
|
||||
console.log(` [Ollama] Model available: ${EXTRACTION_MODEL}`);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
|
||||
console.log(` [Ollama] Pulling ${EXTRACTION_MODEL}...`);
|
||||
const pullResponse = await fetch(`${OLLAMA_URL}/api/pull`, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ name: EXTRACTION_MODEL, stream: false }),
|
||||
});
|
||||
|
||||
return pullResponse.ok;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse amount from string (handles European format)
|
||||
*/
|
||||
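// e.g. "1.234,56" -> 1234.56 (European grouping) and "1,234.56" -> 1234.56 (US grouping)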
function parseAmount(s: string | number | undefined): number {
|
||||
if (s === undefined || s === null) return 0;
|
||||
if (typeof s === 'number') return s;
|
||||
const match = s.match(/([\d.,]+)/);
|
||||
if (!match) return 0;
|
||||
const numStr = match[1];
|
||||
const normalized = numStr.includes(',') && numStr.indexOf(',') > numStr.lastIndexOf('.')
|
||||
? numStr.replace(/\./g, '').replace(',', '.')
|
||||
: numStr.replace(/,/g, '');
|
||||
return parseFloat(normalized) || 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract invoice number - minimal normalization
|
||||
*/
|
||||
function extractInvoiceNumber(s: string | undefined): string {
|
||||
if (!s) return '';
|
||||
return s.replace(/\*\*/g, '').replace(/`/g, '').trim();
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract date (YYYY-MM-DD) from response
|
||||
*/
|
||||
function extractDate(s: string | undefined): string {
|
||||
if (!s) return '';
|
||||
let clean = s.replace(/\*\*/g, '').replace(/`/g, '').trim();
|
||||
const isoMatch = clean.match(/(\d{4}-\d{2}-\d{2})/);
|
||||
if (isoMatch) return isoMatch[1];
|
||||
const dmyMatch = clean.match(/(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})/);
|
||||
if (dmyMatch) {
|
||||
return `${dmyMatch[3]}-${dmyMatch[2].padStart(2, '0')}-${dmyMatch[1].padStart(2, '0')}`;
|
||||
}
|
||||
return clean.replace(/[^\d-]/g, '').trim();
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract currency
|
||||
*/
|
||||
function extractCurrency(s: string | undefined): string {
|
||||
if (!s) return 'EUR';
|
||||
const upper = s.toUpperCase();
|
||||
if (upper.includes('EUR') || upper.includes('€')) return 'EUR';
|
||||
if (upper.includes('USD') || upper.includes('$')) return 'USD';
|
||||
if (upper.includes('GBP') || upper.includes('£')) return 'GBP';
|
||||
return 'EUR';
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract JSON from response
|
||||
*/
|
||||
function extractJsonFromResponse(response: string): Record<string, unknown> | null {
|
||||
let cleanResponse = response.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
|
||||
const codeBlockMatch = cleanResponse.match(/```(?:json)?\s*([\s\S]*?)```/);
|
||||
const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : cleanResponse;
|
||||
|
||||
try {
|
||||
return JSON.parse(jsonStr);
|
||||
} catch {
|
||||
const jsonMatch = jsonStr.match(/\{[\s\S]*\}/);
|
||||
if (jsonMatch) {
|
||||
try {
|
||||
return JSON.parse(jsonMatch[0]);
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}

/**
 * Parse JSON response into IInvoice
 */
function parseJsonToInvoice(response: string): IInvoice | null {
  const parsed = extractJsonFromResponse(response);
  if (!parsed) return null;

  return {
    invoice_number: extractInvoiceNumber(String(parsed.invoice_number || '')),
    invoice_date: extractDate(String(parsed.invoice_date || '')),
    vendor_name: String(parsed.vendor_name || '').replace(/\*\*/g, '').replace(/`/g, '').trim(),
    currency: extractCurrency(String(parsed.currency || '')),
    net_amount: parseAmount(parsed.net_amount as string | number),
    vat_amount: parseAmount(parsed.vat_amount as string | number),
    total_amount: parseAmount(parsed.total_amount as string | number),
  };
}

/**
 * Extract invoice from markdown using GPT-OSS 20B (streaming)
 */
async function extractInvoiceFromMarkdown(markdown: string, queryId: string): Promise<IInvoice | null> {
  const startTime = Date.now();

  console.log(`  [${queryId}] Invoice: ${markdown.length} chars, Prompt: ${JSON_EXTRACTION_PROMPT.length} chars`);

  const response = await fetch(`${OLLAMA_URL}/api/chat`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: EXTRACTION_MODEL,
      messages: [
        { role: 'user', content: 'Hi there, how are you?' },
        { role: 'assistant', content: 'Good, how can I help you today?' },
        { role: 'user', content: `Here is an invoice document:\n\n${markdown}` },
        { role: 'assistant', content: 'I have read the invoice document you provided. I can see all the text content. What would you like me to do with it?' },
        { role: 'user', content: JSON_EXTRACTION_PROMPT },
      ],
      stream: true,
    }),
    signal: AbortSignal.timeout(120000), // 2 min timeout
  });

  if (!response.ok) {
    const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
    console.log(`  [${queryId}] ERROR: ${response.status} (${elapsed}s)`);
    throw new Error(`Ollama API error: ${response.status}`);
  }

  // Stream the response
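  // Ollama's /api/chat stream is newline-delimited JSON; each line looks roughly
  // like {"message":{"role":"assistant","thinking":"...","content":"..."},"done":false},
  // with `thinking` present only for reasoning-capable models.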
  let content = '';
  let thinkingContent = '';
  let thinkingStarted = false;
  let outputStarted = false;
  const reader = response.body!.getReader();
  const decoder = new TextDecoder();

  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;

      const chunk = decoder.decode(value, { stream: true });

      for (const line of chunk.split('\n').filter(l => l.trim())) {
        try {
          const json = JSON.parse(line);

          const thinking = json.message?.thinking || '';
          if (thinking) {
            if (!thinkingStarted) {
              process.stdout.write(`  [${queryId}] THINKING: `);
              thinkingStarted = true;
            }
            process.stdout.write(thinking);
            thinkingContent += thinking;
          }

          const token = json.message?.content || '';
          if (token) {
            if (!outputStarted) {
              if (thinkingStarted) process.stdout.write('\n');
              process.stdout.write(`  [${queryId}] OUTPUT: `);
              outputStarted = true;
            }
            process.stdout.write(token);
            content += token;
          }
        } catch {
          // Ignore parse errors for partial chunks
        }
      }
    }
  } finally {
    if (thinkingStarted || outputStarted) process.stdout.write('\n');
  }

  const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
  console.log(`  [${queryId}] Done: ${thinkingContent.length} thinking, ${content.length} output (${elapsed}s)`);

  return parseJsonToInvoice(content);
}

/**
 * Normalize date to YYYY-MM-DD
 */
function normalizeDate(dateStr: string | null): string {
  if (!dateStr) return '';
  if (/^\d{4}-\d{2}-\d{2}$/.test(dateStr)) return dateStr;

  const monthMap: Record<string, string> = {
    JAN: '01', FEB: '02', MAR: '03', APR: '04', MAY: '05', JUN: '06',
    JUL: '07', AUG: '08', SEP: '09', OCT: '10', NOV: '11', DEC: '12',
  };

  let match = dateStr.match(/^(\d{1,2})-([A-Z]{3})-(\d{4})$/i);
  if (match) {
    return `${match[3]}-${monthMap[match[2].toUpperCase()] || '01'}-${match[1].padStart(2, '0')}`;
  }

  match = dateStr.match(/^(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})$/);
  if (match) {
    return `${match[3]}-${match[2].padStart(2, '0')}-${match[1].padStart(2, '0')}`;
  }

  return dateStr;
}
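
// Illustrative examples (not actual test output): normalizeDate('15-MAR-2024')
// -> '2024-03-15'; normalizeDate('15.03.2024') -> '2024-03-15'; ISO input
// passes through unchanged.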

/**
 * Normalize invoice number for comparison (remove spaces, lowercase)
 */
function normalizeInvoiceNumber(s: string): string {
  return s.replace(/\s+/g, '').toLowerCase();
}

/**
 * Compare extracted invoice against expected
 */
function compareInvoice(
  extracted: IInvoice,
  expected: IInvoice
): { match: boolean; errors: string[] } {
  const errors: string[] = [];

  // Invoice number - normalize spaces for comparison
  const extNum = normalizeInvoiceNumber(extracted.invoice_number || '');
  const expNum = normalizeInvoiceNumber(expected.invoice_number || '');
  if (extNum !== expNum) {
    errors.push(`invoice_number: expected "${expected.invoice_number}", got "${extracted.invoice_number}"`);
  }

  if (normalizeDate(extracted.invoice_date) !== normalizeDate(expected.invoice_date)) {
    errors.push(`invoice_date: expected "${expected.invoice_date}", got "${extracted.invoice_date}"`);
  }

  if (Math.abs(extracted.total_amount - expected.total_amount) > 0.02) {
    errors.push(`total_amount: expected ${expected.total_amount}, got ${extracted.total_amount}`);
  }

  if (extracted.currency?.toUpperCase() !== expected.currency?.toUpperCase()) {
    errors.push(`currency: expected "${expected.currency}", got "${extracted.currency}"`);
  }

  return { match: errors.length === 0, errors };
}

/**
 * Find test cases with existing debug markdown
 */
function findTestCases(): ITestCase[] {
  const invoicesDir = path.join(process.cwd(), '.nogit/invoices');
  if (!fs.existsSync(invoicesDir)) return [];

  const testCases: ITestCase[] = [];

  for (const invoiceName of TEST_INVOICES) {
    const markdownPath = path.join(invoicesDir, `${invoiceName}.debug.md`);
    const jsonPath = path.join(invoicesDir, `${invoiceName}.json`);

    if (fs.existsSync(markdownPath) && fs.existsSync(jsonPath)) {
      testCases.push({
        name: invoiceName,
        markdownPath,
        jsonPath,
      });
    } else {
      if (!fs.existsSync(markdownPath)) {
        console.warn(`Warning: Missing markdown: ${markdownPath}`);
      }
      if (!fs.existsSync(jsonPath)) {
        console.warn(`Warning: Missing JSON: ${jsonPath}`);
      }
    }
  }

  return testCases;
}

// ============ TESTS ============

const testCases = findTestCases();
console.log(`\n========================================`);
console.log(`  EXTRACTION TUNING TEST`);
console.log(`  (Skips OCR, uses existing .debug.md)`);
console.log(`========================================`);
console.log(`  Testing ${testCases.length} invoices:`);
for (const tc of testCases) {
  console.log(`    - ${tc.name}`);
}
console.log(`========================================\n`);

tap.test('Setup Ollama + GPT-OSS 20B', async () => {
  const ollamaOk = await ensureMiniCpm();
  expect(ollamaOk).toBeTrue();

  const extractionOk = await ensureExtractionModel();
  expect(extractionOk).toBeTrue();
});

let passedCount = 0;
let failedCount = 0;

for (const tc of testCases) {
  tap.test(`Extract ${tc.name}`, async () => {
    const expected: IInvoice = JSON.parse(fs.readFileSync(tc.jsonPath, 'utf-8'));
    const markdown = fs.readFileSync(tc.markdownPath, 'utf-8');

    console.log(`\n  ========================================`);
    console.log(`  === ${tc.name} ===`);
    console.log(`  ========================================`);
    console.log(`  EXPECTED: ${expected.invoice_number} | ${expected.invoice_date} | ${expected.total_amount} ${expected.currency}`);
    console.log(`  Markdown: ${markdown.length} chars`);

    const startTime = Date.now();

    const extracted = await extractInvoiceFromMarkdown(markdown, tc.name);

    if (!extracted) {
      failedCount++;
      console.log(`\n  Result: ✗ FAILED TO PARSE (${((Date.now() - startTime) / 1000).toFixed(1)}s)`);
      return;
    }

    const elapsedMs = Date.now() - startTime;

    console.log(`  EXTRACTED: ${extracted.invoice_number} | ${extracted.invoice_date} | ${extracted.total_amount} ${extracted.currency}`);

    const result = compareInvoice(extracted, expected);

    if (result.match) {
      passedCount++;
      console.log(`\n  Result: ✓ MATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
    } else {
      failedCount++;
      console.log(`\n  Result: ✗ MISMATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
      console.log(`  ERRORS:`);
      result.errors.forEach(e => console.log(`    - ${e}`));
    }
  });
}

tap.test('Summary', async () => {
  const totalInvoices = testCases.length;
  const accuracy = totalInvoices > 0 ? (passedCount / totalInvoices) * 100 : 0;

  console.log(`\n========================================`);
  console.log(`  Extraction Tuning Summary`);
  console.log(`========================================`);
  console.log(`  Model: ${EXTRACTION_MODEL}`);
  console.log(`  Passed: ${passedCount}/${totalInvoices}`);
  console.log(`  Failed: ${failedCount}/${totalInvoices}`);
  console.log(`  Accuracy: ${accuracy.toFixed(1)}%`);
  console.log(`========================================\n`);
});

export default tap.start();

695 test/test.invoices.failed.ts (new file)
@@ -0,0 +1,695 @@

/**
 * Focused test for failed invoice extractions
 *
 * Tests only the 4 invoices that failed in the main test:
 * - consensus_2021-09: invoice_number "2021/1384" → "20211384" (slash stripped)
 * - hetzner_2022-04: model hallucinated after 281s thinking
 * - qonto_2021-08: invoice_number "08-21-INVOICE-410870" → "4108705" (prefix stripped)
 * - qonto_2021-09: invoice_number "09-21-INVOICE-4303642" → "4303642" (prefix stripped)
 *
 * Run with: tstest test/test.invoices.failed.ts --verbose
 */
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensureNanonetsOcr, ensureMiniCpm, isContainerRunning } from './helpers/docker.js';

const NANONETS_URL = 'http://localhost:8000/v1';
const NANONETS_MODEL = 'nanonets/Nanonets-OCR2-3B';

const OLLAMA_URL = 'http://localhost:11434';
const EXTRACTION_MODEL = 'gpt-oss:20b';

// Temp directory for storing markdown between stages
const TEMP_MD_DIR = path.join(os.tmpdir(), 'nanonets-invoices-failed-debug');

// Only test these specific invoices that failed
const FAILED_INVOICES = [
  'consensus_2021-09',
  'hetzner_2022-04',
  'qonto_2021-08',
  'qonto_2021-09',
];

interface IInvoice {
  invoice_number: string;
  invoice_date: string;
  vendor_name: string;
  currency: string;
  net_amount: number;
  vat_amount: number;
  total_amount: number;
}

interface IImageData {
  base64: string;
  width: number;
  height: number;
  pageNum: number;
}

interface ITestCase {
  name: string;
  pdfPath: string;
  jsonPath: string;
  markdownPath?: string;
}

// Nanonets-specific prompt for document OCR to markdown
const NANONETS_OCR_PROMPT = `Extract the text from the above document as if you were reading it naturally.
Return the tables in html format.
Return the equations in LaTeX representation.
If there is an image in the document and image caption is not present, add a small description inside <img></img> tag.
Watermarks should be wrapped in brackets. Ex: <watermark>OFFICIAL COPY</watermark>.
Page numbers should be wrapped in brackets. Ex: <page_number>14</page_number>.`;

// JSON extraction prompt for GPT-OSS 20B
const JSON_EXTRACTION_PROMPT = `You are an invoice data extractor. Below is an invoice document converted to text/markdown. Extract the key invoice fields as JSON.

IMPORTANT RULES:
1. invoice_number: The unique invoice/document number (NOT VAT ID, NOT customer ID). PRESERVE ALL CHARACTERS including slashes, dashes, and prefixes.
2. invoice_date: Format as YYYY-MM-DD
3. vendor_name: The company that issued the invoice
4. currency: EUR, USD, or GBP
5. net_amount: Amount before tax
6. vat_amount: Tax/VAT amount
7. total_amount: Final total (gross amount)

Return ONLY this JSON format, no explanation:
{
  "invoice_number": "INV-2024-001",
  "invoice_date": "2024-01-15",
  "vendor_name": "Company Name",
  "currency": "EUR",
  "net_amount": 100.00,
  "vat_amount": 19.00,
  "total_amount": 119.00
}

INVOICE TEXT:
`;

const PATCH_SIZE = 14;

/**
 * Estimate visual tokens for an image based on dimensions
 */
function estimateVisualTokens(width: number, height: number): number {
  return Math.ceil((width * height) / (PATCH_SIZE * PATCH_SIZE));
}
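
// Rough budget check (illustrative): a 150 DPI A4 page is ~1240x1754 px, i.e.
// ceil(1240 * 1754 / (14 * 14)) ~= 11,097 estimated visual tokens, so a single
// page already fills a large share of the OCR server's context window.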

/**
 * Process images one page at a time for reliability
 */
function batchImages(images: IImageData[]): IImageData[][] {
  return images.map(img => [img]);
}

/**
 * Convert PDF to JPEG images using ImageMagick with dimension tracking
 */
function convertPdfToImages(pdfPath: string): IImageData[] {
  const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
  const outputPattern = path.join(tempDir, 'page-%d.jpg');

  try {
    execSync(
      `convert -density 150 -quality 90 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
      { stdio: 'pipe' }
    );

    const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.jpg')).sort();
    const images: IImageData[] = [];

    for (let i = 0; i < files.length; i++) {
      const file = files[i];
      const imagePath = path.join(tempDir, file);
      const imageData = fs.readFileSync(imagePath);

      const dimensions = execSync(`identify -format "%w %h" "${imagePath}"`, { encoding: 'utf-8' }).trim();
      const [width, height] = dimensions.split(' ').map(Number);

      images.push({
        base64: imageData.toString('base64'),
        width,
        height,
        pageNum: i + 1,
      });
    }

    return images;
  } finally {
    fs.rmSync(tempDir, { recursive: true, force: true });
  }
}
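
// Note: relies on ImageMagick's `convert` and `identify` being on PATH
// (ImageMagick 7 exposes these as `magick` / `magick identify`).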

/**
 * Convert a batch of pages to markdown using Nanonets-OCR2-3B
 */
async function convertBatchToMarkdown(batch: IImageData[]): Promise<string> {
  const startTime = Date.now();
  const pageNums = batch.map(img => img.pageNum).join(', ');

  const content: Array<{ type: string; image_url?: { url: string }; text?: string }> = [];

  for (const img of batch) {
    content.push({
      type: 'image_url',
      image_url: { url: `data:image/jpeg;base64,${img.base64}` },
    });
  }

  const promptText = batch.length > 1
    ? `${NANONETS_OCR_PROMPT}\n\nPlease clearly separate each page's content with "--- PAGE N ---" markers, where N is the page number starting from ${batch[0].pageNum}.`
    : NANONETS_OCR_PROMPT;

  content.push({ type: 'text', text: promptText });

  const response = await fetch(`${NANONETS_URL}/chat/completions`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': 'Bearer dummy',
    },
    body: JSON.stringify({
      model: NANONETS_MODEL,
      messages: [{
        role: 'user',
        content,
      }],
      max_tokens: 4096 * batch.length,
      temperature: 0.0,
    }),
    signal: AbortSignal.timeout(600000),
  });

  const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);

  if (!response.ok) {
    const errorText = await response.text();
    throw new Error(`Nanonets API error: ${response.status} - ${errorText}`);
  }

  const data = await response.json();
  let responseContent = (data.choices?.[0]?.message?.content || '').trim();

  if (batch.length === 1 && !responseContent.includes('--- PAGE')) {
    responseContent = `--- PAGE ${batch[0].pageNum} ---\n${responseContent}`;
  }

  console.log(`    Pages [${pageNums}]: ${responseContent.length} chars (${elapsed}s)`);
  return responseContent;
}

/**
 * Convert all pages of a document to markdown using smart batching
 */
async function convertDocumentToMarkdown(images: IImageData[], docName: string): Promise<string> {
  const batches = batchImages(images);
  console.log(`  [${docName}] Processing ${images.length} page(s) in ${batches.length} batch(es)...`);

  const markdownParts: string[] = [];

  for (let i = 0; i < batches.length; i++) {
    const batch = batches[i];
    const batchTokens = batch.reduce((sum, img) => sum + estimateVisualTokens(img.width, img.height), 0);
    console.log(`    Batch ${i + 1}: ${batch.length} page(s), ~${batchTokens} tokens`);
    const markdown = await convertBatchToMarkdown(batch);
    markdownParts.push(markdown);
  }

  const fullMarkdown = markdownParts.join('\n\n');
  console.log(`  [${docName}] Complete: ${fullMarkdown.length} chars total`);
  return fullMarkdown;
}

/**
 * Stop Nanonets container
 */
function stopNanonets(): void {
  console.log('  [Docker] Stopping Nanonets container...');
  try {
    execSync('docker stop nanonets-test 2>/dev/null || true', { stdio: 'pipe' });
    execSync('sleep 5', { stdio: 'pipe' });
    console.log('  [Docker] Nanonets stopped');
  } catch {
    console.log('  [Docker] Nanonets was not running');
  }
}

/**
 * Ensure GPT-OSS 20B model is available
 */
async function ensureExtractionModel(): Promise<boolean> {
  try {
    const response = await fetch(`${OLLAMA_URL}/api/tags`);
    if (response.ok) {
      const data = await response.json();
      const models = data.models || [];
      if (models.some((m: { name: string }) => m.name === EXTRACTION_MODEL)) {
        console.log(`  [Ollama] Model available: ${EXTRACTION_MODEL}`);
        return true;
      }
    }
  } catch {
    return false;
  }

  console.log(`  [Ollama] Pulling ${EXTRACTION_MODEL}...`);
  const pullResponse = await fetch(`${OLLAMA_URL}/api/pull`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ name: EXTRACTION_MODEL, stream: false }),
  });

  return pullResponse.ok;
}
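
// Note: /api/pull with stream:false blocks until the whole download completes,
// so the first run against a 20B model can take many minutes.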

/**
 * Parse amount from string (handles European format)
 */
function parseAmount(s: string | number | undefined): number {
  if (s === undefined || s === null) return 0;
  if (typeof s === 'number') return s;
  const match = s.match(/([\d.,]+)/);
  if (!match) return 0;
  const numStr = match[1];
  const normalized = numStr.includes(',') && numStr.indexOf(',') > numStr.lastIndexOf('.')
    ? numStr.replace(/\./g, '').replace(',', '.')
    : numStr.replace(/,/g, '');
  return parseFloat(normalized) || 0;
}
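
// Illustrative examples (not actual test data): parseAmount('1.234,56 EUR')
// -> 1234.56 (European grouping), parseAmount('1,234.56') -> 1234.56 (US
// grouping), parseAmount(42) -> 42.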

/**
 * Extract invoice number - MINIMAL normalization for debugging
 */
function extractInvoiceNumber(s: string | undefined): string {
  if (!s) return '';
  // Only remove markdown formatting, preserve everything else
  return s.replace(/\*\*/g, '').replace(/`/g, '').trim();
}

/**
 * Extract date (YYYY-MM-DD) from response
 */
function extractDate(s: string | undefined): string {
  if (!s) return '';
  const clean = s.replace(/\*\*/g, '').replace(/`/g, '').trim();
  const isoMatch = clean.match(/(\d{4}-\d{2}-\d{2})/);
  if (isoMatch) return isoMatch[1];
  const dmyMatch = clean.match(/(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})/);
  if (dmyMatch) {
    return `${dmyMatch[3]}-${dmyMatch[2].padStart(2, '0')}-${dmyMatch[1].padStart(2, '0')}`;
  }
  return clean.replace(/[^\d-]/g, '').trim();
}

/**
 * Extract currency
 */
function extractCurrency(s: string | undefined): string {
  if (!s) return 'EUR';
  const upper = s.toUpperCase();
  if (upper.includes('EUR') || upper.includes('€')) return 'EUR';
  if (upper.includes('USD') || upper.includes('$')) return 'USD';
  if (upper.includes('GBP') || upper.includes('£')) return 'GBP';
  return 'EUR';
}

/**
 * Extract JSON from response
 */
function extractJsonFromResponse(response: string): Record<string, unknown> | null {
  const cleanResponse = response.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
  const codeBlockMatch = cleanResponse.match(/```(?:json)?\s*([\s\S]*?)```/);
  const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : cleanResponse;

  try {
    return JSON.parse(jsonStr);
  } catch {
    const jsonMatch = jsonStr.match(/\{[\s\S]*\}/);
    if (jsonMatch) {
      try {
        return JSON.parse(jsonMatch[0]);
      } catch {
        return null;
      }
    }
    return null;
  }
}

/**
 * Parse JSON response into IInvoice
 */
function parseJsonToInvoice(response: string): IInvoice | null {
  const parsed = extractJsonFromResponse(response);
  if (!parsed) return null;

  return {
    invoice_number: extractInvoiceNumber(String(parsed.invoice_number || '')),
    invoice_date: extractDate(String(parsed.invoice_date || '')),
    vendor_name: String(parsed.vendor_name || '').replace(/\*\*/g, '').replace(/`/g, '').trim(),
    currency: extractCurrency(String(parsed.currency || '')),
    net_amount: parseAmount(parsed.net_amount as string | number),
    vat_amount: parseAmount(parsed.vat_amount as string | number),
    total_amount: parseAmount(parsed.total_amount as string | number),
  };
}

/**
 * Extract invoice from markdown using GPT-OSS 20B (streaming)
 */
async function extractInvoiceFromMarkdown(markdown: string, queryId: string): Promise<IInvoice | null> {
  const startTime = Date.now();
  const fullPrompt = JSON_EXTRACTION_PROMPT + markdown;

  // Log exact prompt
  console.log(`\n  [${queryId}] ===== PROMPT =====`);
  console.log(fullPrompt);
  console.log(`  [${queryId}] ===== END PROMPT (${fullPrompt.length} chars) =====\n`);

  const response = await fetch(`${OLLAMA_URL}/api/chat`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: EXTRACTION_MODEL,
      messages: [
        { role: 'user', content: 'Hi there, how are you?' },
        { role: 'assistant', content: 'Good, how can I help you today?' },
        { role: 'user', content: fullPrompt },
      ],
      stream: true,
    }),
    signal: AbortSignal.timeout(600000),
  });

  if (!response.ok) {
    const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
    console.log(`  [${queryId}] ERROR: ${response.status} (${elapsed}s)`);
    throw new Error(`Ollama API error: ${response.status}`);
  }

  // Stream the response
  let content = '';
  let thinkingContent = '';
  let thinkingStarted = false;
  let outputStarted = false;
  const reader = response.body!.getReader();
  const decoder = new TextDecoder();

  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;

      const chunk = decoder.decode(value, { stream: true });

      for (const line of chunk.split('\n').filter(l => l.trim())) {
        try {
          const json = JSON.parse(line);

          const thinking = json.message?.thinking || '';
          if (thinking) {
            if (!thinkingStarted) {
              process.stdout.write(`  [${queryId}] THINKING: `);
              thinkingStarted = true;
            }
            process.stdout.write(thinking);
            thinkingContent += thinking;
          }

          const token = json.message?.content || '';
          if (token) {
            if (!outputStarted) {
              if (thinkingStarted) process.stdout.write('\n');
              process.stdout.write(`  [${queryId}] OUTPUT: `);
              outputStarted = true;
            }
            process.stdout.write(token);
            content += token;
          }
        } catch {
          // Ignore parse errors for partial chunks
        }
      }
    }
  } finally {
    if (thinkingStarted || outputStarted) process.stdout.write('\n');
  }

  const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
  console.log(`  [${queryId}] Done: ${thinkingContent.length} thinking chars, ${content.length} output chars (${elapsed}s)`);

  // Log raw response for debugging
  console.log(`  [${queryId}] RAW RESPONSE: ${content}`);

  return parseJsonToInvoice(content);
}

/**
 * Extract invoice (single pass)
 */
async function extractInvoice(markdown: string, docName: string): Promise<IInvoice> {
  console.log(`  [${docName}] Extracting...`);
  const invoice = await extractInvoiceFromMarkdown(markdown, docName);
  if (!invoice) {
    return {
      invoice_number: '',
      invoice_date: '',
      vendor_name: '',
      currency: 'EUR',
      net_amount: 0,
      vat_amount: 0,
      total_amount: 0,
    };
  }
  console.log(`  [${docName}] Extracted: ${JSON.stringify(invoice, null, 2)}`);
  return invoice;
}

/**
 * Normalize date to YYYY-MM-DD
 */
function normalizeDate(dateStr: string | null): string {
  if (!dateStr) return '';
  if (/^\d{4}-\d{2}-\d{2}$/.test(dateStr)) return dateStr;

  const monthMap: Record<string, string> = {
    JAN: '01', FEB: '02', MAR: '03', APR: '04', MAY: '05', JUN: '06',
    JUL: '07', AUG: '08', SEP: '09', OCT: '10', NOV: '11', DEC: '12',
  };

  let match = dateStr.match(/^(\d{1,2})-([A-Z]{3})-(\d{4})$/i);
  if (match) {
    return `${match[3]}-${monthMap[match[2].toUpperCase()] || '01'}-${match[1].padStart(2, '0')}`;
  }

  match = dateStr.match(/^(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})$/);
  if (match) {
    return `${match[3]}-${match[2].padStart(2, '0')}-${match[1].padStart(2, '0')}`;
  }

  return dateStr;
}

/**
 * Compare extracted invoice against expected - detailed output
 */
function compareInvoice(
  extracted: IInvoice,
  expected: IInvoice
): { match: boolean; errors: string[] } {
  const errors: string[] = [];

  // Invoice number comparison - exact match after trimming (case-insensitive)
  const extNum = extracted.invoice_number?.trim() || '';
  const expNum = expected.invoice_number?.trim() || '';
  if (extNum.toLowerCase() !== expNum.toLowerCase()) {
    errors.push(`invoice_number: expected "${expected.invoice_number}", got "${extracted.invoice_number}"`);
  }

  if (normalizeDate(extracted.invoice_date) !== normalizeDate(expected.invoice_date)) {
    errors.push(`invoice_date: expected "${expected.invoice_date}", got "${extracted.invoice_date}"`);
  }

  if (Math.abs(extracted.total_amount - expected.total_amount) > 0.02) {
    errors.push(`total_amount: expected ${expected.total_amount}, got ${extracted.total_amount}`);
  }

  if (extracted.currency?.toUpperCase() !== expected.currency?.toUpperCase()) {
    errors.push(`currency: expected "${expected.currency}", got "${extracted.currency}"`);
  }

  return { match: errors.length === 0, errors };
}

/**
 * Find test cases for failed invoices only
 */
function findTestCases(): ITestCase[] {
  const testDir = path.join(process.cwd(), '.nogit/invoices');
  if (!fs.existsSync(testDir)) return [];

  const files = fs.readdirSync(testDir);
  const testCases: ITestCase[] = [];

  for (const invoiceName of FAILED_INVOICES) {
    const pdfFile = `${invoiceName}.pdf`;
    const jsonFile = `${invoiceName}.json`;

    if (files.includes(pdfFile) && files.includes(jsonFile)) {
      testCases.push({
        name: invoiceName,
        pdfPath: path.join(testDir, pdfFile),
        jsonPath: path.join(testDir, jsonFile),
      });
    } else {
      console.warn(`Warning: Missing files for ${invoiceName}`);
    }
  }

  return testCases;
}

// ============ TESTS ============

const testCases = findTestCases();
console.log(`\n========================================`);
console.log(`  FAILED INVOICES DEBUG TEST`);
console.log(`========================================`);
console.log(`  Testing ${testCases.length} failed invoices:`);
for (const tc of testCases) {
  console.log(`    - ${tc.name}`);
}
console.log(`========================================\n`);

// Ensure temp directory exists
if (!fs.existsSync(TEMP_MD_DIR)) {
  fs.mkdirSync(TEMP_MD_DIR, { recursive: true });
}

// -------- STAGE 1: OCR with Nanonets --------

tap.test('Stage 1: Setup Nanonets', async () => {
  console.log('\n========== STAGE 1: Nanonets OCR ==========\n');
  const ok = await ensureNanonetsOcr();
  expect(ok).toBeTrue();
});

tap.test('Stage 1: Convert failed invoices to markdown', async () => {
  console.log('\n  Converting failed invoice PDFs to markdown with Nanonets-OCR2-3B...\n');

  for (const tc of testCases) {
    console.log(`\n  === ${tc.name} ===`);

    const images = convertPdfToImages(tc.pdfPath);
    console.log(`    Pages: ${images.length}`);

    const markdown = await convertDocumentToMarkdown(images, tc.name);

    const mdPath = path.join(TEMP_MD_DIR, `${tc.name}.md`);
    fs.writeFileSync(mdPath, markdown);
    tc.markdownPath = mdPath;
    console.log(`    Saved: ${mdPath}`);

    // Also save to .nogit for inspection
    const debugMdPath = path.join(process.cwd(), '.nogit/invoices', `${tc.name}.debug.md`);
    fs.writeFileSync(debugMdPath, markdown);
    console.log(`    Debug copy: ${debugMdPath}`);
  }

  console.log('\n  Stage 1 complete: All failed invoices converted to markdown\n');
});

tap.test('Stage 1: Stop Nanonets', async () => {
  stopNanonets();
  await new Promise(resolve => setTimeout(resolve, 3000));
  expect(isContainerRunning('nanonets-test')).toBeFalse();
});

// -------- STAGE 2: Extraction with GPT-OSS 20B --------

tap.test('Stage 2: Setup Ollama + GPT-OSS 20B', async () => {
  console.log('\n========== STAGE 2: GPT-OSS 20B Extraction ==========\n');

  const ollamaOk = await ensureMiniCpm();
  expect(ollamaOk).toBeTrue();

  const extractionOk = await ensureExtractionModel();
  expect(extractionOk).toBeTrue();
});

let passedCount = 0;
let failedCount = 0;

for (const tc of testCases) {
  tap.test(`Stage 2: Extract ${tc.name}`, async () => {
    const expected: IInvoice = JSON.parse(fs.readFileSync(tc.jsonPath, 'utf-8'));
    console.log(`\n  ========================================`);
    console.log(`  === ${tc.name} ===`);
    console.log(`  ========================================`);
    console.log(`  EXPECTED:`);
    console.log(`    invoice_number: "${expected.invoice_number}"`);
    console.log(`    invoice_date: "${expected.invoice_date}"`);
    console.log(`    vendor_name: "${expected.vendor_name}"`);
    console.log(`    total_amount: ${expected.total_amount} ${expected.currency}`);

    const startTime = Date.now();

    const mdPath = path.join(TEMP_MD_DIR, `${tc.name}.md`);
    if (!fs.existsSync(mdPath)) {
      throw new Error(`Markdown not found: ${mdPath}. Run Stage 1 first.`);
    }
    const markdown = fs.readFileSync(mdPath, 'utf-8');
    console.log(`    Markdown: ${markdown.length} chars`);

    const extracted = await extractInvoice(markdown, tc.name);

    const elapsedMs = Date.now() - startTime;

    console.log(`\n  EXTRACTED:`);
    console.log(`    invoice_number: "${extracted.invoice_number}"`);
    console.log(`    invoice_date: "${extracted.invoice_date}"`);
    console.log(`    vendor_name: "${extracted.vendor_name}"`);
    console.log(`    total_amount: ${extracted.total_amount} ${extracted.currency}`);

    const result = compareInvoice(extracted, expected);

    if (result.match) {
      passedCount++;
      console.log(`\n  Result: ✓ MATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
    } else {
      failedCount++;
      console.log(`\n  Result: ✗ MISMATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
      console.log(`  ERRORS:`);
      result.errors.forEach(e => console.log(`    - ${e}`));
    }

    // Don't fail the test - we're debugging
    // expect(result.match).toBeTrue();
  });
}

tap.test('Summary', async () => {
  const totalInvoices = testCases.length;
  const accuracy = totalInvoices > 0 ? (passedCount / totalInvoices) * 100 : 0;

  console.log(`\n========================================`);
  console.log(`  Failed Invoices Debug Summary`);
  console.log(`========================================`);
  console.log(`  Passed: ${passedCount}/${totalInvoices}`);
  console.log(`  Failed: ${failedCount}/${totalInvoices}`);
  console.log(`  Accuracy: ${accuracy.toFixed(1)}%`);
  console.log(`========================================`);
  console.log(`  Markdown files saved to: ${TEMP_MD_DIR}`);
  console.log(`  Debug copies in: .nogit/invoices/*.debug.md`);
  console.log(`========================================\n`);

  // Don't cleanup temp files for debugging
  console.log(`  Keeping temp files for debugging.\n`);
});

export default tap.start();

@@ -1,8 +1,8 @@
/**
 * Invoice extraction using Nanonets-OCR-s + Qwen3 (sequential two-stage pipeline)
 * Invoice extraction using Nanonets-OCR2-3B + GPT-OSS 20B (sequential two-stage pipeline)
 *
 * Stage 1: Nanonets-OCR-s converts ALL document pages to markdown (stop after completion)
 * Stage 2: Qwen3 extracts structured JSON from saved markdown (after Nanonets stops)
 * Stage 1: Nanonets-OCR2-3B converts ALL document pages to markdown (stop after completion)
 * Stage 2: GPT-OSS 20B extracts structured JSON from saved markdown (after Nanonets stops)
 *
 * This approach avoids GPU contention by running services sequentially.
 */
@@ -14,10 +14,10 @@ import * as os from 'os';
import { ensureNanonetsOcr, ensureMiniCpm, isContainerRunning } from './helpers/docker.js';

const NANONETS_URL = 'http://localhost:8000/v1';
const NANONETS_MODEL = 'nanonets/Nanonets-OCR-s';
const NANONETS_MODEL = 'nanonets/Nanonets-OCR2-3B';

const OLLAMA_URL = 'http://localhost:11434';
const QWEN_MODEL = 'qwen3:8b';
const EXTRACTION_MODEL = 'gpt-oss:20b';

// Temp directory for storing markdown between stages
const TEMP_MD_DIR = path.join(os.tmpdir(), 'nanonets-invoices-markdown');
@@ -32,6 +32,13 @@ interface IInvoice {
  total_amount: number;
}

interface IImageData {
  base64: string;
  width: number;
  height: number;
  pageNum: number;
}

interface ITestCase {
  name: string;
  pdfPath: string;
@@ -47,7 +54,7 @@ If there is an image in the document and image caption is not present, add a sma
Watermarks should be wrapped in brackets. Ex: <watermark>OFFICIAL COPY</watermark>.
Page numbers should be wrapped in brackets. Ex: <page_number>14</page_number>.`;

// JSON extraction prompt for Qwen3
// JSON extraction prompt for GPT-OSS 20B
const JSON_EXTRACTION_PROMPT = `You are an invoice data extractor. Below is an invoice document converted to text/markdown. Extract the key invoice fields as JSON.

IMPORTANT RULES:
@@ -73,12 +80,31 @@ Return ONLY this JSON format, no explanation:
INVOICE TEXT:
`;

// Constants for smart batching
const MAX_VISUAL_TOKENS = 28000; // ~32K context minus prompt/output headroom
const PATCH_SIZE = 14; // Qwen2.5-VL uses 14x14 patches

/**
 * Convert PDF to PNG images
 * Estimate visual tokens for an image based on dimensions
 */
function convertPdfToImages(pdfPath: string): string[] {
function estimateVisualTokens(width: number, height: number): number {
  return Math.ceil((width * height) / (PATCH_SIZE * PATCH_SIZE));
}

/**
 * Process images one page at a time for reliability
 */
function batchImages(images: IImageData[]): IImageData[][] {
  // One page per batch for reliable processing
  return images.map(img => [img]);
}

/**
 * Convert PDF to JPEG images using ImageMagick with dimension tracking
 */
function convertPdfToImages(pdfPath: string): IImageData[] {
  const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
  const outputPattern = path.join(tempDir, 'page-%d.png');
  const outputPattern = path.join(tempDir, 'page-%d.jpg');

  try {
    execSync(
@@ -86,13 +112,24 @@ function convertPdfToImages(pdfPath: string): string[] {
      { stdio: 'pipe' }
    );

    const files = fs.readdirSync(tempDir).filter((f) => f.endsWith('.png')).sort();
    const images: string[] = [];
    const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.jpg')).sort();
    const images: IImageData[] = [];

    for (const file of files) {
    for (let i = 0; i < files.length; i++) {
      const file = files[i];
      const imagePath = path.join(tempDir, file);
      const imageData = fs.readFileSync(imagePath);
      images.push(imageData.toString('base64'));

      // Get image dimensions using identify command
      const dimensions = execSync(`identify -format "%w %h" "${imagePath}"`, { encoding: 'utf-8' }).trim();
      const [width, height] = dimensions.split(' ').map(Number);

      images.push({
        base64: imageData.toString('base64'),
        width,
        height,
        pageNum: i + 1,
      });
    }

    return images;
@@ -102,10 +139,28 @@ function convertPdfToImages(pdfPath: string): string[] {
}

/**
 * Convert a single page to markdown using Nanonets-OCR-s
 * Convert a batch of pages to markdown using Nanonets-OCR-s
 */
async function convertPageToMarkdown(image: string, pageNum: number): Promise<string> {
async function convertBatchToMarkdown(batch: IImageData[]): Promise<string> {
  const startTime = Date.now();
  const pageNums = batch.map(img => img.pageNum).join(', ');

  // Build content array with all images first, then the prompt
  const content: Array<{ type: string; image_url?: { url: string }; text?: string }> = [];

  for (const img of batch) {
    content.push({
      type: 'image_url',
      image_url: { url: `data:image/jpeg;base64,${img.base64}` },
    });
  }

  // Add prompt with page separator instruction if multiple pages
  const promptText = batch.length > 1
    ? `${NANONETS_OCR_PROMPT}\n\nPlease clearly separate each page's content with "--- PAGE N ---" markers, where N is the page number starting from ${batch[0].pageNum}.`
    : NANONETS_OCR_PROMPT;

  content.push({ type: 'text', text: promptText });

  const response = await fetch(`${NANONETS_URL}/chat/completions`, {
    method: 'POST',
@@ -117,14 +172,12 @@ async function convertPageToMarkdown(image: string, pageNum: number): Promise<st
      model: NANONETS_MODEL,
      messages: [{
        role: 'user',
        content: [
          { type: 'image_url', image_url: { url: `data:image/png;base64,${image}` }},
          { type: 'text', text: NANONETS_OCR_PROMPT },
        ],
        content,
      }],
      max_tokens: 4096,
      max_tokens: 4096 * batch.length, // Scale output tokens with batch size
      temperature: 0.0,
    }),
    signal: AbortSignal.timeout(600000), // 10 minute timeout for OCR
  });

  const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
@@ -135,25 +188,35 @@ async function convertPageToMarkdown(image: string, pageNum: number): Promise<st
  }

  const data = await response.json();
  const content = (data.choices?.[0]?.message?.content || '').trim();
  console.log(`    Page ${pageNum}: ${content.length} chars (${elapsed}s)`);
  return content;
  let responseContent = (data.choices?.[0]?.message?.content || '').trim();

  // For single-page batches, add page marker if not present
  if (batch.length === 1 && !responseContent.includes('--- PAGE')) {
    responseContent = `--- PAGE ${batch[0].pageNum} ---\n${responseContent}`;
  }

  console.log(`    Pages [${pageNums}]: ${responseContent.length} chars (${elapsed}s)`);
  return responseContent;
}

/**
 * Convert all pages of a document to markdown
 * Convert all pages of a document to markdown using smart batching
 */
async function convertDocumentToMarkdown(images: string[], docName: string): Promise<string> {
  console.log(`  [${docName}] Converting ${images.length} page(s)...`);
async function convertDocumentToMarkdown(images: IImageData[], docName: string): Promise<string> {
  const batches = batchImages(images);
  console.log(`  [${docName}] Processing ${images.length} page(s) in ${batches.length} batch(es)...`);

  const markdownPages: string[] = [];
  const markdownParts: string[] = [];

  for (let i = 0; i < images.length; i++) {
    const markdown = await convertPageToMarkdown(images[i], i + 1);
    markdownPages.push(`--- PAGE ${i + 1} ---\n${markdown}`);
  for (let i = 0; i < batches.length; i++) {
    const batch = batches[i];
    const batchTokens = batch.reduce((sum, img) => sum + estimateVisualTokens(img.width, img.height), 0);
    console.log(`    Batch ${i + 1}: ${batch.length} page(s), ~${batchTokens} tokens`);
    const markdown = await convertBatchToMarkdown(batch);
    markdownParts.push(markdown);
  }

  const fullMarkdown = markdownPages.join('\n\n');
  const fullMarkdown = markdownParts.join('\n\n');
  console.log(`  [${docName}] Complete: ${fullMarkdown.length} chars total`);
  return fullMarkdown;
}
@@ -173,16 +236,16 @@ function stopNanonets(): void {
}

/**
 * Ensure Qwen3 model is available
 * Ensure GPT-OSS 20B model is available
 */
async function ensureQwen3(): Promise<boolean> {
async function ensureExtractionModel(): Promise<boolean> {
  try {
    const response = await fetch(`${OLLAMA_URL}/api/tags`);
    if (response.ok) {
      const data = await response.json();
      const models = data.models || [];
      if (models.some((m: { name: string }) => m.name === QWEN_MODEL)) {
        console.log(`  [Ollama] Model available: ${QWEN_MODEL}`);
      if (models.some((m: { name: string }) => m.name === EXTRACTION_MODEL)) {
        console.log(`  [Ollama] Model available: ${EXTRACTION_MODEL}`);
        return true;
      }
    }
@@ -190,11 +253,11 @@ async function ensureQwen3(): Promise<boolean> {
    return false;
  }

  console.log(`  [Ollama] Pulling ${QWEN_MODEL}...`);
  console.log(`  [Ollama] Pulling ${EXTRACTION_MODEL}...`);
  const pullResponse = await fetch(`${OLLAMA_URL}/api/pull`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ name: QWEN_MODEL, stream: false }),
    body: JSON.stringify({ name: EXTRACTION_MODEL, stream: false }),
  });

  return pullResponse.ok;
@@ -303,88 +366,102 @@ function parseJsonToInvoice(response: string): IInvoice | null {
}

/**
 * Extract invoice from markdown using Qwen3
 * Extract invoice from markdown using GPT-OSS 20B (streaming)
 */
async function extractInvoiceFromMarkdown(markdown: string, queryId: string): Promise<IInvoice | null> {
  console.log(`  [${queryId}] Sending to ${QWEN_MODEL}...`);
  const startTime = Date.now();
  const fullPrompt = JSON_EXTRACTION_PROMPT + markdown;

  // Log exact prompt
  console.log(`\n  [${queryId}] ===== PROMPT =====`);
  console.log(fullPrompt);
  console.log(`  [${queryId}] ===== END PROMPT (${fullPrompt.length} chars) =====\n`);

  const response = await fetch(`${OLLAMA_URL}/api/chat`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    signal: AbortSignal.timeout(600000), // 10 minute timeout for large documents
    body: JSON.stringify({
      model: QWEN_MODEL,
      messages: [{
        role: 'user',
        content: JSON_EXTRACTION_PROMPT + markdown,
      }],
      stream: false,
      options: {
        num_predict: 2000,
        temperature: 0.1,
      },
      model: EXTRACTION_MODEL,
      messages: [
        { role: 'user', content: 'Hi there, how are you?' },
        { role: 'assistant', content: 'Good, how can I help you today?' },
        { role: 'user', content: fullPrompt },
      ],
      stream: true,
    }),
    signal: AbortSignal.timeout(600000), // 10 minute timeout for large documents
  });

  const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);

  if (!response.ok) {
    const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
    console.log(`  [${queryId}] ERROR: ${response.status} (${elapsed}s)`);
    throw new Error(`Ollama API error: ${response.status}`);
  }

  const data = await response.json();
  const content = (data.message?.content || '').trim();
  console.log(`  [${queryId}] Response: ${content.length} chars (${elapsed}s)`);
  // Stream the response
  let content = '';
  let thinkingContent = '';
  let thinkingStarted = false;
  let outputStarted = false;
  const reader = response.body!.getReader();
  const decoder = new TextDecoder();

  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;

      const chunk = decoder.decode(value, { stream: true });

      // Each line is a JSON object
      for (const line of chunk.split('\n').filter(l => l.trim())) {
        try {
          const json = JSON.parse(line);

          // Stream thinking tokens
          const thinking = json.message?.thinking || '';
          if (thinking) {
            if (!thinkingStarted) {
              process.stdout.write(`  [${queryId}] THINKING: `);
              thinkingStarted = true;
            }
            process.stdout.write(thinking);
            thinkingContent += thinking;
          }

          // Stream content tokens
          const token = json.message?.content || '';
          if (token) {
            if (!outputStarted) {
              if (thinkingStarted) process.stdout.write('\n');
              process.stdout.write(`  [${queryId}] OUTPUT: `);
              outputStarted = true;
            }
            process.stdout.write(token);
            content += token;
          }
        } catch {
          // Ignore parse errors for partial chunks
        }
      }
    }
  } finally {
    if (thinkingStarted || outputStarted) process.stdout.write('\n');
  }

  const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
  console.log(`  [${queryId}] Done: ${thinkingContent.length} thinking chars, ${content.length} output chars (${elapsed}s)`);

  return parseJsonToInvoice(content);
}

/**
 * Compare two invoices for consensus
 * Extract invoice (single pass - GPT-OSS is more reliable)
 */
function invoicesMatch(a: IInvoice, b: IInvoice): boolean {
  const numMatch = a.invoice_number.toLowerCase() === b.invoice_number.toLowerCase();
  const dateMatch = a.invoice_date === b.invoice_date;
  const totalMatch = Math.abs(a.total_amount - b.total_amount) < 0.02;
  return numMatch && dateMatch && totalMatch;
}

/**
 * Extract with consensus
 */
async function extractWithConsensus(markdown: string, docName: string): Promise<IInvoice> {
  const MAX_ATTEMPTS = 3;

  for (let attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
    console.log(`  [${docName}] Attempt ${attempt}/${MAX_ATTEMPTS}`);

    const inv1 = await extractInvoiceFromMarkdown(markdown, `${docName}-A${attempt}Q1`);
    const inv2 = await extractInvoiceFromMarkdown(markdown, `${docName}-A${attempt}Q2`);

    if (!inv1 || !inv2) {
      console.log(`  [${docName}] Parsing failed, retrying...`);
      continue;
    }

    console.log(`  [${docName}] Q1: ${inv1.invoice_number} | ${inv1.invoice_date} | ${inv1.total_amount}`);
    console.log(`  [${docName}] Q2: ${inv2.invoice_number} | ${inv2.invoice_date} | ${inv2.total_amount}`);

    if (invoicesMatch(inv1, inv2)) {
      console.log(`  [${docName}] CONSENSUS`);
      return inv2;
    }
    console.log(`  [${docName}] No consensus`);
  }

  // Fallback
  const fallback = await extractInvoiceFromMarkdown(markdown, `${docName}-FALLBACK`);
  if (fallback) {
    console.log(`  [${docName}] FALLBACK: ${fallback.invoice_number} | ${fallback.invoice_date} | ${fallback.total_amount}`);
    return fallback;
  }

async function extractInvoice(markdown: string, docName: string): Promise<IInvoice> {
  console.log(`  [${docName}] Extracting...`);
  const invoice = await extractInvoiceFromMarkdown(markdown, docName);
  if (!invoice) {
    return {
      invoice_number: '',
      invoice_date: '',
@@ -394,6 +471,9 @@ async function extractWithConsensus(markdown: string, docName: string): Promise<
      vat_amount: 0,
      total_amount: 0,
    };
  }
  console.log(`  [${docName}] Extracted: ${invoice.invoice_number}`);
  return invoice;
}

/**
@@ -520,16 +600,16 @@ tap.test('Stage 1: Stop Nanonets', async () => {
  expect(isContainerRunning('nanonets-test')).toBeFalse();
});

// -------- STAGE 2: Extraction with Qwen3 --------
// -------- STAGE 2: Extraction with GPT-OSS 20B --------

tap.test('Stage 2: Setup Ollama + Qwen3', async () => {
  console.log('\n========== STAGE 2: Qwen3 Extraction ==========\n');
tap.test('Stage 2: Setup Ollama + GPT-OSS 20B', async () => {
  console.log('\n========== STAGE 2: GPT-OSS 20B Extraction ==========\n');

  const ollamaOk = await ensureMiniCpm();
  expect(ollamaOk).toBeTrue();

  const qwenOk = await ensureQwen3();
  expect(qwenOk).toBeTrue();
  const extractionOk = await ensureExtractionModel();
  expect(extractionOk).toBeTrue();
});

let passedCount = 0;
@@ -551,7 +631,7 @@ for (const tc of testCases) {
    const markdown = fs.readFileSync(mdPath, 'utf-8');
    console.log(`    Markdown: ${markdown.length} chars`);

    const extracted = await extractWithConsensus(markdown, tc.name);
    const extracted = await extractInvoice(markdown, tc.name);

    const elapsedMs = Date.now() - startTime;
    processingTimes.push(elapsedMs);
@@ -580,10 +660,10 @@ tap.test('Summary', async () => {
  const avgTimeSec = processingTimes.length > 0 ? totalTimeMs / processingTimes.length / 1000 : 0;

  console.log(`\n========================================`);
  console.log(`  Invoice Summary (Nanonets + Qwen3)`);
  console.log(`  Invoice Summary (Nanonets + GPT-OSS 20B)`);
  console.log(`========================================`);
  console.log(`  Stage 1: Nanonets-OCR-s (doc -> md)`);
  console.log(`  Stage 2: Qwen3 8B (md -> JSON)`);
  console.log(`  Stage 2: GPT-OSS 20B (md -> JSON)`);
  console.log(`  Passed: ${passedCount}/${totalInvoices}`);
  console.log(`  Failed: ${failedCount}/${totalInvoices}`);
  console.log(`  Accuracy: ${accuracy.toFixed(1)}%`);