17 Commits

Author SHA1 Message Date
26d2de824f v1.15.1
2026-01-20 03:19:58 +00:00
969d21c51a fix(tests): enable progress events in invoice tests and bump @push.rocks/smartagent devDependency to ^1.5.4 2026-01-20 03:19:58 +00:00
da2b827ba3 chore: update smartagent to v1.5.2 (streaming support for native tool calling) 2026-01-20 02:55:28 +00:00
9bc1f74978 feat(test): enable native tool calling for GPT-OSS invoice extraction
- Update smartai to v0.13.2 (native tool calling support)
- Update smartagent to v1.5.1 (useNativeToolCalling option)
- Enable think: true for GPT-OSS reasoning mode in Ollama config
- Enable useNativeToolCalling: true in DualAgentOrchestrator
- Simplify driver system message (native tools don't need XML instructions)

Native tool calling uses Ollama's built-in Harmony format parser
instead of requiring XML generation, which is more efficient for GPT-OSS models.
2026-01-20 02:51:52 +00:00
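A minimal sketch of how the two options named above fit together, modeled on the orchestrator setup shown later in this diff; the placement of `think` inside the Ollama `defaultOptions` is an assumption:

```typescript
import { SmartAi } from '@push.rocks/smartai';
import { DualAgentOrchestrator } from '@push.rocks/smartagent';

// Sketch only: assumes `think` belongs in the Ollama defaultOptions.
const smartAi = new SmartAi({
  ollama: {
    baseUrl: 'http://localhost:11434',
    model: 'gpt-oss:20b',
    defaultOptions: {
      think: true, // GPT-OSS reasoning mode
    },
  },
});
await smartAi.start();

const orchestrator = new DualAgentOrchestrator({
  smartAiInstance: smartAi,
  defaultProvider: 'ollama',
  useNativeToolCalling: true, // Ollama's built-in Harmony parser handles tool calls, so no XML instructions are needed
});
await orchestrator.start();
```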
cf282b2437 v1.15.0
2026-01-20 01:17:41 +00:00
77d57e80bd feat(tests): integrate SmartAi/DualAgentOrchestrator into extraction tests and add JSON self-validation 2026-01-20 01:17:41 +00:00
b202e024a4 v1.14.3
2026-01-20 00:55:24 +00:00
2210611f70 fix(repo): no changes detected in the diff; no files modified and no release required 2026-01-20 00:55:24 +00:00
d8bdb18841 fix(test): add JSON validation and retry logic to invoice extraction
- Add tryExtractJson function to validate JSON before accepting
- Use orchestrator.continueTask() to request correction if JSON is invalid (see the sketch after this commit)
- Retry up to 2 times for malformed JSON responses
- Remove duplicate parseJsonToInvoice function
2026-01-20 00:45:30 +00:00
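A sketch of the validate-and-retry flow this commit describes, assuming `EXTRACTION_PROMPT` and `images` from the surrounding test; the real `tryExtractJson` implementation and the argument to `continueTask()` are not shown in this diff, so both are illustrative:

```typescript
// Illustrative stand-in for the real tryExtractJson (signature assumed).
function tryExtractJson(raw: string): unknown[] | null {
  try {
    const parsed = JSON.parse(raw);
    return Array.isArray(parsed) ? parsed : null;
  } catch {
    return null;
  }
}

let result = await orchestrator.run(EXTRACTION_PROMPT, { images });
let parsed = tryExtractJson(result.result);
for (let attempt = 0; attempt < 2 && parsed === null; attempt++) {
  // Ask the driver agent to correct its own malformed output (message text assumed).
  result = await orchestrator.continueTask('The JSON was invalid. Return ONLY a valid JSON array.');
  parsed = tryExtractJson(result.result);
}
```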
d384c1d79b feat(tests): integrate smartagent DualAgentOrchestrator with streaming support
- Update test.invoices.nanonets.ts to use DualAgentOrchestrator for JSON extraction
- Enable streaming token callback for real-time progress visibility
- Add markdown caching to avoid re-running Nanonets OCR for cached files (see the sketch after this commit)
- Update test.bankstatements.minicpm.ts and test.invoices.minicpm.ts with streaming
- Update dependencies to @push.rocks/smartai@0.11.1 and @push.rocks/smartagent@1.2.8
2026-01-20 00:39:36 +00:00
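A sketch of the markdown cache mentioned above, reusing the `TEMP_MD_DIR` location that appears later in this diff; the function name and cache-key scheme are assumptions:

```typescript
import * as fs from 'fs';
import * as os from 'os';
import * as path from 'path';

const TEMP_MD_DIR = path.join(os.tmpdir(), 'nanonets-markdown');

// Return cached markdown for a PDF if present; otherwise run OCR once and cache the result.
async function getMarkdownCached(pdfPath: string, runOcr: () => Promise<string>): Promise<string> {
  const cachePath = path.join(TEMP_MD_DIR, `${path.basename(pdfPath)}.md`);
  if (fs.existsSync(cachePath)) {
    return fs.readFileSync(cachePath, 'utf8'); // cache hit: skip Nanonets OCR entirely
  }
  const markdown = await runOcr();
  fs.mkdirSync(TEMP_MD_DIR, { recursive: true });
  fs.writeFileSync(cachePath, markdown, 'utf8');
  return markdown;
}
```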
6bd672da61 v1.14.2
2026-01-19 21:28:26 +00:00
44d6dc3336 fix(readme): update README to document Nanonets-OCR2-3B (replaces Nanonets-OCR-s), adjust VRAM and context defaults, expand feature docs, and update examples/test command 2026-01-19 21:28:26 +00:00
d1ff95bd94 v1.14.1
2026-01-19 21:19:37 +00:00
09770d3177 fix(extraction): improve JSON extraction prompts and model options for invoice and bank statement tests 2026-01-19 21:19:37 +00:00
235aa1352b v1.14.0
2026-01-19 21:05:51 +00:00
08728ada4d feat(docker-images): add vLLM-based Nanonets-OCR2-3B image, Qwen3-VL Ollama image and refactor build/docs/tests to use new runtime/layout 2026-01-19 21:05:51 +00:00
b58bcabc76 update 2026-01-19 11:51:23 +00:00
16 changed files with 3733 additions and 1754 deletions

Dockerfile_nanonets_vllm_gpu_VRAM10GB

@@ -1,21 +1,22 @@
# Nanonets-OCR-s Vision Language Model
# Based on Qwen2.5-VL-3B, fine-tuned for document OCR
# ~8-10GB VRAM, outputs structured markdown with semantic tags
# Nanonets-OCR2-3B Vision Language Model
# Based on Qwen2.5-VL-3B, fine-tuned for document OCR (Oct 2025 release)
# Improvements over OCR-s: better semantic tagging, LaTeX equations, flowcharts
# ~12-16GB VRAM with 30K context, outputs structured markdown with semantic tags
#
# Build: docker build -f Dockerfile_nanonets_ocr -t nanonets-ocr .
# Build: docker build -f Dockerfile_nanonets_vllm_gpu_VRAM10GB -t nanonets-ocr .
# Run: docker run --gpus all -p 8000:8000 -v ht-huggingface-cache:/root/.cache/huggingface nanonets-ocr
FROM vllm/vllm-openai:latest
LABEL maintainer="Task Venture Capital GmbH <hello@task.vc>"
LABEL description="Nanonets-OCR-s - Document OCR optimized Vision Language Model"
LABEL description="Nanonets-OCR2-3B - Document OCR optimized Vision Language Model"
LABEL org.opencontainers.image.source="https://code.foss.global/host.today/ht-docker-ai"
# Environment configuration
ENV MODEL_NAME="nanonets/Nanonets-OCR-s"
ENV MODEL_NAME="nanonets/Nanonets-OCR2-3B"
ENV HOST="0.0.0.0"
ENV PORT="8000"
ENV MAX_MODEL_LEN="8192"
ENV MAX_MODEL_LEN="30000"
ENV GPU_MEMORY_UTILIZATION="0.9"
# Expose OpenAI-compatible API port
@@ -25,9 +26,9 @@ EXPOSE 8000
HEALTHCHECK --interval=30s --timeout=10s --start-period=120s --retries=5 \
CMD curl -f http://localhost:8000/health || exit 1
# Start vLLM server with Nanonets-OCR-s model
CMD ["--model", "nanonets/Nanonets-OCR-s", \
# Start vLLM server with Nanonets-OCR2-3B model
CMD ["--model", "nanonets/Nanonets-OCR2-3B", \
"--trust-remote-code", \
"--max-model-len", "8192", \
"--max-model-len", "30000", \
"--host", "0.0.0.0", \
"--port", "8000"]

build-images.sh

@@ -13,46 +13,38 @@ NC='\033[0m' # No Color
echo -e "${BLUE}Building ht-docker-ai images...${NC}"
# Build GPU variant
# Build MiniCPM-V 4.5 GPU variant
echo -e "${GREEN}Building MiniCPM-V 4.5 GPU variant...${NC}"
docker build \
-f Dockerfile_minicpm45v_gpu \
-f Dockerfile_minicpm45v_ollama_gpu_VRAM9GB \
-t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v \
-t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v-gpu \
-t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:latest \
.
# Build CPU variant
echo -e "${GREEN}Building MiniCPM-V 4.5 CPU variant...${NC}"
# Build Qwen3-VL GPU variant
echo -e "${GREEN}Building Qwen3-VL-30B-A3B GPU variant...${NC}"
docker build \
-f Dockerfile_minicpm45v_cpu \
-t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v-cpu \
-f Dockerfile_qwen3vl_ollama_gpu_VRAM20GB \
-t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:qwen3vl \
.
# Build PaddleOCR-VL GPU variant
echo -e "${GREEN}Building PaddleOCR-VL GPU variant...${NC}"
# Build Nanonets-OCR GPU variant
echo -e "${GREEN}Building Nanonets-OCR-s GPU variant...${NC}"
docker build \
-f Dockerfile_paddleocr_vl_gpu \
-t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl \
-t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl-gpu \
.
# Build PaddleOCR-VL CPU variant
echo -e "${GREEN}Building PaddleOCR-VL CPU variant...${NC}"
docker build \
-f Dockerfile_paddleocr_vl_cpu \
-t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl-cpu \
-f Dockerfile_nanonets_vllm_gpu_VRAM10GB \
-t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:nanonets-ocr \
.
echo -e "${GREEN}All images built successfully!${NC}"
echo ""
echo "Available images:"
echo " MiniCPM-V 4.5:"
echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v (GPU)"
echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v-cpu (CPU)"
echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:latest (GPU)"
echo " MiniCPM-V 4.5 (Ollama, ~9GB VRAM):"
echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v"
echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:latest"
echo ""
echo " PaddleOCR-VL (Vision-Language Model):"
echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl (GPU/vLLM)"
echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl-gpu (GPU/vLLM)"
echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl-cpu (CPU)"
echo " Qwen3-VL-30B-A3B (Ollama, ~20GB VRAM):"
echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:qwen3vl"
echo ""
echo " Nanonets-OCR-s (vLLM, ~10GB VRAM):"
echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:nanonets-ocr"

changelog.md

@@ -1,5 +1,57 @@
# Changelog
## 2026-01-20 - 1.15.1 - fix(tests)
enable progress events in invoice tests and bump @push.rocks/smartagent devDependency to ^1.5.4
- Added an onProgress handler in test/test.invoices.nanonets.ts to log progress events (console.log(event.logMessage)) so tool calls and progress are visible during tests.
- Bumped devDependency @push.rocks/smartagent from ^1.5.2 to ^1.5.4 in package.json.
## 2026-01-20 - 1.15.0 - feat(tests)
integrate SmartAi/DualAgentOrchestrator into extraction tests and add JSON self-validation
- Integrate SmartAi and DualAgentOrchestrator into bankstatement and invoice tests to perform structured extraction with streaming
- Register and use JsonValidatorTool to validate outputs (json.validate) and enforce validation before task completion
- Add tryExtractJson parsing fallback, improved extraction prompts, retries and clearer parsing/logging
- Initialize and teardown SmartAi and orchestrator in test setup/summary, and enable onToken streaming handlers for real-time output
- Bump devDependencies: @push.rocks/smartagent to ^1.3.0 and @push.rocks/smartai to ^0.12.0
## 2026-01-20 - 1.14.3 - fix(repo)
no changes detected in the diff; no files modified and no release required
- Diff contained no changes
- No files were added, removed, or modified
- No code, dependency, or documentation updates to release
## 2026-01-19 - 1.14.2 - fix(readme)
update README to document Nanonets-OCR2-3B (replaces Nanonets-OCR-s), adjust VRAM and context defaults, expand feature docs, and update examples/test command
- Renamed Nanonets-OCR-s -> Nanonets-OCR2-3B throughout README and examples
- Updated Nanonets VRAM guidance from ~10GB to ~12-16GB and documented 30K context
- Changed documented MAX_MODEL_LEN default from 8192 to 30000
- Updated example model identifiers (model strings and curl/example snippets) to nanonets/Nanonets-OCR2-3B
- Added MiniCPM and Qwen feature bullets (multilingual, multi-image, flowchart support, expanded context notes)
- Replaced README test command from ./test-images.sh to pnpm test
## 2026-01-19 - 1.14.1 - fix(extraction)
improve JSON extraction prompts and model options for invoice and bank statement tests
- Refactor JSON extraction prompts to be sent after the document text and add explicit 'WHERE TO FIND DATA' and 'RULES' sections for clearer extraction guidance
- Change chat message flow to: send document, assistant acknowledgement, then the JSON extraction prompt (avoids concatenating large prompts into one message); see the sketch after this list
- Add model options (num_ctx: 32768, temperature: 0) to give larger context windows and deterministic JSON output
- Simplify logging to avoid printing full prompt contents; log document and prompt lengths instead
- Increase timeouts for large documents to 600000ms (10 minutes) where applicable
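A sketch of the three-message flow described above, sent to Ollama's `/api/chat`; the acknowledgement text and the `statementMarkdown` variable are illustrative:

```typescript
const response = await fetch('http://localhost:11434/api/chat', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    model: 'gpt-oss:20b',
    stream: false,
    messages: [
      { role: 'user', content: statementMarkdown },         // 1. the document text on its own
      { role: 'assistant', content: 'Document received.' }, // 2. seeded acknowledgement (text assumed)
      { role: 'user', content: JSON_EXTRACTION_PROMPT },    // 3. the extraction prompt last
    ],
    options: { num_ctx: 32768, temperature: 0 }, // larger context window, deterministic JSON
  }),
  signal: AbortSignal.timeout(600_000), // 10-minute timeout for large documents
});
```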
## 2026-01-19 - 1.14.0 - feat(docker-images)
add vLLM-based Nanonets-OCR2-3B image, Qwen3-VL Ollama image and refactor build/docs/tests to use new runtime/layout
- Add new Dockerfiles for Nanonets (Dockerfile_nanonets_vllm_gpu_VRAM10GB), Qwen3 (Dockerfile_qwen3vl_ollama_gpu_VRAM20GB) and a clarified MiniCPM Ollama variant (Dockerfile_minicpm45v_ollama_gpu_VRAM9GB); remove older, redundant Dockerfiles.
- Update build-images.sh to build the new image tags (minicpm45v, qwen3vl, nanonets-ocr) and adjust messaging/targets accordingly.
- Documentation overhaul: readme.md and readme.hints.md updated to reflect vLLM vs Ollama runtimes, corrected ports/VRAM estimates, volume recommendations, and API endpoint details.
- Tests updated to target the new model ID (nanonets/Nanonets-OCR2-3B), to process one page per batch, and to include a 10-minute AbortSignal timeout for OCR requests.
- Added focused extraction test suites (test/test.invoices.extraction.ts and test/test.invoices.failed.ts) for faster iteration and debugging of invoice extraction.
- Bump devDependencies: @git.zone/tsrun -> ^2.0.1 and @git.zone/tstest -> ^3.1.5.
- Misc: test helper references and docker compose/test port mapping fixed (nanonets uses 8000), and various README sections cleaned and reorganized.
## 2026-01-18 - 1.13.2 - fix(tests)
stabilize OCR extraction tests and manage GPU containers

package.json

@@ -1,6 +1,6 @@
{
"name": "@host.today/ht-docker-ai",
"version": "1.13.2",
"version": "1.15.1",
"type": "module",
"private": false,
"description": "Docker images for AI vision-language models including MiniCPM-V 4.5",
@@ -13,8 +13,10 @@
"test": "tstest test/ --verbose"
},
"devDependencies": {
"@git.zone/tsrun": "^1.3.3",
"@git.zone/tstest": "^1.0.90"
"@git.zone/tsrun": "^2.0.1",
"@git.zone/tstest": "^3.1.5",
"@push.rocks/smartagent": "^1.5.4",
"@push.rocks/smartai": "^0.13.2"
},
"repository": {
"type": "git",

pnpm-lock.yaml (generated)

File diff suppressed because it is too large.

readme.hints.md

@@ -2,12 +2,18 @@
## Architecture
This project uses **Ollama** as the runtime framework for serving AI models. This provides:
This project uses **Ollama** and **vLLM** as runtime frameworks for serving AI models:
### Ollama-based Images (MiniCPM-V, Qwen3-VL)
- Automatic model download and caching
- Unified REST API (compatible with OpenAI format)
- Built-in quantization support
- GPU/CPU auto-detection
- GPU auto-detection
### vLLM-based Images (Nanonets-OCR)
- High-performance inference server
- OpenAI-compatible API
- Optimized for VLM workloads
## Model Details
@@ -24,18 +30,24 @@ This project uses **Ollama** as the runtime framework for serving AI models. Thi
|------|---------------|
| Full precision (bf16) | 18GB |
| int4 quantized | 9GB |
| GGUF (CPU) | 8GB RAM |
## Container Startup Flow
### Ollama-based containers
1. `docker-entrypoint.sh` starts Ollama server in background
2. Waits for server to be ready
3. Checks if model already exists in volume
4. Pulls model if not present
5. Keeps container running
### vLLM-based containers
1. vLLM server starts with model auto-download
2. Health check endpoint available at `/health` (see the polling sketch below)
3. OpenAI-compatible API at `/v1/chat/completions`
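A minimal polling sketch against the `/health` endpoint, assuming the default port 8000; the timeout and interval values are arbitrary:

```typescript
// Poll vLLM's health endpoint until the model has loaded, or give up.
async function waitForVllm(baseUrl = 'http://localhost:8000', timeoutMs = 300_000): Promise<void> {
  const deadline = Date.now() + timeoutMs;
  while (Date.now() < deadline) {
    try {
      const res = await fetch(`${baseUrl}/health`);
      if (res.ok) return; // server is up and the model is loaded
    } catch {
      // connection refused: server still starting
    }
    await new Promise((resolve) => setTimeout(resolve, 5_000));
  }
  throw new Error(`vLLM at ${baseUrl} did not become healthy within ${timeoutMs}ms`);
}
```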
## Volume Persistence
### Ollama volumes
Mount `/root/.ollama` to persist downloaded models:
```bash
@@ -44,9 +56,16 @@ Mount `/root/.ollama` to persist downloaded models:
Without this volume, the model will be re-downloaded on each container start (~5GB download).
### vLLM/HuggingFace volumes
Mount `/root/.cache/huggingface` for model caching:
```bash
-v hf-cache:/root/.cache/huggingface
```
## API Endpoints
All endpoints follow the Ollama API specification:
### Ollama API (MiniCPM-V, Qwen3-VL)
| Endpoint | Method | Description |
|----------|--------|-------------|
@@ -56,192 +75,23 @@ All endpoints follow the Ollama API specification:
| `/api/pull` | POST | Pull a model |
| `/api/show` | POST | Show model info |
## GPU Detection
### vLLM API (Nanonets-OCR)
The GPU variant uses Ollama's automatic GPU detection. For CPU-only mode, we set:
```dockerfile
ENV CUDA_VISIBLE_DEVICES=""
```
This forces Ollama to use CPU inference even if GPU is available.
| Endpoint | Method | Description |
|----------|--------|-------------|
| `/health` | GET | Health check |
| `/v1/models` | GET | List available models |
| `/v1/chat/completions` | POST | OpenAI-compatible chat completions |
## Health Checks
Both variants include Docker health checks:
All containers include Docker health checks:
```dockerfile
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
CMD curl -f http://localhost:11434/api/tags || exit 1
```
CPU variant has longer `start-period` (120s) due to slower startup.
## PaddleOCR-VL (Recommended)
### Overview
PaddleOCR-VL is a 0.9B parameter Vision-Language Model specifically optimized for document parsing. It replaces the older PP-Structure approach with native VLM understanding.
**Key advantages over PP-Structure:**
- Native table understanding (no HTML parsing needed)
- 109 language support
- Better handling of complex multi-row tables
- Structured Markdown/JSON output
### Docker Images
| Tag | Description |
|-----|-------------|
| `paddleocr-vl` | GPU variant using vLLM (recommended) |
| `paddleocr-vl-cpu` | CPU variant using transformers |
### API Endpoints (OpenAI-compatible)
| Endpoint | Method | Description |
|----------|--------|-------------|
| `/health` | GET | Health check with model info |
| `/v1/models` | GET | List available models |
| `/v1/chat/completions` | POST | OpenAI-compatible chat completions |
| `/ocr` | POST | Legacy OCR endpoint |
### Request/Response Format
**POST /v1/chat/completions (OpenAI-compatible)**
```json
{
"model": "paddleocr-vl",
"messages": [
{
"role": "user",
"content": [
{"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}},
{"type": "text", "text": "Table Recognition:"}
]
}
],
"temperature": 0.0,
"max_tokens": 8192
}
```
**Task Prompts:**
- `"OCR:"` - Text recognition
- `"Table Recognition:"` - Table extraction (returns markdown)
- `"Formula Recognition:"` - Formula extraction
- `"Chart Recognition:"` - Chart extraction
**Response**
```json
{
"id": "chatcmpl-...",
"object": "chat.completion",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "| Date | Description | Amount |\n|---|---|---|\n| 2021-06-01 | GITLAB INC | -119.96 |"
},
"finish_reason": "stop"
}
]
}
```
### Environment Variables
| Variable | Default | Description |
|----------|---------|-------------|
| `MODEL_NAME` | `PaddlePaddle/PaddleOCR-VL` | Model to load |
| `HOST` | `0.0.0.0` | Server host |
| `PORT` | `8000` | Server port |
| `MAX_BATCHED_TOKENS` | `16384` | vLLM max batch tokens |
| `GPU_MEMORY_UTILIZATION` | `0.9` | GPU memory usage (0-1) |
### Performance
- **GPU (vLLM)**: ~2-5 seconds per page
- **CPU**: ~30-60 seconds per page
---
## Adding New Models
To add a new model variant:
1. Create `Dockerfile_<modelname>`
2. Set `MODEL_NAME` environment variable
3. Update `build-images.sh` with new build target
4. Add documentation to `readme.md`
## Troubleshooting
### Model download hangs
Check container logs:
```bash
docker logs -f <container-name>
```
The model download is ~5GB and may take several minutes.
### Out of memory
- GPU: Use int4 quantized version or add more VRAM
- CPU: Increase container memory limit: `--memory=16g`
### API not responding
1. Check if container is healthy: `docker ps`
2. Check logs for errors: `docker logs <container>`
3. Verify port mapping: `curl localhost:11434/api/tags`
## CI/CD Integration
Build and push using npmci:
```bash
npmci docker login
npmci docker build
npmci docker push code.foss.global
```
## Multi-Pass Extraction Strategy
The bank statement extraction uses a dual-VLM consensus approach:
### Architecture: Dual-VLM Consensus
| VLM | Model | Purpose |
|-----|-------|---------|
| **MiniCPM-V 4.5** | 8B params | Primary visual extraction |
| **PaddleOCR-VL** | 0.9B params | Table-specialized extraction |
### Extraction Strategy
1. **Pass 1**: MiniCPM-V visual extraction (images → JSON)
2. **Pass 2**: PaddleOCR-VL table recognition (images → markdown → JSON)
3. **Consensus**: If Pass 1 == Pass 2 → Done (fast path)
4. **Pass 3+**: MiniCPM-V visual if no consensus
### Why Dual-VLM Works
- **Different architectures**: Two independent models cross-check each other
- **Specialized strengths**: PaddleOCR-VL optimized for tables, MiniCPM-V for general vision
- **No structure loss**: Both VLMs see the original images directly
- **Fast consensus**: Most documents complete in 2 passes when VLMs agree
### Comparison vs Old PP-Structure Approach
| Approach | Bank Statement Result | Issue |
|----------|----------------------|-------|
| MiniCPM-V Visual | 28 transactions ✓ | - |
| PP-Structure HTML + Visual | 13 transactions ✗ | HTML merged rows incorrectly |
| PaddleOCR-VL Table | 28 transactions ✓ | Native table understanding |
**Key insight**: PP-Structure's HTML output loses structure for complex tables. PaddleOCR-VL's native VLM approach maintains table integrity.
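A compact sketch of the consensus loop described above, with the two extraction passes abstracted as callbacks since their real implementations live in the test files:

```typescript
interface ITransaction { date: string; counterparty: string; amount: number; }
type Extractor = (images: string[]) => Promise<ITransaction[]>;

// Consensus check: same count, same dates, amounts equal within a cent.
function sameResult(a: ITransaction[], b: ITransaction[]): boolean {
  return a.length === b.length &&
    a.every((tx, i) => tx.date === b[i].date && Math.abs(tx.amount - b[i].amount) < 0.01);
}

async function extractWithConsensus(
  images: string[],
  visualPass: Extractor, // Pass 1: MiniCPM-V, images -> JSON
  tablePass: Extractor,  // Pass 2: table specialist, images -> markdown -> JSON
): Promise<ITransaction[]> {
  const pass1 = await visualPass(images);
  const pass2 = await tablePass(images);
  if (pass1.length > 0 && sameResult(pass1, pass2)) return pass1; // consensus: fast path
  return visualPass(images); // Pass 3+: retry visual extraction when the passes disagree
}
```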
---
## Nanonets-OCR-s
@@ -254,7 +104,7 @@ Nanonets-OCR-s is a Qwen2.5-VL-3B model fine-tuned specifically for document OCR
- Based on Qwen2.5-VL-3B (~4B parameters)
- Fine-tuned for document OCR
- Outputs markdown with semantic HTML tags
- ~8-10GB VRAM (fits comfortably in 16GB)
- ~10GB VRAM
### Docker Images
@@ -305,7 +155,7 @@ Page numbers should be wrapped in brackets. Ex: <page_number>14</page_number>.
### Performance
- **GPU (vLLM)**: ~3-8 seconds per page
- **VRAM usage**: ~8-10GB
- **VRAM usage**: ~10GB
### Two-Stage Pipeline (Nanonets + Qwen3)
@@ -332,6 +182,76 @@ docker start minicpm-test
---
## Multi-Pass Extraction Strategy
The bank statement extraction uses a dual-VLM consensus approach:
### Architecture: Dual-VLM Consensus
| VLM | Model | Purpose |
|-----|-------|---------|
| **MiniCPM-V 4.5** | 8B params | Primary visual extraction |
| **Nanonets-OCR-s** | ~4B params | Document OCR with semantic output |
### Extraction Strategy
1. **Pass 1**: MiniCPM-V visual extraction (images → JSON)
2. **Pass 2**: Nanonets-OCR semantic extraction (images → markdown → JSON)
3. **Consensus**: If Pass 1 == Pass 2 → Done (fast path)
4. **Pass 3+**: MiniCPM-V visual if no consensus
### Why Dual-VLM Works
- **Different architectures**: Two independent models cross-check each other
- **Specialized strengths**: Nanonets-OCR-s optimized for document structure, MiniCPM-V for general vision
- **No structure loss**: Both VLMs see the original images directly
- **Fast consensus**: Most documents complete in 2 passes when VLMs agree
---
## Adding New Models
To add a new model variant:
1. Create `Dockerfile_<modelname>_<runtime>_<hardware>_VRAM<size>`
2. Set `MODEL_NAME` environment variable
3. Update `build-images.sh` with new build target
4. Add documentation to `readme.md`
## Troubleshooting
### Model download hangs
Check container logs:
```bash
docker logs -f <container-name>
```
The model download is ~5GB and may take several minutes.
### Out of memory
- GPU: Use a lighter model variant or upgrade VRAM
- Add more GPU memory: Consider multi-GPU setup
### API not responding
1. Check if container is healthy: `docker ps`
2. Check logs for errors: `docker logs <container>`
3. Verify port mapping: `curl localhost:11434/api/tags`
## CI/CD Integration
Build and push using npmci:
```bash
npmci docker login
npmci docker build
npmci docker push code.foss.global
```
---
## Related Resources
- [Ollama Documentation](https://ollama.ai/docs)

readme.md

@@ -1,40 +1,52 @@
# @host.today/ht-docker-ai 🚀
Production-ready Docker images for state-of-the-art AI Vision-Language Models. Run powerful multimodal AI locally with GPU acceleration or CPU fallback—no cloud API keys required.
Production-ready Docker images for state-of-the-art AI Vision-Language Models. Run powerful multimodal AI locally with GPU acceleration—**no cloud API keys required**.
> 🔥 **Three VLMs, one registry.** From high-performance document OCR to GPT-4o-level vision understanding—pick the right tool for your task.
## Issue Reporting and Security
For reporting bugs, issues, or security vulnerabilities, please visit [community.foss.global/](https://community.foss.global/). This is the central community hub for all issue reporting. Developers who sign and comply with our contribution agreement and go through identification can also get a [code.foss.global/](https://code.foss.global/) account to submit Pull Requests directly.
---
## 🎯 What's Included
| Model | Parameters | Best For | API |
|-------|-----------|----------|-----|
| **MiniCPM-V 4.5** | 8B | General vision understanding, image analysis, multi-image | Ollama-compatible |
| **PaddleOCR-VL** | 0.9B | Document parsing, table extraction, OCR | OpenAI-compatible |
| Model | Parameters | Best For | API | Port | VRAM |
|-------|-----------|----------|-----|------|------|
| **MiniCPM-V 4.5** | 8B | General vision understanding, multi-image analysis | Ollama-compatible | 11434 | ~9GB |
| **Nanonets-OCR2-3B** | ~3B | Document OCR with semantic markdown, LaTeX, flowcharts | OpenAI-compatible | 8000 | ~12-16GB |
| **Qwen3-VL-30B** | 30B (A3B) | Advanced visual agents, code generation from images | Ollama-compatible | 11434 | ~20GB |
## 📦 Available Images
---
## 📦 Quick Reference: All Available Images
```
code.foss.global/host.today/ht-docker-ai:<tag>
```
| Tag | Model | Hardware | Port |
|-----|-------|----------|------|
| `minicpm45v` / `latest` | MiniCPM-V 4.5 | NVIDIA GPU (9-18GB VRAM) | 11434 |
| `minicpm45v-cpu` | MiniCPM-V 4.5 | CPU only (8GB+ RAM) | 11434 |
| `paddleocr-vl` / `paddleocr-vl-gpu` | PaddleOCR-VL | NVIDIA GPU | 8000 |
| `paddleocr-vl-cpu` | PaddleOCR-VL | CPU only | 8000 |
| Tag | Model | Runtime | Port | VRAM |
|-----|-------|---------|------|------|
| `minicpm45v` / `latest` | MiniCPM-V 4.5 | Ollama | 11434 | ~9GB |
| `nanonets-ocr` | Nanonets-OCR2-3B | vLLM | 8000 | ~12-16GB |
| `qwen3vl` | Qwen3-VL-30B-A3B | Ollama | 11434 | ~20GB |
---
## 🖼️ MiniCPM-V 4.5
A GPT-4o level multimodal LLM from OpenBMB—handles image understanding, OCR, multi-image analysis, and visual reasoning across 30+ languages.
A GPT-4o level multimodal LLM from OpenBMB—handles image understanding, OCR, multi-image analysis, and visual reasoning across **30+ languages**.
### ✨ Key Features
- 🌍 **Multilingual:** 30+ languages supported
- 🖼️ **Multi-image:** Analyze multiple images in one request
- 📊 **Versatile:** Charts, documents, photos, diagrams
- ⚡ **Efficient:** Runs on consumer GPUs (9GB VRAM)
### Quick Start
**GPU (Recommended):**
```bash
docker run -d \
--name minicpm \
@@ -44,15 +56,6 @@ docker run -d \
code.foss.global/host.today/ht-docker-ai:minicpm45v
```
**CPU Only:**
```bash
docker run -d \
--name minicpm \
-p 11434:11434 \
-v ollama-data:/root/.ollama \
code.foss.global/host.today/ht-docker-ai:minicpm45v-cpu
```
> 💡 **Pro tip:** Mount the volume to persist downloaded models (~5GB). Without it, models re-download on every container start.
### API Examples
@@ -85,110 +88,131 @@ curl http://localhost:11434/api/chat -d '{
### Hardware Requirements
| Variant | VRAM/RAM | Notes |
|---------|----------|-------|
| GPU (int4 quantized) | 9GB VRAM | Recommended for most use cases |
| GPU (full precision) | 18GB VRAM | Maximum quality |
| CPU (GGUF) | 8GB+ RAM | Slower but accessible |
| Mode | VRAM Required |
|------|---------------|
| int4 quantized | ~9GB |
| Full precision (bf16) | ~18GB |
---
## 📄 PaddleOCR-VL
## 🔍 Nanonets-OCR2-3B
A specialized 0.9B Vision-Language Model optimized for document parsing. Native support for tables, formulas, charts, and text extraction in 109 languages.
The **latest Nanonets document OCR model** (October 2025 release)—based on Qwen2.5-VL-3B, fine-tuned specifically for document extraction with significant improvements over the original OCR-s.
### ✨ Key Features
- 📝 **Semantic output:** Tables → HTML, equations → LaTeX, flowcharts → structured markup
- 🌍 **Multilingual:** Inherits Qwen's broad language support
- 📄 **30K context:** Handle large, multi-page documents
- 🔌 **OpenAI-compatible:** Drop-in replacement for existing pipelines
- 🎯 **Improved accuracy:** Better semantic tagging and LaTeX equation extraction vs. OCR-s
### Quick Start
**GPU:**
```bash
docker run -d \
--name paddleocr \
--name nanonets \
--gpus all \
-p 8000:8000 \
-v hf-cache:/root/.cache/huggingface \
code.foss.global/host.today/ht-docker-ai:paddleocr-vl
code.foss.global/host.today/ht-docker-ai:nanonets-ocr
```
**CPU:**
```bash
docker run -d \
--name paddleocr \
-p 8000:8000 \
-v hf-cache:/root/.cache/huggingface \
code.foss.global/host.today/ht-docker-ai:paddleocr-vl-cpu
```
### OpenAI-Compatible API
PaddleOCR-VL exposes a fully OpenAI-compatible `/v1/chat/completions` endpoint:
### API Usage
```bash
curl http://localhost:8000/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{
"model": "paddleocr-vl",
"model": "nanonets/Nanonets-OCR2-3B",
"messages": [{
"role": "user",
"content": [
{"type": "image_url", "image_url": {"url": "data:image/png;base64,<base64>"}},
{"type": "text", "text": "Table Recognition:"}
{"type": "text", "text": "Extract the text from the above document as if you were reading it naturally. Return the tables in html format. Return the equations in LaTeX representation."}
]
}],
"max_tokens": 8192
"temperature": 0.0,
"max_tokens": 4096
}'
```
### Task Prompts
### Output Format
| Prompt | Output | Use Case |
|--------|--------|----------|
| `OCR:` | Plain text | General text extraction |
| `Table Recognition:` | Markdown table | Invoices, bank statements, spreadsheets |
| `Formula Recognition:` | LaTeX | Math equations, scientific notation |
| `Chart Recognition:` | Description | Graphs and visualizations |
Nanonets-OCR2-3B returns markdown with semantic tags:
### API Endpoints
| Element | Output Format |
|---------|---------------|
| Tables | `<table>...</table>` (HTML) |
| Equations | `$...$` (LaTeX) |
| Images | `<img>description</img>` |
| Watermarks | `<watermark>OFFICIAL COPY</watermark>` |
| Page numbers | `<page_number>14</page_number>` |
| Flowcharts | Structured markup |
| Endpoint | Method | Description |
|----------|--------|-------------|
| `/health` | GET | Health check with model/device info |
| `/formats` | GET | Supported image formats and input methods |
| `/v1/models` | GET | List available models |
| `/v1/chat/completions` | POST | OpenAI-compatible chat completions |
| `/ocr` | POST | Legacy OCR endpoint |
### Hardware Requirements
### Image Input Methods
| Spec | Value |
|--------|------|
| 30K context (default) | ~12-16GB |
| Speed | ~3-8 seconds per page |
PaddleOCR-VL accepts images in multiple formats:
---
```javascript
// Base64 data URL
"data:image/png;base64,iVBORw0KGgo..."
## 🧠 Qwen3-VL-30B-A3B
// HTTP URL
"https://example.com/document.png"
The **most powerful** Qwen vision model—30B parameters with 3B active (MoE architecture). Handles complex visual reasoning, code generation from screenshots, and visual agent capabilities.
// Raw base64
"iVBORw0KGgo..."
### ✨ Key Features
- 🚀 **256K context** (expandable to 1M tokens!)
- 🤖 **Visual agent capabilities** — can plan and execute multi-step tasks
- 💻 **Code generation from images** — screenshot → working code
- 🎯 **State-of-the-art** visual reasoning
### Quick Start
```bash
docker run -d \
--name qwen3vl \
--gpus all \
-p 11434:11434 \
-v ollama-data:/root/.ollama \
code.foss.global/host.today/ht-docker-ai:qwen3vl
```
**Supported formats:** PNG, JPEG, WebP, BMP, GIF, TIFF
Then pull the model (one-time, ~20GB):
```bash
docker exec qwen3vl ollama pull qwen3-vl:30b-a3b
```
**Optimal resolution:** 1080p–2K. Images are automatically scaled for best results.
### API Usage
### Performance
```bash
curl http://localhost:11434/api/chat -d '{
"model": "qwen3-vl:30b-a3b",
"messages": [{
"role": "user",
"content": "Analyze this screenshot and write the code to recreate this UI",
"images": ["<base64-encoded-image>"]
}]
}'
```
| Mode | Speed per Page |
|------|----------------|
| GPU (CUDA) | 2–5 seconds |
| CPU | 30–60 seconds |
### Hardware Requirements
| Requirement | Value |
|-------------|-------|
| VRAM | ~20GB (Q4_K_M quantization) |
| Context | 256K tokens default |
---
## 🐳 Docker Compose
Run multiple VLMs together for maximum flexibility:
```yaml
version: '3.8'
services:
# General vision tasks
minicpm:
@@ -206,9 +230,9 @@ services:
capabilities: [gpu]
restart: unless-stopped
# Document parsing / OCR
paddleocr:
image: code.foss.global/host.today/ht-docker-ai:paddleocr-vl
# Document OCR with semantic output
nanonets:
image: code.foss.global/host.today/ht-docker-ai:nanonets-ocr
ports:
- "8000:8000"
volumes:
@@ -231,7 +255,7 @@ volumes:
## ⚙️ Environment Variables
### MiniCPM-V 4.5
### MiniCPM-V 4.5 & Qwen3-VL (Ollama-based)
| Variable | Default | Description |
|----------|---------|-------------|
@@ -239,13 +263,47 @@ volumes:
| `OLLAMA_HOST` | `0.0.0.0` | API bind address |
| `OLLAMA_ORIGINS` | `*` | Allowed CORS origins |
### PaddleOCR-VL
### Nanonets-OCR (vLLM-based)
| Variable | Default | Description |
|----------|---------|-------------|
| `MODEL_NAME` | `PaddlePaddle/PaddleOCR-VL` | HuggingFace model ID |
| `SERVER_HOST` | `0.0.0.0` | API bind address |
| `SERVER_PORT` | `8000` | API port |
| `MODEL_NAME` | `nanonets/Nanonets-OCR2-3B` | HuggingFace model ID |
| `HOST` | `0.0.0.0` | API bind address |
| `PORT` | `8000` | API port |
| `MAX_MODEL_LEN` | `30000` | Maximum sequence length |
| `GPU_MEMORY_UTILIZATION` | `0.9` | GPU memory usage (0-1) |
---
## 🏗️ Architecture Notes
### Dual-VLM Consensus Strategy
For production document extraction, consider using multiple models together:
1. **Pass 1:** MiniCPM-V visual extraction (images → JSON)
2. **Pass 2:** Nanonets-OCR semantic extraction (images → markdown → JSON)
3. **Consensus:** If results match → Done (fast path)
4. **Pass 3+:** Additional visual passes if needed
This dual-VLM approach catches extraction errors that single models miss.
### Why Multi-Model Works
- **Different architectures:** Independent models cross-validate each other
- **Specialized strengths:** Nanonets-OCR2-3B excels at document structure; MiniCPM-V handles general vision
- **Native processing:** All VLMs see original images—no intermediate structure loss
### Model Selection Guide
| Task | Recommended Model |
|------|-------------------|
| General image understanding | MiniCPM-V 4.5 |
| Document OCR with structure preservation | Nanonets-OCR2-3B |
| Complex visual reasoning / code generation | Qwen3-VL-30B |
| Multi-image analysis | MiniCPM-V 4.5 |
| Visual agent tasks | Qwen3-VL-30B |
| Large documents (30K+ tokens) | Nanonets-OCR2-3B |
---
@@ -260,42 +318,21 @@ cd ht-docker-ai
./build-images.sh
# Run tests
./test-images.sh
pnpm test
```
---
## 🏗️ Architecture Notes
### Dual-VLM Consensus Strategy
For production document extraction, consider using both models together:
1. **Pass 1:** MiniCPM-V visual extraction (images → JSON)
2. **Pass 2:** PaddleOCR-VL table recognition (images → markdown → JSON)
3. **Consensus:** If results match → Done (fast path)
4. **Pass 3+:** Additional visual passes if needed
This dual-VLM approach catches extraction errors that single models miss.
### Why This Works
- **Different architectures:** Two independent models cross-validate each other
- **Specialized strengths:** PaddleOCR-VL excels at tables; MiniCPM-V handles general vision
- **Native processing:** Both VLMs see original images—no intermediate HTML/structure loss
---
## 🔍 Troubleshooting
### Model download hangs
```bash
docker logs -f <container-name>
```
Model downloads can take several minutes (~5GB for MiniCPM-V).
Model downloads can take several minutes (~5GB for MiniCPM-V, ~20GB for Qwen3-VL).
### Out of memory
- **GPU:** Use the CPU variant or upgrade VRAM
- **GPU:** Use a lighter model variant or upgrade VRAM
- **CPU:** Increase container memory: `--memory=16g`
### API not responding
@@ -315,6 +352,13 @@ sudo nvidia-ctk runtime configure --runtime=docker
sudo systemctl restart docker
```
### GPU Memory Contention (Multi-Model)
When running multiple VLMs on a single GPU:
- vLLM and Ollama both need significant GPU memory
- **Single GPU:** Run services sequentially (stop one before starting another)
- **Multi-GPU:** Assign each service to a different GPU via `CUDA_VISIBLE_DEVICES`
---
## License and Legal Information

test/helpers/docker.ts

@@ -32,10 +32,10 @@ export const IMAGES = {
healthTimeout: 120000,
} as IImageConfig,
// Nanonets-OCR-s - Document OCR optimized VLM (Qwen2.5-VL-3B fine-tuned)
// Nanonets-OCR2-3B - Document OCR optimized VLM (Qwen2.5-VL-3B fine-tuned, Oct 2025)
nanonetsOcr: {
name: 'nanonets-ocr',
dockerfile: 'Dockerfile_nanonets_ocr',
dockerfile: 'Dockerfile_nanonets_vllm_gpu_VRAM10GB',
buildContext: '.',
containerName: 'nanonets-test',
ports: ['8000:8000'],
@@ -340,12 +340,12 @@ export async function ensureQwen3Vl(): Promise<boolean> {
}
/**
* Ensure Nanonets-OCR-s service is running (via vLLM)
* Document OCR optimized VLM based on Qwen2.5-VL-3B
* Ensure Nanonets-OCR2-3B service is running (via vLLM)
* Document OCR optimized VLM based on Qwen2.5-VL-3B (Oct 2025 release)
*/
export async function ensureNanonetsOcr(): Promise<boolean> {
if (!isGpuAvailable()) {
console.log('[Docker] WARNING: Nanonets-OCR-s requires GPU, but none detected');
console.log('[Docker] WARNING: Nanonets-OCR2-3B requires GPU, but none detected');
}
return ensureService(IMAGES.nanonetsOcr);
}

test/test.bankstatements.minicpm.ts

@@ -1,9 +1,10 @@
/**
* Bank statement extraction using MiniCPM-V (visual extraction)
* Bank statement extraction using MiniCPM-V via smartagent DualAgentOrchestrator
*
* JSON per-page approach:
* 1. Ask for structured JSON of all transactions per page
* 2. Consensus: extract twice, compare, retry if mismatch
* Uses vision-capable orchestrator with JsonValidatorTool for self-validation:
* 1. Process each page with the orchestrator
* 2. Driver extracts transactions and validates JSON before completing
* 3. Streaming output during extraction
*/
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
@@ -11,6 +12,8 @@ import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensureMiniCpm } from './helpers/docker.js';
import { SmartAi } from '@push.rocks/smartai';
import { DualAgentOrchestrator, JsonValidatorTool } from '@push.rocks/smartagent';
const OLLAMA_URL = 'http://localhost:11434';
const MODEL = 'openbmb/minicpm-v4.5:q8_0';
@@ -21,21 +24,9 @@ interface ITransaction {
amount: number;
}
const JSON_PROMPT = `Extract ALL transactions from this bank statement page as a JSON array.
IMPORTANT RULES:
1. Each transaction has: date, description/counterparty, and an amount
2. Amount is NEGATIVE for money going OUT (debits, payments, withdrawals)
3. Amount is POSITIVE for money coming IN (credits, deposits, refunds)
4. Date format: YYYY-MM-DD
5. Do NOT include: opening balance, closing balance, subtotals, headers, or summary rows
6. Only include actual transactions with a specific date and amount
Return ONLY this JSON format, no explanation:
[
{"date": "2021-06-01", "counterparty": "COMPANY NAME", "amount": -25.99},
{"date": "2021-06-02", "counterparty": "DEPOSIT FROM", "amount": 100.00}
]`;
// SmartAi instance and orchestrator (initialized in setup)
let smartAi: SmartAi;
let orchestrator: DualAgentOrchestrator;
/**
* Convert PDF to PNG images using ImageMagick
@@ -65,206 +56,31 @@ function convertPdfToImages(pdfPath: string): string[] {
}
}
/**
* Query for JSON extraction
*/
async function queryJson(image: string, queryId: string): Promise<string> {
console.log(` [${queryId}] Sending request to ${MODEL}...`);
const startTime = Date.now();
const EXTRACTION_PROMPT = `Extract ALL transactions from this bank statement page as a JSON array.
const response = await fetch(`${OLLAMA_URL}/api/chat`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model: MODEL,
messages: [{
role: 'user',
content: JSON_PROMPT,
images: [image],
}],
stream: false,
options: {
num_predict: 4000,
temperature: 0.1,
},
}),
});
IMPORTANT RULES:
1. Each transaction has: date, counterparty (description), and an amount
2. Amount is NEGATIVE for money going OUT (debits, payments, withdrawals)
3. Amount is POSITIVE for money coming IN (credits, deposits, refunds)
4. Date format: YYYY-MM-DD
5. Do NOT include: opening balance, closing balance, subtotals, headers, or summary rows
6. Only include actual transactions with a specific date and amount
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
Before completing, validate your JSON output:
if (!response.ok) {
console.log(` [${queryId}] ERROR: ${response.status} (${elapsed}s)`);
throw new Error(`Ollama API error: ${response.status}`);
}
<tool_call>
<tool>json</tool>
<action>validate</action>
<params>{"jsonString": "YOUR_JSON_ARRAY_HERE"}</params>
</tool_call>
const data = await response.json();
const content = (data.message?.content || '').trim();
console.log(` [${queryId}] Response received (${elapsed}s, ${content.length} chars)`);
return content;
}
Output format (must be a valid JSON array):
[
{"date": "2021-06-01", "counterparty": "COMPANY NAME", "amount": -25.99},
{"date": "2021-06-02", "counterparty": "DEPOSIT FROM", "amount": 100.00}
]
/**
* Sanitize JSON string - fix common issues from vision model output
*/
function sanitizeJson(jsonStr: string): string {
let s = jsonStr;
// Fix +number (e.g., +93.80 -> 93.80) - JSON doesn't allow + prefix
// Handle various whitespace patterns
s = s.replace(/"amount"\s*:\s*\+/g, '"amount": ');
s = s.replace(/:\s*\+(\d)/g, ': $1');
// Fix European number format with thousands separator (e.g., 1.000.00 -> 1000.00)
// Pattern: "amount": X.XXX.XX where X.XXX is thousands and .XX is decimal
s = s.replace(/"amount"\s*:\s*(-?)(\d{1,3})\.(\d{3})\.(\d{2})\b/g, '"amount": $1$2$3.$4');
// Also handle larger numbers like 10.000.00
s = s.replace(/"amount"\s*:\s*(-?)(\d{1,3})\.(\d{3})\.(\d{3})\.(\d{2})\b/g, '"amount": $1$2$3$4.$5');
// Fix trailing commas before ] or }
s = s.replace(/,\s*([}\]])/g, '$1');
// Fix unescaped newlines inside strings (replace with space)
s = s.replace(/"([^"\\]*)\n([^"]*)"/g, '"$1 $2"');
// Fix unescaped tabs inside strings
s = s.replace(/"([^"\\]*)\t([^"]*)"/g, '"$1 $2"');
// Fix unescaped backslashes (but not already escaped ones)
s = s.replace(/\\(?!["\\/bfnrtu])/g, '\\\\');
// Fix common issues with counterparty names containing special chars
s = s.replace(/"counterparty":\s*"([^"]*)'([^"]*)"/g, '"counterparty": "$1$2"');
// Remove control characters except newlines (which we handle above)
s = s.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F]/g, ' ');
return s;
}
/**
* Parse JSON response into transactions
*/
function parseJsonResponse(response: string, queryId: string): ITransaction[] {
console.log(` [${queryId}] Parsing response...`);
// Try to find JSON in markdown code block
const codeBlockMatch = response.match(/```(?:json)?\s*([\s\S]*?)```/);
let jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : response.trim();
if (codeBlockMatch) {
console.log(` [${queryId}] Found JSON in code block`);
}
// Sanitize JSON (fix +number issue)
jsonStr = sanitizeJson(jsonStr);
try {
const parsed = JSON.parse(jsonStr);
if (Array.isArray(parsed)) {
const txs = parsed.map(tx => ({
date: String(tx.date || ''),
counterparty: String(tx.counterparty || tx.description || ''),
amount: parseAmount(tx.amount),
}));
console.log(` [${queryId}] Parsed ${txs.length} transactions (direct)`);
return txs;
}
console.log(` [${queryId}] Parsed JSON is not an array`);
} catch (e) {
const errMsg = (e as Error).message;
console.log(` [${queryId}] Direct parse failed: ${errMsg}`);
// Log problematic section with context
const posMatch = errMsg.match(/position (\d+)/);
if (posMatch) {
const pos = parseInt(posMatch[1]);
const start = Math.max(0, pos - 40);
const end = Math.min(jsonStr.length, pos + 40);
const context = jsonStr.substring(start, end);
const marker = ' '.repeat(pos - start) + '^';
console.log(` [${queryId}] Context around error position ${pos}:`);
console.log(` [${queryId}] ...${context}...`);
console.log(` [${queryId}] ${marker}`);
}
// Try to find JSON array pattern
const arrayMatch = jsonStr.match(/\[[\s\S]*\]/);
if (arrayMatch) {
console.log(` [${queryId}] Found array pattern, trying to parse...`);
const sanitizedArray = sanitizeJson(arrayMatch[0]);
try {
const parsed = JSON.parse(sanitizedArray);
if (Array.isArray(parsed)) {
const txs = parsed.map(tx => ({
date: String(tx.date || ''),
counterparty: String(tx.counterparty || tx.description || ''),
amount: parseAmount(tx.amount),
}));
console.log(` [${queryId}] Parsed ${txs.length} transactions (array match)`);
return txs;
}
} catch (e2) {
const errMsg2 = (e2 as Error).message;
console.log(` [${queryId}] Array parse failed: ${errMsg2}`);
const posMatch2 = errMsg2.match(/position (\d+)/);
if (posMatch2) {
const pos2 = parseInt(posMatch2[1]);
console.log(` [${queryId}] Context around error: ...${sanitizedArray.substring(Math.max(0, pos2 - 30), pos2 + 30)}...`);
}
// Try to extract individual objects from the malformed array
console.log(` [${queryId}] Attempting object-by-object extraction...`);
const extracted = extractTransactionsFromMalformedJson(sanitizedArray, queryId);
if (extracted.length > 0) {
console.log(` [${queryId}] Recovered ${extracted.length} transactions via object extraction`);
return extracted;
}
}
} else {
console.log(` [${queryId}] No array pattern found in response`);
console.log(` [${queryId}] Raw response preview: ${response.substring(0, 200)}...`);
}
}
console.log(` [${queryId}] PARSE FAILED - returning empty array`);
return [];
}
/**
* Extract transactions from malformed JSON by parsing objects individually
*/
function extractTransactionsFromMalformedJson(jsonStr: string, queryId: string): ITransaction[] {
const transactions: ITransaction[] = [];
// Match individual transaction objects
const objectPattern = /\{\s*"date"\s*:\s*"([^"]+)"\s*,\s*"counterparty"\s*:\s*"([^"]+)"\s*,\s*"amount"\s*:\s*([+-]?\d+\.?\d*)\s*\}/g;
let match;
while ((match = objectPattern.exec(jsonStr)) !== null) {
transactions.push({
date: match[1],
counterparty: match[2],
amount: parseFloat(match[3]),
});
}
// Also try with different field orders (amount before counterparty, etc.)
if (transactions.length === 0) {
const altPattern = /\{\s*"date"\s*:\s*"([^"]+)"[^}]*"amount"\s*:\s*([+-]?\d+\.?\d*)[^}]*\}/g;
while ((match = altPattern.exec(jsonStr)) !== null) {
// Try to extract counterparty from the match
const counterpartyMatch = match[0].match(/"counterparty"\s*:\s*"([^"]+)"/);
const descMatch = match[0].match(/"description"\s*:\s*"([^"]+)"/);
transactions.push({
date: match[1],
counterparty: counterpartyMatch?.[1] || descMatch?.[1] || 'UNKNOWN',
amount: parseFloat(match[2]),
});
}
}
return transactions;
}
Only complete after validation passes. Output the final JSON array in <task_complete> tags.`;
/**
* Parse amount from various formats
@@ -284,102 +100,101 @@ function parseAmount(value: unknown): number {
}
/**
* Compare two transaction arrays for consensus
* Extract JSON from response (handles markdown code blocks and task_complete tags)
*/
function transactionArraysMatch(a: ITransaction[], b: ITransaction[]): boolean {
if (a.length !== b.length) return false;
for (let i = 0; i < a.length; i++) {
const dateMatch = a[i].date === b[i].date;
const amountMatch = Math.abs(a[i].amount - b[i].amount) < 0.01;
if (!dateMatch || !amountMatch) return false;
}
return true;
}
/**
* Compare two transaction arrays and log differences
*/
function compareAndLogDifferences(txs1: ITransaction[], txs2: ITransaction[], pageNum: number): void {
if (txs1.length !== txs2.length) {
console.log(` [Page ${pageNum}] Length mismatch: Q1=${txs1.length}, Q2=${txs2.length}`);
return;
}
for (let i = 0; i < txs1.length; i++) {
const dateMatch = txs1[i].date === txs2[i].date;
const amountMatch = Math.abs(txs1[i].amount - txs2[i].amount) < 0.01;
if (!dateMatch || !amountMatch) {
console.log(` [Page ${pageNum}] Tx ${i + 1} differs:`);
console.log(` Q1: ${txs1[i].date} | ${txs1[i].amount}`);
console.log(` Q2: ${txs2[i].date} | ${txs2[i].amount}`);
function extractJsonFromResponse(response: string): unknown[] | null {
// Try to find JSON in task_complete tags
const completeMatch = response.match(/<task_complete>([\s\S]*?)<\/task_complete>/);
if (completeMatch) {
const content = completeMatch[1].trim();
// Try to find JSON in the content
const codeBlockMatch = content.match(/```(?:json)?\s*([\s\S]*?)```/);
const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : content;
try {
const parsed = JSON.parse(jsonStr);
if (Array.isArray(parsed)) return parsed;
} catch {
// Try to find JSON array pattern
const jsonMatch = jsonStr.match(/\[[\s\S]*\]/);
if (jsonMatch) {
try {
const parsed = JSON.parse(jsonMatch[0]);
if (Array.isArray(parsed)) return parsed;
} catch {
return null;
}
}
}
}
// Try to find JSON in markdown code block
const codeBlockMatch = response.match(/```(?:json)?\s*([\s\S]*?)```/);
const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : response.trim();
try {
const parsed = JSON.parse(jsonStr);
if (Array.isArray(parsed)) return parsed;
} catch {
// Try to find JSON array pattern
const jsonMatch = jsonStr.match(/\[[\s\S]*\]/);
if (jsonMatch) {
try {
const parsed = JSON.parse(jsonMatch[0]);
if (Array.isArray(parsed)) return parsed;
} catch {
return null;
}
}
}
return null;
}
/**
* Extract transactions from a single page with consensus
* Parse JSON response into transactions
*/
function parseJsonToTransactions(response: string): ITransaction[] {
const parsed = extractJsonFromResponse(response);
if (!parsed || !Array.isArray(parsed)) return [];
return parsed.map((tx: any) => ({
date: String(tx.date || ''),
counterparty: String(tx.counterparty || tx.description || ''),
amount: parseAmount(tx.amount),
}));
}
/**
* Extract transactions from a single page using smartagent orchestrator
*/
async function extractTransactionsFromPage(image: string, pageNum: number): Promise<ITransaction[]> {
const MAX_ATTEMPTS = 5;
console.log(`\n ======== Page ${pageNum} ========`);
console.log(` [Page ${pageNum}] Starting JSON extraction...`);
for (let attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
console.log(`\n [Page ${pageNum}] --- Attempt ${attempt}/${MAX_ATTEMPTS} ---`);
const startTime = Date.now();
// Extract twice in parallel
const q1Id = `P${pageNum}A${attempt}Q1`;
const q2Id = `P${pageNum}A${attempt}Q2`;
const result = await orchestrator.run(EXTRACTION_PROMPT, { images: [image] });
const [response1, response2] = await Promise.all([
queryJson(image, q1Id),
queryJson(image, q2Id),
]);
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
console.log(`\n [Page ${pageNum}] Completed in ${elapsed}s (${result.iterations} iterations, status: ${result.status})`);
const txs1 = parseJsonResponse(response1, q1Id);
const txs2 = parseJsonResponse(response2, q2Id);
const transactions = parseJsonToTransactions(result.result);
console.log(` [Page ${pageNum}] Results: Q1=${txs1.length} txs, Q2=${txs2.length} txs`);
if (txs1.length > 0 && transactionArraysMatch(txs1, txs2)) {
console.log(` [Page ${pageNum}] ✓ CONSENSUS REACHED: ${txs1.length} transactions`);
console.log(` [Page ${pageNum}] Transactions:`);
for (let i = 0; i < txs1.length; i++) {
const tx = txs1[i];
console.log(` ${(i + 1).toString().padStart(2)}. ${tx.date} | ${tx.counterparty.substring(0, 30).padEnd(30)} | ${tx.amount >= 0 ? '+' : ''}${tx.amount.toFixed(2)}`);
}
return txs1;
}
console.log(` [Page ${pageNum}] ✗ NO CONSENSUS`);
compareAndLogDifferences(txs1, txs2, pageNum);
if (attempt < MAX_ATTEMPTS) {
console.log(` [Page ${pageNum}] Retrying...`);
}
}
// Fallback: use last response
console.log(`\n [Page ${pageNum}] === FALLBACK (no consensus after ${MAX_ATTEMPTS} attempts) ===`);
const fallbackId = `P${pageNum}FALLBACK`;
const fallbackResponse = await queryJson(image, fallbackId);
const fallback = parseJsonResponse(fallbackResponse, fallbackId);
console.log(` [Page ${pageNum}] ~ FALLBACK RESULT: ${fallback.length} transactions`);
for (let i = 0; i < fallback.length; i++) {
const tx = fallback[i];
console.log(` [Page ${pageNum}] Extracted ${transactions.length} transactions:`);
for (let i = 0; i < Math.min(transactions.length, 10); i++) {
const tx = transactions[i];
console.log(` ${(i + 1).toString().padStart(2)}. ${tx.date} | ${tx.counterparty.substring(0, 30).padEnd(30)} | ${tx.amount >= 0 ? '+' : ''}${tx.amount.toFixed(2)}`);
}
return fallback;
if (transactions.length > 10) {
console.log(` ... and ${transactions.length - 10} more transactions`);
}
return transactions;
}
/**
* Extract all transactions from bank statement
*/
async function extractTransactions(images: string[]): Promise<ITransaction[]> {
console.log(` [Vision] Processing ${images.length} page(s) with ${MODEL} (JSON consensus)`);
console.log(` [Vision] Processing ${images.length} page(s) with smartagent DualAgentOrchestrator`);
const allTransactions: ITransaction[] = [];
@@ -474,6 +289,80 @@ tap.test('setup: ensure Docker containers are running', async () => {
console.log('\n[Setup] All containers ready!\n');
});
tap.test('setup: initialize smartagent orchestrator', async () => {
console.log('[Setup] Initializing SmartAi and DualAgentOrchestrator...');
smartAi = new SmartAi({
ollama: {
baseUrl: OLLAMA_URL,
model: MODEL,
defaultOptions: {
num_ctx: 32768,
num_predict: 4000,
temperature: 0.1,
},
defaultTimeout: 300000, // 5 minutes for vision tasks
},
});
await smartAi.start();
orchestrator = new DualAgentOrchestrator({
smartAiInstance: smartAi,
defaultProvider: 'ollama',
guardianPolicyPrompt: `You are a Guardian agent overseeing bank statement extraction tasks.
APPROVE all tool calls that:
- Use the json.validate action to verify JSON output
- Are reasonable attempts to complete the extraction task
REJECT tool calls that:
- Attempt to access external resources
- Try to execute arbitrary code
- Are clearly unrelated to bank statement extraction`,
driverSystemMessage: `You are an AI assistant that extracts bank transactions from statement images.
Your task is to analyze bank statement images and extract transaction data.
You have access to a json.validate tool to verify your JSON output.
IMPORTANT: Always validate your JSON before completing the task.
## Tool Usage Format
When you need to validate JSON, output:
<tool_call>
<tool>json</tool>
<action>validate</action>
<params>{"jsonString": "YOUR_JSON_ARRAY"}</params>
</tool_call>
## Completion Format
After validation passes, complete the task:
<task_complete>
[{"date": "YYYY-MM-DD", "counterparty": "...", "amount": -123.45}, ...]
</task_complete>`,
maxIterations: 5,
maxConsecutiveRejections: 3,
onToken: (token, source) => {
if (source === 'driver') {
process.stdout.write(token);
}
},
onProgress: (event) => {
if (event.logLevel === 'error') {
console.error(event.logMessage);
}
},
});
// Register the JsonValidatorTool
orchestrator.registerTool(new JsonValidatorTool());
await orchestrator.start();
console.log('[Setup] Orchestrator initialized!\n');
});
tap.test('should have MiniCPM-V model loaded', async () => {
const response = await fetch(`${OLLAMA_URL}/api/tags`);
const data = await response.json();
@@ -482,7 +371,7 @@ tap.test('should have MiniCPM-V model loaded', async () => {
});
const testCases = findTestCases();
console.log(`\nFound ${testCases.length} bank statement test cases (MiniCPM-V)\n`);
console.log(`\nFound ${testCases.length} bank statement test cases (smartagent + MiniCPM-V)\n`);
let passedCount = 0;
let failedCount = 0;
@@ -514,7 +403,10 @@ for (const testCase of testCases) {
// Log counterparty variations (names that differ but date/amount matched)
if (result.variations.length > 0) {
console.log(` Counterparty variations (${result.variations.length}):`);
result.variations.forEach((v) => console.log(` ${v}`));
result.variations.slice(0, 5).forEach((v) => console.log(` ${v}`));
if (result.variations.length > 5) {
console.log(` ... and ${result.variations.length - 5} more variations`);
}
}
expect(result.matches).toEqual(result.total);
@@ -522,12 +414,20 @@ for (const testCase of testCases) {
});
}
tap.test('cleanup: stop orchestrator', async () => {
if (orchestrator) {
await orchestrator.stop();
}
console.log('[Cleanup] Orchestrator stopped');
});
tap.test('summary', async () => {
const total = testCases.length;
console.log(`\n======================================================`);
console.log(` Bank Statement Summary (${MODEL})`);
console.log(` Bank Statement Summary`);
console.log(` (smartagent + ${MODEL})`);
console.log(`======================================================`);
console.log(` Method: JSON per-page + consensus`);
console.log(` Method: DualAgentOrchestrator with vision`);
console.log(` Passed: ${passedCount}/${total}`);
console.log(` Failed: ${failedCount}/${total}`);
console.log(`======================================================\n`);

test/test.bankstatements.nanonets.ts

@@ -1,7 +1,7 @@
/**
* Bank statement extraction using Nanonets-OCR-s + GPT-OSS 20B (sequential two-stage pipeline)
* Bank statement extraction using Nanonets-OCR2-3B + GPT-OSS 20B (sequential two-stage pipeline)
*
* Stage 1: Nanonets-OCR-s converts ALL document pages to markdown (stop after completion)
* Stage 1: Nanonets-OCR2-3B converts ALL document pages to markdown (stop after completion)
* Stage 2: GPT-OSS 20B extracts structured JSON from saved markdown (after Nanonets stops)
*
* This approach avoids GPU contention by running services sequentially.
@@ -11,10 +11,12 @@ import * as fs from 'fs';
import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensureNanonetsOcr, ensureMiniCpm, removeContainer, isContainerRunning } from './helpers/docker.js';
import { ensureNanonetsOcr, ensureMiniCpm, isContainerRunning } from './helpers/docker.js';
import { SmartAi } from '@push.rocks/smartai';
import { DualAgentOrchestrator, JsonValidatorTool } from '@push.rocks/smartagent';
const NANONETS_URL = 'http://localhost:8000/v1';
const NANONETS_MODEL = 'nanonets/Nanonets-OCR-s';
const NANONETS_MODEL = 'nanonets/Nanonets-OCR2-3B';
const OLLAMA_URL = 'http://localhost:11434';
const EXTRACTION_MODEL = 'gpt-oss:20b';
@@ -22,18 +24,41 @@ const EXTRACTION_MODEL = 'gpt-oss:20b';
// Temp directory for storing markdown between stages
const TEMP_MD_DIR = path.join(os.tmpdir(), 'nanonets-markdown');
// SmartAi instance for Ollama with optimized settings
const smartAi = new SmartAi({
ollama: {
baseUrl: OLLAMA_URL,
model: EXTRACTION_MODEL,
defaultOptions: {
num_ctx: 32768, // Larger context for long statements + thinking
temperature: 0, // Deterministic for JSON extraction
},
defaultTimeout: 600000, // 10 minute timeout for large documents
},
});
// DualAgentOrchestrator for structured task execution
let orchestrator: DualAgentOrchestrator;
interface ITransaction {
date: string;
counterparty: string;
amount: number;
}
interface IImageData {
base64: string;
width: number;
height: number;
pageNum: number;
}
interface ITestCase {
name: string;
pdfPath: string;
jsonPath: string;
markdownPath?: string;
images?: string[];
images?: IImageData[];
}
// Nanonets-specific prompt for document OCR to markdown
@@ -44,18 +69,47 @@ If there is an image in the document and image caption is not present, add a sma
Watermarks should be wrapped in brackets. Ex: <watermark>OFFICIAL COPY</watermark>.
Page numbers should be wrapped in brackets. Ex: <page_number>14</page_number>.`;
// JSON extraction prompt for GPT-OSS 20B
const JSON_EXTRACTION_PROMPT = `Extract ALL transactions from this bank statement as JSON array. Each transaction: {"date": "YYYY-MM-DD", "counterparty": "NAME", "amount": -25.99}. Amount negative for debits, positive for credits. Only include actual transactions, not balances. Return ONLY JSON array, no explanation.
// JSON extraction prompt for GPT-OSS 20B (sent AFTER the statement text is provided)
const JSON_EXTRACTION_PROMPT = `Extract ALL transactions from the bank statement. Return ONLY valid JSON array.
STATEMENT:
`;
WHERE TO FIND DATA:
- Transactions are typically in TABLES with columns: Date, Description/Counterparty, Debit, Credit, Balance
- Look for rows with actual money movements, NOT header rows or summary totals
RULES:
1. date: Convert to YYYY-MM-DD format
2. counterparty: The name/description of who the money went to/from
3. amount: NEGATIVE for debits/withdrawals, POSITIVE for credits/deposits
4. Only include actual transactions, NOT opening/closing balances
JSON array only:
[{"date":"YYYY-MM-DD","counterparty":"NAME","amount":-25.99}]`;
// Constants for smart batching
const MAX_VISUAL_TOKENS = 28000; // ~32K context minus prompt/output headroom
const PATCH_SIZE = 14; // Qwen2.5-VL uses 14x14 patches
/**
* Convert PDF to PNG images using ImageMagick
* Estimate visual tokens for an image based on dimensions
*/
function convertPdfToImages(pdfPath: string): string[] {
function estimateVisualTokens(width: number, height: number): number {
return Math.ceil((width * height) / (PATCH_SIZE * PATCH_SIZE));
}
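// Worked example (assumed page size, not from a test run): an A4 page rendered
// at 150 DPI is roughly 1240x1754 px, so the estimate is
// Math.ceil((1240 * 1754) / (14 * 14)) = Math.ceil(2174960 / 196) = 11097 tokens,
// comfortably under the 28000-token MAX_VISUAL_TOKENS budget.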
/**
* Process images one page at a time for reliability
*/
function batchImages(images: IImageData[]): IImageData[][] {
// One page per batch for reliable processing
return images.map(img => [img]);
}
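// For reference, a greedy batcher that actually consumed the MAX_VISUAL_TOKENS
// budget could look like the sketch below. This is a hypothetical alternative
// to the one-page-per-batch strategy above, not code the test currently runs.
function batchImagesByTokenBudget(images: IImageData[]): IImageData[][] {
  const batches: IImageData[][] = [];
  let current: IImageData[] = [];
  let currentTokens = 0;
  for (const img of images) {
    const tokens = estimateVisualTokens(img.width, img.height);
    // Close the current batch once adding this page would exceed the budget
    if (current.length > 0 && currentTokens + tokens > MAX_VISUAL_TOKENS) {
      batches.push(current);
      current = [];
      currentTokens = 0;
    }
    current.push(img);
    currentTokens += tokens;
  }
  if (current.length > 0) batches.push(current);
  return batches;
}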
/**
* Convert PDF to JPEG images using ImageMagick with dimension tracking
*/
function convertPdfToImages(pdfPath: string): IImageData[] {
const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
const outputPattern = path.join(tempDir, 'page-%d.png');
const outputPattern = path.join(tempDir, 'page-%d.jpg');
try {
execSync(
@@ -63,13 +117,24 @@ function convertPdfToImages(pdfPath: string): string[] {
{ stdio: 'pipe' }
);
const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.png')).sort();
const images: string[] = [];
const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.jpg')).sort();
const images: IImageData[] = [];
for (const file of files) {
for (let i = 0; i < files.length; i++) {
const file = files[i];
const imagePath = path.join(tempDir, file);
const imageData = fs.readFileSync(imagePath);
images.push(imageData.toString('base64'));
// Get image dimensions using identify command
const dimensions = execSync(`identify -format "%w %h" "${imagePath}"`, { encoding: 'utf-8' }).trim();
const [width, height] = dimensions.split(' ').map(Number);
images.push({
base64: imageData.toString('base64'),
width,
height,
pageNum: i + 1,
});
}
return images;
@@ -79,10 +144,28 @@ function convertPdfToImages(pdfPath: string): string[] {
}
/**
* Convert a single page to markdown using Nanonets-OCR-s
 * Convert a batch of pages to markdown using Nanonets-OCR2-3B
*/
async function convertPageToMarkdown(image: string, pageNum: number): Promise<string> {
async function convertBatchToMarkdown(batch: IImageData[]): Promise<string> {
const startTime = Date.now();
const pageNums = batch.map(img => img.pageNum).join(', ');
// Build content array with all images first, then the prompt
const content: Array<{ type: string; image_url?: { url: string }; text?: string }> = [];
for (const img of batch) {
content.push({
type: 'image_url',
image_url: { url: `data:image/jpeg;base64,${img.base64}` },
});
}
// Add prompt with page separator instruction if multiple pages
const promptText = batch.length > 1
? `${NANONETS_OCR_PROMPT}\n\nPlease clearly separate each page's content with "--- PAGE N ---" markers, where N is the page number starting from ${batch[0].pageNum}.`
: NANONETS_OCR_PROMPT;
content.push({ type: 'text', text: promptText });
const response = await fetch(`${NANONETS_URL}/chat/completions`, {
method: 'POST',
@@ -94,14 +177,12 @@ async function convertPageToMarkdown(image: string, pageNum: number): Promise<st
model: NANONETS_MODEL,
messages: [{
role: 'user',
content: [
{ type: 'image_url', image_url: { url: `data:image/png;base64,${image}` }},
{ type: 'text', text: NANONETS_OCR_PROMPT },
],
content,
}],
max_tokens: 4096,
max_tokens: 4096 * batch.length, // Scale output tokens with batch size
temperature: 0.0,
}),
signal: AbortSignal.timeout(600000), // 10 minute timeout for OCR
});
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
@@ -112,25 +193,35 @@ async function convertPageToMarkdown(image: string, pageNum: number): Promise<st
}
const data = await response.json();
const content = (data.choices?.[0]?.message?.content || '').trim();
console.log(` Page ${pageNum}: ${content.length} chars (${elapsed}s)`);
return content;
let responseContent = (data.choices?.[0]?.message?.content || '').trim();
// For single-page batches, add page marker if not present
if (batch.length === 1 && !responseContent.includes('--- PAGE')) {
responseContent = `--- PAGE ${batch[0].pageNum} ---\n${responseContent}`;
}
console.log(` Pages [${pageNums}]: ${responseContent.length} chars (${elapsed}s)`);
return responseContent;
}
/**
* Convert all pages of a document to markdown
* Convert all pages of a document to markdown using smart batching
*/
async function convertDocumentToMarkdown(images: string[], docName: string): Promise<string> {
console.log(` [${docName}] Converting ${images.length} page(s)...`);
async function convertDocumentToMarkdown(images: IImageData[], docName: string): Promise<string> {
const batches = batchImages(images);
console.log(` [${docName}] Processing ${images.length} page(s) in ${batches.length} batch(es)...`);
const markdownPages: string[] = [];
const markdownParts: string[] = [];
for (let i = 0; i < images.length; i++) {
const markdown = await convertPageToMarkdown(images[i], i + 1);
markdownPages.push(`--- PAGE ${i + 1} ---\n${markdown}`);
for (let i = 0; i < batches.length; i++) {
const batch = batches[i];
const batchTokens = batch.reduce((sum, img) => sum + estimateVisualTokens(img.width, img.height), 0);
console.log(` Batch ${i + 1}: ${batch.length} page(s), ~${batchTokens} tokens`);
const markdown = await convertBatchToMarkdown(batch);
markdownParts.push(markdown);
}
const fullMarkdown = markdownPages.join('\n\n');
const fullMarkdown = markdownParts.join('\n\n');
console.log(` [${docName}] Complete: ${fullMarkdown.length} chars total`);
return fullMarkdown;
}
@@ -161,25 +252,6 @@ async function ensureExtractionModel(): Promise<boolean> {
const models = data.models || [];
if (models.some((m: { name: string }) => m.name === EXTRACTION_MODEL)) {
console.log(` [Ollama] Model available: ${EXTRACTION_MODEL}`);
// Warmup: send a simple request to ensure model is loaded
console.log(` [Ollama] Warming up model...`);
const warmupResponse = await fetch(`${OLLAMA_URL}/api/chat`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model: EXTRACTION_MODEL,
messages: [{ role: 'user', content: 'Return: [{"test": 1}]' }],
stream: false,
}),
signal: AbortSignal.timeout(120000),
});
if (warmupResponse.ok) {
const warmupData = await warmupResponse.json();
console.log(` [Ollama] Warmup complete (${warmupData.message?.content?.length || 0} chars)`);
}
return true;
}
}
@@ -198,67 +270,107 @@ async function ensureExtractionModel(): Promise<boolean> {
}
/**
* Extract transactions from markdown using GPT-OSS 20B (streaming)
* Try to extract valid JSON from a response string
*/
async function extractTransactionsFromMarkdown(markdown: string, queryId: string): Promise<ITransaction[]> {
console.log(` [${queryId}] Sending to ${EXTRACTION_MODEL}...`);
console.log(` [${queryId}] Markdown length: ${markdown.length}`);
const startTime = Date.now();
function tryExtractJson(response: string): unknown[] | null {
// Remove thinking tags
let clean = response.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
const fullPrompt = JSON_EXTRACTION_PROMPT + markdown;
console.log(` [${queryId}] Prompt preview: ${fullPrompt.substring(0, 200)}...`);
const response = await fetch(`${OLLAMA_URL}/api/chat`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model: EXTRACTION_MODEL,
messages: [{
role: 'user',
content: fullPrompt,
}],
stream: true,
}),
signal: AbortSignal.timeout(600000), // 10 minute timeout
});
if (!response.ok) {
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
console.log(` [${queryId}] ERROR: ${response.status} (${elapsed}s)`);
throw new Error(`Ollama API error: ${response.status}`);
// Try task_complete tags first
const completeMatch = clean.match(/<task_complete>([\s\S]*?)<\/task_complete>/);
if (completeMatch) {
clean = completeMatch[1].trim();
}
// Stream the response and log to console
let content = '';
const reader = response.body!.getReader();
const decoder = new TextDecoder();
// Try code block
const codeBlockMatch = clean.match(/```(?:json)?\s*([\s\S]*?)```/);
const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : clean;
process.stdout.write(` [${queryId}] `);
while (true) {
const { done, value } = await reader.read();
if (done) break;
const chunk = decoder.decode(value, { stream: true });
// Each line is a JSON object
for (const line of chunk.split('\n').filter(l => l.trim())) {
try {
const parsed = JSON.parse(jsonStr);
if (Array.isArray(parsed)) return parsed;
} catch {
// Try to find JSON array
const jsonMatch = jsonStr.match(/\[[\s\S]*\]/);
if (jsonMatch) {
try {
const json = JSON.parse(line);
const token = json.message?.content || '';
if (token) {
process.stdout.write(token);
content += token;
}
const parsed = JSON.parse(sanitizeJson(jsonMatch[0]));
if (Array.isArray(parsed)) return parsed;
} catch {
// Ignore parse errors for partial chunks
return null;
}
}
return null;
}
return null;
}
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
console.log(`\n [${queryId}] Done: ${content.length} chars (${elapsed}s)`);
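// sanitizeJson (used above) is defined elsewhere in this file. A minimal
// version, assuming it only repairs the most common model-generated JSON
// slips, might look like this -- illustrative, not the actual implementation:
function sanitizeJsonSketch(jsonStr: string): string {
  return jsonStr
    .replace(/[\u201C\u201D]/g, '"') // smart double quotes -> straight quotes
    .replace(/,\s*([\]}])/g, '$1'); // drop trailing commas before ] or }
}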
/**
* Extract transactions from markdown using smartagent DualAgentOrchestrator
* Validates JSON and retries if invalid
*/
async function extractTransactionsFromMarkdown(markdown: string, queryId: string): Promise<ITransaction[]> {
const startTime = Date.now();
return parseJsonResponse(content, queryId);
console.log(` [${queryId}] Statement: ${markdown.length} chars`);
// Build the extraction task with document context
const taskPrompt = `Extract all transactions from this bank statement document and output ONLY the JSON array:
${markdown}
${JSON_EXTRACTION_PROMPT}
Before completing, validate your JSON using the json.validate tool:
<tool_call>
<tool>json</tool>
<action>validate</action>
<params>{"jsonString": "YOUR_JSON_ARRAY_HERE"}</params>
</tool_call>
Only complete after validation passes. Output the final JSON array in <task_complete></task_complete> tags.`;
try {
const result = await orchestrator.run(taskPrompt);
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
console.log(` [${queryId}] Status: ${result.status}, Iterations: ${result.iterations} (${elapsed}s)`);
// Try to parse JSON from result
let jsonData: unknown[] | null = null;
let responseText = result.result || '';
if (result.success && responseText) {
jsonData = tryExtractJson(responseText);
}
// Fallback: try parsing from history
if (!jsonData && (result.history?.length ?? 0) > 0) {
const lastMessage = result.history[result.history.length - 1];
if (lastMessage?.content) {
responseText = lastMessage.content;
jsonData = tryExtractJson(responseText);
}
}
if (!jsonData) {
console.log(` [${queryId}] Failed to parse JSON`);
return [];
}
// Convert to transactions
const txs = jsonData.map((tx: any) => ({
date: String(tx.date || ''),
counterparty: String(tx.counterparty || tx.description || ''),
amount: parseAmount(tx.amount),
}));
console.log(` [${queryId}] Parsed ${txs.length} transactions`);
return txs;
} catch (error) {
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
console.log(` [${queryId}] ERROR: ${error} (${elapsed}s)`);
throw error;
}
}
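// The shape of orchestrator.run()'s result as consumed above, inferred from
// usage; the authoritative type lives in @push.rocks/smartagent:
interface IOrchestratorRunResultSketch {
  success: boolean;
  status: string;
  iterations: number;
  result?: string; // final driver output, ideally the <task_complete> JSON
  history?: { content?: string }[]; // conversation fallback when result is empty
}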
/**
@@ -509,6 +621,53 @@ tap.test('Stage 2: Setup Ollama + GPT-OSS 20B', async () => {
const extractionOk = await ensureExtractionModel();
expect(extractionOk).toBeTrue();
// Initialize SmartAi and DualAgentOrchestrator
console.log(' [SmartAgent] Starting SmartAi...');
await smartAi.start();
console.log(' [SmartAgent] Creating DualAgentOrchestrator...');
orchestrator = new DualAgentOrchestrator({
smartAiInstance: smartAi,
defaultProvider: 'ollama',
guardianPolicyPrompt: `
JSON EXTRACTION POLICY:
- APPROVE all JSON extraction tasks
- APPROVE all json.validate tool calls
- This is a read-only operation - no file system or network access needed
- The task is to extract structured transaction data from document text
`,
driverSystemMessage: `You are a precise JSON extraction assistant. Your only job is to extract transaction data from bank statements.
CRITICAL RULES:
1. Output valid JSON array with the exact format requested
2. Amounts should be NEGATIVE for debits/withdrawals, POSITIVE for credits/deposits
3. IMPORTANT: Before completing, validate your JSON using the json.validate tool:
<tool_call>
<tool>json</tool>
<action>validate</action>
<params>{"jsonString": "YOUR_JSON_ARRAY"}</params>
</tool_call>
4. Only complete after validation passes
When done, wrap your JSON array in <task_complete></task_complete> tags.`,
maxIterations: 5,
// Enable streaming for real-time progress visibility
onToken: (token, source) => {
if (source === 'driver') {
process.stdout.write(token);
}
},
});
// Register JsonValidatorTool for self-validation
orchestrator.registerTool(new JsonValidatorTool());
console.log(' [SmartAgent] Starting orchestrator...');
await orchestrator.start();
console.log(' [SmartAgent] Ready for extraction');
});
let passedCount = 0;
@@ -560,11 +719,19 @@ for (const tc of testCases) {
}
tap.test('Summary', async () => {
// Cleanup orchestrator and SmartAi
if (orchestrator) {
console.log('\n [SmartAgent] Stopping orchestrator...');
await orchestrator.stop();
}
console.log(' [SmartAgent] Stopping SmartAi...');
await smartAi.stop();
console.log(`\n======================================================`);
console.log(` Bank Statement Summary (Nanonets + GPT-OSS 20B Sequential)`);
console.log(` Bank Statement Summary (Nanonets + SmartAgent)`);
console.log(`======================================================`);
console.log(`  Stage 1: Nanonets-OCR2-3B (document -> markdown)`);
console.log(` Stage 2: GPT-OSS 20B (markdown -> JSON)`);
console.log(` Stage 2: GPT-OSS 20B + SmartAgent (markdown -> JSON)`);
console.log(` Passed: ${passedCount}/${testCases.length}`);
console.log(` Failed: ${failedCount}/${testCases.length}`);
console.log(`======================================================\n`);


@@ -0,0 +1,440 @@
/**
* Invoice extraction tuning - uses pre-generated markdown files
*
* Skips OCR stage, only runs GPT-OSS extraction on existing .debug.md files.
* Use this to quickly iterate on extraction prompts and logic.
*
* Run with: tstest test/test.invoices.extraction.ts --verbose
*/
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { ensureMiniCpm } from './helpers/docker.js';
const OLLAMA_URL = 'http://localhost:11434';
const EXTRACTION_MODEL = 'gpt-oss:20b';
// Test these specific invoices (must have .debug.md files)
const TEST_INVOICES = [
'consensus_2021-09',
'hetzner_2022-04',
'qonto_2021-08',
'qonto_2021-09',
];
interface IInvoice {
invoice_number: string;
invoice_date: string;
vendor_name: string;
currency: string;
net_amount: number;
vat_amount: number;
total_amount: number;
}
interface ITestCase {
name: string;
markdownPath: string;
jsonPath: string;
}
// JSON extraction prompt for GPT-OSS 20B (sent AFTER the invoice text is provided)
const JSON_EXTRACTION_PROMPT = `Extract key fields from the invoice. Return ONLY valid JSON.
WHERE TO FIND DATA:
- invoice_number, invoice_date, vendor_name: Look in the HEADER section at the TOP of PAGE 1 (near "Invoice no.", "Invoice date:", "Rechnungsnummer")
- net_amount, vat_amount, total_amount: Look in the SUMMARY section at the BOTTOM (look for "Total", "Amount due", "Gesamtbetrag")
RULES:
1. invoice_number: Extract ONLY the value (e.g., "R0015632540"), NOT the label "Invoice no."
2. invoice_date: Convert to YYYY-MM-DD format (e.g., "14/04/2022" → "2022-04-14")
3. vendor_name: The company issuing the invoice
4. currency: EUR, USD, or GBP
5. net_amount: Total before tax
6. vat_amount: Tax amount
7. total_amount: Final total with tax
JSON only:
{"invoice_number":"X","invoice_date":"YYYY-MM-DD","vendor_name":"X","currency":"EUR","net_amount":0,"vat_amount":0,"total_amount":0}`;
/**
* Ensure GPT-OSS 20B model is available
*/
async function ensureExtractionModel(): Promise<boolean> {
try {
const response = await fetch(`${OLLAMA_URL}/api/tags`);
if (response.ok) {
const data = await response.json();
const models = data.models || [];
if (models.some((m: { name: string }) => m.name === EXTRACTION_MODEL)) {
console.log(` [Ollama] Model available: ${EXTRACTION_MODEL}`);
return true;
}
}
} catch {
return false;
}
console.log(` [Ollama] Pulling ${EXTRACTION_MODEL}...`);
const pullResponse = await fetch(`${OLLAMA_URL}/api/pull`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ name: EXTRACTION_MODEL, stream: false }),
});
return pullResponse.ok;
}
/**
* Parse amount from string (handles European format)
*/
function parseAmount(s: string | number | undefined): number {
if (s === undefined || s === null) return 0;
if (typeof s === 'number') return s;
const match = s.match(/([\d.,]+)/);
if (!match) return 0;
const numStr = match[1];
const normalized = numStr.includes(',') && numStr.indexOf(',') > numStr.lastIndexOf('.')
? numStr.replace(/\./g, '').replace(',', '.')
: numStr.replace(/,/g, '');
return parseFloat(normalized) || 0;
}
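// Examples of the separator heuristic above (assumed inputs, not test data):
//   parseAmount('1.234,56')  -> 1234.56 (comma after last dot => European format)
//   parseAmount('1,234.56')  -> 1234.56 (otherwise commas are thousands separators)
//   parseAmount('EUR 99,00') -> 99      (regex grabs "99,00", parsed as 99.00)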
/**
* Extract invoice number - minimal normalization
*/
function extractInvoiceNumber(s: string | undefined): string {
if (!s) return '';
return s.replace(/\*\*/g, '').replace(/`/g, '').trim();
}
/**
* Extract date (YYYY-MM-DD) from response
*/
function extractDate(s: string | undefined): string {
if (!s) return '';
let clean = s.replace(/\*\*/g, '').replace(/`/g, '').trim();
const isoMatch = clean.match(/(\d{4}-\d{2}-\d{2})/);
if (isoMatch) return isoMatch[1];
const dmyMatch = clean.match(/(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})/);
if (dmyMatch) {
return `${dmyMatch[3]}-${dmyMatch[2].padStart(2, '0')}-${dmyMatch[1].padStart(2, '0')}`;
}
return clean.replace(/[^\d-]/g, '').trim();
}
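// Examples (assumed inputs): extractDate('**14/04/2022**') -> '2022-04-14';
// extractDate('2022-04-14') -> '2022-04-14' (ISO dates pass through unchanged).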
/**
* Extract currency
*/
function extractCurrency(s: string | undefined): string {
if (!s) return 'EUR';
const upper = s.toUpperCase();
if (upper.includes('EUR') || upper.includes('€')) return 'EUR';
if (upper.includes('USD') || upper.includes('$')) return 'USD';
if (upper.includes('GBP') || upper.includes('£')) return 'GBP';
return 'EUR';
}
/**
* Extract JSON from response
*/
function extractJsonFromResponse(response: string): Record<string, unknown> | null {
let cleanResponse = response.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
const codeBlockMatch = cleanResponse.match(/```(?:json)?\s*([\s\S]*?)```/);
const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : cleanResponse;
try {
return JSON.parse(jsonStr);
} catch {
const jsonMatch = jsonStr.match(/\{[\s\S]*\}/);
if (jsonMatch) {
try {
return JSON.parse(jsonMatch[0]);
} catch {
return null;
}
}
return null;
}
}
/**
* Parse JSON response into IInvoice
*/
function parseJsonToInvoice(response: string): IInvoice | null {
const parsed = extractJsonFromResponse(response);
if (!parsed) return null;
return {
invoice_number: extractInvoiceNumber(String(parsed.invoice_number || '')),
invoice_date: extractDate(String(parsed.invoice_date || '')),
vendor_name: String(parsed.vendor_name || '').replace(/\*\*/g, '').replace(/`/g, '').trim(),
currency: extractCurrency(String(parsed.currency || '')),
net_amount: parseAmount(parsed.net_amount as string | number),
vat_amount: parseAmount(parsed.vat_amount as string | number),
total_amount: parseAmount(parsed.total_amount as string | number),
};
}
/**
* Extract invoice from markdown using GPT-OSS 20B (streaming)
*/
async function extractInvoiceFromMarkdown(markdown: string, queryId: string): Promise<IInvoice | null> {
const startTime = Date.now();
console.log(` [${queryId}] Invoice: ${markdown.length} chars, Prompt: ${JSON_EXTRACTION_PROMPT.length} chars`);
const response = await fetch(`${OLLAMA_URL}/api/chat`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model: EXTRACTION_MODEL,
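// The scripted turns below prime the chat so the model ingests the document
// in one turn and receives the extraction request as a separate later turn.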
messages: [
{ role: 'user', content: 'Hi there, how are you?' },
{ role: 'assistant', content: 'Good, how can I help you today?' },
{ role: 'user', content: `Here is an invoice document:\n\n${markdown}` },
{ role: 'assistant', content: 'I have read the invoice document you provided. I can see all the text content. What would you like me to do with it?' },
{ role: 'user', content: JSON_EXTRACTION_PROMPT },
],
stream: true,
options: {
num_ctx: 32768, // Larger context for long invoices + thinking
temperature: 0, // Deterministic for JSON extraction
},
}),
signal: AbortSignal.timeout(120000), // 2 min timeout
});
if (!response.ok) {
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
console.log(` [${queryId}] ERROR: ${response.status} (${elapsed}s)`);
throw new Error(`Ollama API error: ${response.status}`);
}
// Stream the response
let content = '';
let thinkingContent = '';
let thinkingStarted = false;
let outputStarted = false;
const reader = response.body!.getReader();
const decoder = new TextDecoder();
try {
while (true) {
const { done, value } = await reader.read();
if (done) break;
const chunk = decoder.decode(value, { stream: true });
for (const line of chunk.split('\n').filter(l => l.trim())) {
try {
const json = JSON.parse(line);
const thinking = json.message?.thinking || '';
if (thinking) {
if (!thinkingStarted) {
process.stdout.write(` [${queryId}] THINKING: `);
thinkingStarted = true;
}
process.stdout.write(thinking);
thinkingContent += thinking;
}
const token = json.message?.content || '';
if (token) {
if (!outputStarted) {
if (thinkingStarted) process.stdout.write('\n');
process.stdout.write(` [${queryId}] OUTPUT: `);
outputStarted = true;
}
process.stdout.write(token);
content += token;
}
} catch {
// Ignore parse errors for partial chunks
}
}
}
} finally {
if (thinkingStarted || outputStarted) process.stdout.write('\n');
}
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
console.log(` [${queryId}] Done: ${thinkingContent.length} thinking, ${content.length} output (${elapsed}s)`);
return parseJsonToInvoice(content);
}
/**
* Normalize date to YYYY-MM-DD
*/
function normalizeDate(dateStr: string | null): string {
if (!dateStr) return '';
if (/^\d{4}-\d{2}-\d{2}$/.test(dateStr)) return dateStr;
const monthMap: Record<string, string> = {
JAN: '01', FEB: '02', MAR: '03', APR: '04', MAY: '05', JUN: '06',
JUL: '07', AUG: '08', SEP: '09', OCT: '10', NOV: '11', DEC: '12',
};
let match = dateStr.match(/^(\d{1,2})-([A-Z]{3})-(\d{4})$/i);
if (match) {
return `${match[3]}-${monthMap[match[2].toUpperCase()] || '01'}-${match[1].padStart(2, '0')}`;
}
match = dateStr.match(/^(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})$/);
if (match) {
return `${match[3]}-${match[2].padStart(2, '0')}-${match[1].padStart(2, '0')}`;
}
return dateStr;
}
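// Examples (assumed inputs): normalizeDate('15-APR-2022') -> '2022-04-15';
// normalizeDate('14.04.2022') -> '2022-04-14'; ISO strings pass through.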
/**
* Normalize invoice number for comparison (remove spaces, lowercase)
*/
function normalizeInvoiceNumber(s: string): string {
return s.replace(/\s+/g, '').toLowerCase();
}
/**
* Compare extracted invoice against expected
*/
function compareInvoice(
extracted: IInvoice,
expected: IInvoice
): { match: boolean; errors: string[] } {
const errors: string[] = [];
// Invoice number - normalize spaces for comparison
const extNum = normalizeInvoiceNumber(extracted.invoice_number || '');
const expNum = normalizeInvoiceNumber(expected.invoice_number || '');
if (extNum !== expNum) {
errors.push(`invoice_number: expected "${expected.invoice_number}", got "${extracted.invoice_number}"`);
}
if (normalizeDate(extracted.invoice_date) !== normalizeDate(expected.invoice_date)) {
errors.push(`invoice_date: expected "${expected.invoice_date}", got "${extracted.invoice_date}"`);
}
if (Math.abs(extracted.total_amount - expected.total_amount) > 0.02) {
errors.push(`total_amount: expected ${expected.total_amount}, got ${extracted.total_amount}`);
}
if (extracted.currency?.toUpperCase() !== expected.currency?.toUpperCase()) {
errors.push(`currency: expected "${expected.currency}", got "${extracted.currency}"`);
}
return { match: errors.length === 0, errors };
}
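// Note: the 0.02 tolerance on total_amount absorbs one-cent rounding drift
// between extracted and expected amounts; the other fields must match exactly
// after normalization.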
/**
* Find test cases with existing debug markdown
*/
function findTestCases(): ITestCase[] {
const invoicesDir = path.join(process.cwd(), '.nogit/invoices');
if (!fs.existsSync(invoicesDir)) return [];
const testCases: ITestCase[] = [];
for (const invoiceName of TEST_INVOICES) {
const markdownPath = path.join(invoicesDir, `${invoiceName}.debug.md`);
const jsonPath = path.join(invoicesDir, `${invoiceName}.json`);
if (fs.existsSync(markdownPath) && fs.existsSync(jsonPath)) {
testCases.push({
name: invoiceName,
markdownPath,
jsonPath,
});
} else {
if (!fs.existsSync(markdownPath)) {
console.warn(`Warning: Missing markdown: ${markdownPath}`);
}
if (!fs.existsSync(jsonPath)) {
console.warn(`Warning: Missing JSON: ${jsonPath}`);
}
}
}
return testCases;
}
// ============ TESTS ============
const testCases = findTestCases();
console.log(`\n========================================`);
console.log(` EXTRACTION TUNING TEST`);
console.log(` (Skips OCR, uses existing .debug.md)`);
console.log(`========================================`);
console.log(` Testing ${testCases.length} invoices:`);
for (const tc of testCases) {
console.log(` - ${tc.name}`);
}
console.log(`========================================\n`);
tap.test('Setup Ollama + GPT-OSS 20B', async () => {
const ollamaOk = await ensureMiniCpm();
expect(ollamaOk).toBeTrue();
const extractionOk = await ensureExtractionModel();
expect(extractionOk).toBeTrue();
});
let passedCount = 0;
let failedCount = 0;
for (const tc of testCases) {
tap.test(`Extract ${tc.name}`, async () => {
const expected: IInvoice = JSON.parse(fs.readFileSync(tc.jsonPath, 'utf-8'));
const markdown = fs.readFileSync(tc.markdownPath, 'utf-8');
console.log(`\n ========================================`);
console.log(` === ${tc.name} ===`);
console.log(` ========================================`);
console.log(` EXPECTED: ${expected.invoice_number} | ${expected.invoice_date} | ${expected.total_amount} ${expected.currency}`);
console.log(` Markdown: ${markdown.length} chars`);
const startTime = Date.now();
const extracted = await extractInvoiceFromMarkdown(markdown, tc.name);
if (!extracted) {
failedCount++;
console.log(`\n Result: ✗ FAILED TO PARSE (${((Date.now() - startTime) / 1000).toFixed(1)}s)`);
return;
}
const elapsedMs = Date.now() - startTime;
console.log(` EXTRACTED: ${extracted.invoice_number} | ${extracted.invoice_date} | ${extracted.total_amount} ${extracted.currency}`);
const result = compareInvoice(extracted, expected);
if (result.match) {
passedCount++;
console.log(`\n Result: ✓ MATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
} else {
failedCount++;
console.log(`\n Result: ✗ MISMATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
console.log(` ERRORS:`);
result.errors.forEach(e => console.log(` - ${e}`));
}
});
}
tap.test('Summary', async () => {
const totalInvoices = testCases.length;
const accuracy = totalInvoices > 0 ? (passedCount / totalInvoices) * 100 : 0;
console.log(`\n========================================`);
console.log(` Extraction Tuning Summary`);
console.log(`========================================`);
console.log(` Model: ${EXTRACTION_MODEL}`);
console.log(` Passed: ${passedCount}/${totalInvoices}`);
console.log(` Failed: ${failedCount}/${totalInvoices}`);
console.log(` Accuracy: ${accuracy.toFixed(1)}%`);
console.log(`========================================\n`);
});
export default tap.start();


@@ -0,0 +1,695 @@
/**
* Focused test for failed invoice extractions
*
* Tests only the 4 invoices that failed in the main test:
* - consensus_2021-09: invoice_number "2021/1384" → "20211384" (slash stripped)
* - hetzner_2022-04: model hallucinated after 281s thinking
* - qonto_2021-08: invoice_number "08-21-INVOICE-410870" → "4108705" (prefix stripped)
* - qonto_2021-09: invoice_number "09-21-INVOICE-4303642" → "4303642" (prefix stripped)
*
* Run with: tstest test/test.invoices.failed.ts --verbose
*/
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensureNanonetsOcr, ensureMiniCpm, isContainerRunning } from './helpers/docker.js';
const NANONETS_URL = 'http://localhost:8000/v1';
const NANONETS_MODEL = 'nanonets/Nanonets-OCR2-3B';
const OLLAMA_URL = 'http://localhost:11434';
const EXTRACTION_MODEL = 'gpt-oss:20b';
// Temp directory for storing markdown between stages
const TEMP_MD_DIR = path.join(os.tmpdir(), 'nanonets-invoices-failed-debug');
// Only test these specific invoices that failed
const FAILED_INVOICES = [
'consensus_2021-09',
'hetzner_2022-04',
'qonto_2021-08',
'qonto_2021-09',
];
interface IInvoice {
invoice_number: string;
invoice_date: string;
vendor_name: string;
currency: string;
net_amount: number;
vat_amount: number;
total_amount: number;
}
interface IImageData {
base64: string;
width: number;
height: number;
pageNum: number;
}
interface ITestCase {
name: string;
pdfPath: string;
jsonPath: string;
markdownPath?: string;
}
// Nanonets-specific prompt for document OCR to markdown
const NANONETS_OCR_PROMPT = `Extract the text from the above document as if you were reading it naturally.
Return the tables in html format.
Return the equations in LaTeX representation.
If there is an image in the document and image caption is not present, add a small description inside <img></img> tag.
Watermarks should be wrapped in brackets. Ex: <watermark>OFFICIAL COPY</watermark>.
Page numbers should be wrapped in brackets. Ex: <page_number>14</page_number>.`;
// JSON extraction prompt for GPT-OSS 20B
const JSON_EXTRACTION_PROMPT = `You are an invoice data extractor. Below is an invoice document converted to text/markdown. Extract the key invoice fields as JSON.
IMPORTANT RULES:
1. invoice_number: The unique invoice/document number (NOT VAT ID, NOT customer ID). PRESERVE ALL CHARACTERS including slashes, dashes, and prefixes.
2. invoice_date: Format as YYYY-MM-DD
3. vendor_name: The company that issued the invoice
4. currency: EUR, USD, or GBP
5. net_amount: Amount before tax
6. vat_amount: Tax/VAT amount
7. total_amount: Final total (gross amount)
Return ONLY this JSON format, no explanation:
{
"invoice_number": "INV-2024-001",
"invoice_date": "2024-01-15",
"vendor_name": "Company Name",
"currency": "EUR",
"net_amount": 100.00,
"vat_amount": 19.00,
"total_amount": 119.00
}
INVOICE TEXT:
`;
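// The "PRESERVE ALL CHARACTERS" rule above targets the documented failures:
// "2021/1384" must not collapse to "20211384", and "08-21-INVOICE-410870"
// must keep its "08-21-INVOICE-" prefix.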
const PATCH_SIZE = 14;
/**
* Estimate visual tokens for an image based on dimensions
*/
function estimateVisualTokens(width: number, height: number): number {
return Math.ceil((width * height) / (PATCH_SIZE * PATCH_SIZE));
}
/**
* Process images one page at a time for reliability
*/
function batchImages(images: IImageData[]): IImageData[][] {
return images.map(img => [img]);
}
/**
* Convert PDF to JPEG images using ImageMagick with dimension tracking
*/
function convertPdfToImages(pdfPath: string): IImageData[] {
const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
const outputPattern = path.join(tempDir, 'page-%d.jpg');
try {
execSync(
`convert -density 150 -quality 90 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
{ stdio: 'pipe' }
);
const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.jpg')).sort();
const images: IImageData[] = [];
for (let i = 0; i < files.length; i++) {
const file = files[i];
const imagePath = path.join(tempDir, file);
const imageData = fs.readFileSync(imagePath);
const dimensions = execSync(`identify -format "%w %h" "${imagePath}"`, { encoding: 'utf-8' }).trim();
const [width, height] = dimensions.split(' ').map(Number);
images.push({
base64: imageData.toString('base64'),
width,
height,
pageNum: i + 1,
});
}
return images;
} finally {
fs.rmSync(tempDir, { recursive: true, force: true });
}
}
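// Shell equivalents of the two ImageMagick calls above, for manual debugging
// (output dimensions are an example, not a measured value):
//   convert -density 150 -quality 90 input.pdf -background white -alpha remove page-%d.jpg
//   identify -format "%w %h" page-0.jpg   # e.g. "1240 1754"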
/**
 * Convert a batch of pages to markdown using Nanonets-OCR2-3B
*/
async function convertBatchToMarkdown(batch: IImageData[]): Promise<string> {
const startTime = Date.now();
const pageNums = batch.map(img => img.pageNum).join(', ');
const content: Array<{ type: string; image_url?: { url: string }; text?: string }> = [];
for (const img of batch) {
content.push({
type: 'image_url',
image_url: { url: `data:image/jpeg;base64,${img.base64}` },
});
}
const promptText = batch.length > 1
? `${NANONETS_OCR_PROMPT}\n\nPlease clearly separate each page's content with "--- PAGE N ---" markers, where N is the page number starting from ${batch[0].pageNum}.`
: NANONETS_OCR_PROMPT;
content.push({ type: 'text', text: promptText });
const response = await fetch(`${NANONETS_URL}/chat/completions`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': 'Bearer dummy',
},
body: JSON.stringify({
model: NANONETS_MODEL,
messages: [{
role: 'user',
content,
}],
max_tokens: 4096 * batch.length,
temperature: 0.0,
}),
signal: AbortSignal.timeout(600000),
});
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
if (!response.ok) {
const errorText = await response.text();
throw new Error(`Nanonets API error: ${response.status} - ${errorText}`);
}
const data = await response.json();
let responseContent = (data.choices?.[0]?.message?.content || '').trim();
if (batch.length === 1 && !responseContent.includes('--- PAGE')) {
responseContent = `--- PAGE ${batch[0].pageNum} ---\n${responseContent}`;
}
console.log(` Pages [${pageNums}]: ${responseContent.length} chars (${elapsed}s)`);
return responseContent;
}
/**
* Convert all pages of a document to markdown using smart batching
*/
async function convertDocumentToMarkdown(images: IImageData[], docName: string): Promise<string> {
const batches = batchImages(images);
console.log(` [${docName}] Processing ${images.length} page(s) in ${batches.length} batch(es)...`);
const markdownParts: string[] = [];
for (let i = 0; i < batches.length; i++) {
const batch = batches[i];
const batchTokens = batch.reduce((sum, img) => sum + estimateVisualTokens(img.width, img.height), 0);
console.log(` Batch ${i + 1}: ${batch.length} page(s), ~${batchTokens} tokens`);
const markdown = await convertBatchToMarkdown(batch);
markdownParts.push(markdown);
}
const fullMarkdown = markdownParts.join('\n\n');
console.log(` [${docName}] Complete: ${fullMarkdown.length} chars total`);
return fullMarkdown;
}
/**
* Stop Nanonets container
*/
function stopNanonets(): void {
console.log(' [Docker] Stopping Nanonets container...');
try {
execSync('docker stop nanonets-test 2>/dev/null || true', { stdio: 'pipe' });
execSync('sleep 5', { stdio: 'pipe' });
console.log(' [Docker] Nanonets stopped');
} catch {
console.log(' [Docker] Nanonets was not running');
}
}
/**
* Ensure GPT-OSS 20B model is available
*/
async function ensureExtractionModel(): Promise<boolean> {
try {
const response = await fetch(`${OLLAMA_URL}/api/tags`);
if (response.ok) {
const data = await response.json();
const models = data.models || [];
if (models.some((m: { name: string }) => m.name === EXTRACTION_MODEL)) {
console.log(` [Ollama] Model available: ${EXTRACTION_MODEL}`);
return true;
}
}
} catch {
return false;
}
console.log(` [Ollama] Pulling ${EXTRACTION_MODEL}...`);
const pullResponse = await fetch(`${OLLAMA_URL}/api/pull`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ name: EXTRACTION_MODEL, stream: false }),
});
return pullResponse.ok;
}
/**
* Parse amount from string (handles European format)
*/
function parseAmount(s: string | number | undefined): number {
if (s === undefined || s === null) return 0;
if (typeof s === 'number') return s;
const match = s.match(/([\d.,]+)/);
if (!match) return 0;
const numStr = match[1];
const normalized = numStr.includes(',') && numStr.indexOf(',') > numStr.lastIndexOf('.')
? numStr.replace(/\./g, '').replace(',', '.')
: numStr.replace(/,/g, '');
return parseFloat(normalized) || 0;
}
/**
* Extract invoice number - MINIMAL normalization for debugging
*/
function extractInvoiceNumber(s: string | undefined): string {
if (!s) return '';
// Only remove markdown formatting, preserve everything else
return s.replace(/\*\*/g, '').replace(/`/g, '').trim();
}
/**
* Extract date (YYYY-MM-DD) from response
*/
function extractDate(s: string | undefined): string {
if (!s) return '';
let clean = s.replace(/\*\*/g, '').replace(/`/g, '').trim();
const isoMatch = clean.match(/(\d{4}-\d{2}-\d{2})/);
if (isoMatch) return isoMatch[1];
const dmyMatch = clean.match(/(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})/);
if (dmyMatch) {
return `${dmyMatch[3]}-${dmyMatch[2].padStart(2, '0')}-${dmyMatch[1].padStart(2, '0')}`;
}
return clean.replace(/[^\d-]/g, '').trim();
}
/**
* Extract currency
*/
function extractCurrency(s: string | undefined): string {
if (!s) return 'EUR';
const upper = s.toUpperCase();
if (upper.includes('EUR') || upper.includes('€')) return 'EUR';
if (upper.includes('USD') || upper.includes('$')) return 'USD';
if (upper.includes('GBP') || upper.includes('£')) return 'GBP';
return 'EUR';
}
/**
* Extract JSON from response
*/
function extractJsonFromResponse(response: string): Record<string, unknown> | null {
let cleanResponse = response.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
const codeBlockMatch = cleanResponse.match(/```(?:json)?\s*([\s\S]*?)```/);
const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : cleanResponse;
try {
return JSON.parse(jsonStr);
} catch {
const jsonMatch = jsonStr.match(/\{[\s\S]*\}/);
if (jsonMatch) {
try {
return JSON.parse(jsonMatch[0]);
} catch {
return null;
}
}
return null;
}
}
/**
* Parse JSON response into IInvoice
*/
function parseJsonToInvoice(response: string): IInvoice | null {
const parsed = extractJsonFromResponse(response);
if (!parsed) return null;
return {
invoice_number: extractInvoiceNumber(String(parsed.invoice_number || '')),
invoice_date: extractDate(String(parsed.invoice_date || '')),
vendor_name: String(parsed.vendor_name || '').replace(/\*\*/g, '').replace(/`/g, '').trim(),
currency: extractCurrency(String(parsed.currency || '')),
net_amount: parseAmount(parsed.net_amount as string | number),
vat_amount: parseAmount(parsed.vat_amount as string | number),
total_amount: parseAmount(parsed.total_amount as string | number),
};
}
/**
* Extract invoice from markdown using GPT-OSS 20B (streaming)
*/
async function extractInvoiceFromMarkdown(markdown: string, queryId: string): Promise<IInvoice | null> {
const startTime = Date.now();
const fullPrompt = JSON_EXTRACTION_PROMPT + markdown;
// Log exact prompt
console.log(`\n [${queryId}] ===== PROMPT =====`);
console.log(fullPrompt);
console.log(` [${queryId}] ===== END PROMPT (${fullPrompt.length} chars) =====\n`);
const response = await fetch(`${OLLAMA_URL}/api/chat`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model: EXTRACTION_MODEL,
messages: [
{ role: 'user', content: 'Hi there, how are you?' },
{ role: 'assistant', content: 'Good, how can I help you today?' },
{ role: 'user', content: fullPrompt },
],
stream: true,
}),
signal: AbortSignal.timeout(600000),
});
if (!response.ok) {
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
console.log(` [${queryId}] ERROR: ${response.status} (${elapsed}s)`);
throw new Error(`Ollama API error: ${response.status}`);
}
// Stream the response
let content = '';
let thinkingContent = '';
let thinkingStarted = false;
let outputStarted = false;
const reader = response.body!.getReader();
const decoder = new TextDecoder();
try {
while (true) {
const { done, value } = await reader.read();
if (done) break;
const chunk = decoder.decode(value, { stream: true });
for (const line of chunk.split('\n').filter(l => l.trim())) {
try {
const json = JSON.parse(line);
const thinking = json.message?.thinking || '';
if (thinking) {
if (!thinkingStarted) {
process.stdout.write(` [${queryId}] THINKING: `);
thinkingStarted = true;
}
process.stdout.write(thinking);
thinkingContent += thinking;
}
const token = json.message?.content || '';
if (token) {
if (!outputStarted) {
if (thinkingStarted) process.stdout.write('\n');
process.stdout.write(` [${queryId}] OUTPUT: `);
outputStarted = true;
}
process.stdout.write(token);
content += token;
}
} catch {
// Ignore parse errors for partial chunks
}
}
}
} finally {
if (thinkingStarted || outputStarted) process.stdout.write('\n');
}
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
console.log(` [${queryId}] Done: ${thinkingContent.length} thinking chars, ${content.length} output chars (${elapsed}s)`);
// Log raw response for debugging
console.log(` [${queryId}] RAW RESPONSE: ${content}`);
return parseJsonToInvoice(content);
}
/**
* Extract invoice (single pass)
*/
async function extractInvoice(markdown: string, docName: string): Promise<IInvoice> {
console.log(` [${docName}] Extracting...`);
const invoice = await extractInvoiceFromMarkdown(markdown, docName);
if (!invoice) {
return {
invoice_number: '',
invoice_date: '',
vendor_name: '',
currency: 'EUR',
net_amount: 0,
vat_amount: 0,
total_amount: 0,
};
}
console.log(` [${docName}] Extracted: ${JSON.stringify(invoice, null, 2)}`);
return invoice;
}
/**
* Normalize date to YYYY-MM-DD
*/
function normalizeDate(dateStr: string | null): string {
if (!dateStr) return '';
if (/^\d{4}-\d{2}-\d{2}$/.test(dateStr)) return dateStr;
const monthMap: Record<string, string> = {
JAN: '01', FEB: '02', MAR: '03', APR: '04', MAY: '05', JUN: '06',
JUL: '07', AUG: '08', SEP: '09', OCT: '10', NOV: '11', DEC: '12',
};
let match = dateStr.match(/^(\d{1,2})-([A-Z]{3})-(\d{4})$/i);
if (match) {
return `${match[3]}-${monthMap[match[2].toUpperCase()] || '01'}-${match[1].padStart(2, '0')}`;
}
match = dateStr.match(/^(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})$/);
if (match) {
return `${match[3]}-${match[2].padStart(2, '0')}-${match[1].padStart(2, '0')}`;
}
return dateStr;
}
/**
* Compare extracted invoice against expected - detailed output
*/
function compareInvoice(
extracted: IInvoice,
expected: IInvoice
): { match: boolean; errors: string[] } {
const errors: string[] = [];
// Invoice number comparison - exact match after whitespace normalization
const extNum = extracted.invoice_number?.trim() || '';
const expNum = expected.invoice_number?.trim() || '';
if (extNum.toLowerCase() !== expNum.toLowerCase()) {
errors.push(`invoice_number: expected "${expected.invoice_number}", got "${extracted.invoice_number}"`);
}
if (normalizeDate(extracted.invoice_date) !== normalizeDate(expected.invoice_date)) {
errors.push(`invoice_date: expected "${expected.invoice_date}", got "${extracted.invoice_date}"`);
}
if (Math.abs(extracted.total_amount - expected.total_amount) > 0.02) {
errors.push(`total_amount: expected ${expected.total_amount}, got ${extracted.total_amount}`);
}
if (extracted.currency?.toUpperCase() !== expected.currency?.toUpperCase()) {
errors.push(`currency: expected "${expected.currency}", got "${extracted.currency}"`);
}
return { match: errors.length === 0, errors };
}
/**
* Find test cases for failed invoices only
*/
function findTestCases(): ITestCase[] {
const testDir = path.join(process.cwd(), '.nogit/invoices');
if (!fs.existsSync(testDir)) return [];
const files = fs.readdirSync(testDir);
const testCases: ITestCase[] = [];
for (const invoiceName of FAILED_INVOICES) {
const pdfFile = `${invoiceName}.pdf`;
const jsonFile = `${invoiceName}.json`;
if (files.includes(pdfFile) && files.includes(jsonFile)) {
testCases.push({
name: invoiceName,
pdfPath: path.join(testDir, pdfFile),
jsonPath: path.join(testDir, jsonFile),
});
} else {
console.warn(`Warning: Missing files for ${invoiceName}`);
}
}
return testCases;
}
// ============ TESTS ============
const testCases = findTestCases();
console.log(`\n========================================`);
console.log(` FAILED INVOICES DEBUG TEST`);
console.log(`========================================`);
console.log(` Testing ${testCases.length} failed invoices:`);
for (const tc of testCases) {
console.log(` - ${tc.name}`);
}
console.log(`========================================\n`);
// Ensure temp directory exists
if (!fs.existsSync(TEMP_MD_DIR)) {
fs.mkdirSync(TEMP_MD_DIR, { recursive: true });
}
// -------- STAGE 1: OCR with Nanonets --------
tap.test('Stage 1: Setup Nanonets', async () => {
console.log('\n========== STAGE 1: Nanonets OCR ==========\n');
const ok = await ensureNanonetsOcr();
expect(ok).toBeTrue();
});
tap.test('Stage 1: Convert failed invoices to markdown', async () => {
console.log('\n  Converting failed invoice PDFs to markdown with Nanonets-OCR2-3B...\n');
for (const tc of testCases) {
console.log(`\n === ${tc.name} ===`);
const images = convertPdfToImages(tc.pdfPath);
console.log(` Pages: ${images.length}`);
const markdown = await convertDocumentToMarkdown(images, tc.name);
const mdPath = path.join(TEMP_MD_DIR, `${tc.name}.md`);
fs.writeFileSync(mdPath, markdown);
tc.markdownPath = mdPath;
console.log(` Saved: ${mdPath}`);
// Also save to .nogit for inspection
const debugMdPath = path.join(process.cwd(), '.nogit/invoices', `${tc.name}.debug.md`);
fs.writeFileSync(debugMdPath, markdown);
console.log(` Debug copy: ${debugMdPath}`);
}
console.log('\n Stage 1 complete: All failed invoices converted to markdown\n');
});
tap.test('Stage 1: Stop Nanonets', async () => {
stopNanonets();
await new Promise(resolve => setTimeout(resolve, 3000));
expect(isContainerRunning('nanonets-test')).toBeFalse();
});
// -------- STAGE 2: Extraction with GPT-OSS 20B --------
tap.test('Stage 2: Setup Ollama + GPT-OSS 20B', async () => {
console.log('\n========== STAGE 2: GPT-OSS 20B Extraction ==========\n');
const ollamaOk = await ensureMiniCpm();
expect(ollamaOk).toBeTrue();
const extractionOk = await ensureExtractionModel();
expect(extractionOk).toBeTrue();
});
let passedCount = 0;
let failedCount = 0;
for (const tc of testCases) {
tap.test(`Stage 2: Extract ${tc.name}`, async () => {
const expected: IInvoice = JSON.parse(fs.readFileSync(tc.jsonPath, 'utf-8'));
console.log(`\n ========================================`);
console.log(` === ${tc.name} ===`);
console.log(` ========================================`);
console.log(` EXPECTED:`);
console.log(` invoice_number: "${expected.invoice_number}"`);
console.log(` invoice_date: "${expected.invoice_date}"`);
console.log(` vendor_name: "${expected.vendor_name}"`);
console.log(` total_amount: ${expected.total_amount} ${expected.currency}`);
const startTime = Date.now();
const mdPath = path.join(TEMP_MD_DIR, `${tc.name}.md`);
if (!fs.existsSync(mdPath)) {
throw new Error(`Markdown not found: ${mdPath}. Run Stage 1 first.`);
}
const markdown = fs.readFileSync(mdPath, 'utf-8');
console.log(` Markdown: ${markdown.length} chars`);
const extracted = await extractInvoice(markdown, tc.name);
const elapsedMs = Date.now() - startTime;
console.log(`\n EXTRACTED:`);
console.log(` invoice_number: "${extracted.invoice_number}"`);
console.log(` invoice_date: "${extracted.invoice_date}"`);
console.log(` vendor_name: "${extracted.vendor_name}"`);
console.log(` total_amount: ${extracted.total_amount} ${extracted.currency}`);
const result = compareInvoice(extracted, expected);
if (result.match) {
passedCount++;
console.log(`\n Result: ✓ MATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
} else {
failedCount++;
console.log(`\n Result: ✗ MISMATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
console.log(` ERRORS:`);
result.errors.forEach(e => console.log(` - ${e}`));
}
// Don't fail the test - we're debugging
// expect(result.match).toBeTrue();
});
}
tap.test('Summary', async () => {
const totalInvoices = testCases.length;
const accuracy = totalInvoices > 0 ? (passedCount / totalInvoices) * 100 : 0;
console.log(`\n========================================`);
console.log(` Failed Invoices Debug Summary`);
console.log(`========================================`);
console.log(` Passed: ${passedCount}/${totalInvoices}`);
console.log(` Failed: ${failedCount}/${totalInvoices}`);
console.log(` Accuracy: ${accuracy.toFixed(1)}%`);
console.log(`========================================`);
console.log(` Markdown files saved to: ${TEMP_MD_DIR}`);
console.log(` Debug copies in: .nogit/invoices/*.debug.md`);
console.log(`========================================\n`);
// Don't cleanup temp files for debugging
console.log(` Keeping temp files for debugging.\n`);
});
export default tap.start();


@@ -1,10 +1,10 @@
/**
* Invoice extraction test using MiniCPM-V (visual extraction)
* Invoice extraction test using MiniCPM-V via smartagent DualAgentOrchestrator
*
* Consensus approach:
* 1. Pass 1: Fast JSON extraction
* 2. Pass 2: Confirm with thinking enabled
* 3. If mismatch: repeat until consensus or max attempts
* Uses vision-capable orchestrator with JsonValidatorTool for self-validation:
* 1. Pass images to the orchestrator
* 2. Driver extracts invoice data and validates JSON before completing
* 3. If validation fails, driver retries within the same task
*/
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
@@ -12,6 +12,8 @@ import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensureMiniCpm } from './helpers/docker.js';
import { SmartAi } from '@push.rocks/smartai';
import { DualAgentOrchestrator, JsonValidatorTool } from '@push.rocks/smartagent';
const OLLAMA_URL = 'http://localhost:11434';
const MODEL = 'openbmb/minicpm-v4.5:q8_0';
@@ -26,6 +28,10 @@ interface IInvoice {
total_amount: number;
}
// SmartAi instance and orchestrator (initialized in setup)
let smartAi: SmartAi;
let orchestrator: DualAgentOrchestrator;
/**
* Convert PDF to PNG images using ImageMagick
*/
@@ -54,7 +60,9 @@ function convertPdfToImages(pdfPath: string): string[] {
}
}
const JSON_PROMPT = `Extract invoice data from this image. Return ONLY a JSON object with these exact fields:
const EXTRACTION_PROMPT = `Extract invoice data from the provided image(s).
IMPORTANT: You must output a valid JSON object with these exact fields:
{
"invoice_number": "the invoice number (not VAT ID, not customer ID)",
"invoice_date": "YYYY-MM-DD format",
@@ -64,67 +72,16 @@ const JSON_PROMPT = `Extract invoice data from this image. Return ONLY a JSON ob
"vat_amount": 0.00,
"total_amount": 0.00
}
Return only the JSON, no explanation.`;
/**
* Query MiniCPM-V for JSON output (fast, no thinking)
*/
async function queryJsonFast(images: string[]): Promise<string> {
const response = await fetch(`${OLLAMA_URL}/api/chat`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model: MODEL,
messages: [{
role: 'user',
content: JSON_PROMPT,
images: images,
}],
stream: false,
options: {
num_predict: 1000,
temperature: 0.1,
},
}),
});
Before completing, use the json.validate tool to verify your output is valid JSON with all required fields.
if (!response.ok) {
throw new Error(`Ollama API error: ${response.status}`);
}
<tool_call>
<tool>json</tool>
<action>validate</action>
<params>{"jsonString": "YOUR_JSON_HERE", "requiredFields": ["invoice_number", "invoice_date", "vendor_name", "currency", "net_amount", "vat_amount", "total_amount"]}</params>
</tool_call>
const data = await response.json();
return (data.message?.content || '').trim();
}
/**
* Query MiniCPM-V for JSON output with thinking enabled (slower, more accurate)
*/
async function queryJsonWithThinking(images: string[]): Promise<string> {
const response = await fetch(`${OLLAMA_URL}/api/chat`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model: MODEL,
messages: [{
role: 'user',
content: `Think carefully about this invoice image, then ${JSON_PROMPT}`,
images: images,
}],
stream: false,
options: {
num_predict: 2000,
temperature: 0.1,
},
}),
});
if (!response.ok) {
throw new Error(`Ollama API error: ${response.status}`);
}
const data = await response.json();
return (data.message?.content || '').trim();
}
Only complete the task after validation passes. Output the final JSON in <task_complete> tags.`;
/**
* Parse amount from string (handles European format)
@@ -190,9 +147,31 @@ function extractCurrency(s: string | undefined): string {
}
/**
* Extract JSON from response (handles markdown code blocks)
* Extract JSON from response (handles markdown code blocks and task_complete tags)
*/
function extractJsonFromResponse(response: string): Record<string, unknown> | null {
// Try to find JSON in task_complete tags
const completeMatch = response.match(/<task_complete>([\s\S]*?)<\/task_complete>/);
if (completeMatch) {
const content = completeMatch[1].trim();
// Try to find JSON in the content
const codeBlockMatch = content.match(/```(?:json)?\s*([\s\S]*?)```/);
const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : content;
try {
return JSON.parse(jsonStr);
} catch {
// Try to find JSON object pattern
const jsonMatch = jsonStr.match(/\{[\s\S]*\}/);
if (jsonMatch) {
try {
return JSON.parse(jsonMatch[0]);
} catch {
return null;
}
}
}
}
// Try to find JSON in markdown code block
const codeBlockMatch = response.match(/```(?:json)?\s*([\s\S]*?)```/);
const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : response.trim();
@@ -232,76 +211,27 @@ function parseJsonToInvoice(response: string): IInvoice | null {
}
/**
* Compare two invoices for consensus (key fields must match)
*/
function invoicesMatch(a: IInvoice, b: IInvoice): boolean {
const numMatch = a.invoice_number.toLowerCase() === b.invoice_number.toLowerCase();
const dateMatch = a.invoice_date === b.invoice_date;
const totalMatch = Math.abs(a.total_amount - b.total_amount) < 0.02;
return numMatch && dateMatch && totalMatch;
}
/**
 * Extract invoice data using consensus approach:
 * 1. Pass 1: Fast JSON extraction
 * 2. Pass 2: Confirm with thinking enabled
 * 3. If mismatch: repeat until consensus or max 5 attempts
 */
async function extractInvoiceFromImages(images: string[]): Promise<IInvoice> {
console.log(` [Vision] Processing ${images.length} page(s) with ${MODEL} (consensus)`);
const MAX_ATTEMPTS = 5;
let attempt = 0;
while (attempt < MAX_ATTEMPTS) {
attempt++;
console.log(` [Attempt ${attempt}/${MAX_ATTEMPTS}]`);
// PASS 1: Fast JSON extraction
console.log(` [Pass 1] Fast extraction...`);
const fastResponse = await queryJsonFast(images);
const fastInvoice = parseJsonToInvoice(fastResponse);
if (!fastInvoice) {
console.log(` [Pass 1] JSON parsing failed, retrying...`);
continue;
}
console.log(` [Pass 1] Result: ${fastInvoice.invoice_number} | ${fastInvoice.invoice_date} | ${fastInvoice.total_amount} ${fastInvoice.currency}`);
// PASS 2: Confirm with thinking
console.log(` [Pass 2] Thinking confirmation...`);
const thinkResponse = await queryJsonWithThinking(images);
const thinkInvoice = parseJsonToInvoice(thinkResponse);
if (!thinkInvoice) {
console.log(` [Pass 2] JSON parsing failed, retrying...`);
continue;
}
console.log(` [Pass 2] Result: ${thinkInvoice.invoice_number} | ${thinkInvoice.invoice_date} | ${thinkInvoice.total_amount} ${thinkInvoice.currency}`);
// Check consensus
if (invoicesMatch(fastInvoice, thinkInvoice)) {
console.log(` [Consensus] MATCH - using result`);
return thinkInvoice; // Prefer thinking result
}
console.log(` [Consensus] MISMATCH - repeating...`);
console.log(` Fast: ${fastInvoice.invoice_number} | ${fastInvoice.invoice_date} | ${fastInvoice.total_amount}`);
console.log(` Think: ${thinkInvoice.invoice_number} | ${thinkInvoice.invoice_date} | ${thinkInvoice.total_amount}`);
}
// Max attempts reached - do one final thinking pass and use that
console.log(` [Final] Max attempts reached, using final thinking pass`);
const finalResponse = await queryJsonWithThinking(images);
const finalInvoice = parseJsonToInvoice(finalResponse);
if (finalInvoice) {
console.log(` [Final] Result: ${finalInvoice.invoice_number} | ${finalInvoice.invoice_date} | ${finalInvoice.total_amount} ${finalInvoice.currency}`);
return finalInvoice;
}
// Return empty invoice if all else fails
console.log(` [Final] All parsing failed, returning empty`);
/**
 * Extract invoice data using smartagent orchestrator with vision
 */
async function extractInvoiceFromImages(images: string[]): Promise<IInvoice> {
console.log(` [Vision] Processing ${images.length} page(s) with smartagent DualAgentOrchestrator`);
const startTime = Date.now();
const result = await orchestrator.run(EXTRACTION_PROMPT, { images });
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
console.log(` [Vision] Completed in ${elapsed}s (${result.iterations} iterations, status: ${result.status})`);
const invoice = parseJsonToInvoice(result.result);
if (invoice) {
console.log(` [Result] ${invoice.invoice_number} | ${invoice.invoice_date} | ${invoice.total_amount} ${invoice.currency}`);
return invoice;
}
// Return empty invoice if parsing failed
console.log(` [Result] Parsing failed, returning empty invoice`);
return {
invoice_number: '',
invoice_date: '',
@@ -410,6 +340,79 @@ tap.test('setup: ensure Docker containers are running', async () => {
console.log('\n[Setup] All containers ready!\n');
});
tap.test('setup: initialize smartagent orchestrator', async () => {
console.log('[Setup] Initializing SmartAi and DualAgentOrchestrator...');
smartAi = new SmartAi({
ollama: {
baseUrl: OLLAMA_URL,
model: MODEL,
defaultOptions: {
num_ctx: 32768,
temperature: 0.1,
},
defaultTimeout: 300000, // 5 minutes for vision tasks
},
});
await smartAi.start();
orchestrator = new DualAgentOrchestrator({
smartAiInstance: smartAi,
defaultProvider: 'ollama',
guardianPolicyPrompt: `You are a Guardian agent overseeing invoice extraction tasks.
APPROVE all tool calls that:
- Use the json.validate action to verify JSON output
- Are reasonable attempts to complete the extraction task
REJECT tool calls that:
- Attempt to access external resources
- Try to execute arbitrary code
- Are clearly unrelated to invoice extraction`,
driverSystemMessage: `You are an AI assistant that extracts invoice data from images.
Your task is to analyze invoice images and extract structured data.
You have access to a json.validate tool to verify your JSON output.
IMPORTANT: Always validate your JSON before completing the task.
## Tool Usage Format
When you need to validate JSON, output:
<tool_call>
<tool>json</tool>
<action>validate</action>
<params>{"jsonString": "YOUR_JSON", "requiredFields": ["invoice_number", "invoice_date", "vendor_name", "currency", "net_amount", "vat_amount", "total_amount"]}</params>
</tool_call>
## Completion Format
After validation passes, complete the task:
<task_complete>
{"invoice_number": "...", "invoice_date": "YYYY-MM-DD", ...}
</task_complete>`,
maxIterations: 5,
maxConsecutiveRejections: 3,
onToken: (token, source) => {
if (source === 'driver') {
process.stdout.write(token);
}
},
onProgress: (event) => {
if (event.logLevel === 'error') {
console.error(event.logMessage);
}
},
});
// Register the JsonValidatorTool
orchestrator.registerTool(new JsonValidatorTool());
await orchestrator.start();
console.log('[Setup] Orchestrator initialized!\n');
});
tap.test('should have MiniCPM-V model loaded', async () => {
const response = await fetch(`${OLLAMA_URL}/api/tags`);
const data = await response.json();
@@ -418,7 +421,7 @@ tap.test('should have MiniCPM-V model loaded', async () => {
});
const testCases = findTestCases();
console.log(`\nFound ${testCases.length} invoice test cases (MiniCPM-V)\n`);
console.log(`\nFound ${testCases.length} invoice test cases (smartagent + MiniCPM-V)\n`);
let passedCount = 0;
let failedCount = 0;
@@ -455,6 +458,13 @@ for (const testCase of testCases) {
});
}
tap.test('cleanup: stop orchestrator', async () => {
if (orchestrator) {
await orchestrator.stop();
}
console.log('[Cleanup] Orchestrator stopped');
});
tap.test('summary', async () => {
const totalInvoices = testCases.length;
const accuracy = totalInvoices > 0 ? (passedCount / totalInvoices) * 100 : 0;
@@ -462,9 +472,10 @@ tap.test('summary', async () => {
const avgTimeSec = processingTimes.length > 0 ? totalTimeMs / processingTimes.length / 1000 : 0;
console.log(`\n========================================`);
console.log(` Invoice Extraction Summary (${MODEL})`);
console.log(` Invoice Extraction Summary`);
console.log(` (smartagent + ${MODEL})`);
console.log(`========================================`);
console.log(` Method: Consensus (fast + thinking)`);
console.log(` Method: DualAgentOrchestrator with vision`);
console.log(` Passed: ${passedCount}/${totalInvoices}`);
console.log(` Failed: ${failedCount}/${totalInvoices}`);
console.log(` Accuracy: ${accuracy.toFixed(1)}%`);

View File

@@ -1,8 +1,8 @@
/**
 * Invoice extraction using Nanonets-OCR-s + Qwen3 (sequential two-stage pipeline)
 *
 * Stage 1: Nanonets-OCR-s converts ALL document pages to markdown (stop after completion)
 * Stage 2: Qwen3 extracts structured JSON from saved markdown (after Nanonets stops)
 *
 * This approach avoids GPU contention by running services sequentially.
 */
/**
 * Invoice extraction using Nanonets-OCR2-3B + GPT-OSS 20B (sequential two-stage pipeline)
 *
 * Stage 1: Nanonets-OCR2-3B converts ALL document pages to markdown (stop after completion)
 * Stage 2: GPT-OSS 20B extracts structured JSON from saved markdown (after Nanonets stops)
 *
 * This approach avoids GPU contention by running services sequentially.
 */
@@ -12,15 +12,35 @@ import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensureNanonetsOcr, ensureMiniCpm, isContainerRunning } from './helpers/docker.js';
import { SmartAi } from '@push.rocks/smartai';
import { DualAgentOrchestrator, JsonValidatorTool } from '@push.rocks/smartagent';
const NANONETS_URL = 'http://localhost:8000/v1';
const NANONETS_MODEL = 'nanonets/Nanonets-OCR-s';
const NANONETS_MODEL = 'nanonets/Nanonets-OCR2-3B';
const OLLAMA_URL = 'http://localhost:11434';
const QWEN_MODEL = 'qwen3:8b';
const EXTRACTION_MODEL = 'gpt-oss:20b';
// Temp directory for storing markdown between stages
const TEMP_MD_DIR = path.join(os.tmpdir(), 'nanonets-invoices-markdown');
// Persistent cache directory for storing markdown between runs
const MD_CACHE_DIR = path.join(process.cwd(), '.nogit/invoices-md');
// SmartAi instance for Ollama with optimized settings
const smartAi = new SmartAi({
ollama: {
baseUrl: OLLAMA_URL,
model: EXTRACTION_MODEL,
defaultOptions: {
num_ctx: 65536, // 64K context for long invoices + reasoning chains
temperature: 0, // Deterministic for JSON extraction
repeat_penalty: 1.3, // Penalty to prevent repetition loops
think: true, // Enable thinking mode for GPT-OSS reasoning
},
defaultTimeout: 600000, // 10 minute timeout for large documents
},
});
// DualAgentOrchestrator for structured task execution
let orchestrator: DualAgentOrchestrator;
interface IInvoice {
invoice_number: string;
@@ -32,6 +52,13 @@ interface IInvoice {
total_amount: number;
}
interface IImageData {
base64: string;
width: number;
height: number;
pageNum: number;
}
interface ITestCase {
name: string;
pdfPath: string;
@@ -47,38 +74,53 @@ If there is an image in the document and image caption is not present, add a sma
Watermarks should be wrapped in brackets. Ex: <watermark>OFFICIAL COPY</watermark>.
Page numbers should be wrapped in brackets. Ex: <page_number>14</page_number>.`;
// JSON extraction prompt for Qwen3
const JSON_EXTRACTION_PROMPT = `You are an invoice data extractor. Below is an invoice document converted to text/markdown. Extract the key invoice fields as JSON.
IMPORTANT RULES:
1. invoice_number: The unique invoice/document number (NOT VAT ID, NOT customer ID)
2. invoice_date: Format as YYYY-MM-DD
3. vendor_name: The company that issued the invoice
4. currency: EUR, USD, or GBP
5. net_amount: Amount before tax
6. vat_amount: Tax/VAT amount
7. total_amount: Final total (gross amount)
Return ONLY this JSON format, no explanation:
{
"invoice_number": "INV-2024-001",
"invoice_date": "2024-01-15",
"vendor_name": "Company Name",
"currency": "EUR",
"net_amount": 100.00,
"vat_amount": 19.00,
"total_amount": 119.00
}
`;
// JSON extraction prompt for GPT-OSS 20B (sent AFTER the invoice text is provided)
const JSON_EXTRACTION_PROMPT = `Extract key fields from the invoice. Return ONLY valid JSON.
WHERE TO FIND DATA:
- invoice_number, invoice_date, vendor_name: Look in the HEADER section at the TOP of PAGE 1 (near "Invoice no.", "Invoice date:", "Rechnungsnummer"). Use common sense. Note: invoice numbers often start with "INV".
- net_amount, vat_amount, total_amount: Look in the SUMMARY section at the BOTTOM (look for "Total", "Amount due", "Gesamtbetrag")
RULES:
1. Use common sense.
2. invoice_date: Convert to YYYY-MM-DD format (e.g., "14/04/2022" → "2022-04-14")
3. vendor_name: The company issuing the invoice
4. currency: EUR, USD, or GBP
5. net_amount: Total before tax
6. vat_amount: Tax amount
7. total_amount: Final total with tax
JSON only:
{"invoice_number":"X","invoice_date":"YYYY-MM-DD","vendor_name":"X","currency":"EUR","net_amount":0,"vat_amount":0,"total_amount":0}
Double-check that the output is valid JSON syntax.
INVOICE TEXT:
`;
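The extractDate helper that later normalizes the model's reply sits outside this hunk. A minimal sketch of the day-first conversion the prompt asks for ("14/04/2022" → "2022-04-14"), offered as an assumption rather than the file's actual helper, could be:

// Hypothetical sketch: normalize "14/04/2022" or "14.04.2022" to "2022-04-14".
// Treats dates as day-first, matching the prompt's example; already-ISO strings pass through.
function toIsoDateSketch(s: string): string {
  if (/^\d{4}-\d{2}-\d{2}$/.test(s)) return s;
  const m = s.match(/^(\d{1,2})[./-](\d{1,2})[./-](\d{4})$/);
  if (!m) return '';
  const [, d, mo, y] = m;
  return `${y}-${mo.padStart(2, '0')}-${d.padStart(2, '0')}`;
}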
// Constants for smart batching
const PATCH_SIZE = 14; // Qwen2.5-VL uses 14x14 patches
/**
 * Convert PDF to PNG images
 */
function convertPdfToImages(pdfPath: string): string[] {
/**
 * Estimate visual tokens for an image based on dimensions
 */
function estimateVisualTokens(width: number, height: number): number {
return Math.ceil((width * height) / (PATCH_SIZE * PATCH_SIZE));
}
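To make the arithmetic concrete: with 14x14 patches, an A4 page rendered at roughly 150 DPI (about 1240x1754 px, an illustrative figure) comes out to about 11,100 visual tokens.

// 1240 * 1754 = 2,174,960 pixels; divided by 14 * 14 = 196 gives 11,096.7..., ceil = 11,097
const a4Tokens = estimateVisualTokens(1240, 1754); // 11097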
/**
* Process images one page at a time for reliability
*/
function batchImages(images: IImageData[]): IImageData[][] {
// One page per batch for reliable processing
return images.map(img => [img]);
}
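batchImages currently emits one page per batch for reliability, so estimateVisualTokens only feeds the per-batch logging below. It would also let batches pack pages up to a token budget; a sketch of that alternative, where the budget value is an assumption and not something the file defines:

// Hypothetical alternative: greedily pack pages into batches under a visual-token budget.
function batchImagesByBudget(images: IImageData[], maxTokens = 16000): IImageData[][] {
  const batches: IImageData[][] = [];
  let current: IImageData[] = [];
  let used = 0;
  for (const img of images) {
    const cost = estimateVisualTokens(img.width, img.height);
    // Start a new batch when adding this page would exceed the budget
    if (current.length > 0 && used + cost > maxTokens) {
      batches.push(current);
      current = [];
      used = 0;
    }
    current.push(img);
    used += cost;
  }
  if (current.length > 0) batches.push(current);
  return batches;
}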
/**
* Convert PDF to JPEG images using ImageMagick with dimension tracking
*/
function convertPdfToImages(pdfPath: string): IImageData[] {
const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
const outputPattern = path.join(tempDir, 'page-%d.png');
const outputPattern = path.join(tempDir, 'page-%d.jpg');
try {
execSync(
@@ -86,13 +128,24 @@ function convertPdfToImages(pdfPath: string): string[] {
{ stdio: 'pipe' }
);
const files = fs.readdirSync(tempDir).filter((f) => f.endsWith('.png')).sort();
const images: string[] = [];
const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.jpg')).sort();
const images: IImageData[] = [];
for (const file of files) {
for (let i = 0; i < files.length; i++) {
const file = files[i];
const imagePath = path.join(tempDir, file);
const imageData = fs.readFileSync(imagePath);
images.push(imageData.toString('base64'));
// Get image dimensions using identify command
const dimensions = execSync(`identify -format "%w %h" "${imagePath}"`, { encoding: 'utf-8' }).trim();
const [width, height] = dimensions.split(' ').map(Number);
images.push({
base64: imageData.toString('base64'),
width,
height,
pageNum: i + 1,
});
}
return images;
@@ -102,10 +155,28 @@ function convertPdfToImages(pdfPath: string): string[] {
}
/**
 * Convert a single page to markdown using Nanonets-OCR-s
 */
async function convertPageToMarkdown(image: string, pageNum: number): Promise<string> {
/**
 * Convert a batch of pages to markdown using Nanonets-OCR-s
 */
async function convertBatchToMarkdown(batch: IImageData[]): Promise<string> {
const startTime = Date.now();
const pageNums = batch.map(img => img.pageNum).join(', ');
// Build content array with all images first, then the prompt
const content: Array<{ type: string; image_url?: { url: string }; text?: string }> = [];
for (const img of batch) {
content.push({
type: 'image_url',
image_url: { url: `data:image/jpeg;base64,${img.base64}` },
});
}
// Add prompt with page separator instruction if multiple pages
const promptText = batch.length > 1
? `${NANONETS_OCR_PROMPT}\n\nPlease clearly separate each page's content with "--- PAGE N ---" markers, where N is the page number starting from ${batch[0].pageNum}.`
: NANONETS_OCR_PROMPT;
content.push({ type: 'text', text: promptText });
const response = await fetch(`${NANONETS_URL}/chat/completions`, {
method: 'POST',
@@ -117,14 +188,12 @@ async function convertPageToMarkdown(image: string, pageNum: number): Promise<st
model: NANONETS_MODEL,
messages: [{
role: 'user',
content: [
{ type: 'image_url', image_url: { url: `data:image/png;base64,${image}` }},
{ type: 'text', text: NANONETS_OCR_PROMPT },
],
content,
}],
max_tokens: 4096,
max_tokens: 4096 * batch.length, // Scale output tokens with batch size
temperature: 0.0,
}),
signal: AbortSignal.timeout(600000), // 10 minute timeout for OCR
});
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
@@ -135,25 +204,35 @@ async function convertPageToMarkdown(image: string, pageNum: number): Promise<st
}
const data = await response.json();
const content = (data.choices?.[0]?.message?.content || '').trim();
console.log(` Page ${pageNum}: ${content.length} chars (${elapsed}s)`);
return content;
let responseContent = (data.choices?.[0]?.message?.content || '').trim();
// For single-page batches, add page marker if not present
if (batch.length === 1 && !responseContent.includes('--- PAGE')) {
responseContent = `--- PAGE ${batch[0].pageNum} ---\n${responseContent}`;
}
console.log(` Pages [${pageNums}]: ${responseContent.length} chars (${elapsed}s)`);
return responseContent;
}
/**
 * Convert all pages of a document to markdown
 */
async function convertDocumentToMarkdown(images: string[], docName: string): Promise<string> {
console.log(` [${docName}] Converting ${images.length} page(s)...`);
const markdownPages: string[] = [];
for (let i = 0; i < images.length; i++) {
const markdown = await convertPageToMarkdown(images[i], i + 1);
markdownPages.push(`--- PAGE ${i + 1} ---\n${markdown}`);
}
const fullMarkdown = markdownPages.join('\n\n');
console.log(` [${docName}] Complete: ${fullMarkdown.length} chars total`);
return fullMarkdown;
}
/**
 * Convert all pages of a document to markdown using smart batching
 */
async function convertDocumentToMarkdown(images: IImageData[], docName: string): Promise<string> {
const batches = batchImages(images);
console.log(` [${docName}] Processing ${images.length} page(s) in ${batches.length} batch(es)...`);
const markdownParts: string[] = [];
for (let i = 0; i < batches.length; i++) {
const batch = batches[i];
const batchTokens = batch.reduce((sum, img) => sum + estimateVisualTokens(img.width, img.height), 0);
console.log(` Batch ${i + 1}: ${batch.length} page(s), ~${batchTokens} tokens`);
const markdown = await convertBatchToMarkdown(batch);
markdownParts.push(markdown);
}
const fullMarkdown = markdownParts.join('\n\n');
console.log(` [${docName}] Complete: ${fullMarkdown.length} chars total`);
return fullMarkdown;
}
@@ -173,16 +252,16 @@ function stopNanonets(): void {
}
/**
* Ensure Qwen3 model is available
* Ensure GPT-OSS 20B model is available
*/
async function ensureQwen3(): Promise<boolean> {
async function ensureExtractionModel(): Promise<boolean> {
try {
const response = await fetch(`${OLLAMA_URL}/api/tags`);
if (response.ok) {
const data = await response.json();
const models = data.models || [];
if (models.some((m: { name: string }) => m.name === QWEN_MODEL)) {
console.log(` [Ollama] Model available: ${QWEN_MODEL}`);
if (models.some((m: { name: string }) => m.name === EXTRACTION_MODEL)) {
console.log(` [Ollama] Model available: ${EXTRACTION_MODEL}`);
return true;
}
}
@@ -190,11 +269,11 @@ async function ensureQwen3(): Promise<boolean> {
return false;
}
console.log(` [Ollama] Pulling ${QWEN_MODEL}...`);
console.log(` [Ollama] Pulling ${EXTRACTION_MODEL}...`);
const pullResponse = await fetch(`${OLLAMA_URL}/api/pull`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ name: QWEN_MODEL, stream: false }),
body: JSON.stringify({ name: EXTRACTION_MODEL, stream: false }),
});
return pullResponse.ok;
@@ -262,16 +341,20 @@ function extractCurrency(s: string | undefined): string {
}
/**
 * Extract JSON from response
 */
/**
 * Try to extract valid JSON from a response string
 */
function extractJsonFromResponse(response: string): Record<string, unknown> | null {
let cleanResponse = response.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
const codeBlockMatch = cleanResponse.match(/```(?:json)?\s*([\s\S]*?)```/);
const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : cleanResponse;
function tryExtractJson(response: string): Record<string, unknown> | null {
// Remove thinking tags
let clean = response.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
// Try code block
const codeBlockMatch = clean.match(/```(?:json)?\s*([\s\S]*?)```/);
const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : clean;
try {
return JSON.parse(jsonStr);
} catch {
// Try to find JSON object
const jsonMatch = jsonStr.match(/\{[\s\S]*\}/);
if (jsonMatch) {
try {
@@ -285,115 +368,113 @@ function extractJsonFromResponse(response: string): Record<string, unknown> | nu
}
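A quick illustration of the reply shapes tryExtractJson accepts (hypothetical model outputs): a bare object, a fenced code block, and prose with thinking tags all resolve to the same parsed object.

// All three of these hypothetical replies yield { total_amount: 119 }:
tryExtractJson('{"total_amount": 119}');                                  // direct parse
tryExtractJson('```json\n{"total_amount": 119}\n```');                    // code-block capture
tryExtractJson('<think>check totals</think> Sure! {"total_amount": 119}'); // tag strip + object regex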
/**
* Parse JSON response into IInvoice
*/
function parseJsonToInvoice(response: string): IInvoice | null {
const parsed = extractJsonFromResponse(response);
if (!parsed) return null;
return {
invoice_number: extractInvoiceNumber(String(parsed.invoice_number || '')),
invoice_date: extractDate(String(parsed.invoice_date || '')),
vendor_name: String(parsed.vendor_name || '').replace(/\*\*/g, '').replace(/`/g, '').trim(),
currency: extractCurrency(String(parsed.currency || '')),
net_amount: parseAmount(parsed.net_amount as string | number),
vat_amount: parseAmount(parsed.vat_amount as string | number),
total_amount: parseAmount(parsed.total_amount as string | number),
};
}
/**
 * Extract invoice from markdown using Qwen3
 */
/**
 * Extract invoice from markdown using smartagent DualAgentOrchestrator
 * Validates JSON and retries if invalid
 */
async function extractInvoiceFromMarkdown(markdown: string, queryId: string): Promise<IInvoice | null> {
console.log(` [${queryId}] Sending to ${QWEN_MODEL}...`);
const startTime = Date.now();
const response = await fetch(`${OLLAMA_URL}/api/chat`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
signal: AbortSignal.timeout(600000), // 10 minute timeout for large documents
body: JSON.stringify({
model: QWEN_MODEL,
messages: [{
role: 'user',
content: JSON_EXTRACTION_PROMPT + markdown,
}],
stream: false,
options: {
num_predict: 2000,
temperature: 0.1,
},
}),
});
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
if (!response.ok) {
console.log(` [${queryId}] ERROR: ${response.status} (${elapsed}s)`);
throw new Error(`Ollama API error: ${response.status}`);
}
const data = await response.json();
const content = (data.message?.content || '').trim();
console.log(` [${queryId}] Response: ${content.length} chars (${elapsed}s)`);
return parseJsonToInvoice(content);
}
async function extractInvoiceFromMarkdown(markdown: string, queryId: string): Promise<IInvoice | null> {
console.log(` [${queryId}] Invoice: ${markdown.length} chars`);
const startTime = Date.now();
const maxRetries = 2;
// Build the extraction task with document context
const taskPrompt = `Extract the invoice data from this document and output ONLY the JSON:
${markdown}
${JSON_EXTRACTION_PROMPT}`;
try {
let result = await orchestrator.run(taskPrompt);
let elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
console.log(` [${queryId}] Status: ${result.status}, Iterations: ${result.iterations} (${elapsed}s)`);
// Try to parse JSON from result
let jsonData: Record<string, unknown> | null = null;
let responseText = result.result || '';
if (result.success && responseText) {
jsonData = tryExtractJson(responseText);
}
// Fallback: try parsing from history
if (!jsonData && result.history?.length > 0) {
const lastMessage = result.history[result.history.length - 1];
if (lastMessage?.content) {
responseText = lastMessage.content;
jsonData = tryExtractJson(responseText);
}
}
// If JSON is invalid, retry with correction request
let retries = 0;
while (!jsonData && retries < maxRetries) {
retries++;
console.log(` [${queryId}] Invalid JSON, requesting correction (retry ${retries}/${maxRetries})...`);
result = await orchestrator.continueTask(
`Your response was not valid JSON. Please output ONLY the JSON object with no markdown, no explanation, no thinking tags. Just the raw JSON starting with { and ending with }. Format:
{"invoice_number":"X","invoice_date":"YYYY-MM-DD","vendor_name":"X","currency":"EUR","net_amount":0,"vat_amount":0,"total_amount":0}`
);
elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
console.log(` [${queryId}] Retry ${retries}: ${result.status} (${elapsed}s)`);
responseText = result.result || '';
if (responseText) {
jsonData = tryExtractJson(responseText);
}
if (!jsonData && result.history?.length > 0) {
const lastMessage = result.history[result.history.length - 1];
if (lastMessage?.content) {
responseText = lastMessage.content;
jsonData = tryExtractJson(responseText);
}
}
}
if (!jsonData) {
console.log(` [${queryId}] Failed to get valid JSON after ${retries} retries`);
return null;
}
console.log(` [${queryId}] Valid JSON extracted`);
return {
invoice_number: extractInvoiceNumber(String(jsonData.invoice_number || '')),
invoice_date: extractDate(String(jsonData.invoice_date || '')),
vendor_name: String(jsonData.vendor_name || '').replace(/\*\*/g, '').replace(/`/g, '').trim(),
currency: extractCurrency(String(jsonData.currency || '')),
net_amount: parseAmount(jsonData.net_amount as string | number),
vat_amount: parseAmount(jsonData.vat_amount as string | number),
total_amount: parseAmount(jsonData.total_amount as string | number),
};
} catch (error) {
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
console.log(` [${queryId}] ERROR: ${error} (${elapsed}s)`);
throw error;
}
}
/**
 * Compare two invoices for consensus
 */
function invoicesMatch(a: IInvoice, b: IInvoice): boolean {
const numMatch = a.invoice_number.toLowerCase() === b.invoice_number.toLowerCase();
const dateMatch = a.invoice_date === b.invoice_date;
const totalMatch = Math.abs(a.total_amount - b.total_amount) < 0.02;
return numMatch && dateMatch && totalMatch;
}
/**
 * Extract with consensus
 */
async function extractWithConsensus(markdown: string, docName: string): Promise<IInvoice> {
const MAX_ATTEMPTS = 3;
for (let attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
console.log(` [${docName}] Attempt ${attempt}/${MAX_ATTEMPTS}`);
const inv1 = await extractInvoiceFromMarkdown(markdown, `${docName}-A${attempt}Q1`);
const inv2 = await extractInvoiceFromMarkdown(markdown, `${docName}-A${attempt}Q2`);
if (!inv1 || !inv2) {
console.log(` [${docName}] Parsing failed, retrying...`);
continue;
}
console.log(` [${docName}] Q1: ${inv1.invoice_number} | ${inv1.invoice_date} | ${inv1.total_amount}`);
console.log(` [${docName}] Q2: ${inv2.invoice_number} | ${inv2.invoice_date} | ${inv2.total_amount}`);
if (invoicesMatch(inv1, inv2)) {
console.log(` [${docName}] CONSENSUS`);
return inv2;
}
console.log(` [${docName}] No consensus`);
}
// Fallback
const fallback = await extractInvoiceFromMarkdown(markdown, `${docName}-FALLBACK`);
if (fallback) {
console.log(` [${docName}] FALLBACK: ${fallback.invoice_number} | ${fallback.invoice_date} | ${fallback.total_amount}`);
return fallback;
}
return {
invoice_number: '',
invoice_date: '',
vendor_name: '',
currency: 'EUR',
net_amount: 0,
vat_amount: 0,
total_amount: 0,
};
}
/**
 * Extract invoice (single pass - GPT-OSS is more reliable)
 */
async function extractInvoice(markdown: string, docName: string): Promise<IInvoice> {
console.log(` [${docName}] Extracting...`);
const invoice = await extractInvoiceFromMarkdown(markdown, docName);
if (!invoice) {
return {
invoice_number: '',
invoice_date: '',
vendor_name: '',
currency: 'EUR',
net_amount: 0,
vat_amount: 0,
total_amount: 0,
};
}
console.log(` [${docName}] Extracted: ${invoice.invoice_number}`);
return invoice;
}
/**
@@ -481,23 +562,45 @@ function findTestCases(): ITestCase[] {
const testCases = findTestCases();
console.log(`\nFound ${testCases.length} invoice test cases\n`);
// Ensure temp directory exists
if (!fs.existsSync(TEMP_MD_DIR)) {
fs.mkdirSync(TEMP_MD_DIR, { recursive: true });
// Ensure cache directory exists
if (!fs.existsSync(MD_CACHE_DIR)) {
fs.mkdirSync(MD_CACHE_DIR, { recursive: true });
}
// -------- STAGE 1: OCR with Nanonets --------
tap.test('Stage 1: Setup Nanonets', async () => {
console.log('\n========== STAGE 1: Nanonets OCR ==========\n');
const ok = await ensureNanonetsOcr();
expect(ok).toBeTrue();
});
tap.test('Stage 1: Convert all invoices to markdown', async () => {
console.log('\n Converting all invoice PDFs to markdown with Nanonets-OCR-s...\n');
tap.test('Stage 1: Convert invoices to markdown (with caching)', async () => {
console.log('\n========== STAGE 1: Nanonets OCR ==========\n');
// Check which invoices need OCR conversion
const needsConversion: ITestCase[] = [];
let cachedCount = 0;
for (const tc of testCases) {
const mdPath = path.join(MD_CACHE_DIR, `${tc.name}.md`);
if (fs.existsSync(mdPath)) {
cachedCount++;
tc.markdownPath = mdPath;
console.log(` [CACHED] ${tc.name} - using cached markdown`);
} else {
needsConversion.push(tc);
}
}
console.log(`\n Summary: ${cachedCount} cached, ${needsConversion.length} need conversion\n`);
if (needsConversion.length === 0) {
console.log(' All invoices already cached, skipping Nanonets OCR\n');
return;
}
// Start Nanonets only if there are files to convert
console.log(' Starting Nanonets for OCR conversion...\n');
const ok = await ensureNanonetsOcr();
expect(ok).toBeTrue();
// Convert only the invoices that need conversion
for (const tc of needsConversion) {
console.log(`\n === ${tc.name} ===`);
const images = convertPdfToImages(tc.pdfPath);
@@ -505,13 +608,13 @@ tap.test('Stage 1: Convert all invoices to markdown', async () => {
const markdown = await convertDocumentToMarkdown(images, tc.name);
const mdPath = path.join(TEMP_MD_DIR, `${tc.name}.md`);
const mdPath = path.join(MD_CACHE_DIR, `${tc.name}.md`);
fs.writeFileSync(mdPath, markdown);
tc.markdownPath = mdPath;
console.log(` Saved: ${mdPath}`);
}
console.log('\n Stage 1 complete: All invoices converted to markdown\n');
console.log(`\n Stage 1 complete: ${needsConversion.length} invoices converted to markdown\n`);
});
tap.test('Stage 1: Stop Nanonets', async () => {
@@ -520,16 +623,60 @@ tap.test('Stage 1: Stop Nanonets', async () => {
expect(isContainerRunning('nanonets-test')).toBeFalse();
});
// -------- STAGE 2: Extraction with Qwen3 --------
// -------- STAGE 2: Extraction with GPT-OSS 20B --------
tap.test('Stage 2: Setup Ollama + Qwen3', async () => {
console.log('\n========== STAGE 2: Qwen3 Extraction ==========\n');
tap.test('Stage 2: Setup Ollama + GPT-OSS 20B', async () => {
console.log('\n========== STAGE 2: GPT-OSS 20B Extraction ==========\n');
const ollamaOk = await ensureMiniCpm();
expect(ollamaOk).toBeTrue();
const qwenOk = await ensureQwen3();
expect(qwenOk).toBeTrue();
const extractionOk = await ensureExtractionModel();
expect(extractionOk).toBeTrue();
// Initialize SmartAi and DualAgentOrchestrator
console.log(' [SmartAgent] Starting SmartAi...');
await smartAi.start();
console.log(' [SmartAgent] Creating DualAgentOrchestrator with native tool calling...');
orchestrator = new DualAgentOrchestrator({
smartAiInstance: smartAi,
defaultProvider: 'ollama',
guardianPolicyPrompt: `
JSON EXTRACTION POLICY:
- APPROVE all JSON extraction tasks
- APPROVE all json.validate tool calls
- This is a read-only operation - no file system or network access needed
- The task is to extract structured data from document text
`,
driverSystemMessage: `You are a precise JSON extraction assistant. Your only job is to extract invoice data from documents.
CRITICAL RULES:
1. Output valid JSON with the exact format requested
2. If you cannot find a value, use empty string "" or 0 for numbers
3. Before completing, validate your JSON using the json_validate tool
4. Only complete after validation passes`,
maxIterations: 5,
// Enable native tool calling for GPT-OSS (uses Harmony format instead of XML)
useNativeToolCalling: true,
// Enable streaming for real-time progress visibility
onToken: (token, source) => {
if (source === 'driver') {
process.stdout.write(token);
}
},
// Enable progress events to see tool calls
onProgress: (event: { logMessage: string }) => {
console.log(event.logMessage);
},
});
// Register JsonValidatorTool for self-validation
orchestrator.registerTool(new JsonValidatorTool());
console.log(' [SmartAgent] Starting orchestrator...');
await orchestrator.start();
console.log(' [SmartAgent] Ready for extraction');
});
let passedCount = 0;
@@ -544,14 +691,14 @@ for (const tc of testCases) {
const startTime = Date.now();
const mdPath = path.join(TEMP_MD_DIR, `${tc.name}.md`);
const mdPath = path.join(MD_CACHE_DIR, `${tc.name}.md`);
if (!fs.existsSync(mdPath)) {
throw new Error(`Markdown not found: ${mdPath}. Run Stage 1 first.`);
}
const markdown = fs.readFileSync(mdPath, 'utf-8');
console.log(` Markdown: ${markdown.length} chars`);
const extracted = await extractWithConsensus(markdown, tc.name);
const extracted = await extractInvoice(markdown, tc.name);
const elapsedMs = Date.now() - startTime;
processingTimes.push(elapsedMs);
@@ -574,16 +721,24 @@ for (const tc of testCases) {
}
tap.test('Summary', async () => {
// Cleanup orchestrator and SmartAi
if (orchestrator) {
console.log('\n [SmartAgent] Stopping orchestrator...');
await orchestrator.stop();
}
console.log(' [SmartAgent] Stopping SmartAi...');
await smartAi.stop();
const totalInvoices = testCases.length;
const accuracy = totalInvoices > 0 ? (passedCount / totalInvoices) * 100 : 0;
const totalTimeMs = processingTimes.reduce((a, b) => a + b, 0);
const avgTimeSec = processingTimes.length > 0 ? totalTimeMs / processingTimes.length / 1000 : 0;
console.log(`\n========================================`);
console.log(` Invoice Summary (Nanonets + Qwen3)`);
console.log(` Invoice Summary (Nanonets + GPT-OSS 20B)`);
console.log(`========================================`);
console.log(` Stage 1: Nanonets-OCR-s (doc -> md)`);
console.log(` Stage 2: Qwen3 8B (md -> JSON)`);
console.log(` Stage 2: GPT-OSS 20B + SmartAgent (md -> JSON)`);
console.log(` Passed: ${passedCount}/${totalInvoices}`);
console.log(` Failed: ${failedCount}/${totalInvoices}`);
console.log(` Accuracy: ${accuracy.toFixed(1)}%`);
@@ -591,14 +746,7 @@ tap.test('Summary', async () => {
console.log(` Total time: ${(totalTimeMs / 1000).toFixed(1)}s`);
console.log(` Avg per inv: ${avgTimeSec.toFixed(1)}s`);
console.log(`========================================\n`);
// Cleanup temp files
try {
fs.rmSync(TEMP_MD_DIR, { recursive: true, force: true });
console.log(` Cleaned up temp directory: ${TEMP_MD_DIR}\n`);
} catch {
// Ignore
}
console.log(` Cache location: ${MD_CACHE_DIR}\n`);
});
export default tap.start();