Compare commits
26 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 70913c4b3e | |||
| 2ed419f6e4 | |||
| 45cb87e9e7 | |||
| 74a5b37e92 | |||
| 2bdcc74df0 | |||
| 981c031c6e | |||
| 26d2de824f | |||
| 969d21c51a | |||
| da2b827ba3 | |||
| 9bc1f74978 | |||
| cf282b2437 | |||
| 77d57e80bd | |||
| b202e024a4 | |||
| 2210611f70 | |||
| d8bdb18841 | |||
| d384c1d79b | |||
| 6bd672da61 | |||
| 44d6dc3336 | |||
| d1ff95bd94 | |||
| 09770d3177 | |||
| 235aa1352b | |||
| 08728ada4d | |||
| b58bcabc76 | |||
| 6dbd06073b | |||
| ae28a64902 | |||
| 09ea7440e8 |
34
Dockerfile_nanonets_vllm_gpu_VRAM10GB
Normal file
34
Dockerfile_nanonets_vllm_gpu_VRAM10GB
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
# Nanonets-OCR2-3B Vision Language Model
|
||||||
|
# Based on Qwen2.5-VL-3B, fine-tuned for document OCR (Oct 2025 release)
|
||||||
|
# Improvements over OCR-s: better semantic tagging, LaTeX equations, flowcharts
|
||||||
|
# ~12-16GB VRAM with 30K context, outputs structured markdown with semantic tags
|
||||||
|
#
|
||||||
|
# Build: docker build -f Dockerfile_nanonets_vllm_gpu_VRAM10GB -t nanonets-ocr .
|
||||||
|
# Run: docker run --gpus all -p 8000:8000 -v ht-huggingface-cache:/root/.cache/huggingface nanonets-ocr
|
||||||
|
|
||||||
|
FROM vllm/vllm-openai:latest
|
||||||
|
|
||||||
|
LABEL maintainer="Task Venture Capital GmbH <hello@task.vc>"
|
||||||
|
LABEL description="Nanonets-OCR2-3B - Document OCR optimized Vision Language Model"
|
||||||
|
LABEL org.opencontainers.image.source="https://code.foss.global/host.today/ht-docker-ai"
|
||||||
|
|
||||||
|
# Environment configuration
|
||||||
|
ENV MODEL_NAME="nanonets/Nanonets-OCR2-3B"
|
||||||
|
ENV HOST="0.0.0.0"
|
||||||
|
ENV PORT="8000"
|
||||||
|
ENV MAX_MODEL_LEN="30000"
|
||||||
|
ENV GPU_MEMORY_UTILIZATION="0.9"
|
||||||
|
|
||||||
|
# Expose OpenAI-compatible API port
|
||||||
|
EXPOSE 8000
|
||||||
|
|
||||||
|
# Health check - vLLM exposes /health endpoint
|
||||||
|
HEALTHCHECK --interval=30s --timeout=10s --start-period=120s --retries=5 \
|
||||||
|
CMD curl -f http://localhost:8000/health || exit 1
|
||||||
|
|
||||||
|
# Start vLLM server with Nanonets-OCR2-3B model
|
||||||
|
CMD ["--model", "nanonets/Nanonets-OCR2-3B", \
|
||||||
|
"--trust-remote-code", \
|
||||||
|
"--max-model-len", "30000", \
|
||||||
|
"--host", "0.0.0.0", \
|
||||||
|
"--port", "8000"]
|
||||||
@@ -13,46 +13,38 @@ NC='\033[0m' # No Color
|
|||||||
|
|
||||||
echo -e "${BLUE}Building ht-docker-ai images...${NC}"
|
echo -e "${BLUE}Building ht-docker-ai images...${NC}"
|
||||||
|
|
||||||
# Build GPU variant
|
# Build MiniCPM-V 4.5 GPU variant
|
||||||
echo -e "${GREEN}Building MiniCPM-V 4.5 GPU variant...${NC}"
|
echo -e "${GREEN}Building MiniCPM-V 4.5 GPU variant...${NC}"
|
||||||
docker build \
|
docker build \
|
||||||
-f Dockerfile_minicpm45v_gpu \
|
-f Dockerfile_minicpm45v_ollama_gpu_VRAM9GB \
|
||||||
-t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v \
|
-t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v \
|
||||||
-t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v-gpu \
|
-t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v-gpu \
|
||||||
-t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:latest \
|
-t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:latest \
|
||||||
.
|
.
|
||||||
|
|
||||||
# Build CPU variant
|
# Build Qwen3-VL GPU variant
|
||||||
echo -e "${GREEN}Building MiniCPM-V 4.5 CPU variant...${NC}"
|
echo -e "${GREEN}Building Qwen3-VL-30B-A3B GPU variant...${NC}"
|
||||||
docker build \
|
docker build \
|
||||||
-f Dockerfile_minicpm45v_cpu \
|
-f Dockerfile_qwen3vl_ollama_gpu_VRAM20GB \
|
||||||
-t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v-cpu \
|
-t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:qwen3vl \
|
||||||
.
|
.
|
||||||
|
|
||||||
# Build PaddleOCR-VL GPU variant
|
# Build Nanonets-OCR GPU variant
|
||||||
echo -e "${GREEN}Building PaddleOCR-VL GPU variant...${NC}"
|
echo -e "${GREEN}Building Nanonets-OCR-s GPU variant...${NC}"
|
||||||
docker build \
|
docker build \
|
||||||
-f Dockerfile_paddleocr_vl_gpu \
|
-f Dockerfile_nanonets_vllm_gpu_VRAM10GB \
|
||||||
-t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl \
|
-t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:nanonets-ocr \
|
||||||
-t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl-gpu \
|
|
||||||
.
|
|
||||||
|
|
||||||
# Build PaddleOCR-VL CPU variant
|
|
||||||
echo -e "${GREEN}Building PaddleOCR-VL CPU variant...${NC}"
|
|
||||||
docker build \
|
|
||||||
-f Dockerfile_paddleocr_vl_cpu \
|
|
||||||
-t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl-cpu \
|
|
||||||
.
|
.
|
||||||
|
|
||||||
echo -e "${GREEN}All images built successfully!${NC}"
|
echo -e "${GREEN}All images built successfully!${NC}"
|
||||||
echo ""
|
echo ""
|
||||||
echo "Available images:"
|
echo "Available images:"
|
||||||
echo " MiniCPM-V 4.5:"
|
echo " MiniCPM-V 4.5 (Ollama, ~9GB VRAM):"
|
||||||
echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v (GPU)"
|
echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v"
|
||||||
echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v-cpu (CPU)"
|
echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:latest"
|
||||||
echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:latest (GPU)"
|
|
||||||
echo ""
|
echo ""
|
||||||
echo " PaddleOCR-VL (Vision-Language Model):"
|
echo " Qwen3-VL-30B-A3B (Ollama, ~20GB VRAM):"
|
||||||
echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl (GPU/vLLM)"
|
echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:qwen3vl"
|
||||||
echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl-gpu (GPU/vLLM)"
|
echo ""
|
||||||
echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl-cpu (CPU)"
|
echo " Nanonets-OCR-s (vLLM, ~10GB VRAM):"
|
||||||
|
echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:nanonets-ocr"
|
||||||
|
|||||||
85
changelog.md
85
changelog.md
@@ -1,5 +1,90 @@
|
|||||||
# Changelog
|
# Changelog
|
||||||
|
|
||||||
|
## 2026-01-20 - 1.16.0 - feat(invoices)
|
||||||
|
add line_items extraction and normalization for invoice parsing
|
||||||
|
|
||||||
|
- Introduce ILineItem interface and add line_items array to IInvoice.
|
||||||
|
- Add extractLineItems helper to normalize item fields (position, product, description, quantity, unit_price, total_price).
|
||||||
|
- Include line_items in parsed invoice output and sample JSON in test, defaulting to [] when absent.
|
||||||
|
- Update logging to include extracted line item count.
|
||||||
|
- Clarify test instructions to extract items from invoice tables and skip subtotal/total rows.
|
||||||
|
|
||||||
|
## 2026-01-20 - 1.15.3 - fix(tests(nanonets))
|
||||||
|
allow '/' when normalizing invoice strings in tests
|
||||||
|
|
||||||
|
- Adjust regex in test/test.invoices.nanonets.ts to preserve forward slashes when cleaning invoice values
|
||||||
|
- Changed pattern from [^A-Z0-9-] to [^A-Z0-9\/-] to prevent accidental removal of '/' characters in invoice identifiers
|
||||||
|
|
||||||
|
## 2026-01-20 - 1.15.2 - fix(dev-deps)
|
||||||
|
bump devDependencies @push.rocks/smartagent to ^1.6.2 and @push.rocks/smartai to ^0.13.3
|
||||||
|
|
||||||
|
- Bumped @push.rocks/smartagent from ^1.5.4 to ^1.6.2 in devDependencies
|
||||||
|
- Bumped @push.rocks/smartai from ^0.13.2 to ^0.13.3 in devDependencies
|
||||||
|
- Updated test/test.invoices.nanonets.ts JSON extraction prompt: instruct not to omit special characters in invoice_number and to use the json validate tool
|
||||||
|
- No breaking changes; only dev dependency updates and test prompt adjustments
|
||||||
|
|
||||||
|
## 2026-01-20 - 1.15.1 - fix(tests)
|
||||||
|
enable progress events in invoice tests and bump @push.rocks/smartagent devDependency to ^1.5.4
|
||||||
|
|
||||||
|
- Added an onProgress handler in test/test.invoices.nanonets.ts to log progress events (console.log(event.logMessage)) so tool calls and progress are visible during tests.
|
||||||
|
- Bumped devDependency @push.rocks/smartagent from ^1.5.2 to ^1.5.4 in package.json.
|
||||||
|
|
||||||
|
## 2026-01-20 - 1.15.0 - feat(tests)
|
||||||
|
integrate SmartAi/DualAgentOrchestrator into extraction tests and add JSON self-validation
|
||||||
|
|
||||||
|
- Integrate SmartAi and DualAgentOrchestrator into bankstatement and invoice tests to perform structured extraction with streaming
|
||||||
|
- Register and use JsonValidatorTool to validate outputs (json.validate) and enforce validation before task completion
|
||||||
|
- Add tryExtractJson parsing fallback, improved extraction prompts, retries and clearer parsing/logging
|
||||||
|
- Initialize and teardown SmartAi and orchestrator in test setup/summary, and enable onToken streaming handlers for real-time output
|
||||||
|
- Bump devDependencies: @push.rocks/smartagent to ^1.3.0 and @push.rocks/smartai to ^0.12.0
|
||||||
|
|
||||||
|
## 2026-01-20 - 1.14.3 - fix(repo)
|
||||||
|
no changes detected in the diff; no files modified and no release required
|
||||||
|
|
||||||
|
- Diff contained no changes
|
||||||
|
- No files were added, removed, or modified
|
||||||
|
- No code, dependency, or documentation updates to release
|
||||||
|
|
||||||
|
## 2026-01-19 - 1.14.2 - fix(readme)
|
||||||
|
update README to document Nanonets-OCR2-3B (replaces Nanonets-OCR-s), adjust VRAM and context defaults, expand feature docs, and update examples/test command
|
||||||
|
|
||||||
|
- Renamed Nanonets-OCR-s -> Nanonets-OCR2-3B throughout README and examples
|
||||||
|
- Updated Nanonets VRAM guidance from ~10GB to ~12-16GB and documented 30K context
|
||||||
|
- Changed documented MAX_MODEL_LEN default from 8192 to 30000
|
||||||
|
- Updated example model identifiers (model strings and curl/example snippets) to nanonets/Nanonets-OCR2-3B
|
||||||
|
- Added MiniCPM and Qwen feature bullets (multilingual, multi-image, flowchart support, expanded context notes)
|
||||||
|
- Replaced README test command from ./test-images.sh to pnpm test
|
||||||
|
|
||||||
|
## 2026-01-19 - 1.14.1 - fix(extraction)
|
||||||
|
improve JSON extraction prompts and model options for invoice and bank statement tests
|
||||||
|
|
||||||
|
- Refactor JSON extraction prompts to be sent after the document text and add explicit 'WHERE TO FIND DATA' and 'RULES' sections for clearer extraction guidance
|
||||||
|
- Change chat message flow to: send document, assistant acknowledgement, then the JSON extraction prompt (avoids concatenating large prompts into one message)
|
||||||
|
- Add model options (num_ctx: 32768, temperature: 0) to give larger context windows and deterministic JSON output
|
||||||
|
- Simplify logging to avoid printing full prompt contents; log document and prompt lengths instead
|
||||||
|
- Increase timeouts for large documents to 600000ms (10 minutes) where applicable
|
||||||
|
|
||||||
|
## 2026-01-19 - 1.14.0 - feat(docker-images)
|
||||||
|
add vLLM-based Nanonets-OCR2-3B image, Qwen3-VL Ollama image and refactor build/docs/tests to use new runtime/layout
|
||||||
|
|
||||||
|
- Add new Dockerfiles for Nanonets (Dockerfile_nanonets_vllm_gpu_VRAM10GB), Qwen3 (Dockerfile_qwen3vl_ollama_gpu_VRAM20GB) and a clarified MiniCPM Ollama variant (Dockerfile_minicpm45v_ollama_gpu_VRAM9GB); remove older, redundant Dockerfiles.
|
||||||
|
- Update build-images.sh to build the new image tags (minicpm45v, qwen3vl, nanonets-ocr) and adjust messaging/targets accordingly.
|
||||||
|
- Documentation overhaul: readme.md and readme.hints.md updated to reflect vLLM vs Ollama runtimes, corrected ports/VRAM estimates, volume recommendations, and API endpoint details.
|
||||||
|
- Tests updated to target the new model ID (nanonets/Nanonets-OCR2-3B), to process one page per batch, and to include a 10-minute AbortSignal timeout for OCR requests.
|
||||||
|
- Added focused extraction test suites (test/test.invoices.extraction.ts and test/test.invoices.failed.ts) for faster iteration and debugging of invoice extraction.
|
||||||
|
- Bump devDependencies: @git.zone/tsrun -> ^2.0.1 and @git.zone/tstest -> ^3.1.5.
|
||||||
|
- Misc: test helper references and docker compose/test port mapping fixed (nanonets uses 8000), and various README sections cleaned and reorganized.
|
||||||
|
|
||||||
|
## 2026-01-18 - 1.13.2 - fix(tests)
|
||||||
|
stabilize OCR extraction tests and manage GPU containers
|
||||||
|
|
||||||
|
- Add stopAllGpuContainers() and call it before starting GPU images to free GPU memory.
|
||||||
|
- Remove PaddleOCR-VL image configs and associated ensure helpers from docker test helper to simplify images list.
|
||||||
|
- Split invoice/bankstatement tests into two sequential stages: Stage 1 runs Nanonets OCR to produce markdown files, Stage 2 stops Nanonets and runs model extraction from saved markdown (avoids GPU contention).
|
||||||
|
- Introduce temporary markdown directory handling and cleanup; add stopNanonets() and container running checks in tests.
|
||||||
|
- Switch bank statement extraction model from qwen3:8b to gpt-oss:20b; add request timeout and improved logging/console output across tests.
|
||||||
|
- Refactor extractWithConsensus and extraction functions to accept document identifiers, improve error messages and JSON extraction robustness.
|
||||||
|
|
||||||
## 2026-01-18 - 1.13.1 - fix(image_support_files)
|
## 2026-01-18 - 1.13.1 - fix(image_support_files)
|
||||||
remove PaddleOCR-VL server scripts from image_support_files
|
remove PaddleOCR-VL server scripts from image_support_files
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
{
|
{
|
||||||
"name": "@host.today/ht-docker-ai",
|
"name": "@host.today/ht-docker-ai",
|
||||||
"version": "1.13.1",
|
"version": "1.16.0",
|
||||||
"type": "module",
|
"type": "module",
|
||||||
"private": false,
|
"private": false,
|
||||||
"description": "Docker images for AI vision-language models including MiniCPM-V 4.5",
|
"description": "Docker images for AI vision-language models including MiniCPM-V 4.5",
|
||||||
@@ -13,8 +13,10 @@
|
|||||||
"test": "tstest test/ --verbose"
|
"test": "tstest test/ --verbose"
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"@git.zone/tsrun": "^1.3.3",
|
"@git.zone/tsrun": "^2.0.1",
|
||||||
"@git.zone/tstest": "^1.0.90"
|
"@git.zone/tstest": "^3.1.5",
|
||||||
|
"@push.rocks/smartagent": "^1.6.2",
|
||||||
|
"@push.rocks/smartai": "^0.13.3"
|
||||||
},
|
},
|
||||||
"repository": {
|
"repository": {
|
||||||
"type": "git",
|
"type": "git",
|
||||||
|
|||||||
2013
pnpm-lock.yaml
generated
2013
pnpm-lock.yaml
generated
File diff suppressed because it is too large
Load Diff
209
readme.hints.md
209
readme.hints.md
@@ -2,12 +2,18 @@
|
|||||||
|
|
||||||
## Architecture
|
## Architecture
|
||||||
|
|
||||||
This project uses **Ollama** as the runtime framework for serving AI models. This provides:
|
This project uses **Ollama** and **vLLM** as runtime frameworks for serving AI models:
|
||||||
|
|
||||||
|
### Ollama-based Images (MiniCPM-V, Qwen3-VL)
|
||||||
- Automatic model download and caching
|
- Automatic model download and caching
|
||||||
- Unified REST API (compatible with OpenAI format)
|
- Unified REST API (compatible with OpenAI format)
|
||||||
- Built-in quantization support
|
- Built-in quantization support
|
||||||
- GPU/CPU auto-detection
|
- GPU auto-detection
|
||||||
|
|
||||||
|
### vLLM-based Images (Nanonets-OCR)
|
||||||
|
- High-performance inference server
|
||||||
|
- OpenAI-compatible API
|
||||||
|
- Optimized for VLM workloads
|
||||||
|
|
||||||
## Model Details
|
## Model Details
|
||||||
|
|
||||||
@@ -24,18 +30,24 @@ This project uses **Ollama** as the runtime framework for serving AI models. Thi
|
|||||||
|------|---------------|
|
|------|---------------|
|
||||||
| Full precision (bf16) | 18GB |
|
| Full precision (bf16) | 18GB |
|
||||||
| int4 quantized | 9GB |
|
| int4 quantized | 9GB |
|
||||||
| GGUF (CPU) | 8GB RAM |
|
|
||||||
|
|
||||||
## Container Startup Flow
|
## Container Startup Flow
|
||||||
|
|
||||||
|
### Ollama-based containers
|
||||||
1. `docker-entrypoint.sh` starts Ollama server in background
|
1. `docker-entrypoint.sh` starts Ollama server in background
|
||||||
2. Waits for server to be ready
|
2. Waits for server to be ready
|
||||||
3. Checks if model already exists in volume
|
3. Checks if model already exists in volume
|
||||||
4. Pulls model if not present
|
4. Pulls model if not present
|
||||||
5. Keeps container running
|
5. Keeps container running
|
||||||
|
|
||||||
|
### vLLM-based containers
|
||||||
|
1. vLLM server starts with model auto-download
|
||||||
|
2. Health check endpoint available at `/health`
|
||||||
|
3. OpenAI-compatible API at `/v1/chat/completions`
|
||||||
|
|
||||||
## Volume Persistence
|
## Volume Persistence
|
||||||
|
|
||||||
|
### Ollama volumes
|
||||||
Mount `/root/.ollama` to persist downloaded models:
|
Mount `/root/.ollama` to persist downloaded models:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
@@ -44,9 +56,16 @@ Mount `/root/.ollama` to persist downloaded models:
|
|||||||
|
|
||||||
Without this volume, the model will be re-downloaded on each container start (~5GB download).
|
Without this volume, the model will be re-downloaded on each container start (~5GB download).
|
||||||
|
|
||||||
|
### vLLM/HuggingFace volumes
|
||||||
|
Mount `/root/.cache/huggingface` for model caching:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
-v hf-cache:/root/.cache/huggingface
|
||||||
|
```
|
||||||
|
|
||||||
## API Endpoints
|
## API Endpoints
|
||||||
|
|
||||||
All endpoints follow the Ollama API specification:
|
### Ollama API (MiniCPM-V, Qwen3-VL)
|
||||||
|
|
||||||
| Endpoint | Method | Description |
|
| Endpoint | Method | Description |
|
||||||
|----------|--------|-------------|
|
|----------|--------|-------------|
|
||||||
@@ -56,113 +75,137 @@ All endpoints follow the Ollama API specification:
|
|||||||
| `/api/pull` | POST | Pull a model |
|
| `/api/pull` | POST | Pull a model |
|
||||||
| `/api/show` | POST | Show model info |
|
| `/api/show` | POST | Show model info |
|
||||||
|
|
||||||
## GPU Detection
|
### vLLM API (Nanonets-OCR)
|
||||||
|
|
||||||
The GPU variant uses Ollama's automatic GPU detection. For CPU-only mode, we set:
|
| Endpoint | Method | Description |
|
||||||
|
|----------|--------|-------------|
|
||||||
```dockerfile
|
| `/health` | GET | Health check |
|
||||||
ENV CUDA_VISIBLE_DEVICES=""
|
| `/v1/models` | GET | List available models |
|
||||||
```
|
| `/v1/chat/completions` | POST | OpenAI-compatible chat completions |
|
||||||
|
|
||||||
This forces Ollama to use CPU inference even if GPU is available.
|
|
||||||
|
|
||||||
## Health Checks
|
## Health Checks
|
||||||
|
|
||||||
Both variants include Docker health checks:
|
All containers include Docker health checks:
|
||||||
|
|
||||||
```dockerfile
|
```dockerfile
|
||||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
|
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
|
||||||
CMD curl -f http://localhost:11434/api/tags || exit 1
|
CMD curl -f http://localhost:11434/api/tags || exit 1
|
||||||
```
|
```
|
||||||
|
|
||||||
CPU variant has longer `start-period` (120s) due to slower startup.
|
---
|
||||||
|
|
||||||
## PaddleOCR-VL (Recommended)
|
## Nanonets-OCR-s
|
||||||
|
|
||||||
### Overview
|
### Overview
|
||||||
|
|
||||||
PaddleOCR-VL is a 0.9B parameter Vision-Language Model specifically optimized for document parsing. It replaces the older PP-Structure approach with native VLM understanding.
|
Nanonets-OCR-s is a Qwen2.5-VL-3B model fine-tuned specifically for document OCR tasks. It outputs structured markdown with semantic tags.
|
||||||
|
|
||||||
**Key advantages over PP-Structure:**
|
**Key features:**
|
||||||
- Native table understanding (no HTML parsing needed)
|
- Based on Qwen2.5-VL-3B (~4B parameters)
|
||||||
- 109 language support
|
- Fine-tuned for document OCR
|
||||||
- Better handling of complex multi-row tables
|
- Outputs markdown with semantic HTML tags
|
||||||
- Structured Markdown/JSON output
|
- ~10GB VRAM
|
||||||
|
|
||||||
### Docker Images
|
### Docker Images
|
||||||
|
|
||||||
| Tag | Description |
|
| Tag | Description |
|
||||||
|-----|-------------|
|
|-----|-------------|
|
||||||
| `paddleocr-vl` | GPU variant using vLLM (recommended) |
|
| `nanonets-ocr` | GPU variant using vLLM (OpenAI-compatible API) |
|
||||||
| `paddleocr-vl-cpu` | CPU variant using transformers |
|
|
||||||
|
|
||||||
### API Endpoints (OpenAI-compatible)
|
### API Endpoints (OpenAI-compatible via vLLM)
|
||||||
|
|
||||||
| Endpoint | Method | Description |
|
| Endpoint | Method | Description |
|
||||||
|----------|--------|-------------|
|
|----------|--------|-------------|
|
||||||
| `/health` | GET | Health check with model info |
|
| `/health` | GET | Health check |
|
||||||
| `/v1/models` | GET | List available models |
|
| `/v1/models` | GET | List available models |
|
||||||
| `/v1/chat/completions` | POST | OpenAI-compatible chat completions |
|
| `/v1/chat/completions` | POST | OpenAI-compatible chat completions |
|
||||||
| `/ocr` | POST | Legacy OCR endpoint |
|
|
||||||
|
|
||||||
### Request/Response Format
|
### Request/Response Format
|
||||||
|
|
||||||
**POST /v1/chat/completions (OpenAI-compatible)**
|
**POST /v1/chat/completions (OpenAI-compatible)**
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"model": "paddleocr-vl",
|
"model": "nanonets/Nanonets-OCR-s",
|
||||||
"messages": [
|
"messages": [
|
||||||
{
|
{
|
||||||
"role": "user",
|
"role": "user",
|
||||||
"content": [
|
"content": [
|
||||||
{"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}},
|
{"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}},
|
||||||
{"type": "text", "text": "Table Recognition:"}
|
{"type": "text", "text": "Extract the text from the above document..."}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"temperature": 0.0,
|
"temperature": 0.0,
|
||||||
"max_tokens": 8192
|
"max_tokens": 4096
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
**Task Prompts:**
|
### Nanonets OCR Prompt
|
||||||
- `"OCR:"` - Text recognition
|
|
||||||
- `"Table Recognition:"` - Table extraction (returns markdown)
|
|
||||||
- `"Formula Recognition:"` - Formula extraction
|
|
||||||
- `"Chart Recognition:"` - Chart extraction
|
|
||||||
|
|
||||||
**Response**
|
The model is designed to work with a specific prompt format:
|
||||||
```json
|
```
|
||||||
{
|
Extract the text from the above document as if you were reading it naturally.
|
||||||
"id": "chatcmpl-...",
|
Return the tables in html format.
|
||||||
"object": "chat.completion",
|
Return the equations in LaTeX representation.
|
||||||
"choices": [
|
If there is an image in the document and image caption is not present, add a small description inside <img></img> tag.
|
||||||
{
|
Watermarks should be wrapped in brackets. Ex: <watermark>OFFICIAL COPY</watermark>.
|
||||||
"index": 0,
|
Page numbers should be wrapped in brackets. Ex: <page_number>14</page_number>.
|
||||||
"message": {
|
|
||||||
"role": "assistant",
|
|
||||||
"content": "| Date | Description | Amount |\n|---|---|---|\n| 2021-06-01 | GITLAB INC | -119.96 |"
|
|
||||||
},
|
|
||||||
"finish_reason": "stop"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Environment Variables
|
|
||||||
|
|
||||||
| Variable | Default | Description |
|
|
||||||
|----------|---------|-------------|
|
|
||||||
| `MODEL_NAME` | `PaddlePaddle/PaddleOCR-VL` | Model to load |
|
|
||||||
| `HOST` | `0.0.0.0` | Server host |
|
|
||||||
| `PORT` | `8000` | Server port |
|
|
||||||
| `MAX_BATCHED_TOKENS` | `16384` | vLLM max batch tokens |
|
|
||||||
| `GPU_MEMORY_UTILIZATION` | `0.9` | GPU memory usage (0-1) |
|
|
||||||
|
|
||||||
### Performance
|
### Performance
|
||||||
|
|
||||||
- **GPU (vLLM)**: ~2-5 seconds per page
|
- **GPU (vLLM)**: ~3-8 seconds per page
|
||||||
- **CPU**: ~30-60 seconds per page
|
- **VRAM usage**: ~10GB
|
||||||
|
|
||||||
|
### Two-Stage Pipeline (Nanonets + Qwen3)
|
||||||
|
|
||||||
|
The Nanonets tests use a two-stage pipeline:
|
||||||
|
1. **Stage 1**: Nanonets-OCR-s converts images to markdown (via vLLM on port 8000)
|
||||||
|
2. **Stage 2**: Qwen3 8B extracts structured JSON from markdown (via Ollama on port 11434)
|
||||||
|
|
||||||
|
**GPU Limitation**: Both vLLM and Ollama require significant GPU memory. On a single GPU system:
|
||||||
|
- Running both simultaneously causes memory contention
|
||||||
|
- For single GPU: Run services sequentially (stop Nanonets before Qwen3)
|
||||||
|
- For multi-GPU: Assign each service to a different GPU
|
||||||
|
|
||||||
|
**Sequential Execution**:
|
||||||
|
```bash
|
||||||
|
# Step 1: Run Nanonets OCR (converts to markdown)
|
||||||
|
docker start nanonets-test
|
||||||
|
# ... perform OCR ...
|
||||||
|
docker stop nanonets-test
|
||||||
|
|
||||||
|
# Step 2: Run Qwen3 extraction (from markdown)
|
||||||
|
docker start minicpm-test
|
||||||
|
# ... extract JSON ...
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Multi-Pass Extraction Strategy
|
||||||
|
|
||||||
|
The bank statement extraction uses a dual-VLM consensus approach:
|
||||||
|
|
||||||
|
### Architecture: Dual-VLM Consensus
|
||||||
|
|
||||||
|
| VLM | Model | Purpose |
|
||||||
|
|-----|-------|---------|
|
||||||
|
| **MiniCPM-V 4.5** | 8B params | Primary visual extraction |
|
||||||
|
| **Nanonets-OCR-s** | ~4B params | Document OCR with semantic output |
|
||||||
|
|
||||||
|
### Extraction Strategy
|
||||||
|
|
||||||
|
1. **Pass 1**: MiniCPM-V visual extraction (images → JSON)
|
||||||
|
2. **Pass 2**: Nanonets-OCR semantic extraction (images → markdown → JSON)
|
||||||
|
3. **Consensus**: If Pass 1 == Pass 2 → Done (fast path)
|
||||||
|
4. **Pass 3+**: MiniCPM-V visual if no consensus
|
||||||
|
|
||||||
|
### Why Dual-VLM Works
|
||||||
|
|
||||||
|
- **Different architectures**: Two independent models cross-check each other
|
||||||
|
- **Specialized strengths**: Nanonets-OCR-s optimized for document structure, MiniCPM-V for general vision
|
||||||
|
- **No structure loss**: Both VLMs see the original images directly
|
||||||
|
- **Fast consensus**: Most documents complete in 2 passes when VLMs agree
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -170,7 +213,7 @@ PaddleOCR-VL is a 0.9B parameter Vision-Language Model specifically optimized fo
|
|||||||
|
|
||||||
To add a new model variant:
|
To add a new model variant:
|
||||||
|
|
||||||
1. Create `Dockerfile_<modelname>`
|
1. Create `Dockerfile_<modelname>_<runtime>_<hardware>_VRAM<size>`
|
||||||
2. Set `MODEL_NAME` environment variable
|
2. Set `MODEL_NAME` environment variable
|
||||||
3. Update `build-images.sh` with new build target
|
3. Update `build-images.sh` with new build target
|
||||||
4. Add documentation to `readme.md`
|
4. Add documentation to `readme.md`
|
||||||
@@ -188,8 +231,8 @@ The model download is ~5GB and may take several minutes.
|
|||||||
|
|
||||||
### Out of memory
|
### Out of memory
|
||||||
|
|
||||||
- GPU: Use int4 quantized version or add more VRAM
|
- GPU: Use a lighter model variant or upgrade VRAM
|
||||||
- CPU: Increase container memory limit: `--memory=16g`
|
- Add more GPU memory: Consider multi-GPU setup
|
||||||
|
|
||||||
### API not responding
|
### API not responding
|
||||||
|
|
||||||
@@ -207,41 +250,6 @@ npmci docker build
|
|||||||
npmci docker push code.foss.global
|
npmci docker push code.foss.global
|
||||||
```
|
```
|
||||||
|
|
||||||
## Multi-Pass Extraction Strategy
|
|
||||||
|
|
||||||
The bank statement extraction uses a dual-VLM consensus approach:
|
|
||||||
|
|
||||||
### Architecture: Dual-VLM Consensus
|
|
||||||
|
|
||||||
| VLM | Model | Purpose |
|
|
||||||
|-----|-------|---------|
|
|
||||||
| **MiniCPM-V 4.5** | 8B params | Primary visual extraction |
|
|
||||||
| **PaddleOCR-VL** | 0.9B params | Table-specialized extraction |
|
|
||||||
|
|
||||||
### Extraction Strategy
|
|
||||||
|
|
||||||
1. **Pass 1**: MiniCPM-V visual extraction (images → JSON)
|
|
||||||
2. **Pass 2**: PaddleOCR-VL table recognition (images → markdown → JSON)
|
|
||||||
3. **Consensus**: If Pass 1 == Pass 2 → Done (fast path)
|
|
||||||
4. **Pass 3+**: MiniCPM-V visual if no consensus
|
|
||||||
|
|
||||||
### Why Dual-VLM Works
|
|
||||||
|
|
||||||
- **Different architectures**: Two independent models cross-check each other
|
|
||||||
- **Specialized strengths**: PaddleOCR-VL optimized for tables, MiniCPM-V for general vision
|
|
||||||
- **No structure loss**: Both VLMs see the original images directly
|
|
||||||
- **Fast consensus**: Most documents complete in 2 passes when VLMs agree
|
|
||||||
|
|
||||||
### Comparison vs Old PP-Structure Approach
|
|
||||||
|
|
||||||
| Approach | Bank Statement Result | Issue |
|
|
||||||
|----------|----------------------|-------|
|
|
||||||
| MiniCPM-V Visual | 28 transactions ✓ | - |
|
|
||||||
| PP-Structure HTML + Visual | 13 transactions ✗ | HTML merged rows incorrectly |
|
|
||||||
| PaddleOCR-VL Table | 28 transactions ✓ | Native table understanding |
|
|
||||||
|
|
||||||
**Key insight**: PP-Structure's HTML output loses structure for complex tables. PaddleOCR-VL's native VLM approach maintains table integrity.
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Related Resources
|
## Related Resources
|
||||||
@@ -249,3 +257,4 @@ The bank statement extraction uses a dual-VLM consensus approach:
|
|||||||
- [Ollama Documentation](https://ollama.ai/docs)
|
- [Ollama Documentation](https://ollama.ai/docs)
|
||||||
- [MiniCPM-V GitHub](https://github.com/OpenBMB/MiniCPM-V)
|
- [MiniCPM-V GitHub](https://github.com/OpenBMB/MiniCPM-V)
|
||||||
- [Ollama API Reference](https://github.com/ollama/ollama/blob/main/docs/api.md)
|
- [Ollama API Reference](https://github.com/ollama/ollama/blob/main/docs/api.md)
|
||||||
|
- [Nanonets-OCR-s on HuggingFace](https://huggingface.co/nanonets/Nanonets-OCR-s)
|
||||||
|
|||||||
268
readme.md
268
readme.md
@@ -1,40 +1,52 @@
|
|||||||
# @host.today/ht-docker-ai 🚀
|
# @host.today/ht-docker-ai 🚀
|
||||||
|
|
||||||
Production-ready Docker images for state-of-the-art AI Vision-Language Models. Run powerful multimodal AI locally with GPU acceleration or CPU fallback—no cloud API keys required.
|
Production-ready Docker images for state-of-the-art AI Vision-Language Models. Run powerful multimodal AI locally with GPU acceleration—**no cloud API keys required**.
|
||||||
|
|
||||||
|
> 🔥 **Three VLMs, one registry.** From high-performance document OCR to GPT-4o-level vision understanding—pick the right tool for your task.
|
||||||
|
|
||||||
## Issue Reporting and Security
|
## Issue Reporting and Security
|
||||||
|
|
||||||
For reporting bugs, issues, or security vulnerabilities, please visit [community.foss.global/](https://community.foss.global/). This is the central community hub for all issue reporting. Developers who sign and comply with our contribution agreement and go through identification can also get a [code.foss.global/](https://code.foss.global/) account to submit Pull Requests directly.
|
For reporting bugs, issues, or security vulnerabilities, please visit [community.foss.global/](https://community.foss.global/). This is the central community hub for all issue reporting. Developers who sign and comply with our contribution agreement and go through identification can also get a [code.foss.global/](https://code.foss.global/) account to submit Pull Requests directly.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## 🎯 What's Included
|
## 🎯 What's Included
|
||||||
|
|
||||||
| Model | Parameters | Best For | API |
|
| Model | Parameters | Best For | API | Port | VRAM |
|
||||||
|-------|-----------|----------|-----|
|
|-------|-----------|----------|-----|------|------|
|
||||||
| **MiniCPM-V 4.5** | 8B | General vision understanding, image analysis, multi-image | Ollama-compatible |
|
| **MiniCPM-V 4.5** | 8B | General vision understanding, multi-image analysis | Ollama-compatible | 11434 | ~9GB |
|
||||||
| **PaddleOCR-VL** | 0.9B | Document parsing, table extraction, OCR | OpenAI-compatible |
|
| **Nanonets-OCR2-3B** | ~3B | Document OCR with semantic markdown, LaTeX, flowcharts | OpenAI-compatible | 8000 | ~12-16GB |
|
||||||
|
| **Qwen3-VL-30B** | 30B (A3B) | Advanced visual agents, code generation from images | Ollama-compatible | 11434 | ~20GB |
|
||||||
|
|
||||||
## 📦 Available Images
|
---
|
||||||
|
|
||||||
|
## 📦 Quick Reference: All Available Images
|
||||||
|
|
||||||
```
|
```
|
||||||
code.foss.global/host.today/ht-docker-ai:<tag>
|
code.foss.global/host.today/ht-docker-ai:<tag>
|
||||||
```
|
```
|
||||||
|
|
||||||
| Tag | Model | Hardware | Port |
|
| Tag | Model | Runtime | Port | VRAM |
|
||||||
|-----|-------|----------|------|
|
|-----|-------|---------|------|------|
|
||||||
| `minicpm45v` / `latest` | MiniCPM-V 4.5 | NVIDIA GPU (9-18GB VRAM) | 11434 |
|
| `minicpm45v` / `latest` | MiniCPM-V 4.5 | Ollama | 11434 | ~9GB |
|
||||||
| `minicpm45v-cpu` | MiniCPM-V 4.5 | CPU only (8GB+ RAM) | 11434 |
|
| `nanonets-ocr` | Nanonets-OCR2-3B | vLLM | 8000 | ~12-16GB |
|
||||||
| `paddleocr-vl` / `paddleocr-vl-gpu` | PaddleOCR-VL | NVIDIA GPU | 8000 |
|
| `qwen3vl` | Qwen3-VL-30B-A3B | Ollama | 11434 | ~20GB |
|
||||||
| `paddleocr-vl-cpu` | PaddleOCR-VL | CPU only | 8000 |
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 🖼️ MiniCPM-V 4.5
|
## 🖼️ MiniCPM-V 4.5
|
||||||
|
|
||||||
A GPT-4o level multimodal LLM from OpenBMB—handles image understanding, OCR, multi-image analysis, and visual reasoning across 30+ languages.
|
A GPT-4o level multimodal LLM from OpenBMB—handles image understanding, OCR, multi-image analysis, and visual reasoning across **30+ languages**.
|
||||||
|
|
||||||
|
### ✨ Key Features
|
||||||
|
|
||||||
|
- 🌍 **Multilingual:** 30+ languages supported
|
||||||
|
- 🖼️ **Multi-image:** Analyze multiple images in one request
|
||||||
|
- 📊 **Versatile:** Charts, documents, photos, diagrams
|
||||||
|
- ⚡ **Efficient:** Runs on consumer GPUs (9GB VRAM)
|
||||||
|
|
||||||
### Quick Start
|
### Quick Start
|
||||||
|
|
||||||
**GPU (Recommended):**
|
|
||||||
```bash
|
```bash
|
||||||
docker run -d \
|
docker run -d \
|
||||||
--name minicpm \
|
--name minicpm \
|
||||||
@@ -44,15 +56,6 @@ docker run -d \
|
|||||||
code.foss.global/host.today/ht-docker-ai:minicpm45v
|
code.foss.global/host.today/ht-docker-ai:minicpm45v
|
||||||
```
|
```
|
||||||
|
|
||||||
**CPU Only:**
|
|
||||||
```bash
|
|
||||||
docker run -d \
|
|
||||||
--name minicpm \
|
|
||||||
-p 11434:11434 \
|
|
||||||
-v ollama-data:/root/.ollama \
|
|
||||||
code.foss.global/host.today/ht-docker-ai:minicpm45v-cpu
|
|
||||||
```
|
|
||||||
|
|
||||||
> 💡 **Pro tip:** Mount the volume to persist downloaded models (~5GB). Without it, models re-download on every container start.
|
> 💡 **Pro tip:** Mount the volume to persist downloaded models (~5GB). Without it, models re-download on every container start.
|
||||||
|
|
||||||
### API Examples
|
### API Examples
|
||||||
@@ -85,110 +88,131 @@ curl http://localhost:11434/api/chat -d '{
|
|||||||
|
|
||||||
### Hardware Requirements
|
### Hardware Requirements
|
||||||
|
|
||||||
| Variant | VRAM/RAM | Notes |
|
| Mode | VRAM Required |
|
||||||
|---------|----------|-------|
|
|------|---------------|
|
||||||
| GPU (int4 quantized) | 9GB VRAM | Recommended for most use cases |
|
| int4 quantized | ~9GB |
|
||||||
| GPU (full precision) | 18GB VRAM | Maximum quality |
|
| Full precision (bf16) | ~18GB |
|
||||||
| CPU (GGUF) | 8GB+ RAM | Slower but accessible |
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 📄 PaddleOCR-VL
|
## 🔍 Nanonets-OCR2-3B
|
||||||
|
|
||||||
A specialized 0.9B Vision-Language Model optimized for document parsing. Native support for tables, formulas, charts, and text extraction in 109 languages.
|
The **latest Nanonets document OCR model** (October 2025 release)—based on Qwen2.5-VL-3B, fine-tuned specifically for document extraction with significant improvements over the original OCR-s.
|
||||||
|
|
||||||
|
### ✨ Key Features
|
||||||
|
|
||||||
|
- 📝 **Semantic output:** Tables → HTML, equations → LaTeX, flowcharts → structured markup
|
||||||
|
- 🌍 **Multilingual:** Inherits Qwen's broad language support
|
||||||
|
- 📄 **30K context:** Handle large, multi-page documents
|
||||||
|
- 🔌 **OpenAI-compatible:** Drop-in replacement for existing pipelines
|
||||||
|
- 🎯 **Improved accuracy:** Better semantic tagging and LaTeX equation extraction vs. OCR-s
|
||||||
|
|
||||||
### Quick Start
|
### Quick Start
|
||||||
|
|
||||||
**GPU:**
|
|
||||||
```bash
|
```bash
|
||||||
docker run -d \
|
docker run -d \
|
||||||
--name paddleocr \
|
--name nanonets \
|
||||||
--gpus all \
|
--gpus all \
|
||||||
-p 8000:8000 \
|
-p 8000:8000 \
|
||||||
-v hf-cache:/root/.cache/huggingface \
|
-v hf-cache:/root/.cache/huggingface \
|
||||||
code.foss.global/host.today/ht-docker-ai:paddleocr-vl
|
code.foss.global/host.today/ht-docker-ai:nanonets-ocr
|
||||||
```
|
```
|
||||||
|
|
||||||
**CPU:**
|
### API Usage
|
||||||
```bash
|
|
||||||
docker run -d \
|
|
||||||
--name paddleocr \
|
|
||||||
-p 8000:8000 \
|
|
||||||
-v hf-cache:/root/.cache/huggingface \
|
|
||||||
code.foss.global/host.today/ht-docker-ai:paddleocr-vl-cpu
|
|
||||||
```
|
|
||||||
|
|
||||||
### OpenAI-Compatible API
|
|
||||||
|
|
||||||
PaddleOCR-VL exposes a fully OpenAI-compatible `/v1/chat/completions` endpoint:
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
curl http://localhost:8000/v1/chat/completions \
|
curl http://localhost:8000/v1/chat/completions \
|
||||||
-H "Content-Type: application/json" \
|
-H "Content-Type: application/json" \
|
||||||
-d '{
|
-d '{
|
||||||
"model": "paddleocr-vl",
|
"model": "nanonets/Nanonets-OCR2-3B",
|
||||||
"messages": [{
|
"messages": [{
|
||||||
"role": "user",
|
"role": "user",
|
||||||
"content": [
|
"content": [
|
||||||
{"type": "image_url", "image_url": {"url": "data:image/png;base64,<base64>"}},
|
{"type": "image_url", "image_url": {"url": "data:image/png;base64,<base64>"}},
|
||||||
{"type": "text", "text": "Table Recognition:"}
|
{"type": "text", "text": "Extract the text from the above document as if you were reading it naturally. Return the tables in html format. Return the equations in LaTeX representation."}
|
||||||
]
|
]
|
||||||
}],
|
}],
|
||||||
"max_tokens": 8192
|
"temperature": 0.0,
|
||||||
|
"max_tokens": 4096
|
||||||
}'
|
}'
|
||||||
```
|
```
|
||||||
|
|
||||||
### Task Prompts
|
### Output Format
|
||||||
|
|
||||||
| Prompt | Output | Use Case |
|
Nanonets-OCR2-3B returns markdown with semantic tags:
|
||||||
|--------|--------|----------|
|
|
||||||
| `OCR:` | Plain text | General text extraction |
|
|
||||||
| `Table Recognition:` | Markdown table | Invoices, bank statements, spreadsheets |
|
|
||||||
| `Formula Recognition:` | LaTeX | Math equations, scientific notation |
|
|
||||||
| `Chart Recognition:` | Description | Graphs and visualizations |
|
|
||||||
|
|
||||||
### API Endpoints
|
| Element | Output Format |
|
||||||
|
|---------|---------------|
|
||||||
|
| Tables | `<table>...</table>` (HTML) |
|
||||||
|
| Equations | `$...$` (LaTeX) |
|
||||||
|
| Images | `<img>description</img>` |
|
||||||
|
| Watermarks | `<watermark>OFFICIAL COPY</watermark>` |
|
||||||
|
| Page numbers | `<page_number>14</page_number>` |
|
||||||
|
| Flowcharts | Structured markup |
|
||||||
|
|
||||||
| Endpoint | Method | Description |
|
### Hardware Requirements
|
||||||
|----------|--------|-------------|
|
|
||||||
| `/health` | GET | Health check with model/device info |
|
|
||||||
| `/formats` | GET | Supported image formats and input methods |
|
|
||||||
| `/v1/models` | GET | List available models |
|
|
||||||
| `/v1/chat/completions` | POST | OpenAI-compatible chat completions |
|
|
||||||
| `/ocr` | POST | Legacy OCR endpoint |
|
|
||||||
|
|
||||||
### Image Input Methods
|
| Config | VRAM |
|
||||||
|
|--------|------|
|
||||||
|
| 30K context (default) | ~12-16GB |
|
||||||
|
| Speed | ~3-8 seconds per page |
|
||||||
|
|
||||||
PaddleOCR-VL accepts images in multiple formats:
|
---
|
||||||
|
|
||||||
```javascript
|
## 🧠 Qwen3-VL-30B-A3B
|
||||||
// Base64 data URL
|
|
||||||
"data:image/png;base64,iVBORw0KGgo..."
|
|
||||||
|
|
||||||
// HTTP URL
|
The **most powerful** Qwen vision model—30B parameters with 3B active (MoE architecture). Handles complex visual reasoning, code generation from screenshots, and visual agent capabilities.
|
||||||
"https://example.com/document.png"
|
|
||||||
|
|
||||||
// Raw base64
|
### ✨ Key Features
|
||||||
"iVBORw0KGgo..."
|
|
||||||
|
- 🚀 **256K context** (expandable to 1M tokens!)
|
||||||
|
- 🤖 **Visual agent capabilities** — can plan and execute multi-step tasks
|
||||||
|
- 💻 **Code generation from images** — screenshot → working code
|
||||||
|
- 🎯 **State-of-the-art** visual reasoning
|
||||||
|
|
||||||
|
### Quick Start
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker run -d \
|
||||||
|
--name qwen3vl \
|
||||||
|
--gpus all \
|
||||||
|
-p 11434:11434 \
|
||||||
|
-v ollama-data:/root/.ollama \
|
||||||
|
code.foss.global/host.today/ht-docker-ai:qwen3vl
|
||||||
```
|
```
|
||||||
|
|
||||||
**Supported formats:** PNG, JPEG, WebP, BMP, GIF, TIFF
|
Then pull the model (one-time, ~20GB):
|
||||||
|
```bash
|
||||||
|
docker exec qwen3vl ollama pull qwen3-vl:30b-a3b
|
||||||
|
```
|
||||||
|
|
||||||
**Optimal resolution:** 1080p–2K. Images are automatically scaled for best results.
|
### API Usage
|
||||||
|
|
||||||
### Performance
|
```bash
|
||||||
|
curl http://localhost:11434/api/chat -d '{
|
||||||
|
"model": "qwen3-vl:30b-a3b",
|
||||||
|
"messages": [{
|
||||||
|
"role": "user",
|
||||||
|
"content": "Analyze this screenshot and write the code to recreate this UI",
|
||||||
|
"images": ["<base64-encoded-image>"]
|
||||||
|
}]
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
| Mode | Speed per Page |
|
### Hardware Requirements
|
||||||
|------|----------------|
|
|
||||||
| GPU (CUDA) | 2–5 seconds |
|
| Requirement | Value |
|
||||||
| CPU | 30–60 seconds |
|
|-------------|-------|
|
||||||
|
| VRAM | ~20GB (Q4_K_M quantization) |
|
||||||
|
| Context | 256K tokens default |
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 🐳 Docker Compose
|
## 🐳 Docker Compose
|
||||||
|
|
||||||
|
Run multiple VLMs together for maximum flexibility:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
version: '3.8'
|
|
||||||
services:
|
services:
|
||||||
# General vision tasks
|
# General vision tasks
|
||||||
minicpm:
|
minicpm:
|
||||||
@@ -206,9 +230,9 @@ services:
|
|||||||
capabilities: [gpu]
|
capabilities: [gpu]
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
|
|
||||||
# Document parsing / OCR
|
# Document OCR with semantic output
|
||||||
paddleocr:
|
nanonets:
|
||||||
image: code.foss.global/host.today/ht-docker-ai:paddleocr-vl
|
image: code.foss.global/host.today/ht-docker-ai:nanonets-ocr
|
||||||
ports:
|
ports:
|
||||||
- "8000:8000"
|
- "8000:8000"
|
||||||
volumes:
|
volumes:
|
||||||
@@ -231,7 +255,7 @@ volumes:
|
|||||||
|
|
||||||
## ⚙️ Environment Variables
|
## ⚙️ Environment Variables
|
||||||
|
|
||||||
### MiniCPM-V 4.5
|
### MiniCPM-V 4.5 & Qwen3-VL (Ollama-based)
|
||||||
|
|
||||||
| Variable | Default | Description |
|
| Variable | Default | Description |
|
||||||
|----------|---------|-------------|
|
|----------|---------|-------------|
|
||||||
@@ -239,13 +263,47 @@ volumes:
|
|||||||
| `OLLAMA_HOST` | `0.0.0.0` | API bind address |
|
| `OLLAMA_HOST` | `0.0.0.0` | API bind address |
|
||||||
| `OLLAMA_ORIGINS` | `*` | Allowed CORS origins |
|
| `OLLAMA_ORIGINS` | `*` | Allowed CORS origins |
|
||||||
|
|
||||||
### PaddleOCR-VL
|
### Nanonets-OCR (vLLM-based)
|
||||||
|
|
||||||
| Variable | Default | Description |
|
| Variable | Default | Description |
|
||||||
|----------|---------|-------------|
|
|----------|---------|-------------|
|
||||||
| `MODEL_NAME` | `PaddlePaddle/PaddleOCR-VL` | HuggingFace model ID |
|
| `MODEL_NAME` | `nanonets/Nanonets-OCR2-3B` | HuggingFace model ID |
|
||||||
| `SERVER_HOST` | `0.0.0.0` | API bind address |
|
| `HOST` | `0.0.0.0` | API bind address |
|
||||||
| `SERVER_PORT` | `8000` | API port |
|
| `PORT` | `8000` | API port |
|
||||||
|
| `MAX_MODEL_LEN` | `30000` | Maximum sequence length |
|
||||||
|
| `GPU_MEMORY_UTILIZATION` | `0.9` | GPU memory usage (0-1) |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🏗️ Architecture Notes
|
||||||
|
|
||||||
|
### Dual-VLM Consensus Strategy
|
||||||
|
|
||||||
|
For production document extraction, consider using multiple models together:
|
||||||
|
|
||||||
|
1. **Pass 1:** MiniCPM-V visual extraction (images → JSON)
|
||||||
|
2. **Pass 2:** Nanonets-OCR semantic extraction (images → markdown → JSON)
|
||||||
|
3. **Consensus:** If results match → Done (fast path)
|
||||||
|
4. **Pass 3+:** Additional visual passes if needed
|
||||||
|
|
||||||
|
This dual-VLM approach catches extraction errors that single models miss.
|
||||||
|
|
||||||
|
### Why Multi-Model Works
|
||||||
|
|
||||||
|
- **Different architectures:** Independent models cross-validate each other
|
||||||
|
- **Specialized strengths:** Nanonets-OCR2-3B excels at document structure; MiniCPM-V handles general vision
|
||||||
|
- **Native processing:** All VLMs see original images—no intermediate structure loss
|
||||||
|
|
||||||
|
### Model Selection Guide
|
||||||
|
|
||||||
|
| Task | Recommended Model |
|
||||||
|
|------|-------------------|
|
||||||
|
| General image understanding | MiniCPM-V 4.5 |
|
||||||
|
| Document OCR with structure preservation | Nanonets-OCR2-3B |
|
||||||
|
| Complex visual reasoning / code generation | Qwen3-VL-30B |
|
||||||
|
| Multi-image analysis | MiniCPM-V 4.5 |
|
||||||
|
| Visual agent tasks | Qwen3-VL-30B |
|
||||||
|
| Large documents (30K+ tokens) | Nanonets-OCR2-3B |
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -260,42 +318,21 @@ cd ht-docker-ai
|
|||||||
./build-images.sh
|
./build-images.sh
|
||||||
|
|
||||||
# Run tests
|
# Run tests
|
||||||
./test-images.sh
|
pnpm test
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 🏗️ Architecture Notes
|
|
||||||
|
|
||||||
### Dual-VLM Consensus Strategy
|
|
||||||
|
|
||||||
For production document extraction, consider using both models together:
|
|
||||||
|
|
||||||
1. **Pass 1:** MiniCPM-V visual extraction (images → JSON)
|
|
||||||
2. **Pass 2:** PaddleOCR-VL table recognition (images → markdown → JSON)
|
|
||||||
3. **Consensus:** If results match → Done (fast path)
|
|
||||||
4. **Pass 3+:** Additional visual passes if needed
|
|
||||||
|
|
||||||
This dual-VLM approach catches extraction errors that single models miss.
|
|
||||||
|
|
||||||
### Why This Works
|
|
||||||
|
|
||||||
- **Different architectures:** Two independent models cross-validate each other
|
|
||||||
- **Specialized strengths:** PaddleOCR-VL excels at tables; MiniCPM-V handles general vision
|
|
||||||
- **Native processing:** Both VLMs see original images—no intermediate HTML/structure loss
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 🔍 Troubleshooting
|
## 🔍 Troubleshooting
|
||||||
|
|
||||||
### Model download hangs
|
### Model download hangs
|
||||||
```bash
|
```bash
|
||||||
docker logs -f <container-name>
|
docker logs -f <container-name>
|
||||||
```
|
```
|
||||||
Model downloads can take several minutes (~5GB for MiniCPM-V).
|
Model downloads can take several minutes (~5GB for MiniCPM-V, ~20GB for Qwen3-VL).
|
||||||
|
|
||||||
### Out of memory
|
### Out of memory
|
||||||
- **GPU:** Use the CPU variant or upgrade VRAM
|
- **GPU:** Use a lighter model variant or upgrade VRAM
|
||||||
- **CPU:** Increase container memory: `--memory=16g`
|
- **CPU:** Increase container memory: `--memory=16g`
|
||||||
|
|
||||||
### API not responding
|
### API not responding
|
||||||
@@ -315,6 +352,13 @@ sudo nvidia-ctk runtime configure --runtime=docker
|
|||||||
sudo systemctl restart docker
|
sudo systemctl restart docker
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### GPU Memory Contention (Multi-Model)
|
||||||
|
|
||||||
|
When running multiple VLMs on a single GPU:
|
||||||
|
- vLLM and Ollama both need significant GPU memory
|
||||||
|
- **Single GPU:** Run services sequentially (stop one before starting another)
|
||||||
|
- **Multi-GPU:** Assign each service to a different GPU via `CUDA_VISIBLE_DEVICES`
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## License and Legal Information
|
## License and Legal Information
|
||||||
|
|||||||
@@ -2,11 +2,8 @@ import { execSync } from 'child_process';
|
|||||||
|
|
||||||
// Project container names (only manage these)
|
// Project container names (only manage these)
|
||||||
const PROJECT_CONTAINERS = [
|
const PROJECT_CONTAINERS = [
|
||||||
'paddleocr-vl-test',
|
|
||||||
'paddleocr-vl-gpu-test',
|
|
||||||
'paddleocr-vl-cpu-test',
|
|
||||||
'paddleocr-vl-full-test',
|
|
||||||
'minicpm-test',
|
'minicpm-test',
|
||||||
|
'nanonets-test',
|
||||||
];
|
];
|
||||||
|
|
||||||
// Image configurations
|
// Image configurations
|
||||||
@@ -23,30 +20,6 @@ export interface IImageConfig {
|
|||||||
}
|
}
|
||||||
|
|
||||||
export const IMAGES = {
|
export const IMAGES = {
|
||||||
paddleocrVlGpu: {
|
|
||||||
name: 'paddleocr-vl-gpu',
|
|
||||||
dockerfile: 'Dockerfile_paddleocr_vl_gpu',
|
|
||||||
buildContext: '.',
|
|
||||||
containerName: 'paddleocr-vl-test',
|
|
||||||
ports: ['8000:8000'],
|
|
||||||
volumes: ['ht-huggingface-cache:/root/.cache/huggingface'],
|
|
||||||
gpus: true,
|
|
||||||
healthEndpoint: 'http://localhost:8000/health',
|
|
||||||
healthTimeout: 300000, // 5 minutes for model loading
|
|
||||||
} as IImageConfig,
|
|
||||||
|
|
||||||
paddleocrVlCpu: {
|
|
||||||
name: 'paddleocr-vl-cpu',
|
|
||||||
dockerfile: 'Dockerfile_paddleocr_vl_cpu',
|
|
||||||
buildContext: '.',
|
|
||||||
containerName: 'paddleocr-vl-test',
|
|
||||||
ports: ['8000:8000'],
|
|
||||||
volumes: ['ht-huggingface-cache:/root/.cache/huggingface'],
|
|
||||||
gpus: false,
|
|
||||||
healthEndpoint: 'http://localhost:8000/health',
|
|
||||||
healthTimeout: 300000,
|
|
||||||
} as IImageConfig,
|
|
||||||
|
|
||||||
minicpm: {
|
minicpm: {
|
||||||
name: 'minicpm45v',
|
name: 'minicpm45v',
|
||||||
dockerfile: 'Dockerfile_minicpm45v_gpu',
|
dockerfile: 'Dockerfile_minicpm45v_gpu',
|
||||||
@@ -59,20 +32,17 @@ export const IMAGES = {
|
|||||||
healthTimeout: 120000,
|
healthTimeout: 120000,
|
||||||
} as IImageConfig,
|
} as IImageConfig,
|
||||||
|
|
||||||
// Full PaddleOCR-VL pipeline with PP-DocLayoutV2 + structured JSON output
|
// Nanonets-OCR2-3B - Document OCR optimized VLM (Qwen2.5-VL-3B fine-tuned, Oct 2025)
|
||||||
paddleocrVlFull: {
|
nanonetsOcr: {
|
||||||
name: 'paddleocr-vl-full',
|
name: 'nanonets-ocr',
|
||||||
dockerfile: 'Dockerfile_paddleocr_vl_full',
|
dockerfile: 'Dockerfile_nanonets_vllm_gpu_VRAM10GB',
|
||||||
buildContext: '.',
|
buildContext: '.',
|
||||||
containerName: 'paddleocr-vl-full-test',
|
containerName: 'nanonets-test',
|
||||||
ports: ['8000:8000'],
|
ports: ['8000:8000'],
|
||||||
volumes: [
|
volumes: ['ht-huggingface-cache:/root/.cache/huggingface'],
|
||||||
'ht-huggingface-cache:/root/.cache/huggingface',
|
|
||||||
'ht-paddleocr-cache:/root/.paddleocr',
|
|
||||||
],
|
|
||||||
gpus: true,
|
gpus: true,
|
||||||
healthEndpoint: 'http://localhost:8000/health',
|
healthEndpoint: 'http://localhost:8000/health',
|
||||||
healthTimeout: 600000, // 10 minutes for model loading (vLLM + PP-DocLayoutV2)
|
healthTimeout: 300000, // 5 minutes for model loading
|
||||||
} as IImageConfig,
|
} as IImageConfig,
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -126,7 +96,7 @@ export function removeContainer(containerName: string): void {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Stop all project containers that conflict with the required one
|
* Stop all project containers that conflict with the required one (port-based)
|
||||||
*/
|
*/
|
||||||
export function stopConflictingContainers(requiredContainer: string, requiredPort: string): void {
|
export function stopConflictingContainers(requiredContainer: string, requiredPort: string): void {
|
||||||
// Stop project containers using the same port
|
// Stop project containers using the same port
|
||||||
@@ -144,6 +114,24 @@ export function stopConflictingContainers(requiredContainer: string, requiredPor
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Stop all GPU-consuming project containers (for GPU memory management)
|
||||||
|
* This ensures GPU memory is freed before starting a new GPU service
|
||||||
|
*/
|
||||||
|
export function stopAllGpuContainers(exceptContainer?: string): void {
|
||||||
|
for (const container of PROJECT_CONTAINERS) {
|
||||||
|
if (container === exceptContainer) continue;
|
||||||
|
|
||||||
|
if (isContainerRunning(container)) {
|
||||||
|
console.log(`[Docker] Stopping GPU container: ${container}`);
|
||||||
|
exec(`docker stop ${container}`, true);
|
||||||
|
// Give the GPU a moment to free memory
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Brief pause to allow GPU memory to be released
|
||||||
|
execSync('sleep 2');
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Build a Docker image
|
* Build a Docker image
|
||||||
*/
|
*/
|
||||||
@@ -220,6 +208,11 @@ export async function ensureService(config: IImageConfig): Promise<boolean> {
|
|||||||
buildImage(config);
|
buildImage(config);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// For GPU services, stop ALL other GPU containers to free GPU memory
|
||||||
|
if (config.gpus) {
|
||||||
|
stopAllGpuContainers(config.containerName);
|
||||||
|
}
|
||||||
|
|
||||||
// Stop conflicting containers on the same port
|
// Stop conflicting containers on the same port
|
||||||
const mainPort = config.ports[0];
|
const mainPort = config.ports[0];
|
||||||
stopConflictingContainers(config.containerName, mainPort);
|
stopConflictingContainers(config.containerName, mainPort);
|
||||||
@@ -240,21 +233,7 @@ export async function ensureService(config: IImageConfig): Promise<boolean> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Ensure PaddleOCR-VL GPU service is running
|
* Ensure MiniCPM service is running (Ollama with GPU)
|
||||||
*/
|
|
||||||
export async function ensurePaddleOcrVlGpu(): Promise<boolean> {
|
|
||||||
return ensureService(IMAGES.paddleocrVlGpu);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Ensure PaddleOCR-VL CPU service is running
|
|
||||||
*/
|
|
||||||
export async function ensurePaddleOcrVlCpu(): Promise<boolean> {
|
|
||||||
return ensureService(IMAGES.paddleocrVlCpu);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Ensure MiniCPM service is running
|
|
||||||
*/
|
*/
|
||||||
export async function ensureMiniCpm(): Promise<boolean> {
|
export async function ensureMiniCpm(): Promise<boolean> {
|
||||||
return ensureService(IMAGES.minicpm);
|
return ensureService(IMAGES.minicpm);
|
||||||
@@ -272,30 +251,6 @@ export function isGpuAvailable(): boolean {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Ensure PaddleOCR-VL service (auto-detect GPU/CPU)
|
|
||||||
*/
|
|
||||||
export async function ensurePaddleOcrVl(): Promise<boolean> {
|
|
||||||
if (isGpuAvailable()) {
|
|
||||||
console.log('[Docker] GPU detected, using GPU image');
|
|
||||||
return ensurePaddleOcrVlGpu();
|
|
||||||
} else {
|
|
||||||
console.log('[Docker] No GPU detected, using CPU image');
|
|
||||||
return ensurePaddleOcrVlCpu();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Ensure PaddleOCR-VL Full Pipeline service (PP-DocLayoutV2 + structured output)
|
|
||||||
* This is the recommended service for production use - outputs structured JSON/Markdown
|
|
||||||
*/
|
|
||||||
export async function ensurePaddleOcrVlFull(): Promise<boolean> {
|
|
||||||
if (!isGpuAvailable()) {
|
|
||||||
console.log('[Docker] WARNING: Full pipeline requires GPU, but none detected');
|
|
||||||
}
|
|
||||||
return ensureService(IMAGES.paddleocrVlFull);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Ensure an Ollama model is pulled and available
|
* Ensure an Ollama model is pulled and available
|
||||||
* Uses the MiniCPM container (which runs Ollama) to pull the model
|
* Uses the MiniCPM container (which runs Ollama) to pull the model
|
||||||
@@ -383,3 +338,14 @@ export async function ensureQwen3Vl(): Promise<boolean> {
|
|||||||
// Then ensure Qwen3-VL 8B is pulled
|
// Then ensure Qwen3-VL 8B is pulled
|
||||||
return ensureOllamaModel('qwen3-vl:8b');
|
return ensureOllamaModel('qwen3-vl:8b');
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Ensure Nanonets-OCR2-3B service is running (via vLLM)
|
||||||
|
* Document OCR optimized VLM based on Qwen2.5-VL-3B (Oct 2025 release)
|
||||||
|
*/
|
||||||
|
export async function ensureNanonetsOcr(): Promise<boolean> {
|
||||||
|
if (!isGpuAvailable()) {
|
||||||
|
console.log('[Docker] WARNING: Nanonets-OCR2-3B requires GPU, but none detected');
|
||||||
|
}
|
||||||
|
return ensureService(IMAGES.nanonetsOcr);
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,9 +1,10 @@
|
|||||||
/**
|
/**
|
||||||
* Bank statement extraction using MiniCPM-V (visual extraction)
|
* Bank statement extraction using MiniCPM-V via smartagent DualAgentOrchestrator
|
||||||
*
|
*
|
||||||
* JSON per-page approach:
|
* Uses vision-capable orchestrator with JsonValidatorTool for self-validation:
|
||||||
* 1. Ask for structured JSON of all transactions per page
|
* 1. Process each page with the orchestrator
|
||||||
* 2. Consensus: extract twice, compare, retry if mismatch
|
* 2. Driver extracts transactions and validates JSON before completing
|
||||||
|
* 3. Streaming output during extraction
|
||||||
*/
|
*/
|
||||||
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||||
import * as fs from 'fs';
|
import * as fs from 'fs';
|
||||||
@@ -11,6 +12,8 @@ import * as path from 'path';
|
|||||||
import { execSync } from 'child_process';
|
import { execSync } from 'child_process';
|
||||||
import * as os from 'os';
|
import * as os from 'os';
|
||||||
import { ensureMiniCpm } from './helpers/docker.js';
|
import { ensureMiniCpm } from './helpers/docker.js';
|
||||||
|
import { SmartAi } from '@push.rocks/smartai';
|
||||||
|
import { DualAgentOrchestrator, JsonValidatorTool } from '@push.rocks/smartagent';
|
||||||
|
|
||||||
const OLLAMA_URL = 'http://localhost:11434';
|
const OLLAMA_URL = 'http://localhost:11434';
|
||||||
const MODEL = 'openbmb/minicpm-v4.5:q8_0';
|
const MODEL = 'openbmb/minicpm-v4.5:q8_0';
|
||||||
@@ -21,21 +24,9 @@ interface ITransaction {
|
|||||||
amount: number;
|
amount: number;
|
||||||
}
|
}
|
||||||
|
|
||||||
const JSON_PROMPT = `Extract ALL transactions from this bank statement page as a JSON array.
|
// SmartAi instance and orchestrator (initialized in setup)
|
||||||
|
let smartAi: SmartAi;
|
||||||
IMPORTANT RULES:
|
let orchestrator: DualAgentOrchestrator;
|
||||||
1. Each transaction has: date, description/counterparty, and an amount
|
|
||||||
2. Amount is NEGATIVE for money going OUT (debits, payments, withdrawals)
|
|
||||||
3. Amount is POSITIVE for money coming IN (credits, deposits, refunds)
|
|
||||||
4. Date format: YYYY-MM-DD
|
|
||||||
5. Do NOT include: opening balance, closing balance, subtotals, headers, or summary rows
|
|
||||||
6. Only include actual transactions with a specific date and amount
|
|
||||||
|
|
||||||
Return ONLY this JSON format, no explanation:
|
|
||||||
[
|
|
||||||
{"date": "2021-06-01", "counterparty": "COMPANY NAME", "amount": -25.99},
|
|
||||||
{"date": "2021-06-02", "counterparty": "DEPOSIT FROM", "amount": 100.00}
|
|
||||||
]`;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Convert PDF to PNG images using ImageMagick
|
* Convert PDF to PNG images using ImageMagick
|
||||||
@@ -65,206 +56,31 @@ function convertPdfToImages(pdfPath: string): string[] {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
const EXTRACTION_PROMPT = `Extract ALL transactions from this bank statement page as a JSON array.
|
||||||
* Query for JSON extraction
|
|
||||||
*/
|
|
||||||
async function queryJson(image: string, queryId: string): Promise<string> {
|
|
||||||
console.log(` [${queryId}] Sending request to ${MODEL}...`);
|
|
||||||
const startTime = Date.now();
|
|
||||||
|
|
||||||
const response = await fetch(`${OLLAMA_URL}/api/chat`, {
|
IMPORTANT RULES:
|
||||||
method: 'POST',
|
1. Each transaction has: date, counterparty (description), and an amount
|
||||||
headers: { 'Content-Type': 'application/json' },
|
2. Amount is NEGATIVE for money going OUT (debits, payments, withdrawals)
|
||||||
body: JSON.stringify({
|
3. Amount is POSITIVE for money coming IN (credits, deposits, refunds)
|
||||||
model: MODEL,
|
4. Date format: YYYY-MM-DD
|
||||||
messages: [{
|
5. Do NOT include: opening balance, closing balance, subtotals, headers, or summary rows
|
||||||
role: 'user',
|
6. Only include actual transactions with a specific date and amount
|
||||||
content: JSON_PROMPT,
|
|
||||||
images: [image],
|
|
||||||
}],
|
|
||||||
stream: false,
|
|
||||||
options: {
|
|
||||||
num_predict: 4000,
|
|
||||||
temperature: 0.1,
|
|
||||||
},
|
|
||||||
}),
|
|
||||||
});
|
|
||||||
|
|
||||||
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
|
Before completing, validate your JSON output:
|
||||||
|
|
||||||
if (!response.ok) {
|
<tool_call>
|
||||||
console.log(` [${queryId}] ERROR: ${response.status} (${elapsed}s)`);
|
<tool>json</tool>
|
||||||
throw new Error(`Ollama API error: ${response.status}`);
|
<action>validate</action>
|
||||||
}
|
<params>{"jsonString": "YOUR_JSON_ARRAY_HERE"}</params>
|
||||||
|
</tool_call>
|
||||||
|
|
||||||
const data = await response.json();
|
Output format (must be a valid JSON array):
|
||||||
const content = (data.message?.content || '').trim();
|
[
|
||||||
console.log(` [${queryId}] Response received (${elapsed}s, ${content.length} chars)`);
|
{"date": "2021-06-01", "counterparty": "COMPANY NAME", "amount": -25.99},
|
||||||
return content;
|
{"date": "2021-06-02", "counterparty": "DEPOSIT FROM", "amount": 100.00}
|
||||||
}
|
]
|
||||||
|
|
||||||
/**
|
Only complete after validation passes. Output the final JSON array in <task_complete> tags.`;
|
||||||
* Sanitize JSON string - fix common issues from vision model output
|
|
||||||
*/
|
|
||||||
function sanitizeJson(jsonStr: string): string {
|
|
||||||
let s = jsonStr;
|
|
||||||
|
|
||||||
// Fix +number (e.g., +93.80 -> 93.80) - JSON doesn't allow + prefix
|
|
||||||
// Handle various whitespace patterns
|
|
||||||
s = s.replace(/"amount"\s*:\s*\+/g, '"amount": ');
|
|
||||||
s = s.replace(/:\s*\+(\d)/g, ': $1');
|
|
||||||
|
|
||||||
// Fix European number format with thousands separator (e.g., 1.000.00 -> 1000.00)
|
|
||||||
// Pattern: "amount": X.XXX.XX where X.XXX is thousands and .XX is decimal
|
|
||||||
s = s.replace(/"amount"\s*:\s*(-?)(\d{1,3})\.(\d{3})\.(\d{2})\b/g, '"amount": $1$2$3.$4');
|
|
||||||
// Also handle larger numbers like 10.000.00
|
|
||||||
s = s.replace(/"amount"\s*:\s*(-?)(\d{1,3})\.(\d{3})\.(\d{3})\.(\d{2})\b/g, '"amount": $1$2$3$4.$5');
|
|
||||||
|
|
||||||
// Fix trailing commas before ] or }
|
|
||||||
s = s.replace(/,\s*([}\]])/g, '$1');
|
|
||||||
|
|
||||||
// Fix unescaped newlines inside strings (replace with space)
|
|
||||||
s = s.replace(/"([^"\\]*)\n([^"]*)"/g, '"$1 $2"');
|
|
||||||
|
|
||||||
// Fix unescaped tabs inside strings
|
|
||||||
s = s.replace(/"([^"\\]*)\t([^"]*)"/g, '"$1 $2"');
|
|
||||||
|
|
||||||
// Fix unescaped backslashes (but not already escaped ones)
|
|
||||||
s = s.replace(/\\(?!["\\/bfnrtu])/g, '\\\\');
|
|
||||||
|
|
||||||
// Fix common issues with counterparty names containing special chars
|
|
||||||
s = s.replace(/"counterparty":\s*"([^"]*)'([^"]*)"/g, '"counterparty": "$1$2"');
|
|
||||||
|
|
||||||
// Remove control characters except newlines (which we handle above)
|
|
||||||
s = s.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F]/g, ' ');
|
|
||||||
|
|
||||||
return s;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Parse JSON response into transactions
|
|
||||||
*/
|
|
||||||
function parseJsonResponse(response: string, queryId: string): ITransaction[] {
|
|
||||||
console.log(` [${queryId}] Parsing response...`);
|
|
||||||
|
|
||||||
// Try to find JSON in markdown code block
|
|
||||||
const codeBlockMatch = response.match(/```(?:json)?\s*([\s\S]*?)```/);
|
|
||||||
let jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : response.trim();
|
|
||||||
|
|
||||||
if (codeBlockMatch) {
|
|
||||||
console.log(` [${queryId}] Found JSON in code block`);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sanitize JSON (fix +number issue)
|
|
||||||
jsonStr = sanitizeJson(jsonStr);
|
|
||||||
|
|
||||||
try {
|
|
||||||
const parsed = JSON.parse(jsonStr);
|
|
||||||
if (Array.isArray(parsed)) {
|
|
||||||
const txs = parsed.map(tx => ({
|
|
||||||
date: String(tx.date || ''),
|
|
||||||
counterparty: String(tx.counterparty || tx.description || ''),
|
|
||||||
amount: parseAmount(tx.amount),
|
|
||||||
}));
|
|
||||||
console.log(` [${queryId}] Parsed ${txs.length} transactions (direct)`);
|
|
||||||
return txs;
|
|
||||||
}
|
|
||||||
console.log(` [${queryId}] Parsed JSON is not an array`);
|
|
||||||
} catch (e) {
|
|
||||||
const errMsg = (e as Error).message;
|
|
||||||
console.log(` [${queryId}] Direct parse failed: ${errMsg}`);
|
|
||||||
|
|
||||||
// Log problematic section with context
|
|
||||||
const posMatch = errMsg.match(/position (\d+)/);
|
|
||||||
if (posMatch) {
|
|
||||||
const pos = parseInt(posMatch[1]);
|
|
||||||
const start = Math.max(0, pos - 40);
|
|
||||||
const end = Math.min(jsonStr.length, pos + 40);
|
|
||||||
const context = jsonStr.substring(start, end);
|
|
||||||
const marker = ' '.repeat(pos - start) + '^';
|
|
||||||
console.log(` [${queryId}] Context around error position ${pos}:`);
|
|
||||||
console.log(` [${queryId}] ...${context}...`);
|
|
||||||
console.log(` [${queryId}] ${marker}`);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try to find JSON array pattern
|
|
||||||
const arrayMatch = jsonStr.match(/\[[\s\S]*\]/);
|
|
||||||
if (arrayMatch) {
|
|
||||||
console.log(` [${queryId}] Found array pattern, trying to parse...`);
|
|
||||||
const sanitizedArray = sanitizeJson(arrayMatch[0]);
|
|
||||||
try {
|
|
||||||
const parsed = JSON.parse(sanitizedArray);
|
|
||||||
if (Array.isArray(parsed)) {
|
|
||||||
const txs = parsed.map(tx => ({
|
|
||||||
date: String(tx.date || ''),
|
|
||||||
counterparty: String(tx.counterparty || tx.description || ''),
|
|
||||||
amount: parseAmount(tx.amount),
|
|
||||||
}));
|
|
||||||
console.log(` [${queryId}] Parsed ${txs.length} transactions (array match)`);
|
|
||||||
return txs;
|
|
||||||
}
|
|
||||||
} catch (e2) {
|
|
||||||
const errMsg2 = (e2 as Error).message;
|
|
||||||
console.log(` [${queryId}] Array parse failed: ${errMsg2}`);
|
|
||||||
const posMatch2 = errMsg2.match(/position (\d+)/);
|
|
||||||
if (posMatch2) {
|
|
||||||
const pos2 = parseInt(posMatch2[1]);
|
|
||||||
console.log(` [${queryId}] Context around error: ...${sanitizedArray.substring(Math.max(0, pos2 - 30), pos2 + 30)}...`);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try to extract individual objects from the malformed array
|
|
||||||
console.log(` [${queryId}] Attempting object-by-object extraction...`);
|
|
||||||
const extracted = extractTransactionsFromMalformedJson(sanitizedArray, queryId);
|
|
||||||
if (extracted.length > 0) {
|
|
||||||
console.log(` [${queryId}] Recovered ${extracted.length} transactions via object extraction`);
|
|
||||||
return extracted;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
console.log(` [${queryId}] No array pattern found in response`);
|
|
||||||
console.log(` [${queryId}] Raw response preview: ${response.substring(0, 200)}...`);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
console.log(` [${queryId}] PARSE FAILED - returning empty array`);
|
|
||||||
return [];
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Extract transactions from malformed JSON by parsing objects individually
|
|
||||||
*/
|
|
||||||
function extractTransactionsFromMalformedJson(jsonStr: string, queryId: string): ITransaction[] {
|
|
||||||
const transactions: ITransaction[] = [];
|
|
||||||
|
|
||||||
// Match individual transaction objects
|
|
||||||
const objectPattern = /\{\s*"date"\s*:\s*"([^"]+)"\s*,\s*"counterparty"\s*:\s*"([^"]+)"\s*,\s*"amount"\s*:\s*([+-]?\d+\.?\d*)\s*\}/g;
|
|
||||||
let match;
|
|
||||||
|
|
||||||
while ((match = objectPattern.exec(jsonStr)) !== null) {
|
|
||||||
transactions.push({
|
|
||||||
date: match[1],
|
|
||||||
counterparty: match[2],
|
|
||||||
amount: parseFloat(match[3]),
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
// Also try with different field orders (amount before counterparty, etc.)
|
|
||||||
if (transactions.length === 0) {
|
|
||||||
const altPattern = /\{\s*"date"\s*:\s*"([^"]+)"[^}]*"amount"\s*:\s*([+-]?\d+\.?\d*)[^}]*\}/g;
|
|
||||||
while ((match = altPattern.exec(jsonStr)) !== null) {
|
|
||||||
// Try to extract counterparty from the match
|
|
||||||
const counterpartyMatch = match[0].match(/"counterparty"\s*:\s*"([^"]+)"/);
|
|
||||||
const descMatch = match[0].match(/"description"\s*:\s*"([^"]+)"/);
|
|
||||||
transactions.push({
|
|
||||||
date: match[1],
|
|
||||||
counterparty: counterpartyMatch?.[1] || descMatch?.[1] || 'UNKNOWN',
|
|
||||||
amount: parseFloat(match[2]),
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return transactions;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Parse amount from various formats
|
* Parse amount from various formats
|
||||||
@@ -284,102 +100,101 @@ function parseAmount(value: unknown): number {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Compare two transaction arrays for consensus
|
* Extract JSON from response (handles markdown code blocks and task_complete tags)
|
||||||
*/
|
*/
|
||||||
function transactionArraysMatch(a: ITransaction[], b: ITransaction[]): boolean {
|
function extractJsonFromResponse(response: string): unknown[] | null {
|
||||||
if (a.length !== b.length) return false;
|
// Try to find JSON in task_complete tags
|
||||||
|
const completeMatch = response.match(/<task_complete>([\s\S]*?)<\/task_complete>/);
|
||||||
for (let i = 0; i < a.length; i++) {
|
if (completeMatch) {
|
||||||
const dateMatch = a[i].date === b[i].date;
|
const content = completeMatch[1].trim();
|
||||||
const amountMatch = Math.abs(a[i].amount - b[i].amount) < 0.01;
|
// Try to find JSON in the content
|
||||||
if (!dateMatch || !amountMatch) return false;
|
const codeBlockMatch = content.match(/```(?:json)?\s*([\s\S]*?)```/);
|
||||||
}
|
const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : content;
|
||||||
|
try {
|
||||||
return true;
|
const parsed = JSON.parse(jsonStr);
|
||||||
}
|
if (Array.isArray(parsed)) return parsed;
|
||||||
|
} catch {
|
||||||
/**
|
// Try to find JSON array pattern
|
||||||
* Compare two transaction arrays and log differences
|
const jsonMatch = jsonStr.match(/\[[\s\S]*\]/);
|
||||||
*/
|
if (jsonMatch) {
|
||||||
function compareAndLogDifferences(txs1: ITransaction[], txs2: ITransaction[], pageNum: number): void {
|
try {
|
||||||
if (txs1.length !== txs2.length) {
|
const parsed = JSON.parse(jsonMatch[0]);
|
||||||
console.log(` [Page ${pageNum}] Length mismatch: Q1=${txs1.length}, Q2=${txs2.length}`);
|
if (Array.isArray(parsed)) return parsed;
|
||||||
return;
|
} catch {
|
||||||
}
|
return null;
|
||||||
|
}
|
||||||
for (let i = 0; i < txs1.length; i++) {
|
}
|
||||||
const dateMatch = txs1[i].date === txs2[i].date;
|
|
||||||
const amountMatch = Math.abs(txs1[i].amount - txs2[i].amount) < 0.01;
|
|
||||||
|
|
||||||
if (!dateMatch || !amountMatch) {
|
|
||||||
console.log(` [Page ${pageNum}] Tx ${i + 1} differs:`);
|
|
||||||
console.log(` Q1: ${txs1[i].date} | ${txs1[i].amount}`);
|
|
||||||
console.log(` Q2: ${txs2[i].date} | ${txs2[i].amount}`);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Try to find JSON in markdown code block
|
||||||
|
const codeBlockMatch = response.match(/```(?:json)?\s*([\s\S]*?)```/);
|
||||||
|
const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : response.trim();
|
||||||
|
|
||||||
|
try {
|
||||||
|
const parsed = JSON.parse(jsonStr);
|
||||||
|
if (Array.isArray(parsed)) return parsed;
|
||||||
|
} catch {
|
||||||
|
// Try to find JSON array pattern
|
||||||
|
const jsonMatch = jsonStr.match(/\[[\s\S]*\]/);
|
||||||
|
if (jsonMatch) {
|
||||||
|
try {
|
||||||
|
const parsed = JSON.parse(jsonMatch[0]);
|
||||||
|
if (Array.isArray(parsed)) return parsed;
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Extract transactions from a single page with consensus
|
* Parse JSON response into transactions
|
||||||
|
*/
|
||||||
|
function parseJsonToTransactions(response: string): ITransaction[] {
|
||||||
|
const parsed = extractJsonFromResponse(response);
|
||||||
|
if (!parsed || !Array.isArray(parsed)) return [];
|
||||||
|
|
||||||
|
return parsed.map((tx: any) => ({
|
||||||
|
date: String(tx.date || ''),
|
||||||
|
counterparty: String(tx.counterparty || tx.description || ''),
|
||||||
|
amount: parseAmount(tx.amount),
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract transactions from a single page using smartagent orchestrator
|
||||||
*/
|
*/
|
||||||
async function extractTransactionsFromPage(image: string, pageNum: number): Promise<ITransaction[]> {
|
async function extractTransactionsFromPage(image: string, pageNum: number): Promise<ITransaction[]> {
|
||||||
const MAX_ATTEMPTS = 5;
|
|
||||||
console.log(`\n ======== Page ${pageNum} ========`);
|
console.log(`\n ======== Page ${pageNum} ========`);
|
||||||
console.log(` [Page ${pageNum}] Starting JSON extraction...`);
|
|
||||||
|
|
||||||
for (let attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
|
const startTime = Date.now();
|
||||||
console.log(`\n [Page ${pageNum}] --- Attempt ${attempt}/${MAX_ATTEMPTS} ---`);
|
|
||||||
|
|
||||||
// Extract twice in parallel
|
const result = await orchestrator.run(EXTRACTION_PROMPT, { images: [image] });
|
||||||
const q1Id = `P${pageNum}A${attempt}Q1`;
|
|
||||||
const q2Id = `P${pageNum}A${attempt}Q2`;
|
|
||||||
|
|
||||||
const [response1, response2] = await Promise.all([
|
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
|
||||||
queryJson(image, q1Id),
|
console.log(`\n [Page ${pageNum}] Completed in ${elapsed}s (${result.iterations} iterations, status: ${result.status})`);
|
||||||
queryJson(image, q2Id),
|
|
||||||
]);
|
|
||||||
|
|
||||||
const txs1 = parseJsonResponse(response1, q1Id);
|
const transactions = parseJsonToTransactions(result.result);
|
||||||
const txs2 = parseJsonResponse(response2, q2Id);
|
|
||||||
|
|
||||||
console.log(` [Page ${pageNum}] Results: Q1=${txs1.length} txs, Q2=${txs2.length} txs`);
|
console.log(` [Page ${pageNum}] Extracted ${transactions.length} transactions:`);
|
||||||
|
for (let i = 0; i < Math.min(transactions.length, 10); i++) {
|
||||||
if (txs1.length > 0 && transactionArraysMatch(txs1, txs2)) {
|
const tx = transactions[i];
|
||||||
console.log(` [Page ${pageNum}] ✓ CONSENSUS REACHED: ${txs1.length} transactions`);
|
|
||||||
console.log(` [Page ${pageNum}] Transactions:`);
|
|
||||||
for (let i = 0; i < txs1.length; i++) {
|
|
||||||
const tx = txs1[i];
|
|
||||||
console.log(` ${(i + 1).toString().padStart(2)}. ${tx.date} | ${tx.counterparty.substring(0, 30).padEnd(30)} | ${tx.amount >= 0 ? '+' : ''}${tx.amount.toFixed(2)}`);
|
|
||||||
}
|
|
||||||
return txs1;
|
|
||||||
}
|
|
||||||
|
|
||||||
console.log(` [Page ${pageNum}] ✗ NO CONSENSUS`);
|
|
||||||
compareAndLogDifferences(txs1, txs2, pageNum);
|
|
||||||
|
|
||||||
if (attempt < MAX_ATTEMPTS) {
|
|
||||||
console.log(` [Page ${pageNum}] Retrying...`);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fallback: use last response
|
|
||||||
console.log(`\n [Page ${pageNum}] === FALLBACK (no consensus after ${MAX_ATTEMPTS} attempts) ===`);
|
|
||||||
const fallbackId = `P${pageNum}FALLBACK`;
|
|
||||||
const fallbackResponse = await queryJson(image, fallbackId);
|
|
||||||
const fallback = parseJsonResponse(fallbackResponse, fallbackId);
|
|
||||||
console.log(` [Page ${pageNum}] ~ FALLBACK RESULT: ${fallback.length} transactions`);
|
|
||||||
for (let i = 0; i < fallback.length; i++) {
|
|
||||||
const tx = fallback[i];
|
|
||||||
console.log(` ${(i + 1).toString().padStart(2)}. ${tx.date} | ${tx.counterparty.substring(0, 30).padEnd(30)} | ${tx.amount >= 0 ? '+' : ''}${tx.amount.toFixed(2)}`);
|
console.log(` ${(i + 1).toString().padStart(2)}. ${tx.date} | ${tx.counterparty.substring(0, 30).padEnd(30)} | ${tx.amount >= 0 ? '+' : ''}${tx.amount.toFixed(2)}`);
|
||||||
}
|
}
|
||||||
return fallback;
|
if (transactions.length > 10) {
|
||||||
|
console.log(` ... and ${transactions.length - 10} more transactions`);
|
||||||
|
}
|
||||||
|
|
||||||
|
return transactions;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Extract all transactions from bank statement
|
* Extract all transactions from bank statement
|
||||||
*/
|
*/
|
||||||
async function extractTransactions(images: string[]): Promise<ITransaction[]> {
|
async function extractTransactions(images: string[]): Promise<ITransaction[]> {
|
||||||
console.log(` [Vision] Processing ${images.length} page(s) with ${MODEL} (JSON consensus)`);
|
console.log(` [Vision] Processing ${images.length} page(s) with smartagent DualAgentOrchestrator`);
|
||||||
|
|
||||||
const allTransactions: ITransaction[] = [];
|
const allTransactions: ITransaction[] = [];
|
||||||
|
|
||||||
@@ -474,6 +289,80 @@ tap.test('setup: ensure Docker containers are running', async () => {
|
|||||||
console.log('\n[Setup] All containers ready!\n');
|
console.log('\n[Setup] All containers ready!\n');
|
||||||
});
|
});
|
||||||
|
|
||||||
|
tap.test('setup: initialize smartagent orchestrator', async () => {
|
||||||
|
console.log('[Setup] Initializing SmartAi and DualAgentOrchestrator...');
|
||||||
|
|
||||||
|
smartAi = new SmartAi({
|
||||||
|
ollama: {
|
||||||
|
baseUrl: OLLAMA_URL,
|
||||||
|
model: MODEL,
|
||||||
|
defaultOptions: {
|
||||||
|
num_ctx: 32768,
|
||||||
|
num_predict: 4000,
|
||||||
|
temperature: 0.1,
|
||||||
|
},
|
||||||
|
defaultTimeout: 300000, // 5 minutes for vision tasks
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
await smartAi.start();
|
||||||
|
|
||||||
|
orchestrator = new DualAgentOrchestrator({
|
||||||
|
smartAiInstance: smartAi,
|
||||||
|
defaultProvider: 'ollama',
|
||||||
|
guardianPolicyPrompt: `You are a Guardian agent overseeing bank statement extraction tasks.
|
||||||
|
|
||||||
|
APPROVE all tool calls that:
|
||||||
|
- Use the json.validate action to verify JSON output
|
||||||
|
- Are reasonable attempts to complete the extraction task
|
||||||
|
|
||||||
|
REJECT tool calls that:
|
||||||
|
- Attempt to access external resources
|
||||||
|
- Try to execute arbitrary code
|
||||||
|
- Are clearly unrelated to bank statement extraction`,
|
||||||
|
driverSystemMessage: `You are an AI assistant that extracts bank transactions from statement images.
|
||||||
|
|
||||||
|
Your task is to analyze bank statement images and extract transaction data.
|
||||||
|
You have access to a json.validate tool to verify your JSON output.
|
||||||
|
|
||||||
|
IMPORTANT: Always validate your JSON before completing the task.
|
||||||
|
|
||||||
|
## Tool Usage Format
|
||||||
|
When you need to validate JSON, output:
|
||||||
|
|
||||||
|
<tool_call>
|
||||||
|
<tool>json</tool>
|
||||||
|
<action>validate</action>
|
||||||
|
<params>{"jsonString": "YOUR_JSON_ARRAY"}</params>
|
||||||
|
</tool_call>
|
||||||
|
|
||||||
|
## Completion Format
|
||||||
|
After validation passes, complete the task:
|
||||||
|
|
||||||
|
<task_complete>
|
||||||
|
[{"date": "YYYY-MM-DD", "counterparty": "...", "amount": -123.45}, ...]
|
||||||
|
</task_complete>`,
|
||||||
|
maxIterations: 5,
|
||||||
|
maxConsecutiveRejections: 3,
|
||||||
|
onToken: (token, source) => {
|
||||||
|
if (source === 'driver') {
|
||||||
|
process.stdout.write(token);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
onProgress: (event) => {
|
||||||
|
if (event.logLevel === 'error') {
|
||||||
|
console.error(event.logMessage);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
// Register the JsonValidatorTool
|
||||||
|
orchestrator.registerTool(new JsonValidatorTool());
|
||||||
|
|
||||||
|
await orchestrator.start();
|
||||||
|
console.log('[Setup] Orchestrator initialized!\n');
|
||||||
|
});
|
||||||
|
|
||||||
tap.test('should have MiniCPM-V model loaded', async () => {
|
tap.test('should have MiniCPM-V model loaded', async () => {
|
||||||
const response = await fetch(`${OLLAMA_URL}/api/tags`);
|
const response = await fetch(`${OLLAMA_URL}/api/tags`);
|
||||||
const data = await response.json();
|
const data = await response.json();
|
||||||
@@ -482,7 +371,7 @@ tap.test('should have MiniCPM-V model loaded', async () => {
|
|||||||
});
|
});
|
||||||
|
|
||||||
const testCases = findTestCases();
|
const testCases = findTestCases();
|
||||||
console.log(`\nFound ${testCases.length} bank statement test cases (MiniCPM-V)\n`);
|
console.log(`\nFound ${testCases.length} bank statement test cases (smartagent + MiniCPM-V)\n`);
|
||||||
|
|
||||||
let passedCount = 0;
|
let passedCount = 0;
|
||||||
let failedCount = 0;
|
let failedCount = 0;
|
||||||
@@ -514,7 +403,10 @@ for (const testCase of testCases) {
|
|||||||
// Log counterparty variations (names that differ but date/amount matched)
|
// Log counterparty variations (names that differ but date/amount matched)
|
||||||
if (result.variations.length > 0) {
|
if (result.variations.length > 0) {
|
||||||
console.log(` Counterparty variations (${result.variations.length}):`);
|
console.log(` Counterparty variations (${result.variations.length}):`);
|
||||||
result.variations.forEach((v) => console.log(` ${v}`));
|
result.variations.slice(0, 5).forEach((v) => console.log(` ${v}`));
|
||||||
|
if (result.variations.length > 5) {
|
||||||
|
console.log(` ... and ${result.variations.length - 5} more variations`);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
expect(result.matches).toEqual(result.total);
|
expect(result.matches).toEqual(result.total);
|
||||||
@@ -522,12 +414,20 @@ for (const testCase of testCases) {
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
tap.test('cleanup: stop orchestrator', async () => {
|
||||||
|
if (orchestrator) {
|
||||||
|
await orchestrator.stop();
|
||||||
|
}
|
||||||
|
console.log('[Cleanup] Orchestrator stopped');
|
||||||
|
});
|
||||||
|
|
||||||
tap.test('summary', async () => {
|
tap.test('summary', async () => {
|
||||||
const total = testCases.length;
|
const total = testCases.length;
|
||||||
console.log(`\n======================================================`);
|
console.log(`\n======================================================`);
|
||||||
console.log(` Bank Statement Summary (${MODEL})`);
|
console.log(` Bank Statement Summary`);
|
||||||
|
console.log(` (smartagent + ${MODEL})`);
|
||||||
console.log(`======================================================`);
|
console.log(`======================================================`);
|
||||||
console.log(` Method: JSON per-page + consensus`);
|
console.log(` Method: DualAgentOrchestrator with vision`);
|
||||||
console.log(` Passed: ${passedCount}/${total}`);
|
console.log(` Passed: ${passedCount}/${total}`);
|
||||||
console.log(` Failed: ${failedCount}/${total}`);
|
console.log(` Failed: ${failedCount}/${total}`);
|
||||||
console.log(`======================================================\n`);
|
console.log(`======================================================\n`);
|
||||||
|
|||||||
752
test/test.bankstatements.nanonets.ts
Normal file
752
test/test.bankstatements.nanonets.ts
Normal file
@@ -0,0 +1,752 @@
|
|||||||
|
/**
|
||||||
|
* Bank statement extraction using Nanonets-OCR2-3B + GPT-OSS 20B (sequential two-stage pipeline)
|
||||||
|
*
|
||||||
|
* Stage 1: Nanonets-OCR2-3B converts ALL document pages to markdown (stop after completion)
|
||||||
|
* Stage 2: GPT-OSS 20B extracts structured JSON from saved markdown (after Nanonets stops)
|
||||||
|
*
|
||||||
|
* This approach avoids GPU contention by running services sequentially.
|
||||||
|
*/
|
||||||
|
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||||
|
import * as fs from 'fs';
|
||||||
|
import * as path from 'path';
|
||||||
|
import { execSync } from 'child_process';
|
||||||
|
import * as os from 'os';
|
||||||
|
import { ensureNanonetsOcr, ensureMiniCpm, isContainerRunning } from './helpers/docker.js';
|
||||||
|
import { SmartAi } from '@push.rocks/smartai';
|
||||||
|
import { DualAgentOrchestrator, JsonValidatorTool } from '@push.rocks/smartagent';
|
||||||
|
|
||||||
|
const NANONETS_URL = 'http://localhost:8000/v1';
|
||||||
|
const NANONETS_MODEL = 'nanonets/Nanonets-OCR2-3B';
|
||||||
|
|
||||||
|
const OLLAMA_URL = 'http://localhost:11434';
|
||||||
|
const EXTRACTION_MODEL = 'gpt-oss:20b';
|
||||||
|
|
||||||
|
// Temp directory for storing markdown between stages
|
||||||
|
const TEMP_MD_DIR = path.join(os.tmpdir(), 'nanonets-markdown');
|
||||||
|
|
||||||
|
// SmartAi instance for Ollama with optimized settings
|
||||||
|
const smartAi = new SmartAi({
|
||||||
|
ollama: {
|
||||||
|
baseUrl: OLLAMA_URL,
|
||||||
|
model: EXTRACTION_MODEL,
|
||||||
|
defaultOptions: {
|
||||||
|
num_ctx: 32768, // Larger context for long statements + thinking
|
||||||
|
temperature: 0, // Deterministic for JSON extraction
|
||||||
|
},
|
||||||
|
defaultTimeout: 600000, // 10 minute timeout for large documents
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
// DualAgentOrchestrator for structured task execution
|
||||||
|
let orchestrator: DualAgentOrchestrator;
|
||||||
|
|
||||||
|
interface ITransaction {
|
||||||
|
date: string;
|
||||||
|
counterparty: string;
|
||||||
|
amount: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface IImageData {
|
||||||
|
base64: string;
|
||||||
|
width: number;
|
||||||
|
height: number;
|
||||||
|
pageNum: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface ITestCase {
|
||||||
|
name: string;
|
||||||
|
pdfPath: string;
|
||||||
|
jsonPath: string;
|
||||||
|
markdownPath?: string;
|
||||||
|
images?: IImageData[];
|
||||||
|
}
|
||||||
|
|
||||||
|
// Nanonets-specific prompt for document OCR to markdown
|
||||||
|
const NANONETS_OCR_PROMPT = `Extract the text from the above document as if you were reading it naturally.
|
||||||
|
Return the tables in html format.
|
||||||
|
Return the equations in LaTeX representation.
|
||||||
|
If there is an image in the document and image caption is not present, add a small description inside <img></img> tag.
|
||||||
|
Watermarks should be wrapped in brackets. Ex: <watermark>OFFICIAL COPY</watermark>.
|
||||||
|
Page numbers should be wrapped in brackets. Ex: <page_number>14</page_number>.`;
|
||||||
|
|
||||||
|
// JSON extraction prompt for GPT-OSS 20B (sent AFTER the statement text is provided)
|
||||||
|
const JSON_EXTRACTION_PROMPT = `Extract ALL transactions from the bank statement. Return ONLY valid JSON array.
|
||||||
|
|
||||||
|
WHERE TO FIND DATA:
|
||||||
|
- Transactions are typically in TABLES with columns: Date, Description/Counterparty, Debit, Credit, Balance
|
||||||
|
- Look for rows with actual money movements, NOT header rows or summary totals
|
||||||
|
|
||||||
|
RULES:
|
||||||
|
1. date: Convert to YYYY-MM-DD format
|
||||||
|
2. counterparty: The name/description of who the money went to/from
|
||||||
|
3. amount: NEGATIVE for debits/withdrawals, POSITIVE for credits/deposits
|
||||||
|
4. Only include actual transactions, NOT opening/closing balances
|
||||||
|
|
||||||
|
JSON array only:
|
||||||
|
[{"date":"YYYY-MM-DD","counterparty":"NAME","amount":-25.99}]`;
|
||||||
|
|
||||||
|
// Constants for smart batching
|
||||||
|
const MAX_VISUAL_TOKENS = 28000; // ~32K context minus prompt/output headroom
|
||||||
|
const PATCH_SIZE = 14; // Qwen2.5-VL uses 14x14 patches
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Estimate visual tokens for an image based on dimensions
|
||||||
|
*/
|
||||||
|
function estimateVisualTokens(width: number, height: number): number {
|
||||||
|
return Math.ceil((width * height) / (PATCH_SIZE * PATCH_SIZE));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Process images one page at a time for reliability
|
||||||
|
*/
|
||||||
|
function batchImages(images: IImageData[]): IImageData[][] {
|
||||||
|
// One page per batch for reliable processing
|
||||||
|
return images.map(img => [img]);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convert PDF to JPEG images using ImageMagick with dimension tracking
|
||||||
|
*/
|
||||||
|
function convertPdfToImages(pdfPath: string): IImageData[] {
|
||||||
|
const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
|
||||||
|
const outputPattern = path.join(tempDir, 'page-%d.jpg');
|
||||||
|
|
||||||
|
try {
|
||||||
|
execSync(
|
||||||
|
`convert -density 150 -quality 90 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
|
||||||
|
{ stdio: 'pipe' }
|
||||||
|
);
|
||||||
|
|
||||||
|
const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.jpg')).sort();
|
||||||
|
const images: IImageData[] = [];
|
||||||
|
|
||||||
|
for (let i = 0; i < files.length; i++) {
|
||||||
|
const file = files[i];
|
||||||
|
const imagePath = path.join(tempDir, file);
|
||||||
|
const imageData = fs.readFileSync(imagePath);
|
||||||
|
|
||||||
|
// Get image dimensions using identify command
|
||||||
|
const dimensions = execSync(`identify -format "%w %h" "${imagePath}"`, { encoding: 'utf-8' }).trim();
|
||||||
|
const [width, height] = dimensions.split(' ').map(Number);
|
||||||
|
|
||||||
|
images.push({
|
||||||
|
base64: imageData.toString('base64'),
|
||||||
|
width,
|
||||||
|
height,
|
||||||
|
pageNum: i + 1,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
return images;
|
||||||
|
} finally {
|
||||||
|
fs.rmSync(tempDir, { recursive: true, force: true });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convert a batch of pages to markdown using Nanonets-OCR-s
|
||||||
|
*/
|
||||||
|
async function convertBatchToMarkdown(batch: IImageData[]): Promise<string> {
|
||||||
|
const startTime = Date.now();
|
||||||
|
const pageNums = batch.map(img => img.pageNum).join(', ');
|
||||||
|
|
||||||
|
// Build content array with all images first, then the prompt
|
||||||
|
const content: Array<{ type: string; image_url?: { url: string }; text?: string }> = [];
|
||||||
|
|
||||||
|
for (const img of batch) {
|
||||||
|
content.push({
|
||||||
|
type: 'image_url',
|
||||||
|
image_url: { url: `data:image/jpeg;base64,${img.base64}` },
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add prompt with page separator instruction if multiple pages
|
||||||
|
const promptText = batch.length > 1
|
||||||
|
? `${NANONETS_OCR_PROMPT}\n\nPlease clearly separate each page's content with "--- PAGE N ---" markers, where N is the page number starting from ${batch[0].pageNum}.`
|
||||||
|
: NANONETS_OCR_PROMPT;
|
||||||
|
|
||||||
|
content.push({ type: 'text', text: promptText });
|
||||||
|
|
||||||
|
const response = await fetch(`${NANONETS_URL}/chat/completions`, {
|
||||||
|
method: 'POST',
|
||||||
|
headers: {
|
||||||
|
'Content-Type': 'application/json',
|
||||||
|
'Authorization': 'Bearer dummy',
|
||||||
|
},
|
||||||
|
body: JSON.stringify({
|
||||||
|
model: NANONETS_MODEL,
|
||||||
|
messages: [{
|
||||||
|
role: 'user',
|
||||||
|
content,
|
||||||
|
}],
|
||||||
|
max_tokens: 4096 * batch.length, // Scale output tokens with batch size
|
||||||
|
temperature: 0.0,
|
||||||
|
}),
|
||||||
|
signal: AbortSignal.timeout(600000), // 10 minute timeout for OCR
|
||||||
|
});
|
||||||
|
|
||||||
|
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
|
||||||
|
|
||||||
|
if (!response.ok) {
|
||||||
|
const errorText = await response.text();
|
||||||
|
throw new Error(`Nanonets API error: ${response.status} - ${errorText}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
const data = await response.json();
|
||||||
|
let responseContent = (data.choices?.[0]?.message?.content || '').trim();
|
||||||
|
|
||||||
|
// For single-page batches, add page marker if not present
|
||||||
|
if (batch.length === 1 && !responseContent.includes('--- PAGE')) {
|
||||||
|
responseContent = `--- PAGE ${batch[0].pageNum} ---\n${responseContent}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log(` Pages [${pageNums}]: ${responseContent.length} chars (${elapsed}s)`);
|
||||||
|
return responseContent;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convert all pages of a document to markdown using smart batching
|
||||||
|
*/
|
||||||
|
async function convertDocumentToMarkdown(images: IImageData[], docName: string): Promise<string> {
|
||||||
|
const batches = batchImages(images);
|
||||||
|
console.log(` [${docName}] Processing ${images.length} page(s) in ${batches.length} batch(es)...`);
|
||||||
|
|
||||||
|
const markdownParts: string[] = [];
|
||||||
|
|
||||||
|
for (let i = 0; i < batches.length; i++) {
|
||||||
|
const batch = batches[i];
|
||||||
|
const batchTokens = batch.reduce((sum, img) => sum + estimateVisualTokens(img.width, img.height), 0);
|
||||||
|
console.log(` Batch ${i + 1}: ${batch.length} page(s), ~${batchTokens} tokens`);
|
||||||
|
const markdown = await convertBatchToMarkdown(batch);
|
||||||
|
markdownParts.push(markdown);
|
||||||
|
}
|
||||||
|
|
||||||
|
const fullMarkdown = markdownParts.join('\n\n');
|
||||||
|
console.log(` [${docName}] Complete: ${fullMarkdown.length} chars total`);
|
||||||
|
return fullMarkdown;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Stop Nanonets container
|
||||||
|
*/
|
||||||
|
function stopNanonets(): void {
|
||||||
|
console.log(' [Docker] Stopping Nanonets container...');
|
||||||
|
try {
|
||||||
|
execSync('docker stop nanonets-test 2>/dev/null || true', { stdio: 'pipe' });
|
||||||
|
// Wait for GPU memory to be released
|
||||||
|
execSync('sleep 5', { stdio: 'pipe' });
|
||||||
|
console.log(' [Docker] Nanonets stopped');
|
||||||
|
} catch {
|
||||||
|
console.log(' [Docker] Nanonets was not running');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Ensure GPT-OSS 20B model is available and warmed up
|
||||||
|
*/
|
||||||
|
async function ensureExtractionModel(): Promise<boolean> {
|
||||||
|
try {
|
||||||
|
const response = await fetch(`${OLLAMA_URL}/api/tags`);
|
||||||
|
if (response.ok) {
|
||||||
|
const data = await response.json();
|
||||||
|
const models = data.models || [];
|
||||||
|
if (models.some((m: { name: string }) => m.name === EXTRACTION_MODEL)) {
|
||||||
|
console.log(` [Ollama] Model available: ${EXTRACTION_MODEL}`);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log(` [Ollama] Pulling ${EXTRACTION_MODEL}...`);
|
||||||
|
const pullResponse = await fetch(`${OLLAMA_URL}/api/pull`, {
|
||||||
|
method: 'POST',
|
||||||
|
headers: { 'Content-Type': 'application/json' },
|
||||||
|
body: JSON.stringify({ name: EXTRACTION_MODEL, stream: false }),
|
||||||
|
});
|
||||||
|
|
||||||
|
return pullResponse.ok;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Try to extract valid JSON from a response string
|
||||||
|
*/
|
||||||
|
function tryExtractJson(response: string): unknown[] | null {
|
||||||
|
// Remove thinking tags
|
||||||
|
let clean = response.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
|
||||||
|
|
||||||
|
// Try task_complete tags first
|
||||||
|
const completeMatch = clean.match(/<task_complete>([\s\S]*?)<\/task_complete>/);
|
||||||
|
if (completeMatch) {
|
||||||
|
clean = completeMatch[1].trim();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try code block
|
||||||
|
const codeBlockMatch = clean.match(/```(?:json)?\s*([\s\S]*?)```/);
|
||||||
|
const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : clean;
|
||||||
|
|
||||||
|
try {
|
||||||
|
const parsed = JSON.parse(jsonStr);
|
||||||
|
if (Array.isArray(parsed)) return parsed;
|
||||||
|
} catch {
|
||||||
|
// Try to find JSON array
|
||||||
|
const jsonMatch = jsonStr.match(/\[[\s\S]*\]/);
|
||||||
|
if (jsonMatch) {
|
||||||
|
try {
|
||||||
|
const parsed = JSON.parse(sanitizeJson(jsonMatch[0]));
|
||||||
|
if (Array.isArray(parsed)) return parsed;
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Extract transactions from markdown using smartagent DualAgentOrchestrator
 * Validates JSON and retries if invalid
 *
 * Builds a prompt embedding the full statement markdown plus the shared
 * JSON_EXTRACTION_PROMPT, runs it through the module-level orchestrator,
 * and parses the resulting JSON array into transaction records.
 *
 * @param markdown - OCR'd bank statement content (Stage 1 output)
 * @param queryId - label used only for log prefixes
 * @returns parsed transactions; empty array when no JSON could be recovered
 * @throws rethrows any orchestrator failure after logging it
 */
async function extractTransactionsFromMarkdown(markdown: string, queryId: string): Promise<ITransaction[]> {
  const startTime = Date.now();

  console.log(` [${queryId}] Statement: ${markdown.length} chars`);

  // Build the extraction task with document context
  const taskPrompt = `Extract all transactions from this bank statement document and output ONLY the JSON array:

${markdown}

${JSON_EXTRACTION_PROMPT}

Before completing, validate your JSON using the json.validate tool:

<tool_call>
<tool>json</tool>
<action>validate</action>
<params>{"jsonString": "YOUR_JSON_ARRAY_HERE"}</params>
</tool_call>

Only complete after validation passes. Output the final JSON array in <task_complete></task_complete> tags.`;

  try {
    const result = await orchestrator.run(taskPrompt);
    const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
    console.log(` [${queryId}] Status: ${result.status}, Iterations: ${result.iterations} (${elapsed}s)`);

    // Try to parse JSON from result
    let jsonData: unknown[] | null = null;
    let responseText = result.result || '';

    if (result.success && responseText) {
      jsonData = tryExtractJson(responseText);
    }

    // Fallback: try parsing from history
    // (the last orchestrator message sometimes carries the JSON when
    // result.result is empty or unparseable)
    if (!jsonData && result.history?.length > 0) {
      const lastMessage = result.history[result.history.length - 1];
      if (lastMessage?.content) {
        responseText = lastMessage.content;
        jsonData = tryExtractJson(responseText);
      }
    }

    if (!jsonData) {
      console.log(` [${queryId}] Failed to parse JSON`);
      return [];
    }

    // Convert to transactions; missing fields degrade to ''/0 rather than throwing.
    const txs = jsonData.map((tx: any) => ({
      date: String(tx.date || ''),
      counterparty: String(tx.counterparty || tx.description || ''),
      amount: parseAmount(tx.amount),
    }));
    console.log(` [${queryId}] Parsed ${txs.length} transactions`);
    return txs;
  } catch (error) {
    const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
    console.log(` [${queryId}] ERROR: ${error} (${elapsed}s)`);
    throw error;
  }
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Sanitize JSON string
|
||||||
|
*/
|
||||||
|
function sanitizeJson(jsonStr: string): string {
|
||||||
|
let s = jsonStr;
|
||||||
|
s = s.replace(/"amount"\s*:\s*\+/g, '"amount": ');
|
||||||
|
s = s.replace(/:\s*\+(\d)/g, ': $1');
|
||||||
|
s = s.replace(/"amount"\s*:\s*(-?)(\d{1,3})\.(\d{3})\.(\d{2})\b/g, '"amount": $1$2$3.$4');
|
||||||
|
s = s.replace(/,\s*([}\]])/g, '$1');
|
||||||
|
s = s.replace(/"([^"\\]*)\n([^"]*)"/g, '"$1 $2"');
|
||||||
|
s = s.replace(/"([^"\\]*)\t([^"]*)"/g, '"$1 $2"');
|
||||||
|
s = s.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F]/g, ' ');
|
||||||
|
return s;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Parse amount from various formats
|
||||||
|
*/
|
||||||
|
function parseAmount(value: unknown): number {
|
||||||
|
if (typeof value === 'number') return value;
|
||||||
|
if (typeof value !== 'string') return 0;
|
||||||
|
|
||||||
|
let s = value.replace(/[€$£\s]/g, '').replace('−', '-').replace('–', '-');
|
||||||
|
if (s.includes(',') && s.indexOf(',') > s.lastIndexOf('.')) {
|
||||||
|
s = s.replace(/\./g, '').replace(',', '.');
|
||||||
|
} else {
|
||||||
|
s = s.replace(/,/g, '');
|
||||||
|
}
|
||||||
|
return parseFloat(s) || 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Parse JSON response into transactions
|
||||||
|
*/
|
||||||
|
function parseJsonResponse(response: string, queryId: string): ITransaction[] {
|
||||||
|
// Remove thinking tags if present
|
||||||
|
let cleanResponse = response.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
|
||||||
|
|
||||||
|
// Debug: show what we're working with
|
||||||
|
console.log(` [${queryId}] Response preview: ${cleanResponse.substring(0, 300)}...`);
|
||||||
|
|
||||||
|
const codeBlockMatch = cleanResponse.match(/```(?:json)?\s*([\s\S]*?)```/);
|
||||||
|
let jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : cleanResponse;
|
||||||
|
jsonStr = sanitizeJson(jsonStr);
|
||||||
|
|
||||||
|
try {
|
||||||
|
const parsed = JSON.parse(jsonStr);
|
||||||
|
if (Array.isArray(parsed)) {
|
||||||
|
const txs = parsed.map(tx => ({
|
||||||
|
date: String(tx.date || ''),
|
||||||
|
counterparty: String(tx.counterparty || tx.description || ''),
|
||||||
|
amount: parseAmount(tx.amount),
|
||||||
|
}));
|
||||||
|
console.log(` [${queryId}] Parsed ${txs.length} transactions`);
|
||||||
|
return txs;
|
||||||
|
}
|
||||||
|
} catch (e) {
|
||||||
|
// Try to find a JSON array in the text
|
||||||
|
const arrayMatch = jsonStr.match(/\[[\s\S]*\]/);
|
||||||
|
if (arrayMatch) {
|
||||||
|
console.log(` [${queryId}] Array match found: ${arrayMatch[0].length} chars`);
|
||||||
|
try {
|
||||||
|
const parsed = JSON.parse(sanitizeJson(arrayMatch[0]));
|
||||||
|
if (Array.isArray(parsed)) {
|
||||||
|
const txs = parsed.map(tx => ({
|
||||||
|
date: String(tx.date || ''),
|
||||||
|
counterparty: String(tx.counterparty || tx.description || ''),
|
||||||
|
amount: parseAmount(tx.amount),
|
||||||
|
}));
|
||||||
|
console.log(` [${queryId}] Parsed ${txs.length} transactions (array match)`);
|
||||||
|
return txs;
|
||||||
|
}
|
||||||
|
} catch (innerErr) {
|
||||||
|
console.log(` [${queryId}] Array parse error: ${(innerErr as Error).message}`);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
console.log(` [${queryId}] No JSON array found in response`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log(` [${queryId}] PARSE FAILED`);
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract transactions (single pass)
|
||||||
|
*/
|
||||||
|
async function extractTransactions(markdown: string, docName: string): Promise<ITransaction[]> {
|
||||||
|
console.log(` [${docName}] Extracting...`);
|
||||||
|
const txs = await extractTransactionsFromMarkdown(markdown, docName);
|
||||||
|
console.log(` [${docName}] Extracted ${txs.length} transactions`);
|
||||||
|
return txs;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Compare transactions
|
||||||
|
*/
|
||||||
|
function compareTransactions(
|
||||||
|
extracted: ITransaction[],
|
||||||
|
expected: ITransaction[]
|
||||||
|
): { matches: number; total: number; errors: string[] } {
|
||||||
|
const errors: string[] = [];
|
||||||
|
let matches = 0;
|
||||||
|
|
||||||
|
for (let i = 0; i < expected.length; i++) {
|
||||||
|
const exp = expected[i];
|
||||||
|
const ext = extracted[i];
|
||||||
|
|
||||||
|
if (!ext) {
|
||||||
|
errors.push(`Missing tx ${i}: ${exp.date} ${exp.counterparty}`);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
const dateMatch = ext.date === exp.date;
|
||||||
|
const amountMatch = Math.abs(ext.amount - exp.amount) < 0.01;
|
||||||
|
|
||||||
|
if (dateMatch && amountMatch) {
|
||||||
|
matches++;
|
||||||
|
} else {
|
||||||
|
errors.push(`Mismatch ${i}: exp ${exp.date}/${exp.amount}, got ${ext.date}/${ext.amount}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (extracted.length > expected.length) {
|
||||||
|
errors.push(`Extra transactions: ${extracted.length - expected.length}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
return { matches, total: expected.length, errors };
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Find all test cases
|
||||||
|
*/
|
||||||
|
function findTestCases(): ITestCase[] {
|
||||||
|
const testDir = path.join(process.cwd(), '.nogit');
|
||||||
|
if (!fs.existsSync(testDir)) return [];
|
||||||
|
|
||||||
|
const files = fs.readdirSync(testDir);
|
||||||
|
const testCases: ITestCase[] = [];
|
||||||
|
|
||||||
|
for (const pdf of files.filter((f: string) => f.endsWith('.pdf'))) {
|
||||||
|
const baseName = pdf.replace('.pdf', '');
|
||||||
|
const jsonFile = `${baseName}.json`;
|
||||||
|
if (files.includes(jsonFile)) {
|
||||||
|
testCases.push({
|
||||||
|
name: baseName,
|
||||||
|
pdfPath: path.join(testDir, pdf),
|
||||||
|
jsonPath: path.join(testDir, jsonFile),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return testCases.sort((a, b) => a.name.localeCompare(b.name));
|
||||||
|
}
|
||||||
|
|
||||||
|
// ============ TESTS ============

// Discover all PDF+JSON test pairs up front; the tap tests below are
// generated from this list.
const testCases = findTestCases();
console.log(`\nFound ${testCases.length} bank statement test cases\n`);

// Ensure temp directory exists
// (Stage 1 writes one .md file per document into TEMP_MD_DIR)
if (!fs.existsSync(TEMP_MD_DIR)) {
  fs.mkdirSync(TEMP_MD_DIR, { recursive: true });
}
|
||||||
|
|
||||||
|
// -------- STAGE 1: OCR with Nanonets --------
|
||||||
|
|
||||||
|
// Check if all markdown files already exist
|
||||||
|
function allMarkdownFilesExist(): boolean {
|
||||||
|
for (const tc of testCases) {
|
||||||
|
const mdPath = path.join(TEMP_MD_DIR, `${tc.name}.md`);
|
||||||
|
if (!fs.existsSync(mdPath)) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Track whether we need to run Stage 1
// (decided once at module load: skipped when all OCR output from a
// previous run already exists on disk)
let stage1Needed = !allMarkdownFilesExist();
|
||||||
|
|
||||||
|
tap.test('Stage 1: Setup Nanonets', async () => {
  console.log('\n========== STAGE 1: Nanonets OCR ==========\n');

  if (!stage1Needed) {
    console.log(' [SKIP] All markdown files already exist, skipping Nanonets setup');
    return;
  }

  // Boot the Nanonets OCR container; the test fails if it cannot start.
  const ok = await ensureNanonetsOcr();
  expect(ok).toBeTrue();
});
|
||||||
|
|
||||||
|
tap.test('Stage 1: Convert all documents to markdown', async () => {
  if (!stage1Needed) {
    console.log(' [SKIP] Using existing markdown files from previous run\n');
    // Load existing markdown paths
    for (const tc of testCases) {
      tc.markdownPath = path.join(TEMP_MD_DIR, `${tc.name}.md`);
      console.log(` Loaded: ${tc.markdownPath}`);
    }
    return;
  }

  console.log('\n Converting all PDFs to markdown with Nanonets-OCR-s...\n');

  for (const tc of testCases) {
    console.log(`\n === ${tc.name} ===`);

    // Convert PDF to images
    const images = convertPdfToImages(tc.pdfPath);
    console.log(` Pages: ${images.length}`);

    // Convert to markdown
    const markdown = await convertDocumentToMarkdown(images, tc.name);

    // Save markdown to temp file so Stage 2 can run without re-OCR
    const mdPath = path.join(TEMP_MD_DIR, `${tc.name}.md`);
    fs.writeFileSync(mdPath, markdown);
    tc.markdownPath = mdPath;
    console.log(` Saved: ${mdPath}`);
  }

  console.log('\n Stage 1 complete: All documents converted to markdown\n');
});
|
||||||
|
|
||||||
|
tap.test('Stage 1: Stop Nanonets', async () => {
  if (!stage1Needed) {
    console.log(' [SKIP] Nanonets was not started');
    return;
  }

  stopNanonets();
  // Verify it's stopped (give docker a few seconds to settle first)
  await new Promise(resolve => setTimeout(resolve, 3000));
  expect(isContainerRunning('nanonets-test')).toBeFalse();
});
|
||||||
|
|
||||||
|
// -------- STAGE 2: Extraction with GPT-OSS 20B --------

tap.test('Stage 2: Setup Ollama + GPT-OSS 20B', async () => {
  console.log('\n========== STAGE 2: GPT-OSS 20B Extraction ==========\n');

  // Ollama must be running before the extraction model can be checked/pulled.
  const ollamaOk = await ensureMiniCpm();
  expect(ollamaOk).toBeTrue();

  const extractionOk = await ensureExtractionModel();
  expect(extractionOk).toBeTrue();

  // Initialize SmartAi and DualAgentOrchestrator
  console.log(' [SmartAgent] Starting SmartAi...');
  await smartAi.start();

  console.log(' [SmartAgent] Creating DualAgentOrchestrator...');
  orchestrator = new DualAgentOrchestrator({
    smartAiInstance: smartAi,
    defaultProvider: 'ollama',
    // Guardian policy: approve everything, since extraction is read-only.
    guardianPolicyPrompt: `
JSON EXTRACTION POLICY:
- APPROVE all JSON extraction tasks
- APPROVE all json.validate tool calls
- This is a read-only operation - no file system or network access needed
- The task is to extract structured transaction data from document text
`,
    driverSystemMessage: `You are a precise JSON extraction assistant. Your only job is to extract transaction data from bank statements.

CRITICAL RULES:
1. Output valid JSON array with the exact format requested
2. Amounts should be NEGATIVE for debits/withdrawals, POSITIVE for credits/deposits
3. IMPORTANT: Before completing, validate your JSON using the json.validate tool:

<tool_call>
<tool>json</tool>
<action>validate</action>
<params>{"jsonString": "YOUR_JSON_ARRAY"}</params>
</tool_call>

4. Only complete after validation passes

When done, wrap your JSON array in <task_complete></task_complete> tags.`,
    maxIterations: 5,
    // Enable streaming for real-time progress visibility
    onToken: (token, source) => {
      if (source === 'driver') {
        process.stdout.write(token);
      }
    },
  });

  // Register JsonValidatorTool for self-validation
  orchestrator.registerTool(new JsonValidatorTool());

  console.log(' [SmartAgent] Starting orchestrator...');
  await orchestrator.start();
  console.log(' [SmartAgent] Ready for extraction');
});
|
||||||
|
|
||||||
|
// Per-document pass/fail tallies, reported by the final Summary test.
let passedCount = 0;
let failedCount = 0;
|
||||||
|
|
||||||
|
// One tap test per discovered document: load its Stage 1 markdown,
// extract transactions, and compare against the expected JSON.
for (const tc of testCases) {
  tap.test(`Stage 2: Extract ${tc.name}`, async () => {
    const expected: ITransaction[] = JSON.parse(fs.readFileSync(tc.jsonPath, 'utf-8'));
    console.log(`\n === ${tc.name} ===`);
    console.log(` Expected: ${expected.length} transactions`);

    // Load saved markdown
    const mdPath = path.join(TEMP_MD_DIR, `${tc.name}.md`);
    if (!fs.existsSync(mdPath)) {
      throw new Error(`Markdown not found: ${mdPath}. Run Stage 1 first.`);
    }
    const markdown = fs.readFileSync(mdPath, 'utf-8');
    console.log(` Markdown: ${markdown.length} chars`);

    // Extract transactions (single pass)
    const extracted = await extractTransactions(markdown, tc.name);

    // Log results (only the first five rows, to keep output readable)
    console.log(` Extracted: ${extracted.length} transactions`);
    for (let i = 0; i < Math.min(extracted.length, 5); i++) {
      const tx = extracted[i];
      console.log(` ${i + 1}. ${tx.date} | ${tx.counterparty.substring(0, 25).padEnd(25)} | ${tx.amount >= 0 ? '+' : ''}${tx.amount.toFixed(2)}`);
    }
    if (extracted.length > 5) {
      console.log(` ... and ${extracted.length - 5} more`);
    }

    // Compare
    const result = compareTransactions(extracted, expected);
    // A pass requires every expected row to match AND no extra rows.
    const pass = result.matches === result.total && extracted.length === expected.length;

    if (pass) {
      passedCount++;
      console.log(` Result: PASS (${result.matches}/${result.total})`);
    } else {
      failedCount++;
      console.log(` Result: FAIL (${result.matches}/${result.total})`);
      result.errors.slice(0, 5).forEach(e => console.log(` - ${e}`));
    }

    expect(result.matches).toEqual(result.total);
    expect(extracted.length).toEqual(expected.length);
  });
}
|
||||||
|
|
||||||
|
tap.test('Summary', async () => {
  // Cleanup orchestrator and SmartAi
  if (orchestrator) {
    console.log('\n [SmartAgent] Stopping orchestrator...');
    await orchestrator.stop();
  }
  console.log(' [SmartAgent] Stopping SmartAi...');
  await smartAi.stop();

  console.log(`\n======================================================`);
  console.log(` Bank Statement Summary (Nanonets + SmartAgent)`);
  console.log(`======================================================`);
  console.log(` Stage 1: Nanonets-OCR-s (document -> markdown)`);
  console.log(` Stage 2: GPT-OSS 20B + SmartAgent (markdown -> JSON)`);
  console.log(` Passed: ${passedCount}/${testCases.length}`);
  console.log(` Failed: ${failedCount}/${testCases.length}`);
  console.log(`======================================================\n`);

  // Only cleanup temp files if ALL tests passed
  // (a failed run keeps the OCR output around for debugging)
  if (failedCount === 0 && passedCount === testCases.length) {
    try {
      fs.rmSync(TEMP_MD_DIR, { recursive: true, force: true });
      console.log(` Cleaned up temp directory: ${TEMP_MD_DIR}\n`);
    } catch {
      // Ignore
    }
  } else {
    console.log(` Keeping temp directory for debugging: ${TEMP_MD_DIR}\n`);
  }
});

export default tap.start();
|
||||||
440
test/test.invoices.extraction.ts
Normal file
440
test/test.invoices.extraction.ts
Normal file
@@ -0,0 +1,440 @@
|
|||||||
|
/**
|
||||||
|
* Invoice extraction tuning - uses pre-generated markdown files
|
||||||
|
*
|
||||||
|
* Skips OCR stage, only runs GPT-OSS extraction on existing .debug.md files.
|
||||||
|
* Use this to quickly iterate on extraction prompts and logic.
|
||||||
|
*
|
||||||
|
* Run with: tstest test/test.invoices.extraction.ts --verbose
|
||||||
|
*/
|
||||||
|
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||||
|
import * as fs from 'fs';
|
||||||
|
import * as path from 'path';
|
||||||
|
import { ensureMiniCpm } from './helpers/docker.js';
|
||||||
|
|
||||||
|
// Local Ollama endpoint used for all extraction calls in this file.
const OLLAMA_URL = 'http://localhost:11434';
// Ollama model tag used for invoice field extraction.
const EXTRACTION_MODEL = 'gpt-oss:20b';

// Test these specific invoices (must have .debug.md files)
const TEST_INVOICES = [
  'consensus_2021-09',
  'hetzner_2022-04',
  'qonto_2021-08',
  'qonto_2021-09',
];
|
||||||
|
|
||||||
|
// Normalized invoice fields, mirroring the JSON schema the extraction
// prompt asks the model to emit.
interface IInvoice {
  invoice_number: string; // value only, without the "Invoice no." label
  invoice_date: string; // normalized to YYYY-MM-DD
  vendor_name: string; // company issuing the invoice
  currency: string; // EUR, USD or GBP
  net_amount: number; // total before tax
  vat_amount: number; // tax amount
  total_amount: number; // final total including tax
}
|
||||||
|
|
||||||
|
// One invoice test case: pre-generated markdown plus expected values.
interface ITestCase {
  name: string; // invoice base name (e.g. 'hetzner_2022-04')
  markdownPath: string; // path to the pre-OCR'd markdown file
  jsonPath: string; // path to the expected-values JSON file
}
|
||||||
|
|
||||||
|
// JSON extraction prompt for GPT-OSS 20B (sent AFTER the invoice text is provided)
// Kept as one shared constant so every test case gets identical
// instructions; the final line shows the exact JSON shape expected.
const JSON_EXTRACTION_PROMPT = `Extract key fields from the invoice. Return ONLY valid JSON.

WHERE TO FIND DATA:
- invoice_number, invoice_date, vendor_name: Look in the HEADER section at the TOP of PAGE 1 (near "Invoice no.", "Invoice date:", "Rechnungsnummer")
- net_amount, vat_amount, total_amount: Look in the SUMMARY section at the BOTTOM (look for "Total", "Amount due", "Gesamtbetrag")

RULES:
1. invoice_number: Extract ONLY the value (e.g., "R0015632540"), NOT the label "Invoice no."
2. invoice_date: Convert to YYYY-MM-DD format (e.g., "14/04/2022" → "2022-04-14")
3. vendor_name: The company issuing the invoice
4. currency: EUR, USD, or GBP
5. net_amount: Total before tax
6. vat_amount: Tax amount
7. total_amount: Final total with tax

JSON only:
{"invoice_number":"X","invoice_date":"YYYY-MM-DD","vendor_name":"X","currency":"EUR","net_amount":0,"vat_amount":0,"total_amount":0}`;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Ensure GPT-OSS 20B model is available
|
||||||
|
*/
|
||||||
|
async function ensureExtractionModel(): Promise<boolean> {
|
||||||
|
try {
|
||||||
|
const response = await fetch(`${OLLAMA_URL}/api/tags`);
|
||||||
|
if (response.ok) {
|
||||||
|
const data = await response.json();
|
||||||
|
const models = data.models || [];
|
||||||
|
if (models.some((m: { name: string }) => m.name === EXTRACTION_MODEL)) {
|
||||||
|
console.log(` [Ollama] Model available: ${EXTRACTION_MODEL}`);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log(` [Ollama] Pulling ${EXTRACTION_MODEL}...`);
|
||||||
|
const pullResponse = await fetch(`${OLLAMA_URL}/api/pull`, {
|
||||||
|
method: 'POST',
|
||||||
|
headers: { 'Content-Type': 'application/json' },
|
||||||
|
body: JSON.stringify({ name: EXTRACTION_MODEL, stream: false }),
|
||||||
|
});
|
||||||
|
|
||||||
|
return pullResponse.ok;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Parse amount from string (handles European format)
|
||||||
|
*/
|
||||||
|
function parseAmount(s: string | number | undefined): number {
|
||||||
|
if (s === undefined || s === null) return 0;
|
||||||
|
if (typeof s === 'number') return s;
|
||||||
|
const match = s.match(/([\d.,]+)/);
|
||||||
|
if (!match) return 0;
|
||||||
|
const numStr = match[1];
|
||||||
|
const normalized = numStr.includes(',') && numStr.indexOf(',') > numStr.lastIndexOf('.')
|
||||||
|
? numStr.replace(/\./g, '').replace(',', '.')
|
||||||
|
: numStr.replace(/,/g, '');
|
||||||
|
return parseFloat(normalized) || 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract invoice number - minimal normalization
|
||||||
|
*/
|
||||||
|
function extractInvoiceNumber(s: string | undefined): string {
|
||||||
|
if (!s) return '';
|
||||||
|
return s.replace(/\*\*/g, '').replace(/`/g, '').trim();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract date (YYYY-MM-DD) from response
|
||||||
|
*/
|
||||||
|
function extractDate(s: string | undefined): string {
|
||||||
|
if (!s) return '';
|
||||||
|
let clean = s.replace(/\*\*/g, '').replace(/`/g, '').trim();
|
||||||
|
const isoMatch = clean.match(/(\d{4}-\d{2}-\d{2})/);
|
||||||
|
if (isoMatch) return isoMatch[1];
|
||||||
|
const dmyMatch = clean.match(/(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})/);
|
||||||
|
if (dmyMatch) {
|
||||||
|
return `${dmyMatch[3]}-${dmyMatch[2].padStart(2, '0')}-${dmyMatch[1].padStart(2, '0')}`;
|
||||||
|
}
|
||||||
|
return clean.replace(/[^\d-]/g, '').trim();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract currency
|
||||||
|
*/
|
||||||
|
function extractCurrency(s: string | undefined): string {
|
||||||
|
if (!s) return 'EUR';
|
||||||
|
const upper = s.toUpperCase();
|
||||||
|
if (upper.includes('EUR') || upper.includes('€')) return 'EUR';
|
||||||
|
if (upper.includes('USD') || upper.includes('$')) return 'USD';
|
||||||
|
if (upper.includes('GBP') || upper.includes('£')) return 'GBP';
|
||||||
|
return 'EUR';
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract JSON from response
|
||||||
|
*/
|
||||||
|
function extractJsonFromResponse(response: string): Record<string, unknown> | null {
|
||||||
|
let cleanResponse = response.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
|
||||||
|
const codeBlockMatch = cleanResponse.match(/```(?:json)?\s*([\s\S]*?)```/);
|
||||||
|
const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : cleanResponse;
|
||||||
|
|
||||||
|
try {
|
||||||
|
return JSON.parse(jsonStr);
|
||||||
|
} catch {
|
||||||
|
const jsonMatch = jsonStr.match(/\{[\s\S]*\}/);
|
||||||
|
if (jsonMatch) {
|
||||||
|
try {
|
||||||
|
return JSON.parse(jsonMatch[0]);
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Parse JSON response into IInvoice
|
||||||
|
*/
|
||||||
|
function parseJsonToInvoice(response: string): IInvoice | null {
|
||||||
|
const parsed = extractJsonFromResponse(response);
|
||||||
|
if (!parsed) return null;
|
||||||
|
|
||||||
|
return {
|
||||||
|
invoice_number: extractInvoiceNumber(String(parsed.invoice_number || '')),
|
||||||
|
invoice_date: extractDate(String(parsed.invoice_date || '')),
|
||||||
|
vendor_name: String(parsed.vendor_name || '').replace(/\*\*/g, '').replace(/`/g, '').trim(),
|
||||||
|
currency: extractCurrency(String(parsed.currency || '')),
|
||||||
|
net_amount: parseAmount(parsed.net_amount as string | number),
|
||||||
|
vat_amount: parseAmount(parsed.vat_amount as string | number),
|
||||||
|
total_amount: parseAmount(parsed.total_amount as string | number),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Extract invoice from markdown using GPT-OSS 20B (streaming)
 *
 * Sends a multi-turn chat (priming turns, the invoice text, then the
 * extraction prompt) to Ollama's /api/chat with streaming enabled,
 * echoes thinking/output tokens to stdout as they arrive, and parses
 * the accumulated output into an IInvoice.
 *
 * @param markdown - pre-OCR'd invoice text
 * @param queryId - label used only for log prefixes
 * @returns parsed invoice, or null when the response held no JSON
 * @throws Error on a non-2xx HTTP response from Ollama
 */
async function extractInvoiceFromMarkdown(markdown: string, queryId: string): Promise<IInvoice | null> {
  const startTime = Date.now();

  console.log(` [${queryId}] Invoice: ${markdown.length} chars, Prompt: ${JSON_EXTRACTION_PROMPT.length} chars`);

  const response = await fetch(`${OLLAMA_URL}/api/chat`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: EXTRACTION_MODEL,
      // NOTE(review): the leading small-talk turns and fabricated
      // assistant acknowledgement appear to prime the model before the
      // extraction request -- confirm they are intentional.
      messages: [
        { role: 'user', content: 'Hi there, how are you?' },
        { role: 'assistant', content: 'Good, how can I help you today?' },
        { role: 'user', content: `Here is an invoice document:\n\n${markdown}` },
        { role: 'assistant', content: 'I have read the invoice document you provided. I can see all the text content. What would you like me to do with it?' },
        { role: 'user', content: JSON_EXTRACTION_PROMPT },
      ],
      stream: true,
      options: {
        num_ctx: 32768, // Larger context for long invoices + thinking
        temperature: 0, // Deterministic for JSON extraction
      },
    }),
    signal: AbortSignal.timeout(120000), // 2 min timeout
  });

  if (!response.ok) {
    const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
    console.log(` [${queryId}] ERROR: ${response.status} (${elapsed}s)`);
    throw new Error(`Ollama API error: ${response.status}`);
  }

  // Stream the response
  let content = '';
  let thinkingContent = '';
  let thinkingStarted = false; // whether the THINKING banner was printed
  let outputStarted = false; // whether the OUTPUT banner was printed
  const reader = response.body!.getReader();
  const decoder = new TextDecoder();

  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;

      const chunk = decoder.decode(value, { stream: true });

      // Each non-empty line of the chunk is an independent JSON event.
      for (const line of chunk.split('\n').filter(l => l.trim())) {
        try {
          const json = JSON.parse(line);

          const thinking = json.message?.thinking || '';
          if (thinking) {
            if (!thinkingStarted) {
              process.stdout.write(` [${queryId}] THINKING: `);
              thinkingStarted = true;
            }
            process.stdout.write(thinking);
            thinkingContent += thinking;
          }

          const token = json.message?.content || '';
          if (token) {
            if (!outputStarted) {
              if (thinkingStarted) process.stdout.write('\n');
              process.stdout.write(` [${queryId}] OUTPUT: `);
              outputStarted = true;
            }
            process.stdout.write(token);
            content += token;
          }
        } catch {
          // Ignore parse errors for partial chunks
        }
      }
    }
  } finally {
    // Keep the console tidy even if reading throws mid-stream.
    if (thinkingStarted || outputStarted) process.stdout.write('\n');
  }

  const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
  console.log(` [${queryId}] Done: ${thinkingContent.length} thinking, ${content.length} output (${elapsed}s)`);

  return parseJsonToInvoice(content);
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Normalize date to YYYY-MM-DD
|
||||||
|
*/
|
||||||
|
function normalizeDate(dateStr: string | null): string {
|
||||||
|
if (!dateStr) return '';
|
||||||
|
if (/^\d{4}-\d{2}-\d{2}$/.test(dateStr)) return dateStr;
|
||||||
|
|
||||||
|
const monthMap: Record<string, string> = {
|
||||||
|
JAN: '01', FEB: '02', MAR: '03', APR: '04', MAY: '05', JUN: '06',
|
||||||
|
JUL: '07', AUG: '08', SEP: '09', OCT: '10', NOV: '11', DEC: '12',
|
||||||
|
};
|
||||||
|
|
||||||
|
let match = dateStr.match(/^(\d{1,2})-([A-Z]{3})-(\d{4})$/i);
|
||||||
|
if (match) {
|
||||||
|
return `${match[3]}-${monthMap[match[2].toUpperCase()] || '01'}-${match[1].padStart(2, '0')}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
match = dateStr.match(/^(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})$/);
|
||||||
|
if (match) {
|
||||||
|
return `${match[3]}-${match[2].padStart(2, '0')}-${match[1].padStart(2, '0')}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
return dateStr;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Normalize invoice number for comparison (remove spaces, lowercase)
|
||||||
|
*/
|
||||||
|
function normalizeInvoiceNumber(s: string): string {
|
||||||
|
return s.replace(/\s+/g, '').toLowerCase();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Compare extracted invoice against expected
|
||||||
|
*/
|
||||||
|
function compareInvoice(
|
||||||
|
extracted: IInvoice,
|
||||||
|
expected: IInvoice
|
||||||
|
): { match: boolean; errors: string[] } {
|
||||||
|
const errors: string[] = [];
|
||||||
|
|
||||||
|
// Invoice number - normalize spaces for comparison
|
||||||
|
const extNum = normalizeInvoiceNumber(extracted.invoice_number || '');
|
||||||
|
const expNum = normalizeInvoiceNumber(expected.invoice_number || '');
|
||||||
|
if (extNum !== expNum) {
|
||||||
|
errors.push(`invoice_number: expected "${expected.invoice_number}", got "${extracted.invoice_number}"`);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (normalizeDate(extracted.invoice_date) !== normalizeDate(expected.invoice_date)) {
|
||||||
|
errors.push(`invoice_date: expected "${expected.invoice_date}", got "${extracted.invoice_date}"`);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (Math.abs(extracted.total_amount - expected.total_amount) > 0.02) {
|
||||||
|
errors.push(`total_amount: expected ${expected.total_amount}, got ${extracted.total_amount}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (extracted.currency?.toUpperCase() !== expected.currency?.toUpperCase()) {
|
||||||
|
errors.push(`currency: expected "${expected.currency}", got "${extracted.currency}"`);
|
||||||
|
}
|
||||||
|
|
||||||
|
return { match: errors.length === 0, errors };
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Find test cases with existing debug markdown
|
||||||
|
*/
|
||||||
|
function findTestCases(): ITestCase[] {
|
||||||
|
const invoicesDir = path.join(process.cwd(), '.nogit/invoices');
|
||||||
|
if (!fs.existsSync(invoicesDir)) return [];
|
||||||
|
|
||||||
|
const testCases: ITestCase[] = [];
|
||||||
|
|
||||||
|
for (const invoiceName of TEST_INVOICES) {
|
||||||
|
const markdownPath = path.join(invoicesDir, `${invoiceName}.debug.md`);
|
||||||
|
const jsonPath = path.join(invoicesDir, `${invoiceName}.json`);
|
||||||
|
|
||||||
|
if (fs.existsSync(markdownPath) && fs.existsSync(jsonPath)) {
|
||||||
|
testCases.push({
|
||||||
|
name: invoiceName,
|
||||||
|
markdownPath,
|
||||||
|
jsonPath,
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
if (!fs.existsSync(markdownPath)) {
|
||||||
|
console.warn(`Warning: Missing markdown: ${markdownPath}`);
|
||||||
|
}
|
||||||
|
if (!fs.existsSync(jsonPath)) {
|
||||||
|
console.warn(`Warning: Missing JSON: ${jsonPath}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return testCases;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ============ TESTS ============

// Discover test cases at module load time so the per-invoice tap tests
// below can be registered dynamically, one per invoice.
const testCases = findTestCases();
console.log(`\n========================================`);
console.log(`  EXTRACTION TUNING TEST`);
console.log(`  (Skips OCR, uses existing .debug.md)`);
console.log(`========================================`);
console.log(`  Testing ${testCases.length} invoices:`);
for (const tc of testCases) {
  console.log(`    - ${tc.name}`);
}
console.log(`========================================\n`);

// Setup: Ollama runtime plus the GPT-OSS 20B extraction model must both be
// available before any extraction test runs.
tap.test('Setup Ollama + GPT-OSS 20B', async () => {
  const ollamaOk = await ensureMiniCpm();
  expect(ollamaOk).toBeTrue();

  const extractionOk = await ensureExtractionModel();
  expect(extractionOk).toBeTrue();
});

// Shared counters mutated by the per-invoice tests and read by the summary.
let passedCount = 0;
let failedCount = 0;

// One tap test per invoice: read the cached OCR markdown, run LLM
// extraction, and compare against the expected JSON. Mismatches are logged
// and counted but do not throw - accuracy is reported in the Summary test.
for (const tc of testCases) {
  tap.test(`Extract ${tc.name}`, async () => {
    const expected: IInvoice = JSON.parse(fs.readFileSync(tc.jsonPath, 'utf-8'));
    const markdown = fs.readFileSync(tc.markdownPath, 'utf-8');

    console.log(`\n  ========================================`);
    console.log(`  === ${tc.name} ===`);
    console.log(`  ========================================`);
    console.log(`  EXPECTED: ${expected.invoice_number} | ${expected.invoice_date} | ${expected.total_amount} ${expected.currency}`);
    console.log(`  Markdown: ${markdown.length} chars`);

    const startTime = Date.now();

    const extracted = await extractInvoiceFromMarkdown(markdown, tc.name);

    // A null return means no JSON could be recovered from the model output.
    if (!extracted) {
      failedCount++;
      console.log(`\n  Result: ✗ FAILED TO PARSE (${((Date.now() - startTime) / 1000).toFixed(1)}s)`);
      return;
    }

    const elapsedMs = Date.now() - startTime;

    console.log(`  EXTRACTED: ${extracted.invoice_number} | ${extracted.invoice_date} | ${extracted.total_amount} ${extracted.currency}`);

    const result = compareInvoice(extracted, expected);

    if (result.match) {
      passedCount++;
      console.log(`\n  Result: ✓ MATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
    } else {
      failedCount++;
      console.log(`\n  Result: ✗ MISMATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
      console.log(`  ERRORS:`);
      result.errors.forEach(e => console.log(`    - ${e}`));
    }
  });
}

// Final summary: overall pass/fail counts and accuracy percentage.
// Registered last so tap runs it after all per-invoice tests.
tap.test('Summary', async () => {
  const totalInvoices = testCases.length;
  const accuracy = totalInvoices > 0 ? (passedCount / totalInvoices) * 100 : 0;

  console.log(`\n========================================`);
  console.log(`  Extraction Tuning Summary`);
  console.log(`========================================`);
  console.log(`  Model: ${EXTRACTION_MODEL}`);
  console.log(`  Passed: ${passedCount}/${totalInvoices}`);
  console.log(`  Failed: ${failedCount}/${totalInvoices}`);
  console.log(`  Accuracy: ${accuracy.toFixed(1)}%`);
  console.log(`========================================\n`);
});

export default tap.start();
|
||||||
695
test/test.invoices.failed.ts
Normal file
695
test/test.invoices.failed.ts
Normal file
@@ -0,0 +1,695 @@
|
|||||||
|
/**
|
||||||
|
* Focused test for failed invoice extractions
|
||||||
|
*
|
||||||
|
* Tests only the 4 invoices that failed in the main test:
|
||||||
|
* - consensus_2021-09: invoice_number "2021/1384" → "20211384" (slash stripped)
|
||||||
|
* - hetzner_2022-04: model hallucinated after 281s thinking
|
||||||
|
* - qonto_2021-08: invoice_number "08-21-INVOICE-410870" → "4108705" (prefix stripped)
|
||||||
|
* - qonto_2021-09: invoice_number "09-21-INVOICE-4303642" → "4303642" (prefix stripped)
|
||||||
|
*
|
||||||
|
* Run with: tstest test/test.invoices.failed.ts --verbose
|
||||||
|
*/
|
||||||
|
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||||
|
import * as fs from 'fs';
|
||||||
|
import * as path from 'path';
|
||||||
|
import { execSync } from 'child_process';
|
||||||
|
import * as os from 'os';
|
||||||
|
import { ensureNanonetsOcr, ensureMiniCpm, isContainerRunning } from './helpers/docker.js';
|
||||||
|
|
||||||
|
const NANONETS_URL = 'http://localhost:8000/v1';
|
||||||
|
const NANONETS_MODEL = 'nanonets/Nanonets-OCR2-3B';
|
||||||
|
|
||||||
|
const OLLAMA_URL = 'http://localhost:11434';
|
||||||
|
const EXTRACTION_MODEL = 'gpt-oss:20b';
|
||||||
|
|
||||||
|
// Temp directory for storing markdown between stages
|
||||||
|
const TEMP_MD_DIR = path.join(os.tmpdir(), 'nanonets-invoices-failed-debug');
|
||||||
|
|
||||||
|
// Only test these specific invoices that failed
|
||||||
|
const FAILED_INVOICES = [
|
||||||
|
'consensus_2021-09',
|
||||||
|
'hetzner_2022-04',
|
||||||
|
'qonto_2021-08',
|
||||||
|
'qonto_2021-09',
|
||||||
|
];
|
||||||
|
|
||||||
|
// Structured invoice fields produced by the LLM extraction stage and
// compared against the expected ground-truth JSON.
interface IInvoice {
  invoice_number: string;  // unique invoice/document number; prefixes, slashes and dashes preserved
  invoice_date: string;    // ISO YYYY-MM-DD after normalization
  vendor_name: string;     // company that issued the invoice
  currency: string;        // EUR, USD, or GBP
  net_amount: number;      // amount before tax
  vat_amount: number;      // tax/VAT amount
  total_amount: number;    // final gross total
}

// One rasterized PDF page ready to be sent to the OCR model.
interface IImageData {
  base64: string;  // JPEG bytes, base64-encoded for the data: URL
  width: number;   // pixel width (used for visual-token estimation)
  height: number;  // pixel height
  pageNum: number; // 1-based page index within the document
}

// File locations for a single invoice test case. markdownPath is filled in
// after Stage 1 OCR writes the intermediate markdown.
interface ITestCase {
  name: string;
  pdfPath: string;
  jsonPath: string;
  markdownPath?: string;
}
|
||||||
|
|
||||||
|
// Nanonets-specific prompt for document OCR to markdown
|
||||||
|
const NANONETS_OCR_PROMPT = `Extract the text from the above document as if you were reading it naturally.
|
||||||
|
Return the tables in html format.
|
||||||
|
Return the equations in LaTeX representation.
|
||||||
|
If there is an image in the document and image caption is not present, add a small description inside <img></img> tag.
|
||||||
|
Watermarks should be wrapped in brackets. Ex: <watermark>OFFICIAL COPY</watermark>.
|
||||||
|
Page numbers should be wrapped in brackets. Ex: <page_number>14</page_number>.`;
|
||||||
|
|
||||||
|
// JSON extraction prompt for GPT-OSS 20B
|
||||||
|
const JSON_EXTRACTION_PROMPT = `You are an invoice data extractor. Below is an invoice document converted to text/markdown. Extract the key invoice fields as JSON.
|
||||||
|
|
||||||
|
IMPORTANT RULES:
|
||||||
|
1. invoice_number: The unique invoice/document number (NOT VAT ID, NOT customer ID). PRESERVE ALL CHARACTERS including slashes, dashes, and prefixes.
|
||||||
|
2. invoice_date: Format as YYYY-MM-DD
|
||||||
|
3. vendor_name: The company that issued the invoice
|
||||||
|
4. currency: EUR, USD, or GBP
|
||||||
|
5. net_amount: Amount before tax
|
||||||
|
6. vat_amount: Tax/VAT amount
|
||||||
|
7. total_amount: Final total (gross amount)
|
||||||
|
|
||||||
|
Return ONLY this JSON format, no explanation:
|
||||||
|
{
|
||||||
|
"invoice_number": "INV-2024-001",
|
||||||
|
"invoice_date": "2024-01-15",
|
||||||
|
"vendor_name": "Company Name",
|
||||||
|
"currency": "EUR",
|
||||||
|
"net_amount": 100.00,
|
||||||
|
"vat_amount": 19.00,
|
||||||
|
"total_amount": 119.00
|
||||||
|
}
|
||||||
|
|
||||||
|
INVOICE TEXT:
|
||||||
|
`;
|
||||||
|
|
||||||
|
const PATCH_SIZE = 14;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Estimate visual tokens for an image based on dimensions
|
||||||
|
*/
|
||||||
|
function estimateVisualTokens(width: number, height: number): number {
|
||||||
|
return Math.ceil((width * height) / (PATCH_SIZE * PATCH_SIZE));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Process images one page at a time for reliability
|
||||||
|
*/
|
||||||
|
function batchImages(images: IImageData[]): IImageData[][] {
|
||||||
|
return images.map(img => [img]);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convert PDF to JPEG images using ImageMagick with dimension tracking
|
||||||
|
*/
|
||||||
|
function convertPdfToImages(pdfPath: string): IImageData[] {
|
||||||
|
const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
|
||||||
|
const outputPattern = path.join(tempDir, 'page-%d.jpg');
|
||||||
|
|
||||||
|
try {
|
||||||
|
execSync(
|
||||||
|
`convert -density 150 -quality 90 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
|
||||||
|
{ stdio: 'pipe' }
|
||||||
|
);
|
||||||
|
|
||||||
|
const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.jpg')).sort();
|
||||||
|
const images: IImageData[] = [];
|
||||||
|
|
||||||
|
for (let i = 0; i < files.length; i++) {
|
||||||
|
const file = files[i];
|
||||||
|
const imagePath = path.join(tempDir, file);
|
||||||
|
const imageData = fs.readFileSync(imagePath);
|
||||||
|
|
||||||
|
const dimensions = execSync(`identify -format "%w %h" "${imagePath}"`, { encoding: 'utf-8' }).trim();
|
||||||
|
const [width, height] = dimensions.split(' ').map(Number);
|
||||||
|
|
||||||
|
images.push({
|
||||||
|
base64: imageData.toString('base64'),
|
||||||
|
width,
|
||||||
|
height,
|
||||||
|
pageNum: i + 1,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
return images;
|
||||||
|
} finally {
|
||||||
|
fs.rmSync(tempDir, { recursive: true, force: true });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convert a batch of pages to markdown using Nanonets-OCR-s
|
||||||
|
*/
|
||||||
|
/**
 * OCR a batch of page images into markdown via the Nanonets vLLM server
 * (OpenAI-compatible /chat/completions endpoint).
 *
 * All images are sent as image_url parts of a single user message followed
 * by the OCR prompt. Multi-page batches ask the model to emit
 * "--- PAGE N ---" separators; for a single-page batch the marker is
 * prepended here if the model did not produce one, so downstream joining
 * always sees page markers.
 *
 * @param batch - pages to OCR in one request
 * @returns markdown text for the batch, with page markers
 * @throws Error when the API responds with a non-2xx status
 */
async function convertBatchToMarkdown(batch: IImageData[]): Promise<string> {
  const startTime = Date.now();
  const pageNums = batch.map(img => img.pageNum).join(', ');

  const content: Array<{ type: string; image_url?: { url: string }; text?: string }> = [];

  // Images first, prompt text last - matches the "above document" wording
  // of the OCR prompt.
  for (const img of batch) {
    content.push({
      type: 'image_url',
      image_url: { url: `data:image/jpeg;base64,${img.base64}` },
    });
  }

  const promptText = batch.length > 1
    ? `${NANONETS_OCR_PROMPT}\n\nPlease clearly separate each page's content with "--- PAGE N ---" markers, where N is the page number starting from ${batch[0].pageNum}.`
    : NANONETS_OCR_PROMPT;

  content.push({ type: 'text', text: promptText });

  const response = await fetch(`${NANONETS_URL}/chat/completions`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      // vLLM's OpenAI endpoint expects a bearer token; the local server
      // does not validate it.
      'Authorization': 'Bearer dummy',
    },
    body: JSON.stringify({
      model: NANONETS_MODEL,
      messages: [{
        role: 'user',
        content,
      }],
      max_tokens: 4096 * batch.length, // output budget scales with page count
      temperature: 0.0, // deterministic OCR output
    }),
    signal: AbortSignal.timeout(600000), // 10 min - OCR can be slow on large pages
  });

  const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);

  if (!response.ok) {
    const errorText = await response.text();
    throw new Error(`Nanonets API error: ${response.status} - ${errorText}`);
  }

  const data = await response.json();
  let responseContent = (data.choices?.[0]?.message?.content || '').trim();

  // Single-page responses get a synthetic page marker for consistency
  if (batch.length === 1 && !responseContent.includes('--- PAGE')) {
    responseContent = `--- PAGE ${batch[0].pageNum} ---\n${responseContent}`;
  }

  console.log(`    Pages [${pageNums}]: ${responseContent.length} chars (${elapsed}s)`);
  return responseContent;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convert all pages of a document to markdown using smart batching
|
||||||
|
*/
|
||||||
|
async function convertDocumentToMarkdown(images: IImageData[], docName: string): Promise<string> {
|
||||||
|
const batches = batchImages(images);
|
||||||
|
console.log(` [${docName}] Processing ${images.length} page(s) in ${batches.length} batch(es)...`);
|
||||||
|
|
||||||
|
const markdownParts: string[] = [];
|
||||||
|
|
||||||
|
for (let i = 0; i < batches.length; i++) {
|
||||||
|
const batch = batches[i];
|
||||||
|
const batchTokens = batch.reduce((sum, img) => sum + estimateVisualTokens(img.width, img.height), 0);
|
||||||
|
console.log(` Batch ${i + 1}: ${batch.length} page(s), ~${batchTokens} tokens`);
|
||||||
|
const markdown = await convertBatchToMarkdown(batch);
|
||||||
|
markdownParts.push(markdown);
|
||||||
|
}
|
||||||
|
|
||||||
|
const fullMarkdown = markdownParts.join('\n\n');
|
||||||
|
console.log(` [${docName}] Complete: ${fullMarkdown.length} chars total`);
|
||||||
|
return fullMarkdown;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Stop Nanonets container
|
||||||
|
*/
|
||||||
|
function stopNanonets(): void {
|
||||||
|
console.log(' [Docker] Stopping Nanonets container...');
|
||||||
|
try {
|
||||||
|
execSync('docker stop nanonets-test 2>/dev/null || true', { stdio: 'pipe' });
|
||||||
|
execSync('sleep 5', { stdio: 'pipe' });
|
||||||
|
console.log(' [Docker] Nanonets stopped');
|
||||||
|
} catch {
|
||||||
|
console.log(' [Docker] Nanonets was not running');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Ensure GPT-OSS 20B model is available
|
||||||
|
*/
|
||||||
|
/**
 * Ensure the GPT-OSS 20B extraction model is present in the local Ollama
 * instance, pulling it if missing.
 *
 * @returns true when the model is already listed by /api/tags or the pull
 *          request succeeds; false otherwise.
 *
 * NOTE(review): if the initial /api/tags fetch throws (e.g. Ollama is not
 * running at all), this returns false immediately and never attempts the
 * pull, whereas a non-ok HTTP response falls through to the pull - confirm
 * this asymmetry is intended.
 */
async function ensureExtractionModel(): Promise<boolean> {
  try {
    const response = await fetch(`${OLLAMA_URL}/api/tags`);
    if (response.ok) {
      const data = await response.json();
      const models = data.models || [];
      // Exact name match including the tag, e.g. "gpt-oss:20b"
      if (models.some((m: { name: string }) => m.name === EXTRACTION_MODEL)) {
        console.log(`  [Ollama] Model available: ${EXTRACTION_MODEL}`);
        return true;
      }
    }
  } catch {
    return false;
  }

  // Model not listed - ask Ollama to pull it (blocking; stream disabled)
  console.log(`  [Ollama] Pulling ${EXTRACTION_MODEL}...`);
  const pullResponse = await fetch(`${OLLAMA_URL}/api/pull`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ name: EXTRACTION_MODEL, stream: false }),
  });

  return pullResponse.ok;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Parse amount from string (handles European format)
|
||||||
|
*/
|
||||||
|
function parseAmount(s: string | number | undefined): number {
|
||||||
|
if (s === undefined || s === null) return 0;
|
||||||
|
if (typeof s === 'number') return s;
|
||||||
|
const match = s.match(/([\d.,]+)/);
|
||||||
|
if (!match) return 0;
|
||||||
|
const numStr = match[1];
|
||||||
|
const normalized = numStr.includes(',') && numStr.indexOf(',') > numStr.lastIndexOf('.')
|
||||||
|
? numStr.replace(/\./g, '').replace(',', '.')
|
||||||
|
: numStr.replace(/,/g, '');
|
||||||
|
return parseFloat(normalized) || 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract invoice number - MINIMAL normalization for debugging
|
||||||
|
*/
|
||||||
|
function extractInvoiceNumber(s: string | undefined): string {
|
||||||
|
if (!s) return '';
|
||||||
|
// Only remove markdown formatting, preserve everything else
|
||||||
|
return s.replace(/\*\*/g, '').replace(/`/g, '').trim();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract date (YYYY-MM-DD) from response
|
||||||
|
*/
|
||||||
|
function extractDate(s: string | undefined): string {
|
||||||
|
if (!s) return '';
|
||||||
|
let clean = s.replace(/\*\*/g, '').replace(/`/g, '').trim();
|
||||||
|
const isoMatch = clean.match(/(\d{4}-\d{2}-\d{2})/);
|
||||||
|
if (isoMatch) return isoMatch[1];
|
||||||
|
const dmyMatch = clean.match(/(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})/);
|
||||||
|
if (dmyMatch) {
|
||||||
|
return `${dmyMatch[3]}-${dmyMatch[2].padStart(2, '0')}-${dmyMatch[1].padStart(2, '0')}`;
|
||||||
|
}
|
||||||
|
return clean.replace(/[^\d-]/g, '').trim();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract currency
|
||||||
|
*/
|
||||||
|
function extractCurrency(s: string | undefined): string {
|
||||||
|
if (!s) return 'EUR';
|
||||||
|
const upper = s.toUpperCase();
|
||||||
|
if (upper.includes('EUR') || upper.includes('€')) return 'EUR';
|
||||||
|
if (upper.includes('USD') || upper.includes('$')) return 'USD';
|
||||||
|
if (upper.includes('GBP') || upper.includes('£')) return 'GBP';
|
||||||
|
return 'EUR';
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract JSON from response
|
||||||
|
*/
|
||||||
|
function extractJsonFromResponse(response: string): Record<string, unknown> | null {
|
||||||
|
let cleanResponse = response.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
|
||||||
|
const codeBlockMatch = cleanResponse.match(/```(?:json)?\s*([\s\S]*?)```/);
|
||||||
|
const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : cleanResponse;
|
||||||
|
|
||||||
|
try {
|
||||||
|
return JSON.parse(jsonStr);
|
||||||
|
} catch {
|
||||||
|
const jsonMatch = jsonStr.match(/\{[\s\S]*\}/);
|
||||||
|
if (jsonMatch) {
|
||||||
|
try {
|
||||||
|
return JSON.parse(jsonMatch[0]);
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Parse JSON response into IInvoice
|
||||||
|
*/
|
||||||
|
function parseJsonToInvoice(response: string): IInvoice | null {
|
||||||
|
const parsed = extractJsonFromResponse(response);
|
||||||
|
if (!parsed) return null;
|
||||||
|
|
||||||
|
return {
|
||||||
|
invoice_number: extractInvoiceNumber(String(parsed.invoice_number || '')),
|
||||||
|
invoice_date: extractDate(String(parsed.invoice_date || '')),
|
||||||
|
vendor_name: String(parsed.vendor_name || '').replace(/\*\*/g, '').replace(/`/g, '').trim(),
|
||||||
|
currency: extractCurrency(String(parsed.currency || '')),
|
||||||
|
net_amount: parseAmount(parsed.net_amount as string | number),
|
||||||
|
vat_amount: parseAmount(parsed.vat_amount as string | number),
|
||||||
|
total_amount: parseAmount(parsed.total_amount as string | number),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract invoice from markdown using GPT-OSS 20B (streaming)
|
||||||
|
*/
|
||||||
|
async function extractInvoiceFromMarkdown(markdown: string, queryId: string): Promise<IInvoice | null> {
|
||||||
|
const startTime = Date.now();
|
||||||
|
const fullPrompt = JSON_EXTRACTION_PROMPT + markdown;
|
||||||
|
|
||||||
|
// Log exact prompt
|
||||||
|
console.log(`\n [${queryId}] ===== PROMPT =====`);
|
||||||
|
console.log(fullPrompt);
|
||||||
|
console.log(` [${queryId}] ===== END PROMPT (${fullPrompt.length} chars) =====\n`);
|
||||||
|
|
||||||
|
const response = await fetch(`${OLLAMA_URL}/api/chat`, {
|
||||||
|
method: 'POST',
|
||||||
|
headers: { 'Content-Type': 'application/json' },
|
||||||
|
body: JSON.stringify({
|
||||||
|
model: EXTRACTION_MODEL,
|
||||||
|
messages: [
|
||||||
|
{ role: 'user', content: 'Hi there, how are you?' },
|
||||||
|
{ role: 'assistant', content: 'Good, how can I help you today?' },
|
||||||
|
{ role: 'user', content: fullPrompt },
|
||||||
|
],
|
||||||
|
stream: true,
|
||||||
|
}),
|
||||||
|
signal: AbortSignal.timeout(600000),
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!response.ok) {
|
||||||
|
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
|
||||||
|
console.log(` [${queryId}] ERROR: ${response.status} (${elapsed}s)`);
|
||||||
|
throw new Error(`Ollama API error: ${response.status}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stream the response
|
||||||
|
let content = '';
|
||||||
|
let thinkingContent = '';
|
||||||
|
let thinkingStarted = false;
|
||||||
|
let outputStarted = false;
|
||||||
|
const reader = response.body!.getReader();
|
||||||
|
const decoder = new TextDecoder();
|
||||||
|
|
||||||
|
try {
|
||||||
|
while (true) {
|
||||||
|
const { done, value } = await reader.read();
|
||||||
|
if (done) break;
|
||||||
|
|
||||||
|
const chunk = decoder.decode(value, { stream: true });
|
||||||
|
|
||||||
|
for (const line of chunk.split('\n').filter(l => l.trim())) {
|
||||||
|
try {
|
||||||
|
const json = JSON.parse(line);
|
||||||
|
|
||||||
|
const thinking = json.message?.thinking || '';
|
||||||
|
if (thinking) {
|
||||||
|
if (!thinkingStarted) {
|
||||||
|
process.stdout.write(` [${queryId}] THINKING: `);
|
||||||
|
thinkingStarted = true;
|
||||||
|
}
|
||||||
|
process.stdout.write(thinking);
|
||||||
|
thinkingContent += thinking;
|
||||||
|
}
|
||||||
|
|
||||||
|
const token = json.message?.content || '';
|
||||||
|
if (token) {
|
||||||
|
if (!outputStarted) {
|
||||||
|
if (thinkingStarted) process.stdout.write('\n');
|
||||||
|
process.stdout.write(` [${queryId}] OUTPUT: `);
|
||||||
|
outputStarted = true;
|
||||||
|
}
|
||||||
|
process.stdout.write(token);
|
||||||
|
content += token;
|
||||||
|
}
|
||||||
|
} catch {
|
||||||
|
// Ignore parse errors for partial chunks
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} finally {
|
||||||
|
if (thinkingStarted || outputStarted) process.stdout.write('\n');
|
||||||
|
}
|
||||||
|
|
||||||
|
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
|
||||||
|
console.log(` [${queryId}] Done: ${thinkingContent.length} thinking chars, ${content.length} output chars (${elapsed}s)`);
|
||||||
|
|
||||||
|
// Log raw response for debugging
|
||||||
|
console.log(` [${queryId}] RAW RESPONSE: ${content}`);
|
||||||
|
|
||||||
|
return parseJsonToInvoice(content);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract invoice (single pass)
|
||||||
|
*/
|
||||||
|
async function extractInvoice(markdown: string, docName: string): Promise<IInvoice> {
|
||||||
|
console.log(` [${docName}] Extracting...`);
|
||||||
|
const invoice = await extractInvoiceFromMarkdown(markdown, docName);
|
||||||
|
if (!invoice) {
|
||||||
|
return {
|
||||||
|
invoice_number: '',
|
||||||
|
invoice_date: '',
|
||||||
|
vendor_name: '',
|
||||||
|
currency: 'EUR',
|
||||||
|
net_amount: 0,
|
||||||
|
vat_amount: 0,
|
||||||
|
total_amount: 0,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
console.log(` [${docName}] Extracted: ${JSON.stringify(invoice, null, 2)}`);
|
||||||
|
return invoice;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Normalize date to YYYY-MM-DD
|
||||||
|
*/
|
||||||
|
function normalizeDate(dateStr: string | null): string {
|
||||||
|
if (!dateStr) return '';
|
||||||
|
if (/^\d{4}-\d{2}-\d{2}$/.test(dateStr)) return dateStr;
|
||||||
|
|
||||||
|
const monthMap: Record<string, string> = {
|
||||||
|
JAN: '01', FEB: '02', MAR: '03', APR: '04', MAY: '05', JUN: '06',
|
||||||
|
JUL: '07', AUG: '08', SEP: '09', OCT: '10', NOV: '11', DEC: '12',
|
||||||
|
};
|
||||||
|
|
||||||
|
let match = dateStr.match(/^(\d{1,2})-([A-Z]{3})-(\d{4})$/i);
|
||||||
|
if (match) {
|
||||||
|
return `${match[3]}-${monthMap[match[2].toUpperCase()] || '01'}-${match[1].padStart(2, '0')}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
match = dateStr.match(/^(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})$/);
|
||||||
|
if (match) {
|
||||||
|
return `${match[3]}-${match[2].padStart(2, '0')}-${match[1].padStart(2, '0')}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
return dateStr;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Compare extracted invoice against expected - detailed output
|
||||||
|
*/
|
||||||
|
function compareInvoice(
|
||||||
|
extracted: IInvoice,
|
||||||
|
expected: IInvoice
|
||||||
|
): { match: boolean; errors: string[] } {
|
||||||
|
const errors: string[] = [];
|
||||||
|
|
||||||
|
// Invoice number comparison - exact match after whitespace normalization
|
||||||
|
const extNum = extracted.invoice_number?.trim() || '';
|
||||||
|
const expNum = expected.invoice_number?.trim() || '';
|
||||||
|
if (extNum.toLowerCase() !== expNum.toLowerCase()) {
|
||||||
|
errors.push(`invoice_number: expected "${expected.invoice_number}", got "${extracted.invoice_number}"`);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (normalizeDate(extracted.invoice_date) !== normalizeDate(expected.invoice_date)) {
|
||||||
|
errors.push(`invoice_date: expected "${expected.invoice_date}", got "${extracted.invoice_date}"`);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (Math.abs(extracted.total_amount - expected.total_amount) > 0.02) {
|
||||||
|
errors.push(`total_amount: expected ${expected.total_amount}, got ${extracted.total_amount}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (extracted.currency?.toUpperCase() !== expected.currency?.toUpperCase()) {
|
||||||
|
errors.push(`currency: expected "${expected.currency}", got "${extracted.currency}"`);
|
||||||
|
}
|
||||||
|
|
||||||
|
return { match: errors.length === 0, errors };
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Find test cases for failed invoices only
|
||||||
|
*/
|
||||||
|
function findTestCases(): ITestCase[] {
|
||||||
|
const testDir = path.join(process.cwd(), '.nogit/invoices');
|
||||||
|
if (!fs.existsSync(testDir)) return [];
|
||||||
|
|
||||||
|
const files = fs.readdirSync(testDir);
|
||||||
|
const testCases: ITestCase[] = [];
|
||||||
|
|
||||||
|
for (const invoiceName of FAILED_INVOICES) {
|
||||||
|
const pdfFile = `${invoiceName}.pdf`;
|
||||||
|
const jsonFile = `${invoiceName}.json`;
|
||||||
|
|
||||||
|
if (files.includes(pdfFile) && files.includes(jsonFile)) {
|
||||||
|
testCases.push({
|
||||||
|
name: invoiceName,
|
||||||
|
pdfPath: path.join(testDir, pdfFile),
|
||||||
|
jsonPath: path.join(testDir, jsonFile),
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
console.warn(`Warning: Missing files for ${invoiceName}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return testCases;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ============ TESTS ============

// Discover the failed-invoice test cases at module load time so the
// Stage 1/Stage 2 tap tests below can iterate over them.
const testCases = findTestCases();
console.log(`\n========================================`);
console.log(`  FAILED INVOICES DEBUG TEST`);
console.log(`========================================`);
console.log(`  Testing ${testCases.length} failed invoices:`);
for (const tc of testCases) {
  console.log(`    - ${tc.name}`);
}
console.log(`========================================\n`);

// Ensure temp directory exists (holds intermediate markdown between stages)
if (!fs.existsSync(TEMP_MD_DIR)) {
  fs.mkdirSync(TEMP_MD_DIR, { recursive: true });
}

// -------- STAGE 1: OCR with Nanonets --------
// The two models do not fit in VRAM together, so Stage 1 (Nanonets OCR)
// runs first and its container is stopped before Stage 2 (Ollama) starts.

tap.test('Stage 1: Setup Nanonets', async () => {
  console.log('\n========== STAGE 1: Nanonets OCR ==========\n');
  const ok = await ensureNanonetsOcr();
  expect(ok).toBeTrue();
});

// Convert each failed invoice PDF to markdown and persist it twice: once
// in the temp dir for Stage 2, once as a .debug.md copy for inspection.
tap.test('Stage 1: Convert failed invoices to markdown', async () => {
  console.log('\n  Converting failed invoice PDFs to markdown with Nanonets-OCR-s...\n');

  for (const tc of testCases) {
    console.log(`\n  === ${tc.name} ===`);

    const images = convertPdfToImages(tc.pdfPath);
    console.log(`  Pages: ${images.length}`);

    const markdown = await convertDocumentToMarkdown(images, tc.name);

    const mdPath = path.join(TEMP_MD_DIR, `${tc.name}.md`);
    fs.writeFileSync(mdPath, markdown);
    tc.markdownPath = mdPath;
    console.log(`  Saved: ${mdPath}`);

    // Also save to .nogit for inspection
    const debugMdPath = path.join(process.cwd(), '.nogit/invoices', `${tc.name}.debug.md`);
    fs.writeFileSync(debugMdPath, markdown);
    console.log(`  Debug copy: ${debugMdPath}`);
  }

  console.log('\n  Stage 1 complete: All failed invoices converted to markdown\n');
});

// Free the GPU before Stage 2 loads the extraction model.
tap.test('Stage 1: Stop Nanonets', async () => {
  stopNanonets();
  await new Promise(resolve => setTimeout(resolve, 3000));
  expect(isContainerRunning('nanonets-test')).toBeFalse();
});

// -------- STAGE 2: Extraction with GPT-OSS 20B --------

tap.test('Stage 2: Setup Ollama + GPT-OSS 20B', async () => {
  console.log('\n========== STAGE 2: GPT-OSS 20B Extraction ==========\n');

  const ollamaOk = await ensureMiniCpm();
  expect(ollamaOk).toBeTrue();

  const extractionOk = await ensureExtractionModel();
  expect(extractionOk).toBeTrue();
});

// Shared counters mutated by the per-invoice Stage 2 tests below.
let passedCount = 0;
let failedCount = 0;
||||||
|
for (const tc of testCases) {
|
||||||
|
tap.test(`Stage 2: Extract ${tc.name}`, async () => {
|
||||||
|
const expected: IInvoice = JSON.parse(fs.readFileSync(tc.jsonPath, 'utf-8'));
|
||||||
|
console.log(`\n ========================================`);
|
||||||
|
console.log(` === ${tc.name} ===`);
|
||||||
|
console.log(` ========================================`);
|
||||||
|
console.log(` EXPECTED:`);
|
||||||
|
console.log(` invoice_number: "${expected.invoice_number}"`);
|
||||||
|
console.log(` invoice_date: "${expected.invoice_date}"`);
|
||||||
|
console.log(` vendor_name: "${expected.vendor_name}"`);
|
||||||
|
console.log(` total_amount: ${expected.total_amount} ${expected.currency}`);
|
||||||
|
|
||||||
|
const startTime = Date.now();
|
||||||
|
|
||||||
|
const mdPath = path.join(TEMP_MD_DIR, `${tc.name}.md`);
|
||||||
|
if (!fs.existsSync(mdPath)) {
|
||||||
|
throw new Error(`Markdown not found: ${mdPath}. Run Stage 1 first.`);
|
||||||
|
}
|
||||||
|
const markdown = fs.readFileSync(mdPath, 'utf-8');
|
||||||
|
console.log(` Markdown: ${markdown.length} chars`);
|
||||||
|
|
||||||
|
const extracted = await extractInvoice(markdown, tc.name);
|
||||||
|
|
||||||
|
const elapsedMs = Date.now() - startTime;
|
||||||
|
|
||||||
|
console.log(`\n EXTRACTED:`);
|
||||||
|
console.log(` invoice_number: "${extracted.invoice_number}"`);
|
||||||
|
console.log(` invoice_date: "${extracted.invoice_date}"`);
|
||||||
|
console.log(` vendor_name: "${extracted.vendor_name}"`);
|
||||||
|
console.log(` total_amount: ${extracted.total_amount} ${extracted.currency}`);
|
||||||
|
|
||||||
|
const result = compareInvoice(extracted, expected);
|
||||||
|
|
||||||
|
if (result.match) {
|
||||||
|
passedCount++;
|
||||||
|
console.log(`\n Result: ✓ MATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
|
||||||
|
} else {
|
||||||
|
failedCount++;
|
||||||
|
console.log(`\n Result: ✗ MISMATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
|
||||||
|
console.log(` ERRORS:`);
|
||||||
|
result.errors.forEach(e => console.log(` - ${e}`));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Don't fail the test - we're debugging
|
||||||
|
// expect(result.match).toBeTrue();
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
tap.test('Summary', async () => {
|
||||||
|
const totalInvoices = testCases.length;
|
||||||
|
const accuracy = totalInvoices > 0 ? (passedCount / totalInvoices) * 100 : 0;
|
||||||
|
|
||||||
|
console.log(`\n========================================`);
|
||||||
|
console.log(` Failed Invoices Debug Summary`);
|
||||||
|
console.log(`========================================`);
|
||||||
|
console.log(` Passed: ${passedCount}/${totalInvoices}`);
|
||||||
|
console.log(` Failed: ${failedCount}/${totalInvoices}`);
|
||||||
|
console.log(` Accuracy: ${accuracy.toFixed(1)}%`);
|
||||||
|
console.log(`========================================`);
|
||||||
|
console.log(` Markdown files saved to: ${TEMP_MD_DIR}`);
|
||||||
|
console.log(` Debug copies in: .nogit/invoices/*.debug.md`);
|
||||||
|
console.log(`========================================\n`);
|
||||||
|
|
||||||
|
// Don't cleanup temp files for debugging
|
||||||
|
console.log(` Keeping temp files for debugging.\n`);
|
||||||
|
});
|
||||||
|
|
||||||
|
export default tap.start();
|
||||||
@@ -1,10 +1,10 @@
|
|||||||
/**
|
/**
|
||||||
* Invoice extraction test using MiniCPM-V (visual extraction)
|
* Invoice extraction test using MiniCPM-V via smartagent DualAgentOrchestrator
|
||||||
*
|
*
|
||||||
* Consensus approach:
|
* Uses vision-capable orchestrator with JsonValidatorTool for self-validation:
|
||||||
* 1. Pass 1: Fast JSON extraction
|
* 1. Pass images to the orchestrator
|
||||||
* 2. Pass 2: Confirm with thinking enabled
|
* 2. Driver extracts invoice data and validates JSON before completing
|
||||||
* 3. If mismatch: repeat until consensus or max attempts
|
* 3. If validation fails, driver retries within the same task
|
||||||
*/
|
*/
|
||||||
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||||
import * as fs from 'fs';
|
import * as fs from 'fs';
|
||||||
@@ -12,6 +12,8 @@ import * as path from 'path';
|
|||||||
import { execSync } from 'child_process';
|
import { execSync } from 'child_process';
|
||||||
import * as os from 'os';
|
import * as os from 'os';
|
||||||
import { ensureMiniCpm } from './helpers/docker.js';
|
import { ensureMiniCpm } from './helpers/docker.js';
|
||||||
|
import { SmartAi } from '@push.rocks/smartai';
|
||||||
|
import { DualAgentOrchestrator, JsonValidatorTool } from '@push.rocks/smartagent';
|
||||||
|
|
||||||
const OLLAMA_URL = 'http://localhost:11434';
|
const OLLAMA_URL = 'http://localhost:11434';
|
||||||
const MODEL = 'openbmb/minicpm-v4.5:q8_0';
|
const MODEL = 'openbmb/minicpm-v4.5:q8_0';
|
||||||
@@ -26,6 +28,10 @@ interface IInvoice {
|
|||||||
total_amount: number;
|
total_amount: number;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SmartAi instance and orchestrator (initialized in setup)
|
||||||
|
let smartAi: SmartAi;
|
||||||
|
let orchestrator: DualAgentOrchestrator;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Convert PDF to PNG images using ImageMagick
|
* Convert PDF to PNG images using ImageMagick
|
||||||
*/
|
*/
|
||||||
@@ -54,7 +60,9 @@ function convertPdfToImages(pdfPath: string): string[] {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
const JSON_PROMPT = `Extract invoice data from this image. Return ONLY a JSON object with these exact fields:
|
const EXTRACTION_PROMPT = `Extract invoice data from the provided image(s).
|
||||||
|
|
||||||
|
IMPORTANT: You must output a valid JSON object with these exact fields:
|
||||||
{
|
{
|
||||||
"invoice_number": "the invoice number (not VAT ID, not customer ID)",
|
"invoice_number": "the invoice number (not VAT ID, not customer ID)",
|
||||||
"invoice_date": "YYYY-MM-DD format",
|
"invoice_date": "YYYY-MM-DD format",
|
||||||
@@ -64,67 +72,16 @@ const JSON_PROMPT = `Extract invoice data from this image. Return ONLY a JSON ob
|
|||||||
"vat_amount": 0.00,
|
"vat_amount": 0.00,
|
||||||
"total_amount": 0.00
|
"total_amount": 0.00
|
||||||
}
|
}
|
||||||
Return only the JSON, no explanation.`;
|
|
||||||
|
|
||||||
/**
|
Before completing, use the json.validate tool to verify your output is valid JSON with all required fields.
|
||||||
* Query MiniCPM-V for JSON output (fast, no thinking)
|
|
||||||
*/
|
|
||||||
async function queryJsonFast(images: string[]): Promise<string> {
|
|
||||||
const response = await fetch(`${OLLAMA_URL}/api/chat`, {
|
|
||||||
method: 'POST',
|
|
||||||
headers: { 'Content-Type': 'application/json' },
|
|
||||||
body: JSON.stringify({
|
|
||||||
model: MODEL,
|
|
||||||
messages: [{
|
|
||||||
role: 'user',
|
|
||||||
content: JSON_PROMPT,
|
|
||||||
images: images,
|
|
||||||
}],
|
|
||||||
stream: false,
|
|
||||||
options: {
|
|
||||||
num_predict: 1000,
|
|
||||||
temperature: 0.1,
|
|
||||||
},
|
|
||||||
}),
|
|
||||||
});
|
|
||||||
|
|
||||||
if (!response.ok) {
|
<tool_call>
|
||||||
throw new Error(`Ollama API error: ${response.status}`);
|
<tool>json</tool>
|
||||||
}
|
<action>validate</action>
|
||||||
|
<params>{"jsonString": "YOUR_JSON_HERE", "requiredFields": ["invoice_number", "invoice_date", "vendor_name", "currency", "net_amount", "vat_amount", "total_amount"]}</params>
|
||||||
|
</tool_call>
|
||||||
|
|
||||||
const data = await response.json();
|
Only complete the task after validation passes. Output the final JSON in <task_complete> tags.`;
|
||||||
return (data.message?.content || '').trim();
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Query MiniCPM-V for JSON output with thinking enabled (slower, more accurate)
|
|
||||||
*/
|
|
||||||
async function queryJsonWithThinking(images: string[]): Promise<string> {
|
|
||||||
const response = await fetch(`${OLLAMA_URL}/api/chat`, {
|
|
||||||
method: 'POST',
|
|
||||||
headers: { 'Content-Type': 'application/json' },
|
|
||||||
body: JSON.stringify({
|
|
||||||
model: MODEL,
|
|
||||||
messages: [{
|
|
||||||
role: 'user',
|
|
||||||
content: `Think carefully about this invoice image, then ${JSON_PROMPT}`,
|
|
||||||
images: images,
|
|
||||||
}],
|
|
||||||
stream: false,
|
|
||||||
options: {
|
|
||||||
num_predict: 2000,
|
|
||||||
temperature: 0.1,
|
|
||||||
},
|
|
||||||
}),
|
|
||||||
});
|
|
||||||
|
|
||||||
if (!response.ok) {
|
|
||||||
throw new Error(`Ollama API error: ${response.status}`);
|
|
||||||
}
|
|
||||||
|
|
||||||
const data = await response.json();
|
|
||||||
return (data.message?.content || '').trim();
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Parse amount from string (handles European format)
|
* Parse amount from string (handles European format)
|
||||||
@@ -190,9 +147,31 @@ function extractCurrency(s: string | undefined): string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Extract JSON from response (handles markdown code blocks)
|
* Extract JSON from response (handles markdown code blocks and task_complete tags)
|
||||||
*/
|
*/
|
||||||
function extractJsonFromResponse(response: string): Record<string, unknown> | null {
|
function extractJsonFromResponse(response: string): Record<string, unknown> | null {
|
||||||
|
// Try to find JSON in task_complete tags
|
||||||
|
const completeMatch = response.match(/<task_complete>([\s\S]*?)<\/task_complete>/);
|
||||||
|
if (completeMatch) {
|
||||||
|
const content = completeMatch[1].trim();
|
||||||
|
// Try to find JSON in the content
|
||||||
|
const codeBlockMatch = content.match(/```(?:json)?\s*([\s\S]*?)```/);
|
||||||
|
const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : content;
|
||||||
|
try {
|
||||||
|
return JSON.parse(jsonStr);
|
||||||
|
} catch {
|
||||||
|
// Try to find JSON object pattern
|
||||||
|
const jsonMatch = jsonStr.match(/\{[\s\S]*\}/);
|
||||||
|
if (jsonMatch) {
|
||||||
|
try {
|
||||||
|
return JSON.parse(jsonMatch[0]);
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Try to find JSON in markdown code block
|
// Try to find JSON in markdown code block
|
||||||
const codeBlockMatch = response.match(/```(?:json)?\s*([\s\S]*?)```/);
|
const codeBlockMatch = response.match(/```(?:json)?\s*([\s\S]*?)```/);
|
||||||
const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : response.trim();
|
const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : response.trim();
|
||||||
@@ -232,76 +211,27 @@ function parseJsonToInvoice(response: string): IInvoice | null {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Compare two invoices for consensus (key fields must match)
|
* Extract invoice data using smartagent orchestrator with vision
|
||||||
*/
|
|
||||||
function invoicesMatch(a: IInvoice, b: IInvoice): boolean {
|
|
||||||
const numMatch = a.invoice_number.toLowerCase() === b.invoice_number.toLowerCase();
|
|
||||||
const dateMatch = a.invoice_date === b.invoice_date;
|
|
||||||
const totalMatch = Math.abs(a.total_amount - b.total_amount) < 0.02;
|
|
||||||
return numMatch && dateMatch && totalMatch;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Extract invoice data using consensus approach:
|
|
||||||
* 1. Pass 1: Fast JSON extraction
|
|
||||||
* 2. Pass 2: Confirm with thinking enabled
|
|
||||||
* 3. If mismatch: repeat until consensus or max 5 attempts
|
|
||||||
*/
|
*/
|
||||||
async function extractInvoiceFromImages(images: string[]): Promise<IInvoice> {
|
async function extractInvoiceFromImages(images: string[]): Promise<IInvoice> {
|
||||||
console.log(` [Vision] Processing ${images.length} page(s) with ${MODEL} (consensus)`);
|
console.log(` [Vision] Processing ${images.length} page(s) with smartagent DualAgentOrchestrator`);
|
||||||
|
|
||||||
const MAX_ATTEMPTS = 5;
|
const startTime = Date.now();
|
||||||
let attempt = 0;
|
|
||||||
|
|
||||||
while (attempt < MAX_ATTEMPTS) {
|
const result = await orchestrator.run(EXTRACTION_PROMPT, { images });
|
||||||
attempt++;
|
|
||||||
console.log(` [Attempt ${attempt}/${MAX_ATTEMPTS}]`);
|
|
||||||
|
|
||||||
// PASS 1: Fast JSON extraction
|
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
|
||||||
console.log(` [Pass 1] Fast extraction...`);
|
console.log(` [Vision] Completed in ${elapsed}s (${result.iterations} iterations, status: ${result.status})`);
|
||||||
const fastResponse = await queryJsonFast(images);
|
|
||||||
const fastInvoice = parseJsonToInvoice(fastResponse);
|
|
||||||
|
|
||||||
if (!fastInvoice) {
|
const invoice = parseJsonToInvoice(result.result);
|
||||||
console.log(` [Pass 1] JSON parsing failed, retrying...`);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
console.log(` [Pass 1] Result: ${fastInvoice.invoice_number} | ${fastInvoice.invoice_date} | ${fastInvoice.total_amount} ${fastInvoice.currency}`);
|
|
||||||
|
|
||||||
// PASS 2: Confirm with thinking
|
if (invoice) {
|
||||||
console.log(` [Pass 2] Thinking confirmation...`);
|
console.log(` [Result] ${invoice.invoice_number} | ${invoice.invoice_date} | ${invoice.total_amount} ${invoice.currency}`);
|
||||||
const thinkResponse = await queryJsonWithThinking(images);
|
return invoice;
|
||||||
const thinkInvoice = parseJsonToInvoice(thinkResponse);
|
|
||||||
|
|
||||||
if (!thinkInvoice) {
|
|
||||||
console.log(` [Pass 2] JSON parsing failed, retrying...`);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
console.log(` [Pass 2] Result: ${thinkInvoice.invoice_number} | ${thinkInvoice.invoice_date} | ${thinkInvoice.total_amount} ${thinkInvoice.currency}`);
|
|
||||||
|
|
||||||
// Check consensus
|
|
||||||
if (invoicesMatch(fastInvoice, thinkInvoice)) {
|
|
||||||
console.log(` [Consensus] MATCH - using result`);
|
|
||||||
return thinkInvoice; // Prefer thinking result
|
|
||||||
}
|
|
||||||
|
|
||||||
console.log(` [Consensus] MISMATCH - repeating...`);
|
|
||||||
console.log(` Fast: ${fastInvoice.invoice_number} | ${fastInvoice.invoice_date} | ${fastInvoice.total_amount}`);
|
|
||||||
console.log(` Think: ${thinkInvoice.invoice_number} | ${thinkInvoice.invoice_date} | ${thinkInvoice.total_amount}`);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Max attempts reached - do one final thinking pass and use that
|
// Return empty invoice if parsing failed
|
||||||
console.log(` [Final] Max attempts reached, using final thinking pass`);
|
console.log(` [Result] Parsing failed, returning empty invoice`);
|
||||||
const finalResponse = await queryJsonWithThinking(images);
|
|
||||||
const finalInvoice = parseJsonToInvoice(finalResponse);
|
|
||||||
|
|
||||||
if (finalInvoice) {
|
|
||||||
console.log(` [Final] Result: ${finalInvoice.invoice_number} | ${finalInvoice.invoice_date} | ${finalInvoice.total_amount} ${finalInvoice.currency}`);
|
|
||||||
return finalInvoice;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return empty invoice if all else fails
|
|
||||||
console.log(` [Final] All parsing failed, returning empty`);
|
|
||||||
return {
|
return {
|
||||||
invoice_number: '',
|
invoice_number: '',
|
||||||
invoice_date: '',
|
invoice_date: '',
|
||||||
@@ -410,6 +340,79 @@ tap.test('setup: ensure Docker containers are running', async () => {
|
|||||||
console.log('\n[Setup] All containers ready!\n');
|
console.log('\n[Setup] All containers ready!\n');
|
||||||
});
|
});
|
||||||
|
|
||||||
|
tap.test('setup: initialize smartagent orchestrator', async () => {
|
||||||
|
console.log('[Setup] Initializing SmartAi and DualAgentOrchestrator...');
|
||||||
|
|
||||||
|
smartAi = new SmartAi({
|
||||||
|
ollama: {
|
||||||
|
baseUrl: OLLAMA_URL,
|
||||||
|
model: MODEL,
|
||||||
|
defaultOptions: {
|
||||||
|
num_ctx: 32768,
|
||||||
|
temperature: 0.1,
|
||||||
|
},
|
||||||
|
defaultTimeout: 300000, // 5 minutes for vision tasks
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
await smartAi.start();
|
||||||
|
|
||||||
|
orchestrator = new DualAgentOrchestrator({
|
||||||
|
smartAiInstance: smartAi,
|
||||||
|
defaultProvider: 'ollama',
|
||||||
|
guardianPolicyPrompt: `You are a Guardian agent overseeing invoice extraction tasks.
|
||||||
|
|
||||||
|
APPROVE all tool calls that:
|
||||||
|
- Use the json.validate action to verify JSON output
|
||||||
|
- Are reasonable attempts to complete the extraction task
|
||||||
|
|
||||||
|
REJECT tool calls that:
|
||||||
|
- Attempt to access external resources
|
||||||
|
- Try to execute arbitrary code
|
||||||
|
- Are clearly unrelated to invoice extraction`,
|
||||||
|
driverSystemMessage: `You are an AI assistant that extracts invoice data from images.
|
||||||
|
|
||||||
|
Your task is to analyze invoice images and extract structured data.
|
||||||
|
You have access to a json.validate tool to verify your JSON output.
|
||||||
|
|
||||||
|
IMPORTANT: Always validate your JSON before completing the task.
|
||||||
|
|
||||||
|
## Tool Usage Format
|
||||||
|
When you need to validate JSON, output:
|
||||||
|
|
||||||
|
<tool_call>
|
||||||
|
<tool>json</tool>
|
||||||
|
<action>validate</action>
|
||||||
|
<params>{"jsonString": "YOUR_JSON", "requiredFields": ["invoice_number", "invoice_date", "vendor_name", "currency", "net_amount", "vat_amount", "total_amount"]}</params>
|
||||||
|
</tool_call>
|
||||||
|
|
||||||
|
## Completion Format
|
||||||
|
After validation passes, complete the task:
|
||||||
|
|
||||||
|
<task_complete>
|
||||||
|
{"invoice_number": "...", "invoice_date": "YYYY-MM-DD", ...}
|
||||||
|
</task_complete>`,
|
||||||
|
maxIterations: 5,
|
||||||
|
maxConsecutiveRejections: 3,
|
||||||
|
onToken: (token, source) => {
|
||||||
|
if (source === 'driver') {
|
||||||
|
process.stdout.write(token);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
onProgress: (event) => {
|
||||||
|
if (event.logLevel === 'error') {
|
||||||
|
console.error(event.logMessage);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
// Register the JsonValidatorTool
|
||||||
|
orchestrator.registerTool(new JsonValidatorTool());
|
||||||
|
|
||||||
|
await orchestrator.start();
|
||||||
|
console.log('[Setup] Orchestrator initialized!\n');
|
||||||
|
});
|
||||||
|
|
||||||
tap.test('should have MiniCPM-V model loaded', async () => {
|
tap.test('should have MiniCPM-V model loaded', async () => {
|
||||||
const response = await fetch(`${OLLAMA_URL}/api/tags`);
|
const response = await fetch(`${OLLAMA_URL}/api/tags`);
|
||||||
const data = await response.json();
|
const data = await response.json();
|
||||||
@@ -418,7 +421,7 @@ tap.test('should have MiniCPM-V model loaded', async () => {
|
|||||||
});
|
});
|
||||||
|
|
||||||
const testCases = findTestCases();
|
const testCases = findTestCases();
|
||||||
console.log(`\nFound ${testCases.length} invoice test cases (MiniCPM-V)\n`);
|
console.log(`\nFound ${testCases.length} invoice test cases (smartagent + MiniCPM-V)\n`);
|
||||||
|
|
||||||
let passedCount = 0;
|
let passedCount = 0;
|
||||||
let failedCount = 0;
|
let failedCount = 0;
|
||||||
@@ -455,6 +458,13 @@ for (const testCase of testCases) {
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
tap.test('cleanup: stop orchestrator', async () => {
|
||||||
|
if (orchestrator) {
|
||||||
|
await orchestrator.stop();
|
||||||
|
}
|
||||||
|
console.log('[Cleanup] Orchestrator stopped');
|
||||||
|
});
|
||||||
|
|
||||||
tap.test('summary', async () => {
|
tap.test('summary', async () => {
|
||||||
const totalInvoices = testCases.length;
|
const totalInvoices = testCases.length;
|
||||||
const accuracy = totalInvoices > 0 ? (passedCount / totalInvoices) * 100 : 0;
|
const accuracy = totalInvoices > 0 ? (passedCount / totalInvoices) * 100 : 0;
|
||||||
@@ -462,9 +472,10 @@ tap.test('summary', async () => {
|
|||||||
const avgTimeSec = processingTimes.length > 0 ? totalTimeMs / processingTimes.length / 1000 : 0;
|
const avgTimeSec = processingTimes.length > 0 ? totalTimeMs / processingTimes.length / 1000 : 0;
|
||||||
|
|
||||||
console.log(`\n========================================`);
|
console.log(`\n========================================`);
|
||||||
console.log(` Invoice Extraction Summary (${MODEL})`);
|
console.log(` Invoice Extraction Summary`);
|
||||||
|
console.log(` (smartagent + ${MODEL})`);
|
||||||
console.log(`========================================`);
|
console.log(`========================================`);
|
||||||
console.log(` Method: Consensus (fast + thinking)`);
|
console.log(` Method: DualAgentOrchestrator with vision`);
|
||||||
console.log(` Passed: ${passedCount}/${totalInvoices}`);
|
console.log(` Passed: ${passedCount}/${totalInvoices}`);
|
||||||
console.log(` Failed: ${failedCount}/${totalInvoices}`);
|
console.log(` Failed: ${failedCount}/${totalInvoices}`);
|
||||||
console.log(` Accuracy: ${accuracy.toFixed(1)}%`);
|
console.log(` Accuracy: ${accuracy.toFixed(1)}%`);
|
||||||
|
|||||||
792
test/test.invoices.nanonets.ts
Normal file
792
test/test.invoices.nanonets.ts
Normal file
@@ -0,0 +1,792 @@
|
|||||||
|
/**
|
||||||
|
* Invoice extraction using Nanonets-OCR2-3B + GPT-OSS 20B (sequential two-stage pipeline)
|
||||||
|
*
|
||||||
|
* Stage 1: Nanonets-OCR2-3B converts ALL document pages to markdown (stop after completion)
|
||||||
|
* Stage 2: GPT-OSS 20B extracts structured JSON from saved markdown (after Nanonets stops)
|
||||||
|
*
|
||||||
|
* This approach avoids GPU contention by running services sequentially.
|
||||||
|
*/
|
||||||
|
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||||
|
import * as fs from 'fs';
|
||||||
|
import * as path from 'path';
|
||||||
|
import { execSync } from 'child_process';
|
||||||
|
import * as os from 'os';
|
||||||
|
import { ensureNanonetsOcr, ensureMiniCpm, isContainerRunning } from './helpers/docker.js';
|
||||||
|
import { SmartAi } from '@push.rocks/smartai';
|
||||||
|
import { DualAgentOrchestrator, JsonValidatorTool } from '@push.rocks/smartagent';
|
||||||
|
|
||||||
|
const NANONETS_URL = 'http://localhost:8000/v1';
|
||||||
|
const NANONETS_MODEL = 'nanonets/Nanonets-OCR2-3B';
|
||||||
|
|
||||||
|
const OLLAMA_URL = 'http://localhost:11434';
|
||||||
|
const EXTRACTION_MODEL = 'gpt-oss:20b';
|
||||||
|
|
||||||
|
// Persistent cache directory for storing markdown between runs
|
||||||
|
const MD_CACHE_DIR = path.join(process.cwd(), '.nogit/invoices-md');
|
||||||
|
|
||||||
|
// SmartAi instance for Ollama with optimized settings
|
||||||
|
const smartAi = new SmartAi({
|
||||||
|
ollama: {
|
||||||
|
baseUrl: OLLAMA_URL,
|
||||||
|
model: EXTRACTION_MODEL,
|
||||||
|
defaultOptions: {
|
||||||
|
num_ctx: 65536, // 64K context for long invoices + reasoning chains
|
||||||
|
temperature: 0, // Deterministic for JSON extraction
|
||||||
|
repeat_penalty: 1.3, // Penalty to prevent repetition loops
|
||||||
|
think: true, // Enable thinking mode for GPT-OSS reasoning
|
||||||
|
},
|
||||||
|
defaultTimeout: 600000, // 10 minute timeout for large documents
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
// DualAgentOrchestrator for structured task execution
|
||||||
|
let orchestrator: DualAgentOrchestrator;
|
||||||
|
|
||||||
|
interface ILineItem {
|
||||||
|
position: number;
|
||||||
|
product: string;
|
||||||
|
description: string;
|
||||||
|
quantity: number;
|
||||||
|
unit_price: number;
|
||||||
|
total_price: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface IInvoice {
|
||||||
|
invoice_number: string;
|
||||||
|
invoice_date: string;
|
||||||
|
vendor_name: string;
|
||||||
|
currency: string;
|
||||||
|
net_amount: number;
|
||||||
|
vat_amount: number;
|
||||||
|
total_amount: number;
|
||||||
|
line_items: ILineItem[];
|
||||||
|
}
|
||||||
|
|
||||||
|
interface IImageData {
|
||||||
|
base64: string;
|
||||||
|
width: number;
|
||||||
|
height: number;
|
||||||
|
pageNum: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface ITestCase {
|
||||||
|
name: string;
|
||||||
|
pdfPath: string;
|
||||||
|
jsonPath: string;
|
||||||
|
markdownPath?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Nanonets-specific prompt for document OCR to markdown
|
||||||
|
const NANONETS_OCR_PROMPT = `Extract the text from the above document as if you were reading it naturally.
|
||||||
|
Return the tables in html format.
|
||||||
|
Return the equations in LaTeX representation.
|
||||||
|
If there is an image in the document and image caption is not present, add a small description inside <img></img> tag.
|
||||||
|
Watermarks should be wrapped in brackets. Ex: <watermark>OFFICIAL COPY</watermark>.
|
||||||
|
Page numbers should be wrapped in brackets. Ex: <page_number>14</page_number>.`;
|
||||||
|
|
||||||
|
// JSON extraction prompt for GPT-OSS 20B (sent AFTER the invoice text is provided)
|
||||||
|
const JSON_EXTRACTION_PROMPT = `Extract key fields from the invoice. Return ONLY valid JSON.
|
||||||
|
|
||||||
|
WHERE TO FIND DATA:
|
||||||
|
- invoice_number, invoice_date, vendor_name: Look in the HEADER section at the TOP of PAGE 1 (near "Invoice no.", "Invoice date:", "Rechnungsnummer"). Use common sense. Btw. an invoice number might start on INV* . Also be sure to not omit special chars like / - and sp on. They are part of the invoice number.
|
||||||
|
- net_amount, vat_amount, total_amount: Look in the SUMMARY section at the BOTTOM (look for "Total", "Amount due", "Gesamtbetrag")
|
||||||
|
- line_items: Look in the TABLE(s) with columns like Pos, Product, Description, Quantity, Unit Price, Price
|
||||||
|
|
||||||
|
RULES:
|
||||||
|
1. Use common sense.
|
||||||
|
2. invoice_date: Convert to YYYY-MM-DD format (e.g., "14/04/2022" → "2022-04-14")
|
||||||
|
3. vendor_name: The company issuing the invoice
|
||||||
|
4. currency: EUR, USD, or GBP
|
||||||
|
5. net_amount: Total before tax
|
||||||
|
6. vat_amount: Tax amount
|
||||||
|
7. total_amount: Final total with tax
|
||||||
|
8. line_items: Array of items from the invoice table. Skip subtotal/total rows.
|
||||||
|
|
||||||
|
JSON format:
|
||||||
|
{
|
||||||
|
"invoice_number": "X",
|
||||||
|
"invoice_date": "YYYY-MM-DD",
|
||||||
|
"vendor_name": "X",
|
||||||
|
"currency": "EUR",
|
||||||
|
"net_amount": 0,
|
||||||
|
"vat_amount": 0,
|
||||||
|
"total_amount": 0,
|
||||||
|
"line_items": [
|
||||||
|
{"position": 1, "product": "X", "description": "X", "quantity": 1, "unit_price": 0, "total_price": 0}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
Double check for valid JSON syntax. use the json validate tool.
|
||||||
|
|
||||||
|
`;
|
||||||
|
|
||||||
|
// Constants for smart batching
|
||||||
|
const PATCH_SIZE = 14; // Qwen2.5-VL uses 14x14 patches
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Estimate visual tokens for an image based on dimensions
|
||||||
|
*/
|
||||||
|
function estimateVisualTokens(width: number, height: number): number {
|
||||||
|
return Math.ceil((width * height) / (PATCH_SIZE * PATCH_SIZE));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Process images one page at a time for reliability
|
||||||
|
*/
|
||||||
|
function batchImages(images: IImageData[]): IImageData[][] {
|
||||||
|
// One page per batch for reliable processing
|
||||||
|
return images.map(img => [img]);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convert PDF to JPEG images using ImageMagick with dimension tracking
|
||||||
|
*/
|
||||||
|
function convertPdfToImages(pdfPath: string): IImageData[] {
|
||||||
|
const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
|
||||||
|
const outputPattern = path.join(tempDir, 'page-%d.jpg');
|
||||||
|
|
||||||
|
try {
|
||||||
|
execSync(
|
||||||
|
`convert -density 150 -quality 90 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
|
||||||
|
{ stdio: 'pipe' }
|
||||||
|
);
|
||||||
|
|
||||||
|
const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.jpg')).sort();
|
||||||
|
const images: IImageData[] = [];
|
||||||
|
|
||||||
|
for (let i = 0; i < files.length; i++) {
|
||||||
|
const file = files[i];
|
||||||
|
const imagePath = path.join(tempDir, file);
|
||||||
|
const imageData = fs.readFileSync(imagePath);
|
||||||
|
|
||||||
|
// Get image dimensions using identify command
|
||||||
|
const dimensions = execSync(`identify -format "%w %h" "${imagePath}"`, { encoding: 'utf-8' }).trim();
|
||||||
|
const [width, height] = dimensions.split(' ').map(Number);
|
||||||
|
|
||||||
|
images.push({
|
||||||
|
base64: imageData.toString('base64'),
|
||||||
|
width,
|
||||||
|
height,
|
||||||
|
pageNum: i + 1,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
return images;
|
||||||
|
} finally {
|
||||||
|
fs.rmSync(tempDir, { recursive: true, force: true });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convert a batch of pages to markdown using Nanonets-OCR-s
|
||||||
|
*/
|
||||||
|
async function convertBatchToMarkdown(batch: IImageData[]): Promise<string> {
|
||||||
|
const startTime = Date.now();
|
||||||
|
const pageNums = batch.map(img => img.pageNum).join(', ');
|
||||||
|
|
||||||
|
// Build content array with all images first, then the prompt
|
||||||
|
const content: Array<{ type: string; image_url?: { url: string }; text?: string }> = [];
|
||||||
|
|
||||||
|
for (const img of batch) {
|
||||||
|
content.push({
|
||||||
|
type: 'image_url',
|
||||||
|
image_url: { url: `data:image/jpeg;base64,${img.base64}` },
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add prompt with page separator instruction if multiple pages
|
||||||
|
const promptText = batch.length > 1
|
||||||
|
? `${NANONETS_OCR_PROMPT}\n\nPlease clearly separate each page's content with "--- PAGE N ---" markers, where N is the page number starting from ${batch[0].pageNum}.`
|
||||||
|
: NANONETS_OCR_PROMPT;
|
||||||
|
|
||||||
|
content.push({ type: 'text', text: promptText });
|
||||||
|
|
||||||
|
const response = await fetch(`${NANONETS_URL}/chat/completions`, {
|
||||||
|
method: 'POST',
|
||||||
|
headers: {
|
||||||
|
'Content-Type': 'application/json',
|
||||||
|
'Authorization': 'Bearer dummy',
|
||||||
|
},
|
||||||
|
body: JSON.stringify({
|
||||||
|
model: NANONETS_MODEL,
|
||||||
|
messages: [{
|
||||||
|
role: 'user',
|
||||||
|
content,
|
||||||
|
}],
|
||||||
|
max_tokens: 4096 * batch.length, // Scale output tokens with batch size
|
||||||
|
temperature: 0.0,
|
||||||
|
}),
|
||||||
|
signal: AbortSignal.timeout(600000), // 10 minute timeout for OCR
|
||||||
|
});
|
||||||
|
|
||||||
|
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
|
||||||
|
|
||||||
|
if (!response.ok) {
|
||||||
|
const errorText = await response.text();
|
||||||
|
throw new Error(`Nanonets API error: ${response.status} - ${errorText}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
const data = await response.json();
|
||||||
|
let responseContent = (data.choices?.[0]?.message?.content || '').trim();
|
||||||
|
|
||||||
|
// For single-page batches, add page marker if not present
|
||||||
|
if (batch.length === 1 && !responseContent.includes('--- PAGE')) {
|
||||||
|
responseContent = `--- PAGE ${batch[0].pageNum} ---\n${responseContent}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log(` Pages [${pageNums}]: ${responseContent.length} chars (${elapsed}s)`);
|
||||||
|
return responseContent;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convert all pages of a document to markdown using smart batching
|
||||||
|
*/
|
||||||
|
async function convertDocumentToMarkdown(images: IImageData[], docName: string): Promise<string> {
|
||||||
|
const batches = batchImages(images);
|
||||||
|
console.log(` [${docName}] Processing ${images.length} page(s) in ${batches.length} batch(es)...`);
|
||||||
|
|
||||||
|
const markdownParts: string[] = [];
|
||||||
|
|
||||||
|
for (let i = 0; i < batches.length; i++) {
|
||||||
|
const batch = batches[i];
|
||||||
|
const batchTokens = batch.reduce((sum, img) => sum + estimateVisualTokens(img.width, img.height), 0);
|
||||||
|
console.log(` Batch ${i + 1}: ${batch.length} page(s), ~${batchTokens} tokens`);
|
||||||
|
const markdown = await convertBatchToMarkdown(batch);
|
||||||
|
markdownParts.push(markdown);
|
||||||
|
}
|
||||||
|
|
||||||
|
const fullMarkdown = markdownParts.join('\n\n');
|
||||||
|
console.log(` [${docName}] Complete: ${fullMarkdown.length} chars total`);
|
||||||
|
return fullMarkdown;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Stop the Nanonets OCR container started for Stage 1.
 *
 * Best-effort shutdown: the shell-level `|| true` makes `docker stop` exit 0
 * even when the container does not exist, so the catch branch below is
 * unlikely to fire (it would require execSync itself to fail, e.g. docker
 * binary missing from PATH).
 */
function stopNanonets(): void {
  console.log(' [Docker] Stopping Nanonets container...');
  try {
    // stderr suppressed; "no such container" neutralized via `|| true`.
    execSync('docker stop nanonets-test 2>/dev/null || true', { stdio: 'pipe' });
    // NOTE(review): 5s grace period — presumably to let the daemon release
    // the port/VRAM before Stage 2 starts; confirm whether it can be shorter.
    execSync('sleep 5', { stdio: 'pipe' });
    console.log(' [Docker] Nanonets stopped');
  } catch {
    console.log(' [Docker] Nanonets was not running');
  }
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Ensure GPT-OSS 20B model is available
|
||||||
|
*/
|
||||||
|
async function ensureExtractionModel(): Promise<boolean> {
|
||||||
|
try {
|
||||||
|
const response = await fetch(`${OLLAMA_URL}/api/tags`);
|
||||||
|
if (response.ok) {
|
||||||
|
const data = await response.json();
|
||||||
|
const models = data.models || [];
|
||||||
|
if (models.some((m: { name: string }) => m.name === EXTRACTION_MODEL)) {
|
||||||
|
console.log(` [Ollama] Model available: ${EXTRACTION_MODEL}`);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log(` [Ollama] Pulling ${EXTRACTION_MODEL}...`);
|
||||||
|
const pullResponse = await fetch(`${OLLAMA_URL}/api/pull`, {
|
||||||
|
method: 'POST',
|
||||||
|
headers: { 'Content-Type': 'application/json' },
|
||||||
|
body: JSON.stringify({ name: EXTRACTION_MODEL, stream: false }),
|
||||||
|
});
|
||||||
|
|
||||||
|
return pullResponse.ok;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Parse amount from string (handles European format)
|
||||||
|
*/
|
||||||
|
function parseAmount(s: string | number | undefined): number {
|
||||||
|
if (s === undefined || s === null) return 0;
|
||||||
|
if (typeof s === 'number') return s;
|
||||||
|
const match = s.match(/([\d.,]+)/);
|
||||||
|
if (!match) return 0;
|
||||||
|
const numStr = match[1];
|
||||||
|
const normalized = numStr.includes(',') && numStr.indexOf(',') > numStr.lastIndexOf('.')
|
||||||
|
? numStr.replace(/\./g, '').replace(',', '.')
|
||||||
|
: numStr.replace(/,/g, '');
|
||||||
|
return parseFloat(normalized) || 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract invoice number from potentially verbose response
|
||||||
|
*/
|
||||||
|
function extractInvoiceNumber(s: string | undefined): string {
|
||||||
|
if (!s) return '';
|
||||||
|
let clean = s.replace(/\*\*/g, '').replace(/`/g, '').trim();
|
||||||
|
const patterns = [
|
||||||
|
/\b([A-Z]{2,3}\d{10,})\b/i,
|
||||||
|
/\b([A-Z]\d{8,})\b/i,
|
||||||
|
/\b(INV[-\s]?\d{4}[-\s]?\d+)\b/i,
|
||||||
|
/\b(\d{7,})\b/,
|
||||||
|
];
|
||||||
|
for (const pattern of patterns) {
|
||||||
|
const match = clean.match(pattern);
|
||||||
|
if (match) return match[1];
|
||||||
|
}
|
||||||
|
return clean.replace(/[^A-Z0-9\/-]/gi, '').trim() || clean;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract date (YYYY-MM-DD) from response
|
||||||
|
*/
|
||||||
|
function extractDate(s: string | undefined): string {
|
||||||
|
if (!s) return '';
|
||||||
|
let clean = s.replace(/\*\*/g, '').replace(/`/g, '').trim();
|
||||||
|
const isoMatch = clean.match(/(\d{4}-\d{2}-\d{2})/);
|
||||||
|
if (isoMatch) return isoMatch[1];
|
||||||
|
const dmyMatch = clean.match(/(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})/);
|
||||||
|
if (dmyMatch) {
|
||||||
|
return `${dmyMatch[3]}-${dmyMatch[2].padStart(2, '0')}-${dmyMatch[1].padStart(2, '0')}`;
|
||||||
|
}
|
||||||
|
return clean.replace(/[^\d-]/g, '').trim();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract currency
|
||||||
|
*/
|
||||||
|
function extractCurrency(s: string | undefined): string {
|
||||||
|
if (!s) return 'EUR';
|
||||||
|
const upper = s.toUpperCase();
|
||||||
|
if (upper.includes('EUR') || upper.includes('€')) return 'EUR';
|
||||||
|
if (upper.includes('USD') || upper.includes('$')) return 'USD';
|
||||||
|
if (upper.includes('GBP') || upper.includes('£')) return 'GBP';
|
||||||
|
return 'EUR';
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract and normalize line items array
|
||||||
|
*/
|
||||||
|
function extractLineItems(items: unknown): ILineItem[] {
|
||||||
|
if (!Array.isArray(items)) return [];
|
||||||
|
return items.map((item: Record<string, unknown>, index: number) => ({
|
||||||
|
position: typeof item.position === 'number' ? item.position : index + 1,
|
||||||
|
product: String(item.product || '').trim(),
|
||||||
|
description: String(item.description || '').trim(),
|
||||||
|
quantity: parseAmount(item.quantity as string | number) || 1,
|
||||||
|
unit_price: parseAmount(item.unit_price as string | number),
|
||||||
|
total_price: parseAmount(item.total_price as string | number),
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Try to extract valid JSON from a response string
|
||||||
|
*/
|
||||||
|
function tryExtractJson(response: string): Record<string, unknown> | null {
|
||||||
|
// Remove thinking tags
|
||||||
|
let clean = response.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
|
||||||
|
|
||||||
|
// Try code block
|
||||||
|
const codeBlockMatch = clean.match(/```(?:json)?\s*([\s\S]*?)```/);
|
||||||
|
const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : clean;
|
||||||
|
|
||||||
|
try {
|
||||||
|
return JSON.parse(jsonStr);
|
||||||
|
} catch {
|
||||||
|
// Try to find JSON object
|
||||||
|
const jsonMatch = jsonStr.match(/\{[\s\S]*\}/);
|
||||||
|
if (jsonMatch) {
|
||||||
|
try {
|
||||||
|
return JSON.parse(jsonMatch[0]);
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Extract structured invoice data from OCR markdown using the smartagent
 * DualAgentOrchestrator.
 *
 * Flow: run the extraction prompt once; parse JSON from the result (and,
 * failing that, from the last history message); if still invalid, ask the
 * orchestrator to correct its output up to `maxRetries` times. Returns the
 * normalized invoice, or null when no valid JSON was ever produced.
 * Rethrows orchestrator errors after logging them.
 *
 * NOTE(review): the first attempt only parses when `result.success` is true,
 * but the retry loop parses regardless of success — presumably intentional
 * (a "failed" retry may still contain usable JSON); confirm.
 */
async function extractInvoiceFromMarkdown(markdown: string, queryId: string): Promise<IInvoice | null> {
  const startTime = Date.now();
  const maxRetries = 2;

  console.log(` [${queryId}] Invoice: ${markdown.length} chars`);

  // Build the extraction task with document context
  const taskPrompt = `Extract the invoice data from this document and output ONLY the JSON:

${markdown}

${JSON_EXTRACTION_PROMPT}`;

  try {
    let result = await orchestrator.run(taskPrompt);
    let elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
    console.log(` [${queryId}] Status: ${result.status}, Iterations: ${result.iterations} (${elapsed}s)`);

    // Try to parse JSON from result
    let jsonData: Record<string, unknown> | null = null;
    let responseText = result.result || '';

    if (result.success && responseText) {
      jsonData = tryExtractJson(responseText);
    }

    // Fallback: try parsing from history (the final assistant message may
    // hold the JSON even when result.result does not)
    if (!jsonData && result.history?.length > 0) {
      const lastMessage = result.history[result.history.length - 1];
      if (lastMessage?.content) {
        responseText = lastMessage.content;
        jsonData = tryExtractJson(responseText);
      }
    }

    // If JSON is invalid, retry with correction request
    let retries = 0;
    while (!jsonData && retries < maxRetries) {
      retries++;
      console.log(` [${queryId}] Invalid JSON, requesting correction (retry ${retries}/${maxRetries})...`);

      result = await orchestrator.continueTask(
        `Your response was not valid JSON. Please output ONLY the JSON object with no markdown, no explanation, no thinking tags. Just the raw JSON starting with { and ending with }. Format:
{"invoice_number":"X","invoice_date":"YYYY-MM-DD","vendor_name":"X","currency":"EUR","net_amount":0,"vat_amount":0,"total_amount":0}`
      );

      elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
      console.log(` [${queryId}] Retry ${retries}: ${result.status} (${elapsed}s)`);

      // Same two-step parse as the first attempt: result first, then history.
      responseText = result.result || '';
      if (responseText) {
        jsonData = tryExtractJson(responseText);
      }

      if (!jsonData && result.history?.length > 0) {
        const lastMessage = result.history[result.history.length - 1];
        if (lastMessage?.content) {
          responseText = lastMessage.content;
          jsonData = tryExtractJson(responseText);
        }
      }
    }

    if (!jsonData) {
      console.log(` [${queryId}] Failed to get valid JSON after ${retries} retries`);
      return null;
    }

    console.log(` [${queryId}] Valid JSON extracted`);
    // Each field is run through its dedicated normalizer so verbose or
    // markdown-decorated model answers still yield clean values.
    return {
      invoice_number: extractInvoiceNumber(String(jsonData.invoice_number || '')),
      invoice_date: extractDate(String(jsonData.invoice_date || '')),
      vendor_name: String(jsonData.vendor_name || '').replace(/\*\*/g, '').replace(/`/g, '').trim(),
      currency: extractCurrency(String(jsonData.currency || '')),
      net_amount: parseAmount(jsonData.net_amount as string | number),
      vat_amount: parseAmount(jsonData.vat_amount as string | number),
      total_amount: parseAmount(jsonData.total_amount as string | number),
      line_items: extractLineItems(jsonData.line_items),
    };
  } catch (error) {
    const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
    console.log(` [${queryId}] ERROR: ${error} (${elapsed}s)`);
    throw error;
  }
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract invoice (single pass - GPT-OSS is more reliable)
|
||||||
|
*/
|
||||||
|
async function extractInvoice(markdown: string, docName: string): Promise<IInvoice> {
|
||||||
|
console.log(` [${docName}] Extracting...`);
|
||||||
|
const invoice = await extractInvoiceFromMarkdown(markdown, docName);
|
||||||
|
if (!invoice) {
|
||||||
|
return {
|
||||||
|
invoice_number: '',
|
||||||
|
invoice_date: '',
|
||||||
|
vendor_name: '',
|
||||||
|
currency: 'EUR',
|
||||||
|
net_amount: 0,
|
||||||
|
vat_amount: 0,
|
||||||
|
total_amount: 0,
|
||||||
|
line_items: [],
|
||||||
|
};
|
||||||
|
}
|
||||||
|
console.log(` [${docName}] Extracted: ${invoice.invoice_number}`);
|
||||||
|
return invoice;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Normalize date to YYYY-MM-DD
|
||||||
|
*/
|
||||||
|
function normalizeDate(dateStr: string | null): string {
|
||||||
|
if (!dateStr) return '';
|
||||||
|
if (/^\d{4}-\d{2}-\d{2}$/.test(dateStr)) return dateStr;
|
||||||
|
|
||||||
|
const monthMap: Record<string, string> = {
|
||||||
|
JAN: '01', FEB: '02', MAR: '03', APR: '04', MAY: '05', JUN: '06',
|
||||||
|
JUL: '07', AUG: '08', SEP: '09', OCT: '10', NOV: '11', DEC: '12',
|
||||||
|
};
|
||||||
|
|
||||||
|
let match = dateStr.match(/^(\d{1,2})-([A-Z]{3})-(\d{4})$/i);
|
||||||
|
if (match) {
|
||||||
|
return `${match[3]}-${monthMap[match[2].toUpperCase()] || '01'}-${match[1].padStart(2, '0')}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
match = dateStr.match(/^(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})$/);
|
||||||
|
if (match) {
|
||||||
|
return `${match[3]}-${match[2].padStart(2, '0')}-${match[1].padStart(2, '0')}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
return dateStr;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Compare extracted invoice against expected
|
||||||
|
*/
|
||||||
|
function compareInvoice(
|
||||||
|
extracted: IInvoice,
|
||||||
|
expected: IInvoice
|
||||||
|
): { match: boolean; errors: string[] } {
|
||||||
|
const errors: string[] = [];
|
||||||
|
|
||||||
|
const extNum = extracted.invoice_number?.replace(/\s/g, '').toLowerCase() || '';
|
||||||
|
const expNum = expected.invoice_number?.replace(/\s/g, '').toLowerCase() || '';
|
||||||
|
if (extNum !== expNum) {
|
||||||
|
errors.push(`invoice_number: exp "${expected.invoice_number}", got "${extracted.invoice_number}"`);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (normalizeDate(extracted.invoice_date) !== normalizeDate(expected.invoice_date)) {
|
||||||
|
errors.push(`invoice_date: exp "${expected.invoice_date}", got "${extracted.invoice_date}"`);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (Math.abs(extracted.total_amount - expected.total_amount) > 0.02) {
|
||||||
|
errors.push(`total_amount: exp ${expected.total_amount}, got ${extracted.total_amount}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (extracted.currency?.toUpperCase() !== expected.currency?.toUpperCase()) {
|
||||||
|
errors.push(`currency: exp "${expected.currency}", got "${extracted.currency}"`);
|
||||||
|
}
|
||||||
|
|
||||||
|
return { match: errors.length === 0, errors };
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Find all test cases
|
||||||
|
*/
|
||||||
|
function findTestCases(): ITestCase[] {
|
||||||
|
const testDir = path.join(process.cwd(), '.nogit/invoices');
|
||||||
|
if (!fs.existsSync(testDir)) return [];
|
||||||
|
|
||||||
|
const files = fs.readdirSync(testDir);
|
||||||
|
const testCases: ITestCase[] = [];
|
||||||
|
|
||||||
|
for (const pdf of files.filter((f) => f.endsWith('.pdf'))) {
|
||||||
|
const baseName = pdf.replace('.pdf', '');
|
||||||
|
const jsonFile = `${baseName}.json`;
|
||||||
|
if (files.includes(jsonFile)) {
|
||||||
|
testCases.push({
|
||||||
|
name: baseName,
|
||||||
|
pdfPath: path.join(testDir, pdf),
|
||||||
|
jsonPath: path.join(testDir, jsonFile),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return testCases.sort((a, b) => a.name.localeCompare(b.name));
|
||||||
|
}
|
||||||
|
|
||||||
|
// ============ TESTS ============

// Discover fixtures once at module load; Stage 2 registers one test per case.
const testCases = findTestCases();
console.log(`\nFound ${testCases.length} invoice test cases\n`);

// Ensure cache directory exists (Stage 1 writes OCR markdown here so
// re-runs can skip the expensive Nanonets conversion).
if (!fs.existsSync(MD_CACHE_DIR)) {
  fs.mkdirSync(MD_CACHE_DIR, { recursive: true });
}

// -------- STAGE 1: OCR with Nanonets --------
|
||||||
|
|
||||||
|
// Stage 1: run OCR only for invoices whose markdown is not already cached.
// The Nanonets container is started lazily — only when at least one file
// actually needs conversion.
tap.test('Stage 1: Convert invoices to markdown (with caching)', async () => {
  console.log('\n========== STAGE 1: Nanonets OCR ==========\n');

  // Check which invoices need OCR conversion
  const needsConversion: ITestCase[] = [];
  let cachedCount = 0;

  for (const tc of testCases) {
    const mdPath = path.join(MD_CACHE_DIR, `${tc.name}.md`);
    if (fs.existsSync(mdPath)) {
      cachedCount++;
      tc.markdownPath = mdPath;
      console.log(` [CACHED] ${tc.name} - using cached markdown`);
    } else {
      needsConversion.push(tc);
    }
  }

  console.log(`\n Summary: ${cachedCount} cached, ${needsConversion.length} need conversion\n`);

  if (needsConversion.length === 0) {
    console.log(' All invoices already cached, skipping Nanonets OCR\n');
    return;
  }

  // Start Nanonets only if there are files to convert
  console.log(' Starting Nanonets for OCR conversion...\n');
  const ok = await ensureNanonetsOcr();
  expect(ok).toBeTrue();

  // Convert only the invoices that need conversion
  for (const tc of needsConversion) {
    console.log(`\n === ${tc.name} ===`);

    const images = convertPdfToImages(tc.pdfPath);
    console.log(` Pages: ${images.length}`);

    const markdown = await convertDocumentToMarkdown(images, tc.name);

    // Persist to the cache so subsequent runs skip this document entirely.
    const mdPath = path.join(MD_CACHE_DIR, `${tc.name}.md`);
    fs.writeFileSync(mdPath, markdown);
    tc.markdownPath = mdPath;
    console.log(` Saved: ${mdPath}`);
  }

  console.log(`\n Stage 1 complete: ${needsConversion.length} invoices converted to markdown\n`);
});
|
||||||
|
|
||||||
|
tap.test('Stage 1: Stop Nanonets', async () => {
|
||||||
|
stopNanonets();
|
||||||
|
await new Promise(resolve => setTimeout(resolve, 3000));
|
||||||
|
expect(isContainerRunning('nanonets-test')).toBeFalse();
|
||||||
|
});
|
||||||
|
|
||||||
|
// -------- STAGE 2: Extraction with GPT-OSS 20B --------

// Stage 2 setup: bring up Ollama, ensure both models are present, then build
// the DualAgentOrchestrator (driver + guardian) used by every extraction test.
tap.test('Stage 2: Setup Ollama + GPT-OSS 20B', async () => {
  console.log('\n========== STAGE 2: GPT-OSS 20B Extraction ==========\n');

  const ollamaOk = await ensureMiniCpm();
  expect(ollamaOk).toBeTrue();

  const extractionOk = await ensureExtractionModel();
  expect(extractionOk).toBeTrue();

  // Initialize SmartAi and DualAgentOrchestrator
  console.log(' [SmartAgent] Starting SmartAi...');
  await smartAi.start();

  console.log(' [SmartAgent] Creating DualAgentOrchestrator with native tool calling...');
  // Assigned to the module-level `orchestrator` so the per-invoice tests and
  // the Summary cleanup can reach it.
  orchestrator = new DualAgentOrchestrator({
    smartAiInstance: smartAi,
    defaultProvider: 'ollama',
    // Guardian policy: this workload is pure text-to-JSON, so everything is
    // approved up front.
    guardianPolicyPrompt: `
JSON EXTRACTION POLICY:
- APPROVE all JSON extraction tasks
- APPROVE all json.validate tool calls
- This is a read-only operation - no file system or network access needed
- The task is to extract structured data from document text
`,
    driverSystemMessage: `You are a precise JSON extraction assistant. Your only job is to extract invoice data from documents.

CRITICAL RULES:
1. Output valid JSON with the exact format requested
2. If you cannot find a value, use empty string "" or 0 for numbers
3. Before completing, validate your JSON using the json_validate tool
4. Only complete after validation passes`,
    maxIterations: 5,
    // Enable native tool calling for GPT-OSS (uses Harmony format instead of XML)
    useNativeToolCalling: true,
    // Enable streaming for real-time progress visibility
    onToken: (token, source) => {
      if (source === 'driver') {
        process.stdout.write(token);
      }
    },
    // Enable progress events to see tool calls
    onProgress: (event: { logMessage: string }) => {
      console.log(event.logMessage);
    },
  });

  // Register JsonValidatorTool for self-validation
  orchestrator.registerTool(new JsonValidatorTool());

  console.log(' [SmartAgent] Starting orchestrator...');
  await orchestrator.start();
  console.log(' [SmartAgent] Ready for extraction');
});
|
||||||
|
|
||||||
|
// Pass/fail tallies and timings shared between the per-invoice tests below
// and the final Summary test.
let passedCount = 0;
let failedCount = 0;
const processingTimes: number[] = [];

// Register one Stage 2 extraction test per discovered invoice fixture.
for (const tc of testCases) {
  tap.test(`Stage 2: Extract ${tc.name}`, async () => {
    const expected: IInvoice = JSON.parse(fs.readFileSync(tc.jsonPath, 'utf-8'));
    console.log(`\n === ${tc.name} ===`);
    console.log(` Expected: ${expected.invoice_number} | ${expected.invoice_date} | ${expected.total_amount} ${expected.currency}`);

    const startTime = Date.now();

    // Stage 2 consumes the markdown produced (and cached) by Stage 1.
    const mdPath = path.join(MD_CACHE_DIR, `${tc.name}.md`);
    if (!fs.existsSync(mdPath)) {
      throw new Error(`Markdown not found: ${mdPath}. Run Stage 1 first.`);
    }
    const markdown = fs.readFileSync(mdPath, 'utf-8');
    console.log(` Markdown: ${markdown.length} chars`);

    const extracted = await extractInvoice(markdown, tc.name);

    const elapsedMs = Date.now() - startTime;
    processingTimes.push(elapsedMs);

    console.log(` Extracted: ${extracted.invoice_number} | ${extracted.invoice_date} | ${extracted.total_amount} ${extracted.currency} | ${extracted.line_items.length} items`);

    const result = compareInvoice(extracted, expected);

    if (result.match) {
      passedCount++;
      console.log(` Result: MATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
    } else {
      failedCount++;
      console.log(` Result: MISMATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
      result.errors.forEach(e => console.log(` - ${e}`));
    }

    expect(result.match).toBeTrue();
  });
}
|
||||||
|
|
||||||
|
// Final test: release the orchestrator/SmartAi resources and print an
// accuracy + timing report over all per-invoice results.
tap.test('Summary', async () => {
  // Cleanup orchestrator and SmartAi
  if (orchestrator) {
    console.log('\n [SmartAgent] Stopping orchestrator...');
    await orchestrator.stop();
  }
  console.log(' [SmartAgent] Stopping SmartAi...');
  await smartAi.stop();

  // Guard against division by zero when no fixtures were found.
  const totalInvoices = testCases.length;
  const accuracy = totalInvoices > 0 ? (passedCount / totalInvoices) * 100 : 0;
  const totalTimeMs = processingTimes.reduce((a, b) => a + b, 0);
  const avgTimeSec = processingTimes.length > 0 ? totalTimeMs / processingTimes.length / 1000 : 0;

  console.log(`\n========================================`);
  console.log(` Invoice Summary (Nanonets + GPT-OSS 20B)`);
  console.log(`========================================`);
  console.log(` Stage 1: Nanonets-OCR-s (doc -> md)`);
  console.log(` Stage 2: GPT-OSS 20B + SmartAgent (md -> JSON)`);
  console.log(` Passed: ${passedCount}/${totalInvoices}`);
  console.log(` Failed: ${failedCount}/${totalInvoices}`);
  console.log(` Accuracy: ${accuracy.toFixed(1)}%`);
  console.log(`----------------------------------------`);
  console.log(` Total time: ${(totalTimeMs / 1000).toFixed(1)}s`);
  console.log(` Avg per inv: ${avgTimeSec.toFixed(1)}s`);
  console.log(`========================================\n`);
  console.log(` Cache location: ${MD_CACHE_DIR}\n`);
});
|
||||||
|
|
||||||
|
// Kick off the tap runner; the resulting promise is this module's default export.
export default tap.start();
|
||||||
Reference in New Issue
Block a user