Compare commits

11 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 177e87d3b8 | |
| | 17ea7717eb | |
| | bd5bb5d874 | |
| | d91df70fff | |
| | d6c97a9625 | |
| | 76b21f1f7b | |
| | 4c368dfef9 | |
| | e76768da55 | |
| | 63d72a52c9 | |
| | 386122c8c7 | |
| | 7c8f10497e | |
@@ -1,27 +0,0 @@
# MiniCPM-V 4.5 CPU Variant
# Vision-Language Model optimized for CPU-only inference
FROM ollama/ollama:latest

LABEL maintainer="Task Venture Capital GmbH <hello@task.vc>"
LABEL description="MiniCPM-V 4.5 Vision-Language Model - CPU optimized (GGUF)"
LABEL org.opencontainers.image.source="https://code.foss.global/host.today/ht-docker-ai"

# Environment configuration for CPU-only mode
ENV MODEL_NAME="minicpm-v"
ENV OLLAMA_HOST="0.0.0.0"
ENV OLLAMA_ORIGINS="*"
# Disable GPU usage for CPU-only variant
ENV CUDA_VISIBLE_DEVICES=""

# Copy and setup entrypoint
COPY image_support_files/minicpm45v_entrypoint.sh /usr/local/bin/docker-entrypoint.sh
RUN chmod +x /usr/local/bin/docker-entrypoint.sh

# Expose Ollama API port
EXPOSE 11434

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=120s --retries=3 \
    CMD curl -f http://localhost:11434/api/tags || exit 1

ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
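For orientation, here is a minimal client-side readiness probe that mirrors the HEALTHCHECK above; it assumes port 11434 is published to localhost (for example via `docker run -p 11434:11434 ...`), which the Dockerfile itself does not guarantee:

import time
import httpx

def wait_for_ollama(base_url: str = "http://localhost:11434", timeout_s: float = 120.0) -> list:
    """Poll /api/tags (the same endpoint the HEALTHCHECK curls) until it answers."""
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        try:
            resp = httpx.get(f"{base_url}/api/tags", timeout=10.0)
            resp.raise_for_status()
            # /api/tags lists the models available to this Ollama instance
            return [m["name"] for m in resp.json().get("models", [])]
        except httpx.HTTPError:
            time.sleep(3.0)
    raise TimeoutError("Ollama did not become healthy within the timeout")

print(wait_for_ollama())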
@@ -1,57 +0,0 @@
# PaddleOCR-VL CPU Variant
# Vision-Language Model for document parsing using transformers (slower, no GPU required)
FROM python:3.11-slim-bookworm

LABEL maintainer="Task Venture Capital GmbH <hello@task.vc>"
LABEL description="PaddleOCR-VL 0.9B CPU - Vision-Language Model for document parsing"
LABEL org.opencontainers.image.source="https://code.foss.global/host.today/ht-docker-ai"

# Environment configuration
ENV PYTHONUNBUFFERED=1
ENV HF_HOME=/root/.cache/huggingface
ENV CUDA_VISIBLE_DEVICES=""
ENV SERVER_PORT=8000
ENV SERVER_HOST=0.0.0.0

# Set working directory
WORKDIR /app

# Install system dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    libgl1-mesa-glx \
    libglib2.0-0 \
    libgomp1 \
    curl \
    git \
    && rm -rf /var/lib/apt/lists/*

# Install Python dependencies
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir \
        torch==2.5.1 torchvision==0.20.1 --index-url https://download.pytorch.org/whl/cpu && \
    pip install --no-cache-dir \
        transformers \
        accelerate \
        safetensors \
        pillow \
        fastapi \
        uvicorn[standard] \
        python-multipart \
        httpx \
        protobuf \
        sentencepiece \
        einops

# Copy server files
COPY image_support_files/paddleocr_vl_server.py /app/paddleocr_vl_server.py
COPY image_support_files/paddleocr_vl_entrypoint.sh /usr/local/bin/paddleocr-vl-cpu-entrypoint.sh
RUN chmod +x /usr/local/bin/paddleocr-vl-cpu-entrypoint.sh

# Expose API port
EXPOSE 8000

# Health check (longer start-period for CPU + model download)
HEALTHCHECK --interval=30s --timeout=10s --start-period=600s --retries=3 \
    CMD curl -f http://localhost:8000/health || exit 1

ENTRYPOINT ["/usr/local/bin/paddleocr-vl-cpu-entrypoint.sh"]
@@ -1,90 +0,0 @@
# PaddleOCR-VL Full Pipeline (PP-DocLayoutV2 + PaddleOCR-VL + Structured Output)
# Self-contained GPU image with complete document parsing pipeline
FROM nvidia/cuda:12.4.0-devel-ubuntu22.04

LABEL maintainer="Task Venture Capital GmbH <hello@task.vc>"
LABEL description="PaddleOCR-VL Full Pipeline - Layout Detection + VL Recognition + JSON/Markdown Output"
LABEL org.opencontainers.image.source="https://code.foss.global/host.today/ht-docker-ai"

# Environment configuration
ENV DEBIAN_FRONTEND=noninteractive
ENV PYTHONUNBUFFERED=1
ENV HF_HOME=/root/.cache/huggingface
ENV PADDLEOCR_HOME=/root/.paddleocr
ENV SERVER_PORT=8000
ENV SERVER_HOST=0.0.0.0
ENV VLM_PORT=8080

# Set working directory
WORKDIR /app

# Install system dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    python3.11 \
    python3.11-venv \
    python3.11-dev \
    python3-pip \
    libgl1-mesa-glx \
    libglib2.0-0 \
    libgomp1 \
    libsm6 \
    libxext6 \
    libxrender1 \
    curl \
    git \
    wget \
    && rm -rf /var/lib/apt/lists/* \
    && update-alternatives --install /usr/bin/python python /usr/bin/python3.11 1 \
    && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1

# Create and activate virtual environment
RUN python -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# Upgrade pip
RUN pip install --no-cache-dir --upgrade pip setuptools wheel

# Install PaddlePaddle GPU (CUDA 12.x)
RUN pip install --no-cache-dir \
    paddlepaddle-gpu==3.2.1 \
    --extra-index-url https://www.paddlepaddle.org.cn/packages/stable/cu126/

# Install PaddleOCR with doc-parser (includes PP-DocLayoutV2)
RUN pip install --no-cache-dir \
    "paddleocr[doc-parser]" \
    safetensors

# Install PyTorch with CUDA support
RUN pip install --no-cache-dir \
    torch==2.5.1 \
    torchvision \
    --index-url https://download.pytorch.org/whl/cu124

# Install transformers for PaddleOCR-VL inference (no vLLM - use local inference)
# PaddleOCR-VL requires transformers>=4.55.0 for use_kernel_forward_from_hub
RUN pip install --no-cache-dir \
    "transformers>=4.55.0" \
    accelerate \
    hf-kernels

# Install our API server dependencies
RUN pip install --no-cache-dir \
    fastapi \
    uvicorn[standard] \
    python-multipart \
    httpx \
    pillow

# Copy server files
COPY image_support_files/paddleocr_vl_full_server.py /app/server.py
COPY image_support_files/paddleocr_vl_full_entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +x /usr/local/bin/entrypoint.sh

# Expose ports (8000 = API, 8080 = internal VLM server)
EXPOSE 8000

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=600s --retries=3 \
    CMD curl -f http://localhost:8000/health || exit 1

ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
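A sketch of how a client could have called this container's /parse endpoint, based on the ParseRequest/ParseResponse models in the paddleocr_vl_full_server.py source further down; the localhost:8000 mapping and the page.png filename are illustrative assumptions:

import base64
import httpx

with open("page.png", "rb") as f:  # page.png is a placeholder input image
    payload = {
        "image": base64.b64encode(f.read()).decode("ascii"),
        "output_format": "markdown",  # "json" and "html" are also accepted
    }

resp = httpx.post("http://localhost:8000/parse", json=payload, timeout=600.0)
resp.raise_for_status()
body = resp.json()
print(body["processing_time"], body["result"]["markdown"])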
@@ -1,71 +0,0 @@
# PaddleOCR-VL GPU Variant (Transformers-based, not vLLM)
# Vision-Language Model for document parsing using transformers with CUDA
FROM nvidia/cuda:12.4.0-runtime-ubuntu22.04

LABEL maintainer="Task Venture Capital GmbH <hello@task.vc>"
LABEL description="PaddleOCR-VL 0.9B GPU - Vision-Language Model using transformers"
LABEL org.opencontainers.image.source="https://code.foss.global/host.today/ht-docker-ai"

# Environment configuration
ENV DEBIAN_FRONTEND=noninteractive
ENV PYTHONUNBUFFERED=1
ENV HF_HOME=/root/.cache/huggingface
ENV SERVER_PORT=8000
ENV SERVER_HOST=0.0.0.0

# Set working directory
WORKDIR /app

# Install system dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    python3.11 \
    python3.11-venv \
    python3.11-dev \
    python3-pip \
    libgl1-mesa-glx \
    libglib2.0-0 \
    libgomp1 \
    curl \
    git \
    && rm -rf /var/lib/apt/lists/* \
    && update-alternatives --install /usr/bin/python python /usr/bin/python3.11 1 \
    && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1

# Create and activate virtual environment
RUN python -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# Install PyTorch with CUDA support
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir \
        torch==2.5.1 \
        torchvision \
        --index-url https://download.pytorch.org/whl/cu124

# Install Python dependencies (transformers-based, not vLLM)
RUN pip install --no-cache-dir \
    transformers \
    accelerate \
    safetensors \
    pillow \
    fastapi \
    uvicorn[standard] \
    python-multipart \
    httpx \
    protobuf \
    sentencepiece \
    einops

# Copy server files (same as CPU variant - it auto-detects CUDA)
COPY image_support_files/paddleocr_vl_server.py /app/paddleocr_vl_server.py
COPY image_support_files/paddleocr_vl_entrypoint.sh /usr/local/bin/paddleocr-vl-entrypoint.sh
RUN chmod +x /usr/local/bin/paddleocr-vl-entrypoint.sh

# Expose API port
EXPOSE 8000

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=300s --retries=3 \
    CMD curl -f http://localhost:8000/health || exit 1

ENTRYPOINT ["/usr/local/bin/paddleocr-vl-entrypoint.sh"]
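Both transformer-based variants served the same OpenAI-style API (see paddleocr_vl_server.py below). A sketch of a table-recognition request, assuming the container is reachable on localhost:8000; statement.png is a placeholder:

import base64
import httpx

image_b64 = base64.b64encode(open("statement.png", "rb").read()).decode("ascii")
payload = {
    "model": "paddleocr-vl",
    "messages": [{
        "role": "user",
        "content": [
            {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_b64}"}},
            # "Table Recognition:" selects the table task per the server's TASK_PROMPTS
            {"type": "text", "text": "Table Recognition:"},
        ],
    }],
}
resp = httpx.post("http://localhost:8000/v1/chat/completions", json=payload, timeout=600.0)
print(resp.json()["choices"][0]["message"]["content"])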
changelog.md (+45 lines)
@@ -1,5 +1,50 @@
 # Changelog
 
+## 2026-01-18 - 1.13.1 - fix(image_support_files)
+remove PaddleOCR-VL server scripts from image_support_files
+
+- Deleted files: image_support_files/paddleocr_vl_full_server.py (approx. 636 lines) and image_support_files/paddleocr_vl_server.py (approx. 465 lines)
+- Cleanup/removal of legacy PaddleOCR-VL FastAPI server implementations — may affect users who relied on these local scripts
+
+## 2026-01-18 - 1.13.0 - feat(tests)
+revamp tests and remove legacy Dockerfiles: adopt JSON/consensus workflows, switch MiniCPM model, and delete deprecated Docker/test variants
+
+- Removed multiple Dockerfiles and related entrypoints for MiniCPM and PaddleOCR-VL (cpu/gpu/full), cleaning up legacy image recipes.
+- Pruned many older test files (combined, ministral3, paddleocr-vl, and several invoice/test variants) to consolidate the test suite.
+- Updated bank statement MiniCPM test: now uses MODEL='openbmb/minicpm-v4.5:q8_0', JSON per-page extraction prompt, consensus retry logic, expanded logging, and stricter result matching.
+- Updated invoice MiniCPM test: switched to a consensus flow (fast JSON pass + thinking pass), increased PDF conversion quality, endpoints migrated to chat-style API calls with image-in-message payloads, and improved finalization logic.
+- API usage changed from /api/generate to /api/chat with message-based payloads and embedded images — CI and local test runners will need model availability and possible pipeline adjustments.
+
+## 2026-01-18 - 1.12.0 - feat(tests)
+switch vision tests to multi-query extraction (count then per-row/field queries) and add logging/summaries
+
+- Replace streaming + consensus pipeline with multi-query approach: count rows per page, then query each transaction/field individually (batched parallel queries).
+- Introduce unified helpers (queryVision / queryField / getTransaction / countTransactions) and simplify Ollama requests (stream:false, reduced num_predict, /no_think prompts).
+- Improve parsing and normalization for amounts (European formats), invoice numbers, dates and currency extraction.
+- Adjust model checks to look for generic 'minicpm' and update test names/messages; add pass/fail counters and a summary test output.
+- Remove previous consensus voting and streaming JSON accumulation logic, and add immediate per-transaction logging and batching.
+
+## 2026-01-18 - 1.11.0 - feat(vision)
+process pages separately and make Qwen3-VL vision extraction more robust; add per-page parsing, safer JSON handling, reduced token usage, and multi-query invoice extraction
+
+- Bank statements: split extraction into extractTransactionsFromPage and sequentially process pages to avoid thinking-token exhaustion
+- Bank statements: reduced num_predict from 8000 to 4000, send single image per request, added per-page logging and non-throwing handling for empty or non-JSON responses
+- Bank statements: catch JSON.parse errors and return empty array instead of throwing
+- Invoices: introduced queryField to request single values and perform multiple simple queries (reduces model thinking usage)
+- Invoices: reduced num_predict for invoice queries from 4000 to 500 and parse amounts robustly (handles European formats like 1.234,56)
+- Invoices: normalize currency to uppercase 3-letter code, return safe defaults (empty strings / 0) instead of nulls, and parse net/vat/total with fallbacks
+- General: simplified Ollama API error messages to avoid including response body content in thrown errors
+
+## 2026-01-18 - 1.10.1 - fix(tests)
+improve Qwen3-VL invoice extraction test by switching to non-stream API, adding model availability/pull checks, simplifying response parsing, and tightening model options
+
+- Replaced streaming reader logic with direct JSON parsing of the /api/chat response
+- Added ensureQwen3Vl() to check and pull the Qwen3-VL:8b model from Ollama
+- Switched to ensureMiniCpm() to verify Ollama service is running before model checks
+- Use /no_think prompt for direct JSON output and set temperature to 0.0 and num_predict to 512
+- Removed retry loop and streaming parsing; improved error messages to include response body
+- Updated logging and test setup messages for clarity
+
 ## 2026-01-18 - 1.10.0 - feat(vision)
 add Qwen3-VL vision model support with Dockerfile and tests; improve invoice OCR conversion and prompts; simplify extraction flow by removing consensus voting
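The 1.13.0 entry above migrates the tests from Ollama's /api/generate to /api/chat. A minimal sketch of the new message-based payload with an embedded image, under the assumption that Ollama listens on localhost:11434; the base64 string is a placeholder:

import httpx

payload = {
    "model": "openbmb/minicpm-v4.5:q8_0",  # model tag taken from the changelog entry
    "stream": False,
    "messages": [{
        "role": "user",
        "content": "Extract every transaction on this page as a JSON array.",
        "images": ["<base64-encoded page image>"],  # placeholder
    }],
}
resp = httpx.post("http://localhost:11434/api/chat", json=payload, timeout=600.0)
print(resp.json()["message"]["content"])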
@@ -1,19 +0,0 @@
#!/bin/bash
set -e

echo "==================================="
echo "PaddleOCR-VL Server (CPU)"
echo "==================================="

HOST="${SERVER_HOST:-0.0.0.0}"
PORT="${SERVER_PORT:-8000}"

echo "Host: ${HOST}"
echo "Port: ${PORT}"
echo "Device: CPU (no GPU)"
echo ""

echo "Starting PaddleOCR-VL CPU server..."
echo "==================================="

exec python /app/paddleocr_vl_server.py
@@ -1,12 +0,0 @@
#!/bin/bash
set -e

echo "Starting PaddleOCR-VL Full Pipeline Server (Transformers backend)..."

# Environment
SERVER_PORT=${SERVER_PORT:-8000}
SERVER_HOST=${SERVER_HOST:-0.0.0.0}

# Start our API server directly (no vLLM - uses local transformers inference)
echo "Starting API server on port $SERVER_PORT..."
exec python /app/server.py
@@ -1,636 +0,0 @@
#!/usr/bin/env python3
"""
PaddleOCR-VL Full Pipeline API Server (Transformers backend)

Provides REST API for document parsing using:
- PP-DocLayoutV2 for layout detection
- PaddleOCR-VL (transformers) for recognition
- Structured JSON/Markdown output
"""

import os
import io
import re
import base64
import logging
import tempfile
import time
import json
from typing import Optional, List, Union
from pathlib import Path

from fastapi import FastAPI, HTTPException, UploadFile, File, Form
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from PIL import Image
import torch

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Environment configuration
SERVER_HOST = os.environ.get('SERVER_HOST', '0.0.0.0')
SERVER_PORT = int(os.environ.get('SERVER_PORT', '8000'))
MODEL_NAME = "PaddlePaddle/PaddleOCR-VL"

# Device configuration
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
logger.info(f"Using device: {DEVICE}")

# Task prompts
TASK_PROMPTS = {
    "ocr": "OCR:",
    "table": "Table Recognition:",
    "formula": "Formula Recognition:",
    "chart": "Chart Recognition:",
}

# Initialize FastAPI app
app = FastAPI(
    title="PaddleOCR-VL Full Pipeline Server",
    description="Document parsing with PP-DocLayoutV2 + PaddleOCR-VL (transformers)",
    version="1.0.0"
)

# Global model instances
vl_model = None
vl_processor = None
layout_model = None


def load_vl_model():
    """Load the PaddleOCR-VL model for element recognition"""
    global vl_model, vl_processor

    if vl_model is not None:
        return

    logger.info(f"Loading PaddleOCR-VL model: {MODEL_NAME}")
    from transformers import AutoModelForCausalLM, AutoProcessor

    vl_processor = AutoProcessor.from_pretrained(MODEL_NAME, trust_remote_code=True)

    if DEVICE == "cuda":
        vl_model = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            trust_remote_code=True,
            torch_dtype=torch.bfloat16,
        ).to(DEVICE).eval()
    else:
        vl_model = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            trust_remote_code=True,
            torch_dtype=torch.float32,
            low_cpu_mem_usage=True,
        ).eval()

    logger.info("PaddleOCR-VL model loaded successfully")


def load_layout_model():
    """Load the LayoutDetection model for layout detection"""
    global layout_model

    if layout_model is not None:
        return

    try:
        logger.info("Loading LayoutDetection model (PP-DocLayout_plus-L)...")
        from paddleocr import LayoutDetection

        layout_model = LayoutDetection()
        logger.info("LayoutDetection model loaded successfully")
    except Exception as e:
        logger.warning(f"Could not load LayoutDetection: {e}")
        logger.info("Falling back to VL-only mode (no layout detection)")


def recognize_element(image: Image.Image, task: str = "ocr") -> str:
    """Recognize a single element using PaddleOCR-VL"""
    load_vl_model()

    prompt = TASK_PROMPTS.get(task, TASK_PROMPTS["ocr"])

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image},
                {"type": "text", "text": prompt},
            ]
        }
    ]

    inputs = vl_processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )

    if DEVICE == "cuda":
        inputs = {k: v.to(DEVICE) for k, v in inputs.items()}

    with torch.inference_mode():
        outputs = vl_model.generate(
            **inputs,
            max_new_tokens=4096,
            do_sample=False,
            use_cache=True
        )

    response = vl_processor.batch_decode(outputs, skip_special_tokens=True)[0]

    # Extract only the assistant's response content
    # The response format is: "User: <prompt>\nAssistant: <content>"
    # We want to extract just the content after "Assistant:"
    if "Assistant:" in response:
        parts = response.split("Assistant:")
        if len(parts) > 1:
            response = parts[-1].strip()
    elif "assistant:" in response.lower():
        # Case-insensitive fallback
        import re
        match = re.split(r'[Aa]ssistant:', response)
        if len(match) > 1:
            response = match[-1].strip()

    return response


def detect_layout(image: Image.Image) -> List[dict]:
    """Detect layout regions in the image"""
    load_layout_model()

    if layout_model is None:
        # No layout model - return a single region covering the whole image
        return [{
            "type": "text",
            "bbox": [0, 0, image.width, image.height],
            "score": 1.0
        }]

    # Save image to temp file
    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
        image.save(tmp.name, "PNG")
        tmp_path = tmp.name

    try:
        results = layout_model.predict(tmp_path)
        regions = []

        for res in results:
            # LayoutDetection returns boxes in 'boxes' key
            for box in res.get("boxes", []):
                coord = box.get("coordinate", [0, 0, image.width, image.height])
                # Convert numpy floats to regular floats
                bbox = [float(c) for c in coord]
                regions.append({
                    "type": box.get("label", "text"),
                    "bbox": bbox,
                    "score": float(box.get("score", 1.0))
                })

        # Sort regions by vertical position (top to bottom)
        regions.sort(key=lambda r: r["bbox"][1])

        return regions if regions else [{
            "type": "text",
            "bbox": [0, 0, image.width, image.height],
            "score": 1.0
        }]

    finally:
        os.unlink(tmp_path)


def process_document(image: Image.Image) -> dict:
    """Process a document through the full pipeline"""
    logger.info(f"Processing document: {image.size}")

    # Step 1: Detect layout
    regions = detect_layout(image)
    logger.info(f"Detected {len(regions)} layout regions")

    # Step 2: Recognize each region
    blocks = []
    for i, region in enumerate(regions):
        region_type = region["type"].lower()
        bbox = region["bbox"]

        # Crop region from image
        x1, y1, x2, y2 = [int(c) for c in bbox]
        region_image = image.crop((x1, y1, x2, y2))

        # Determine task based on region type
        if "table" in region_type:
            task = "table"
        elif "formula" in region_type or "math" in region_type:
            task = "formula"
        elif "chart" in region_type or "figure" in region_type:
            task = "chart"
        else:
            task = "ocr"

        # Recognize the region
        try:
            content = recognize_element(region_image, task)
            blocks.append({
                "index": i,
                "type": region_type,
                "bbox": bbox,
                "content": content,
                "task": task
            })
            logger.info(f" Region {i} ({region_type}): {len(content)} chars")
        except Exception as e:
            logger.error(f" Region {i} error: {e}")
            blocks.append({
                "index": i,
                "type": region_type,
                "bbox": bbox,
                "content": "",
                "error": str(e)
            })

    return {"blocks": blocks, "image_size": list(image.size)}


def result_to_markdown(result: dict) -> str:
    """Convert result to Markdown format with structural hints for LLM processing.

    Adds positional and type-based formatting to help downstream LLMs
    understand document structure:
    - Tables are marked with **[TABLE]** prefix
    - Header zone content (top 15%) is bolded
    - Footer zone content (bottom 15%) is separated with horizontal rule
    - Titles are formatted as # headers
    - Figures/charts are marked with *[Figure: ...]*
    """
    lines = []
    image_height = result.get("image_size", [0, 1000])[1]

    for block in result.get("blocks", []):
        block_type = block.get("type", "text").lower()
        content = block.get("content", "").strip()
        bbox = block.get("bbox", [])

        if not content:
            continue

        # Determine position zone (top 15%, middle, bottom 15%)
        y_pos = bbox[1] if bbox and len(bbox) > 1 else 0
        y_end = bbox[3] if bbox and len(bbox) > 3 else y_pos
        is_header_zone = y_pos < image_height * 0.15
        is_footer_zone = y_end > image_height * 0.85

        # Format based on type and position
        if "table" in block_type:
            lines.append(f"\n**[TABLE]**\n{content}\n")
        elif "title" in block_type:
            lines.append(f"# {content}")
        elif "formula" in block_type or "math" in block_type:
            lines.append(f"\n$$\n{content}\n$$\n")
        elif "figure" in block_type or "chart" in block_type:
            lines.append(f"*[Figure: {content}]*")
        elif is_header_zone:
            lines.append(f"**{content}**")
        elif is_footer_zone:
            lines.append(f"---\n{content}")
        else:
            lines.append(content)

    return "\n\n".join(lines)


def parse_markdown_table(content: str) -> str:
    """Convert table content to HTML table.

    Handles:
    - PaddleOCR-VL format: <fcel>cell<lcel>cell<nl> (detected by <fcel> tags)
    - Pipe-delimited tables: | Header | Header |
    - Separator rows: |---|---|
    - Returns HTML <table> structure
    """
    content_stripped = content.strip()

    # Check for PaddleOCR-VL table format (<fcel>, <lcel>, <ecel>, <nl>)
    if '<fcel>' in content_stripped or '<nl>' in content_stripped:
        return parse_paddleocr_table(content_stripped)

    lines = content_stripped.split('\n')
    if not lines:
        return f'<pre>{content}</pre>'

    # Check if it looks like a markdown table
    if not any('|' in line for line in lines):
        return f'<pre>{content}</pre>'

    html_rows = []
    is_header = True

    for line in lines:
        line = line.strip()
        if not line or line.startswith('|') == False and '|' not in line:
            continue

        # Skip separator rows (|---|---|)
        if re.match(r'^[\|\s\-:]+$', line):
            is_header = False
            continue

        # Parse cells
        cells = [c.strip() for c in line.split('|')]
        cells = [c for c in cells if c]  # Remove empty from edges

        if is_header:
            row = '<tr>' + ''.join(f'<th>{c}</th>' for c in cells) + '</tr>'
            html_rows.append(f'<thead>{row}</thead>')
            is_header = False
        else:
            row = '<tr>' + ''.join(f'<td>{c}</td>' for c in cells) + '</tr>'
            html_rows.append(row)

    if html_rows:
        # Wrap body rows in tbody
        header = html_rows[0] if '<thead>' in html_rows[0] else ''
        body_rows = [r for r in html_rows if '<thead>' not in r]
        body = f'<tbody>{"".join(body_rows)}</tbody>' if body_rows else ''
        return f'<table>{header}{body}</table>'

    return f'<pre>{content}</pre>'


def parse_paddleocr_table(content: str) -> str:
    """Convert PaddleOCR-VL table format to HTML table.

    PaddleOCR-VL uses:
    - <fcel> = first cell in a row
    - <lcel> = subsequent cells
    - <ecel> = empty cell
    - <nl> = row separator (newline)

    Example input:
    <fcel>Header1<lcel>Header2<nl><fcel>Value1<lcel>Value2<nl>
    """
    # Split into rows by <nl>
    rows_raw = re.split(r'<nl>', content)
    html_rows = []
    is_first_row = True

    for row_content in rows_raw:
        row_content = row_content.strip()
        if not row_content:
            continue

        # Extract cells: split by <fcel>, <lcel>, or <ecel>
        # Each cell is the text between these markers
        cells = []

        # Pattern to match cell markers and capture content
        # Content is everything between markers
        parts = re.split(r'<fcel>|<lcel>|<ecel>', row_content)
        for part in parts:
            part = part.strip()
            if part:
                cells.append(part)

        if not cells:
            continue

        # First row is header
        if is_first_row:
            row_html = '<tr>' + ''.join(f'<th>{c}</th>' for c in cells) + '</tr>'
            html_rows.append(f'<thead>{row_html}</thead>')
            is_first_row = False
        else:
            row_html = '<tr>' + ''.join(f'<td>{c}</td>' for c in cells) + '</tr>'
            html_rows.append(row_html)

    if html_rows:
        header = html_rows[0] if '<thead>' in html_rows[0] else ''
        body_rows = [r for r in html_rows if '<thead>' not in r]
        body = f'<tbody>{"".join(body_rows)}</tbody>' if body_rows else ''
        return f'<table>{header}{body}</table>'

    return f'<pre>{content}</pre>'


def result_to_html(result: dict) -> str:
    """Convert result to semantic HTML for optimal LLM processing.

    Uses semantic HTML5 tags with position metadata as data-* attributes.
    Markdown tables are converted to proper HTML <table> tags for
    unambiguous parsing by downstream LLMs.
    """
    parts = []
    image_height = result.get("image_size", [0, 1000])[1]

    parts.append('<!DOCTYPE html><html><body>')

    for block in result.get("blocks", []):
        block_type = block.get("type", "text").lower()
        content = block.get("content", "").strip()
        bbox = block.get("bbox", [])

        if not content:
            continue

        # Position metadata
        y_pos = bbox[1] / image_height if bbox and len(bbox) > 1 else 0
        data_attrs = f'data-type="{block_type}" data-y="{y_pos:.2f}"'

        # Format based on type
        if "table" in block_type:
            table_html = parse_markdown_table(content)
            parts.append(f'<section {data_attrs} class="table-region">{table_html}</section>')
        elif "title" in block_type:
            parts.append(f'<h1 {data_attrs}>{content}</h1>')
        elif "formula" in block_type or "math" in block_type:
            parts.append(f'<div {data_attrs} class="formula"><code>{content}</code></div>')
        elif "figure" in block_type or "chart" in block_type:
            parts.append(f'<figure {data_attrs}><figcaption>{content}</figcaption></figure>')
        elif y_pos < 0.15:
            parts.append(f'<header {data_attrs}><strong>{content}</strong></header>')
        elif y_pos > 0.85:
            parts.append(f'<footer {data_attrs}>{content}</footer>')
        else:
            parts.append(f'<p {data_attrs}>{content}</p>')

    parts.append('</body></html>')
    return '\n'.join(parts)


# Request/Response models
class ParseRequest(BaseModel):
    image: str  # base64 encoded image
    output_format: Optional[str] = "json"


class ParseResponse(BaseModel):
    success: bool
    format: str
    result: Union[dict, str]
    processing_time: float
    error: Optional[str] = None


def decode_image(image_source: str) -> Image.Image:
    """Decode image from base64 or data URL"""
    if image_source.startswith("data:"):
        header, data = image_source.split(",", 1)
        image_data = base64.b64decode(data)
    else:
        image_data = base64.b64decode(image_source)

    return Image.open(io.BytesIO(image_data)).convert("RGB")


@app.on_event("startup")
async def startup_event():
    """Pre-load models on startup"""
    logger.info("Starting PaddleOCR-VL Full Pipeline Server...")
    try:
        load_vl_model()
        load_layout_model()
        logger.info("Models loaded successfully")
    except Exception as e:
        logger.error(f"Failed to pre-load models: {e}")


@app.get("/health")
async def health_check():
    """Health check endpoint"""
    return {
        "status": "healthy" if vl_model is not None else "loading",
        "service": "PaddleOCR-VL Full Pipeline (Transformers)",
        "device": DEVICE,
        "vl_model_loaded": vl_model is not None,
        "layout_model_loaded": layout_model is not None
    }


@app.get("/formats")
async def supported_formats():
    """List supported output formats"""
    return {
        "output_formats": ["json", "markdown", "html"],
        "image_formats": ["PNG", "JPEG", "WebP", "BMP", "GIF", "TIFF"],
        "capabilities": [
            "Layout detection (PP-DocLayoutV2)",
            "Text recognition (OCR)",
            "Table recognition",
            "Formula recognition (LaTeX)",
            "Chart recognition",
            "Multi-language support (109 languages)"
        ]
    }


@app.post("/parse", response_model=ParseResponse)
async def parse_document_endpoint(request: ParseRequest):
    """Parse a document image and return structured output"""
    try:
        start_time = time.time()

        image = decode_image(request.image)
        result = process_document(image)

        if request.output_format == "markdown":
            markdown = result_to_markdown(result)
            output = {"markdown": markdown}
        elif request.output_format == "html":
            html = result_to_html(result)
            output = {"html": html}
        else:
            output = result

        elapsed = time.time() - start_time
        logger.info(f"Processing complete in {elapsed:.2f}s")

        return ParseResponse(
            success=True,
            format=request.output_format,
            result=output,
            processing_time=elapsed
        )

    except Exception as e:
        logger.error(f"Error processing document: {e}", exc_info=True)
        return ParseResponse(
            success=False,
            format=request.output_format,
            result={},
            processing_time=0,
            error=str(e)
        )


@app.post("/v1/chat/completions")
async def chat_completions(request: dict):
    """OpenAI-compatible chat completions endpoint"""
    try:
        messages = request.get("messages", [])
        output_format = request.get("output_format", "json")

        # Find user message with image
        image = None
        for msg in reversed(messages):
            if msg.get("role") == "user":
                content = msg.get("content", [])
                if isinstance(content, list):
                    for item in content:
                        if item.get("type") == "image_url":
                            url = item.get("image_url", {}).get("url", "")
                            image = decode_image(url)
                            break
                break

        if image is None:
            raise HTTPException(status_code=400, detail="No image provided")

        start_time = time.time()
        result = process_document(image)

        if output_format == "markdown":
            content = result_to_markdown(result)
        elif output_format == "html":
            content = result_to_html(result)
        else:
            content = json.dumps(result, ensure_ascii=False, indent=2)

        elapsed = time.time() - start_time

        return {
            "id": f"chatcmpl-{int(time.time()*1000)}",
            "object": "chat.completion",
            "created": int(time.time()),
            "model": "paddleocr-vl-full",
            "choices": [{
                "index": 0,
                "message": {"role": "assistant", "content": content},
                "finish_reason": "stop"
            }],
            "usage": {
                "prompt_tokens": 100,
                "completion_tokens": len(content) // 4,
                "total_tokens": 100 + len(content) // 4
            },
            "processing_time": elapsed
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error in chat completions: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))


if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host=SERVER_HOST, port=SERVER_PORT)
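As a worked illustration of the cell-splitting that parse_paddleocr_table() above performs on the <fcel>/<lcel>/<ecel>/<nl> markup, here is a standalone snippet of ours (not part of the deleted file) condensed to the two re.split calls, run against the docstring's sample input:

import re

raw = "<fcel>Header1<lcel>Header2<nl><fcel>Value1<lcel>Value2<nl>"
rows = [
    [cell.strip() for cell in re.split(r"<fcel>|<lcel>|<ecel>", row) if cell.strip()]
    for row in re.split(r"<nl>", raw) if row.strip()
]
print(rows)  # [['Header1', 'Header2'], ['Value1', 'Value2']]
# In the full function, the first row becomes <thead> and the rest <tbody>
# in the generated HTML table.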
@@ -1,465 +0,0 @@
#!/usr/bin/env python3
"""
PaddleOCR-VL FastAPI Server (CPU variant)
Provides OpenAI-compatible REST API for document parsing using PaddleOCR-VL
"""

import os
import io
import base64
import logging
import time
from typing import Optional, List, Any, Dict, Union

from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse
from pydantic import BaseModel
import torch
from PIL import Image

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Environment configuration
SERVER_HOST = os.environ.get('SERVER_HOST', '0.0.0.0')
SERVER_PORT = int(os.environ.get('SERVER_PORT', '8000'))
MODEL_NAME = os.environ.get('MODEL_NAME', 'PaddlePaddle/PaddleOCR-VL')

# Device configuration
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
logger.info(f"Using device: {DEVICE}")

# Task prompts for PaddleOCR-VL
TASK_PROMPTS = {
    "ocr": "OCR:",
    "table": "Table Recognition:",
    "formula": "Formula Recognition:",
    "chart": "Chart Recognition:",
}

# Initialize FastAPI app
app = FastAPI(
    title="PaddleOCR-VL Server",
    description="OpenAI-compatible REST API for document parsing using PaddleOCR-VL",
    version="1.0.0"
)

# Global model instances
model = None
processor = None


# Request/Response models (OpenAI-compatible)
class ImageUrl(BaseModel):
    url: str


class ContentItem(BaseModel):
    type: str
    text: Optional[str] = None
    image_url: Optional[ImageUrl] = None


class Message(BaseModel):
    role: str
    content: Union[str, List[ContentItem]]


class ChatCompletionRequest(BaseModel):
    model: str = "paddleocr-vl"
    messages: List[Message]
    temperature: Optional[float] = 0.0
    max_tokens: Optional[int] = 4096


class Choice(BaseModel):
    index: int
    message: Message
    finish_reason: str


class Usage(BaseModel):
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int


class ChatCompletionResponse(BaseModel):
    id: str
    object: str = "chat.completion"
    created: int
    model: str
    choices: List[Choice]
    usage: Usage


class HealthResponse(BaseModel):
    status: str
    model: str
    device: str


def load_model():
    """Load the PaddleOCR-VL model and processor"""
    global model, processor

    if model is not None:
        return

    logger.info(f"Loading PaddleOCR-VL model: {MODEL_NAME}")

    from transformers import AutoModelForCausalLM, AutoProcessor

    # Load processor
    processor = AutoProcessor.from_pretrained(MODEL_NAME, trust_remote_code=True)

    # Load model with appropriate settings for CPU/GPU
    if DEVICE == "cuda":
        model = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            trust_remote_code=True,
            torch_dtype=torch.bfloat16,
        ).to(DEVICE).eval()
    else:
        # CPU mode - use float32 for compatibility
        model = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            trust_remote_code=True,
            torch_dtype=torch.float32,
            low_cpu_mem_usage=True,
        ).eval()

    logger.info("PaddleOCR-VL model loaded successfully")


def optimize_image_resolution(image: Image.Image, max_size: int = 2048, min_size: int = 1080) -> Image.Image:
    """
    Optimize image resolution for PaddleOCR-VL.

    Best results are achieved with images in the 1080p-2K range.
    - Images larger than max_size are scaled down
    - Very small images are scaled up to min_size
    """
    width, height = image.size
    max_dim = max(width, height)
    min_dim = min(width, height)

    # Scale down if too large (4K+ images often miss text)
    if max_dim > max_size:
        scale = max_size / max_dim
        new_width = int(width * scale)
        new_height = int(height * scale)
        logger.info(f"Scaling down image from {width}x{height} to {new_width}x{new_height}")
        image = image.resize((new_width, new_height), Image.Resampling.LANCZOS)
    # Scale up if too small
    elif max_dim < min_size and min_dim < min_size:
        scale = min_size / max_dim
        new_width = int(width * scale)
        new_height = int(height * scale)
        logger.info(f"Scaling up image from {width}x{height} to {new_width}x{new_height}")
        image = image.resize((new_width, new_height), Image.Resampling.LANCZOS)
    else:
        logger.info(f"Image size {width}x{height} is optimal, no scaling needed")

    return image


def decode_image(image_source: str, optimize: bool = True) -> Image.Image:
    """
    Decode image from various sources.

    Supported formats:
    - Base64 data URL: data:image/png;base64,... or data:image/jpeg;base64,...
    - HTTP/HTTPS URL: https://example.com/image.png
    - Raw base64 string
    - Local file path

    Supported image types: PNG, JPEG, WebP, BMP, GIF, TIFF
    """
    image = None

    if image_source.startswith("data:"):
        # Base64 encoded image with MIME type header
        # Supports: data:image/png;base64,... data:image/jpeg;base64,... etc.
        header, data = image_source.split(",", 1)
        image_data = base64.b64decode(data)
        image = Image.open(io.BytesIO(image_data)).convert("RGB")
        logger.debug(f"Decoded base64 image with header: {header}")
    elif image_source.startswith("http://") or image_source.startswith("https://"):
        # URL - fetch image
        import httpx
        response = httpx.get(image_source, timeout=30.0)
        response.raise_for_status()
        image = Image.open(io.BytesIO(response.content)).convert("RGB")
        logger.debug(f"Fetched image from URL: {image_source[:50]}...")
    else:
        # Assume it's a file path or raw base64
        try:
            image_data = base64.b64decode(image_source)
            image = Image.open(io.BytesIO(image_data)).convert("RGB")
            logger.debug("Decoded raw base64 image")
        except:
            # Try as file path
            image = Image.open(image_source).convert("RGB")
            logger.debug(f"Loaded image from file: {image_source}")

    # Optimize resolution for best OCR results
    if optimize:
        image = optimize_image_resolution(image)

    return image


def extract_image_and_text(content: Union[str, List[ContentItem]]) -> tuple:
    """Extract image and text prompt from message content"""
    if isinstance(content, str):
        return None, content

    image = None
    text = ""

    for item in content:
        if item.type == "image_url" and item.image_url:
            image = decode_image(item.image_url.url)
        elif item.type == "text" and item.text:
            text = item.text

    return image, text


def generate_response(image: Image.Image, prompt: str, max_tokens: int = 4096) -> str:
    """Generate response using PaddleOCR-VL"""
    load_model()

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image},
                {"type": "text", "text": prompt},
            ]
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )

    if DEVICE == "cuda":
        inputs = {k: v.to(DEVICE) for k, v in inputs.items()}

    with torch.inference_mode():
        outputs = model.generate(
            **inputs,
            max_new_tokens=max_tokens,
            do_sample=False,
            use_cache=True
        )

    response = processor.batch_decode(outputs, skip_special_tokens=True)[0]

    # Extract the assistant's response (after the prompt)
    if "assistant" in response.lower():
        parts = response.split("assistant")
        if len(parts) > 1:
            response = parts[-1].strip()

    return response


@app.on_event("startup")
async def startup_event():
    """Pre-load the model on startup"""
    logger.info("Pre-loading PaddleOCR-VL model...")
    try:
        load_model()
        logger.info("Model pre-loaded successfully")
    except Exception as e:
        logger.error(f"Failed to pre-load model: {e}")
        # Don't fail startup - model will be loaded on first request


@app.get("/health", response_model=HealthResponse)
async def health_check():
    """Health check endpoint"""
    return HealthResponse(
        status="healthy" if model is not None else "loading",
        model=MODEL_NAME,
        device=DEVICE
    )


@app.get("/formats")
async def supported_formats():
    """List supported image formats and input methods"""
    return {
        "image_formats": {
            "supported": ["PNG", "JPEG", "WebP", "BMP", "GIF", "TIFF"],
            "recommended": ["PNG", "JPEG"],
            "mime_types": [
                "image/png",
                "image/jpeg",
                "image/webp",
                "image/bmp",
                "image/gif",
                "image/tiff"
            ]
        },
        "input_methods": {
            "base64_data_url": {
                "description": "Base64 encoded image with MIME type header",
                "example": "data:image/png;base64,iVBORw0KGgo..."
            },
            "http_url": {
                "description": "Direct HTTP/HTTPS URL to image",
                "example": "https://example.com/image.png"
            },
            "raw_base64": {
                "description": "Raw base64 string without header",
                "example": "iVBORw0KGgo..."
            }
        },
        "resolution": {
            "optimal_range": "1080p to 2K (1080-2048 pixels on longest side)",
            "auto_scaling": True,
            "note": "Images are automatically scaled to optimal range. 4K+ images are scaled down for better accuracy."
        },
        "task_prompts": TASK_PROMPTS
    }


@app.get("/v1/models")
async def list_models():
    """List available models (OpenAI-compatible)"""
    return {
        "object": "list",
        "data": [
            {
                "id": "paddleocr-vl",
                "object": "model",
                "created": int(time.time()),
                "owned_by": "paddlepaddle"
            }
        ]
    }


@app.post("/v1/chat/completions", response_model=ChatCompletionResponse)
async def chat_completions(request: ChatCompletionRequest):
    """
    OpenAI-compatible chat completions endpoint for PaddleOCR-VL

    Supports tasks:
    - "OCR:" - Text recognition
    - "Table Recognition:" - Table extraction
    - "Formula Recognition:" - Formula extraction
    - "Chart Recognition:" - Chart extraction
    """
    try:
        # Get the last user message
        user_message = None
        for msg in reversed(request.messages):
            if msg.role == "user":
                user_message = msg
                break

        if not user_message:
            raise HTTPException(status_code=400, detail="No user message found")

        # Extract image and prompt
        image, prompt = extract_image_and_text(user_message.content)

        if image is None:
            raise HTTPException(status_code=400, detail="No image provided in message")

        # Default to OCR if no specific prompt
        if not prompt or prompt.strip() == "":
            prompt = "OCR:"

        logger.info(f"Processing request with prompt: {prompt[:50]}...")

        # Generate response
        start_time = time.time()
        response_text = generate_response(image, prompt, request.max_tokens or 4096)
        elapsed = time.time() - start_time

        logger.info(f"Generated response in {elapsed:.2f}s ({len(response_text)} chars)")

        # Build OpenAI-compatible response
        return ChatCompletionResponse(
            id=f"chatcmpl-{int(time.time()*1000)}",
            created=int(time.time()),
            model=request.model,
            choices=[
                Choice(
                    index=0,
                    message=Message(role="assistant", content=response_text),
                    finish_reason="stop"
                )
            ],
            usage=Usage(
                prompt_tokens=100,  # Approximate
                completion_tokens=len(response_text) // 4,
                total_tokens=100 + len(response_text) // 4
            )
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error processing request: {e}")
        raise HTTPException(status_code=500, detail=str(e))


# Legacy endpoint for compatibility with old PaddleOCR API
class LegacyOCRRequest(BaseModel):
    image: str
    task: Optional[str] = "ocr"


class LegacyOCRResponse(BaseModel):
    success: bool
    result: str
    task: str
    error: Optional[str] = None


@app.post("/ocr", response_model=LegacyOCRResponse)
async def legacy_ocr(request: LegacyOCRRequest):
    """
    Legacy OCR endpoint for backwards compatibility

    Tasks: ocr, table, formula, chart
    """
    try:
        image = decode_image(request.image)
        prompt = TASK_PROMPTS.get(request.task, TASK_PROMPTS["ocr"])

        result = generate_response(image, prompt)

        return LegacyOCRResponse(
            success=True,
            result=result,
            task=request.task
        )
    except Exception as e:
        logger.error(f"Legacy OCR error: {e}")
        return LegacyOCRResponse(
            success=False,
            result="",
            task=request.task,
            error=str(e)
        )


if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host=SERVER_HOST, port=SERVER_PORT)
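For completeness, a sketch of a call to the legacy /ocr endpoint defined at the end of the server above; the localhost:8000 mapping and invoice.png are assumptions:

import base64
import httpx

with open("invoice.png", "rb") as f:  # placeholder input image
    payload = {"image": base64.b64encode(f.read()).decode("ascii"), "task": "table"}

resp = httpx.post("http://localhost:8000/ocr", json=payload, timeout=600.0)
body = resp.json()
if body["success"]:
    print(body["result"])  # raw table markup as emitted by the model for task "table"
else:
    print("OCR failed:", body["error"])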
@@ -1,6 +1,6 @@
 {
   "name": "@host.today/ht-docker-ai",
-  "version": "1.10.0",
+  "version": "1.13.1",
   "type": "module",
   "private": false,
   "description": "Docker images for AI vision-language models including MiniCPM-V 4.5",
@@ -1,549 +0,0 @@
/**
 * Bank statement extraction test using MiniCPM-V (visual) + PaddleOCR-VL (table recognition)
 *
 * This is the combined/dual-VLM approach that uses both models for consensus:
 * - MiniCPM-V for visual extraction
 * - PaddleOCR-VL for table recognition
 */
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensurePaddleOcrVl, ensureMiniCpm } from './helpers/docker.js';

// Service URLs
const OLLAMA_URL = 'http://localhost:11434';
const PADDLEOCR_VL_URL = 'http://localhost:8000';

// Models
const MINICPM_MODEL = 'minicpm-v:latest';
const PADDLEOCR_VL_MODEL = 'paddleocr-vl';

// Prompt for MiniCPM-V visual extraction
const MINICPM_EXTRACT_PROMPT = `/nothink
You are a bank statement parser. Extract EVERY transaction from the table.

Read the Amount column carefully:
- "- 21,47 €" means DEBIT, output as: -21.47
- "+ 1.000,00 €" means CREDIT, output as: 1000.00
- European format: comma = decimal point

For each row output: {"date":"YYYY-MM-DD","counterparty":"NAME","amount":-21.47}

Do not skip any rows. Return ONLY the JSON array, no explanation.`;

// Prompt for PaddleOCR-VL table extraction
const PADDLEOCR_VL_TABLE_PROMPT = `Table Recognition:`;

// Post-processing prompt to convert PaddleOCR-VL output to JSON
const PADDLEOCR_VL_CONVERT_PROMPT = `/nothink
Convert the following bank statement table data to JSON.

Read the Amount values carefully:
- "- 21,47 €" means DEBIT, output as: -21.47
- "+ 1.000,00 €" means CREDIT, output as: 1000.00
- European format: comma = decimal point

For each transaction output: {"date":"YYYY-MM-DD","counterparty":"NAME","amount":-21.47}

Return ONLY the JSON array, no explanation.

Table data:
---
{TABLE_DATA}
---`;

interface ITransaction {
  date: string;
  counterparty: string;
  amount: number;
}

/**
 * Convert PDF to PNG images using ImageMagick
 */
function convertPdfToImages(pdfPath: string): string[] {
  const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
  const outputPattern = path.join(tempDir, 'page-%d.png');

  try {
    execSync(
      `convert -density 300 -quality 100 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
      { stdio: 'pipe' }
    );

    const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.png')).sort();
    const images: string[] = [];

    for (const file of files) {
      const imagePath = path.join(tempDir, file);
      const imageData = fs.readFileSync(imagePath);
      images.push(imageData.toString('base64'));
    }

    return images;
  } finally {
    fs.rmSync(tempDir, { recursive: true, force: true });
  }
}

/**
 * Extract using MiniCPM-V via Ollama
 */
async function extractWithMiniCPM(images: string[], passLabel: string): Promise<ITransaction[]> {
  const payload = {
    model: MINICPM_MODEL,
    prompt: MINICPM_EXTRACT_PROMPT,
    images,
    stream: true,
    options: {
      num_predict: 16384,
      temperature: 0.1,
    },
  };

  const response = await fetch(`${OLLAMA_URL}/api/generate`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload),
  });

  if (!response.ok) {
    throw new Error(`Ollama API error: ${response.status}`);
  }

  const reader = response.body?.getReader();
  if (!reader) {
    throw new Error('No response body');
  }

  const decoder = new TextDecoder();
  let fullText = '';
  let lineBuffer = '';

  console.log(`[${passLabel}] Extracting with MiniCPM-V...`);

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;

    const chunk = decoder.decode(value, { stream: true });
    const lines = chunk.split('\n').filter((l) => l.trim());

    for (const line of lines) {
      try {
        const json = JSON.parse(line);
        if (json.response) {
          fullText += json.response;
          lineBuffer += json.response;

          if (lineBuffer.includes('\n')) {
            const parts = lineBuffer.split('\n');
            for (let i = 0; i < parts.length - 1; i++) {
              console.log(parts[i]);
            }
            lineBuffer = parts[parts.length - 1];
          }
        }
      } catch {
        // Skip invalid JSON lines
      }
    }
  }

  if (lineBuffer) {
    console.log(lineBuffer);
  }
  console.log('');

  const startIdx = fullText.indexOf('[');
  const endIdx = fullText.lastIndexOf(']') + 1;

  if (startIdx < 0 || endIdx <= startIdx) {
    throw new Error('No JSON array found in response');
  }

  return JSON.parse(fullText.substring(startIdx, endIdx));
}

/**
 * Extract table using PaddleOCR-VL via OpenAI-compatible API
 */
async function extractTableWithPaddleOCRVL(imageBase64: string): Promise<string> {
  const payload = {
    model: PADDLEOCR_VL_MODEL,
    messages: [
      {
        role: 'user',
        content: [
          {
            type: 'image_url',
            image_url: { url: `data:image/png;base64,${imageBase64}` },
          },
          {
            type: 'text',
            text: PADDLEOCR_VL_TABLE_PROMPT,
          },
        ],
      },
    ],
    temperature: 0.0,
    max_tokens: 8192,
  };

  const response = await fetch(`${PADDLEOCR_VL_URL}/v1/chat/completions`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload),
  });

  if (!response.ok) {
    const text = await response.text();
    throw new Error(`PaddleOCR-VL API error: ${response.status} - ${text}`);
  }

  const data = await response.json();
  return data.choices?.[0]?.message?.content || '';
}
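
// Note: this request follows the OpenAI chat-completions shape (an image_url
// part carrying a data URI plus a text part), which is exactly what the
// paddleocr_vl_server.py shown earlier serves on /v1/chat/completions.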

/**
 * Convert PaddleOCR-VL table output to transactions using MiniCPM-V
 */
async function convertTableToTransactions(
  tableData: string,
  passLabel: string
): Promise<ITransaction[]> {
  const prompt = PADDLEOCR_VL_CONVERT_PROMPT.replace('{TABLE_DATA}', tableData);

  const payload = {
    model: MINICPM_MODEL,
    prompt,
    stream: true,
    options: {
      num_predict: 16384,
      temperature: 0.1,
    },
  };

  const response = await fetch(`${OLLAMA_URL}/api/generate`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload),
  });

  if (!response.ok) {
    throw new Error(`Ollama API error: ${response.status}`);
  }

  const reader = response.body?.getReader();
  if (!reader) {
    throw new Error('No response body');
  }

  const decoder = new TextDecoder();
  let fullText = '';

  console.log(`[${passLabel}] Converting table data to JSON...`);

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;

    const chunk = decoder.decode(value, { stream: true });
    const lines = chunk.split('\n').filter((l) => l.trim());

    for (const line of lines) {
      try {
        const json = JSON.parse(line);
        if (json.response) {
          fullText += json.response;
        }
      } catch {
        // Skip invalid JSON lines
      }
    }
  }

  const startIdx = fullText.indexOf('[');
  const endIdx = fullText.lastIndexOf(']') + 1;

  if (startIdx < 0 || endIdx <= startIdx) {
    throw new Error('No JSON array found in response');
  }

  return JSON.parse(fullText.substring(startIdx, endIdx));
}

/**
 * Extract using PaddleOCR-VL (table recognition) + conversion
 */
async function extractWithPaddleOCRVL(
  images: string[],
  passLabel: string
): Promise<ITransaction[]> {
  console.log(`[${passLabel}] Extracting tables with PaddleOCR-VL...`);

  // Extract table data from each page
  const tableDataParts: string[] = [];
  for (let i = 0; i < images.length; i++) {
    console.log(`[${passLabel}] Processing page ${i + 1}/${images.length}...`);
    const tableData = await extractTableWithPaddleOCRVL(images[i]);
    if (tableData.trim()) {
      tableDataParts.push(`--- Page ${i + 1} ---\n${tableData}`);
    }
  }

  const combinedTableData = tableDataParts.join('\n\n');
  console.log(`[${passLabel}] Got ${combinedTableData.length} chars of table data`);

  // Convert to transactions
  return convertTableToTransactions(combinedTableData, passLabel);
}

/**
 * Create a hash of transactions for comparison
 */
function hashTransactions(transactions: ITransaction[]): string {
  return transactions
    .map((t) => `${t.date}|${t.amount.toFixed(2)}`)
    .sort()
    .join(';');
}
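
// The hash deliberately covers only date and amount: counterparty wording often
// varies between passes, and it should not block consensus when the figures agree.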

/**
 * Check if PaddleOCR-VL service is available
 */
async function isPaddleOCRVLAvailable(): Promise<boolean> {
  try {
    const response = await fetch(`${PADDLEOCR_VL_URL}/health`, {
      method: 'GET',
      signal: AbortSignal.timeout(5000),
    });
    return response.ok;
  } catch {
    return false;
  }
}

/**
 * Extract with dual-VLM consensus
 * Strategy:
 *   Pass 1 = MiniCPM-V visual extraction
 *   Pass 2 = PaddleOCR-VL table recognition (if available)
 *   Pass 3+ = MiniCPM-V visual (fallback)
 */
async function extractWithConsensus(
  images: string[],
  maxPasses: number = 5
): Promise<ITransaction[]> {
  const results: Array<{ transactions: ITransaction[]; hash: string }> = [];
  const hashCounts: Map<string, number> = new Map();

  const addResult = (transactions: ITransaction[], passLabel: string): number => {
    const hash = hashTransactions(transactions);
    results.push({ transactions, hash });
    hashCounts.set(hash, (hashCounts.get(hash) || 0) + 1);
    console.log(
      `[${passLabel}] Got ${transactions.length} transactions (hash: ${hash.substring(0, 20)}...)`
    );
    return hashCounts.get(hash)!;
  };

  // Check if PaddleOCR-VL is available
  const paddleOCRVLAvailable = await isPaddleOCRVLAvailable();
  if (paddleOCRVLAvailable) {
    console.log('[Setup] PaddleOCR-VL service available - using dual-VLM consensus');
  } else {
    console.log('[Setup] PaddleOCR-VL not available - using MiniCPM-V only');
  }

  // Pass 1: MiniCPM-V visual extraction
  try {
    const pass1Result = await extractWithMiniCPM(images, 'Pass 1 MiniCPM-V');
    addResult(pass1Result, 'Pass 1 MiniCPM-V');
  } catch (err) {
    console.log(`[Pass 1] Error: ${err}`);
  }

  // Pass 2: PaddleOCR-VL table recognition (if available)
  if (paddleOCRVLAvailable) {
    try {
      const pass2Result = await extractWithPaddleOCRVL(images, 'Pass 2 PaddleOCR-VL');
      const count = addResult(pass2Result, 'Pass 2 PaddleOCR-VL');
      if (count >= 2) {
        console.log('[Consensus] MiniCPM-V and PaddleOCR-VL extractions match!');
        return pass2Result;
      }
    } catch (err) {
      console.log(`[Pass 2 PaddleOCR-VL] Error: ${err}`);
    }
  }

  // Pass 3+: Continue with MiniCPM-V visual passes
  const startPass = paddleOCRVLAvailable ? 3 : 2;
  for (let pass = startPass; pass <= maxPasses; pass++) {
    try {
      const transactions = await extractWithMiniCPM(images, `Pass ${pass} MiniCPM-V`);
      const count = addResult(transactions, `Pass ${pass} MiniCPM-V`);

      if (count >= 2) {
        console.log(`[Consensus] Reached after ${pass} passes`);
        return transactions;
      }

      console.log(`[Pass ${pass}] No consensus yet, trying again...`);
    } catch (err) {
      console.log(`[Pass ${pass}] Error: ${err}`);
    }
  }

  // No consensus reached - return the most common result
  let bestHash = '';
  let bestCount = 0;
  for (const [hash, count] of hashCounts) {
    if (count > bestCount) {
      bestCount = count;
      bestHash = hash;
    }
  }

  if (!bestHash) {
    throw new Error('No valid results obtained');
  }

  const best = results.find((r) => r.hash === bestHash)!;
  console.log(`[No consensus] Using most common result (${bestCount}/${maxPasses} passes)`);
  return best.transactions;
}

/**
 * Compare extracted transactions against expected
 */
function compareTransactions(
  extracted: ITransaction[],
  expected: ITransaction[]
): { matches: number; total: number; errors: string[] } {
  const errors: string[] = [];
  let matches = 0;

  for (let i = 0; i < expected.length; i++) {
    const exp = expected[i];
    const ext = extracted[i];

    if (!ext) {
      errors.push(`Missing transaction ${i}: ${exp.date} ${exp.counterparty}`);
      continue;
    }

    const dateMatch = ext.date === exp.date;
    const amountMatch = Math.abs(ext.amount - exp.amount) < 0.01;

    if (dateMatch && amountMatch) {
      matches++;
    } else {
      errors.push(
        `Mismatch at ${i}: expected ${exp.date}/${exp.amount}, got ${ext.date}/${ext.amount}`
      );
    }
  }

  if (extracted.length > expected.length) {
    errors.push(`Extra transactions: ${extracted.length - expected.length}`);
  }

  return { matches, total: expected.length, errors };
}

/**
 * Find all test cases (PDF + JSON pairs) in .nogit/
 */
function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: string }> {
  const testDir = path.join(process.cwd(), '.nogit');
  if (!fs.existsSync(testDir)) {
    return [];
  }

  const files = fs.readdirSync(testDir);
  const pdfFiles = files.filter((f: string) => f.endsWith('.pdf'));
  const testCases: Array<{ name: string; pdfPath: string; jsonPath: string }> = [];

  for (const pdf of pdfFiles) {
    const baseName = pdf.replace('.pdf', '');
    const jsonFile = `${baseName}.json`;
    if (files.includes(jsonFile)) {
      testCases.push({
        name: baseName,
        pdfPath: path.join(testDir, pdf),
        jsonPath: path.join(testDir, jsonFile),
      });
    }
  }

  return testCases;
}

// Tests

tap.test('setup: ensure Docker containers are running', async () => {
  console.log('\n[Setup] Checking Docker containers...\n');

  // Ensure PaddleOCR-VL is running (auto-detects GPU/CPU)
  const paddleOk = await ensurePaddleOcrVl();
  expect(paddleOk).toBeTrue();

  // Ensure MiniCPM is running
  const minicpmOk = await ensureMiniCpm();
  expect(minicpmOk).toBeTrue();

  console.log('\n[Setup] All containers ready!\n');
});

tap.test('should have MiniCPM-V 4.5 model loaded', async () => {
  const response = await fetch(`${OLLAMA_URL}/api/tags`);
  const data = await response.json();
  const modelNames = data.models.map((m: { name: string }) => m.name);
  expect(modelNames.some((name: string) => name.includes('minicpm-v4.5'))).toBeTrue();
});

tap.test('should check PaddleOCR-VL availability', async () => {
  const available = await isPaddleOCRVLAvailable();
  console.log(`PaddleOCR-VL available: ${available}`);
  expect(available).toBeTrue();
});

// Dynamic test for each PDF/JSON pair
const testCases = findTestCases();
for (const testCase of testCases) {
  tap.test(`should extract transactions from ${testCase.name}`, async () => {
    // Load expected transactions
    const expected: ITransaction[] = JSON.parse(fs.readFileSync(testCase.jsonPath, 'utf-8'));
    console.log(`\n=== ${testCase.name} ===`);
    console.log(`Expected: ${expected.length} transactions`);

    // Convert PDF to images
    console.log('Converting PDF to images...');
    const images = convertPdfToImages(testCase.pdfPath);
    console.log(`Converted: ${images.length} pages\n`);

    // Extract with dual-VLM consensus
    const extracted = await extractWithConsensus(images);
    console.log(`\nFinal: ${extracted.length} transactions`);

    // Compare results
    const result = compareTransactions(extracted, expected);
    console.log(`Accuracy: ${result.matches}/${result.total}`);

    if (result.errors.length > 0) {
      console.log('Errors:');
      result.errors.forEach((e) => console.log(`  - ${e}`));
    }

    // Assert high accuracy
    const accuracy = result.matches / result.total;
    expect(accuracy).toBeGreaterThan(0.95);
    expect(extracted.length).toEqual(expected.length);
  });
}

export default tap.start();
@@ -1,8 +1,9 @@
 /**
- * Bank statement extraction test using MiniCPM-V only (visual extraction)
+ * Bank statement extraction using MiniCPM-V (visual extraction)
  *
- * This tests MiniCPM-V's ability to extract bank transactions directly from images
- * without any OCR augmentation.
+ * JSON per-page approach:
+ * 1. Ask for structured JSON of all transactions per page
+ * 2. Consensus: extract twice, compare, retry if mismatch
  */
 import { tap, expect } from '@git.zone/tstest/tapbundle';
 import * as fs from 'fs';
@@ -11,24 +12,8 @@ import { execSync } from 'child_process';
 import * as os from 'os';
 import { ensureMiniCpm } from './helpers/docker.js';
 
-// Service URL
 const OLLAMA_URL = 'http://localhost:11434';
+const MODEL = 'openbmb/minicpm-v4.5:q8_0';
-
-// Model
-const MINICPM_MODEL = 'minicpm-v:latest';
-
-// Prompt for MiniCPM-V visual extraction
-const MINICPM_EXTRACT_PROMPT = `/nothink
-You are a bank statement parser. Extract EVERY transaction from the table.
-
-Read the Amount column carefully:
-- "- 21,47 €" means DEBIT, output as: -21.47
-- "+ 1.000,00 €" means CREDIT, output as: 1000.00
-- European format: comma = decimal point
-
-For each row output: {"date":"YYYY-MM-DD","counterparty":"NAME","amount":-21.47}
-
-Do not skip any rows. Return ONLY the JSON array, no explanation.`;
 
 interface ITransaction {
   date: string;
@@ -36,6 +21,22 @@ interface ITransaction {
   amount: number;
 }
 
+const JSON_PROMPT = `Extract ALL transactions from this bank statement page as a JSON array.
+
+IMPORTANT RULES:
+1. Each transaction has: date, description/counterparty, and an amount
+2. Amount is NEGATIVE for money going OUT (debits, payments, withdrawals)
+3. Amount is POSITIVE for money coming IN (credits, deposits, refunds)
+4. Date format: YYYY-MM-DD
+5. Do NOT include: opening balance, closing balance, subtotals, headers, or summary rows
+6. Only include actual transactions with a specific date and amount
+
+Return ONLY this JSON format, no explanation:
+[
+  {"date": "2021-06-01", "counterparty": "COMPANY NAME", "amount": -25.99},
+  {"date": "2021-06-02", "counterparty": "DEPOSIT FROM", "amount": 100.00}
+]`;
+
 /**
  * Convert PDF to PNG images using ImageMagick
  */
@@ -65,149 +66,330 @@ function convertPdfToImages(pdfPath: string): string[] {
 }
 
 /**
- * Extract using MiniCPM-V via Ollama
+ * Query for JSON extraction
  */
-async function extractWithMiniCPM(images: string[], passLabel: string): Promise<ITransaction[]> {
-  const payload = {
-    model: MINICPM_MODEL,
-    prompt: MINICPM_EXTRACT_PROMPT,
-    images,
-    stream: true,
-    options: {
-      num_predict: 16384,
-      temperature: 0.1,
-    },
-  };
-
-  const response = await fetch(`${OLLAMA_URL}/api/generate`, {
+async function queryJson(image: string, queryId: string): Promise<string> {
+  console.log(`  [${queryId}] Sending request to ${MODEL}...`);
+  const startTime = Date.now();
+
+  const response = await fetch(`${OLLAMA_URL}/api/chat`, {
     method: 'POST',
     headers: { 'Content-Type': 'application/json' },
-    body: JSON.stringify(payload),
+    body: JSON.stringify({
+      model: MODEL,
+      messages: [{
+        role: 'user',
+        content: JSON_PROMPT,
+        images: [image],
+      }],
+      stream: false,
+      options: {
+        num_predict: 4000,
+        temperature: 0.1,
+      },
+    }),
   });
 
+  const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
+
   if (!response.ok) {
+    console.log(`  [${queryId}] ERROR: ${response.status} (${elapsed}s)`);
     throw new Error(`Ollama API error: ${response.status}`);
   }
 
-  const reader = response.body?.getReader();
-  if (!reader) {
-    throw new Error('No response body');
-  }
-
-  const decoder = new TextDecoder();
-  let fullText = '';
-  let lineBuffer = '';
-
-  console.log(`[${passLabel}] Extracting with MiniCPM-V...`);
-
-  while (true) {
-    const { done, value } = await reader.read();
-    if (done) break;
-
-    const chunk = decoder.decode(value, { stream: true });
-    const lines = chunk.split('\n').filter((l) => l.trim());
-
-    for (const line of lines) {
-      try {
-        const json = JSON.parse(line);
-        if (json.response) {
-          fullText += json.response;
-          lineBuffer += json.response;
-
-          if (lineBuffer.includes('\n')) {
-            const parts = lineBuffer.split('\n');
-            for (let i = 0; i < parts.length - 1; i++) {
-              console.log(parts[i]);
-            }
-            lineBuffer = parts[parts.length - 1];
-          }
-        }
-      } catch {
-        // Skip invalid JSON lines
-      }
-    }
-  }
-
-  if (lineBuffer) {
-    console.log(lineBuffer);
-  }
-  console.log('');
-
-  const startIdx = fullText.indexOf('[');
-  const endIdx = fullText.lastIndexOf(']') + 1;
-
-  if (startIdx < 0 || endIdx <= startIdx) {
-    throw new Error('No JSON array found in response');
-  }
-
-  return JSON.parse(fullText.substring(startIdx, endIdx));
-}
-
-/**
- * Create a hash of transactions for comparison
- */
-function hashTransactions(transactions: ITransaction[]): string {
-  return transactions
-    .map((t) => `${t.date}|${t.amount.toFixed(2)}`)
-    .sort()
-    .join(';');
-}
-
-/**
- * Extract with consensus voting using MiniCPM-V only
- */
-async function extractWithConsensus(
-  images: string[],
-  maxPasses: number = 5
-): Promise<ITransaction[]> {
-  const results: Array<{ transactions: ITransaction[]; hash: string }> = [];
-  const hashCounts: Map<string, number> = new Map();
-
-  const addResult = (transactions: ITransaction[], passLabel: string): number => {
-    const hash = hashTransactions(transactions);
-    results.push({ transactions, hash });
-    hashCounts.set(hash, (hashCounts.get(hash) || 0) + 1);
-    console.log(
-      `[${passLabel}] Got ${transactions.length} transactions (hash: ${hash.substring(0, 20)}...)`
-    );
-    return hashCounts.get(hash)!;
-  };
-
-  console.log('[Setup] Using MiniCPM-V only');
-
-  for (let pass = 1; pass <= maxPasses; pass++) {
-    try {
-      const transactions = await extractWithMiniCPM(images, `Pass ${pass} MiniCPM-V`);
-      const count = addResult(transactions, `Pass ${pass} MiniCPM-V`);
-
-      if (count >= 2) {
-        console.log(`[Consensus] Reached after ${pass} passes`);
-        return transactions;
-      }
-
-      console.log(`[Pass ${pass}] No consensus yet, trying again...`);
-    } catch (err) {
-      console.log(`[Pass ${pass}] Error: ${err}`);
-    }
-  }
-
-  // No consensus reached - return the most common result
-  let bestHash = '';
-  let bestCount = 0;
-  for (const [hash, count] of hashCounts) {
-    if (count > bestCount) {
-      bestCount = count;
-      bestHash = hash;
-    }
-  }
-
-  if (!bestHash) {
-    throw new Error('No valid results obtained');
-  }
-
-  const best = results.find((r) => r.hash === bestHash)!;
-  console.log(`[No consensus] Using most common result (${bestCount}/${maxPasses} passes)`);
-  return best.transactions;
+  const data = await response.json();
+  const content = (data.message?.content || '').trim();
+  console.log(`  [${queryId}] Response received (${elapsed}s, ${content.length} chars)`);
+  return content;
+}
+
+/**
+ * Sanitize JSON string - fix common issues from vision model output
+ */
+function sanitizeJson(jsonStr: string): string {
+  let s = jsonStr;
+
+  // Fix +number (e.g., +93.80 -> 93.80) - JSON doesn't allow + prefix
+  // Handle various whitespace patterns
+  s = s.replace(/"amount"\s*:\s*\+/g, '"amount": ');
+  s = s.replace(/:\s*\+(\d)/g, ': $1');
+
+  // Fix European number format with thousands separator (e.g., 1.000.00 -> 1000.00)
+  // Pattern: "amount": X.XXX.XX where X.XXX is thousands and .XX is decimal
+  s = s.replace(/"amount"\s*:\s*(-?)(\d{1,3})\.(\d{3})\.(\d{2})\b/g, '"amount": $1$2$3.$4');
+  // Also handle larger numbers like 10.000.00
+  s = s.replace(/"amount"\s*:\s*(-?)(\d{1,3})\.(\d{3})\.(\d{3})\.(\d{2})\b/g, '"amount": $1$2$3$4.$5');
+
+  // Fix trailing commas before ] or }
+  s = s.replace(/,\s*([}\]])/g, '$1');
+
+  // Fix unescaped newlines inside strings (replace with space)
+  s = s.replace(/"([^"\\]*)\n([^"]*)"/g, '"$1 $2"');
+
+  // Fix unescaped tabs inside strings
+  s = s.replace(/"([^"\\]*)\t([^"]*)"/g, '"$1 $2"');
+
+  // Fix unescaped backslashes (but not already escaped ones)
+  s = s.replace(/\\(?!["\\/bfnrtu])/g, '\\\\');
+
+  // Fix common issues with counterparty names containing special chars
+  s = s.replace(/"counterparty":\s*"([^"]*)'([^"]*)"/g, '"counterparty": "$1$2"');
+
+  // Remove control characters except newlines (which we handle above)
+  s = s.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F]/g, ' ');
+
+  return s;
+}
+
+/**
+ * Parse JSON response into transactions
+ */
+function parseJsonResponse(response: string, queryId: string): ITransaction[] {
+  console.log(`  [${queryId}] Parsing response...`);
+
+  // Try to find JSON in markdown code block
+  const codeBlockMatch = response.match(/```(?:json)?\s*([\s\S]*?)```/);
+  let jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : response.trim();
+
+  if (codeBlockMatch) {
+    console.log(`  [${queryId}] Found JSON in code block`);
+  }
+
+  // Sanitize JSON (fix +number issue)
+  jsonStr = sanitizeJson(jsonStr);
+
+  try {
+    const parsed = JSON.parse(jsonStr);
+    if (Array.isArray(parsed)) {
+      const txs = parsed.map(tx => ({
+        date: String(tx.date || ''),
+        counterparty: String(tx.counterparty || tx.description || ''),
+        amount: parseAmount(tx.amount),
+      }));
+      console.log(`  [${queryId}] Parsed ${txs.length} transactions (direct)`);
+      return txs;
+    }
+    console.log(`  [${queryId}] Parsed JSON is not an array`);
+  } catch (e) {
+    const errMsg = (e as Error).message;
+    console.log(`  [${queryId}] Direct parse failed: ${errMsg}`);
+
+    // Log problematic section with context
+    const posMatch = errMsg.match(/position (\d+)/);
+    if (posMatch) {
+      const pos = parseInt(posMatch[1]);
+      const start = Math.max(0, pos - 40);
+      const end = Math.min(jsonStr.length, pos + 40);
+      const context = jsonStr.substring(start, end);
+      const marker = ' '.repeat(pos - start) + '^';
+      console.log(`  [${queryId}] Context around error position ${pos}:`);
+      console.log(`  [${queryId}] ...${context}...`);
+      console.log(`  [${queryId}]    ${marker}`);
+    }
+
+    // Try to find JSON array pattern
+    const arrayMatch = jsonStr.match(/\[[\s\S]*\]/);
+    if (arrayMatch) {
+      console.log(`  [${queryId}] Found array pattern, trying to parse...`);
+      const sanitizedArray = sanitizeJson(arrayMatch[0]);
+      try {
+        const parsed = JSON.parse(sanitizedArray);
+        if (Array.isArray(parsed)) {
+          const txs = parsed.map(tx => ({
+            date: String(tx.date || ''),
+            counterparty: String(tx.counterparty || tx.description || ''),
+            amount: parseAmount(tx.amount),
+          }));
+          console.log(`  [${queryId}] Parsed ${txs.length} transactions (array match)`);
+          return txs;
+        }
+      } catch (e2) {
+        const errMsg2 = (e2 as Error).message;
+        console.log(`  [${queryId}] Array parse failed: ${errMsg2}`);
+        const posMatch2 = errMsg2.match(/position (\d+)/);
+        if (posMatch2) {
+          const pos2 = parseInt(posMatch2[1]);
+          console.log(`  [${queryId}] Context around error: ...${sanitizedArray.substring(Math.max(0, pos2 - 30), pos2 + 30)}...`);
+        }
+
+        // Try to extract individual objects from the malformed array
+        console.log(`  [${queryId}] Attempting object-by-object extraction...`);
+        const extracted = extractTransactionsFromMalformedJson(sanitizedArray, queryId);
+        if (extracted.length > 0) {
+          console.log(`  [${queryId}] Recovered ${extracted.length} transactions via object extraction`);
+          return extracted;
+        }
+      }
+    } else {
+      console.log(`  [${queryId}] No array pattern found in response`);
+      console.log(`  [${queryId}] Raw response preview: ${response.substring(0, 200)}...`);
+    }
+  }
+
+  console.log(`  [${queryId}] PARSE FAILED - returning empty array`);
+  return [];
+}
+
+/**
+ * Extract transactions from malformed JSON by parsing objects individually
+ */
+function extractTransactionsFromMalformedJson(jsonStr: string, queryId: string): ITransaction[] {
+  const transactions: ITransaction[] = [];
+
+  // Match individual transaction objects
+  const objectPattern = /\{\s*"date"\s*:\s*"([^"]+)"\s*,\s*"counterparty"\s*:\s*"([^"]+)"\s*,\s*"amount"\s*:\s*([+-]?\d+\.?\d*)\s*\}/g;
+  let match;
+
+  while ((match = objectPattern.exec(jsonStr)) !== null) {
+    transactions.push({
+      date: match[1],
+      counterparty: match[2],
+      amount: parseFloat(match[3]),
+    });
+  }
+
+  // Also try with different field orders (amount before counterparty, etc.)
+  if (transactions.length === 0) {
+    const altPattern = /\{\s*"date"\s*:\s*"([^"]+)"[^}]*"amount"\s*:\s*([+-]?\d+\.?\d*)[^}]*\}/g;
+    while ((match = altPattern.exec(jsonStr)) !== null) {
+      // Try to extract counterparty from the match
+      const counterpartyMatch = match[0].match(/"counterparty"\s*:\s*"([^"]+)"/);
+      const descMatch = match[0].match(/"description"\s*:\s*"([^"]+)"/);
+      transactions.push({
+        date: match[1],
+        counterparty: counterpartyMatch?.[1] || descMatch?.[1] || 'UNKNOWN',
+        amount: parseFloat(match[2]),
+      });
+    }
+  }
+
+  return transactions;
+}
+
+/**
+ * Parse amount from various formats
+ */
+function parseAmount(value: unknown): number {
+  if (typeof value === 'number') return value;
+  if (typeof value !== 'string') return 0;
+  let s = value.replace(/[€$£\s]/g, '').replace('−', '-').replace('–', '-');
+  // European format: comma is decimal
+  if (s.includes(',') && s.indexOf(',') > s.lastIndexOf('.')) {
+    s = s.replace(/\./g, '').replace(',', '.');
+  } else {
+    s = s.replace(/,/g, '');
+  }
+  return parseFloat(s) || 0;
+}
+
+/**
+ * Compare two transaction arrays for consensus
+ */
+function transactionArraysMatch(a: ITransaction[], b: ITransaction[]): boolean {
+  if (a.length !== b.length) return false;
+
+  for (let i = 0; i < a.length; i++) {
+    const dateMatch = a[i].date === b[i].date;
+    const amountMatch = Math.abs(a[i].amount - b[i].amount) < 0.01;
+    if (!dateMatch || !amountMatch) return false;
+  }
+
+  return true;
+}
+
+/**
+ * Compare two transaction arrays and log differences
+ */
+function compareAndLogDifferences(txs1: ITransaction[], txs2: ITransaction[], pageNum: number): void {
+  if (txs1.length !== txs2.length) {
+    console.log(`  [Page ${pageNum}] Length mismatch: Q1=${txs1.length}, Q2=${txs2.length}`);
+    return;
+  }
+
+  for (let i = 0; i < txs1.length; i++) {
+    const dateMatch = txs1[i].date === txs2[i].date;
+    const amountMatch = Math.abs(txs1[i].amount - txs2[i].amount) < 0.01;
+
+    if (!dateMatch || !amountMatch) {
+      console.log(`  [Page ${pageNum}] Tx ${i + 1} differs:`);
+      console.log(`    Q1: ${txs1[i].date} | ${txs1[i].amount}`);
+      console.log(`    Q2: ${txs2[i].date} | ${txs2[i].amount}`);
+    }
+  }
+}
+
+/**
+ * Extract transactions from a single page with consensus
+ */
+async function extractTransactionsFromPage(image: string, pageNum: number): Promise<ITransaction[]> {
+  const MAX_ATTEMPTS = 5;
+  console.log(`\n  ======== Page ${pageNum} ========`);
+  console.log(`  [Page ${pageNum}] Starting JSON extraction...`);
+
+  for (let attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
+    console.log(`\n  [Page ${pageNum}] --- Attempt ${attempt}/${MAX_ATTEMPTS} ---`);
+
+    // Extract twice in parallel
+    const q1Id = `P${pageNum}A${attempt}Q1`;
+    const q2Id = `P${pageNum}A${attempt}Q2`;
+
+    const [response1, response2] = await Promise.all([
+      queryJson(image, q1Id),
+      queryJson(image, q2Id),
+    ]);
+
+    const txs1 = parseJsonResponse(response1, q1Id);
+    const txs2 = parseJsonResponse(response2, q2Id);
+
+    console.log(`  [Page ${pageNum}] Results: Q1=${txs1.length} txs, Q2=${txs2.length} txs`);
+
+    if (txs1.length > 0 && transactionArraysMatch(txs1, txs2)) {
+      console.log(`  [Page ${pageNum}] ✓ CONSENSUS REACHED: ${txs1.length} transactions`);
+      console.log(`  [Page ${pageNum}] Transactions:`);
+      for (let i = 0; i < txs1.length; i++) {
+        const tx = txs1[i];
+        console.log(`    ${(i + 1).toString().padStart(2)}. ${tx.date} | ${tx.counterparty.substring(0, 30).padEnd(30)} | ${tx.amount >= 0 ? '+' : ''}${tx.amount.toFixed(2)}`);
+      }
+      return txs1;
+    }
+
+    console.log(`  [Page ${pageNum}] ✗ NO CONSENSUS`);
+    compareAndLogDifferences(txs1, txs2, pageNum);
+
+    if (attempt < MAX_ATTEMPTS) {
+      console.log(`  [Page ${pageNum}] Retrying...`);
+    }
+  }
+
+  // Fallback: use last response
+  console.log(`\n  [Page ${pageNum}] === FALLBACK (no consensus after ${MAX_ATTEMPTS} attempts) ===`);
+  const fallbackId = `P${pageNum}FALLBACK`;
+  const fallbackResponse = await queryJson(image, fallbackId);
+  const fallback = parseJsonResponse(fallbackResponse, fallbackId);
+  console.log(`  [Page ${pageNum}] ~ FALLBACK RESULT: ${fallback.length} transactions`);
+  for (let i = 0; i < fallback.length; i++) {
+    const tx = fallback[i];
+    console.log(`    ${(i + 1).toString().padStart(2)}. ${tx.date} | ${tx.counterparty.substring(0, 30).padEnd(30)} | ${tx.amount >= 0 ? '+' : ''}${tx.amount.toFixed(2)}`);
+  }
+  return fallback;
+}
+
+/**
+ * Extract all transactions from bank statement
+ */
+async function extractTransactions(images: string[]): Promise<ITransaction[]> {
+  console.log(`  [Vision] Processing ${images.length} page(s) with ${MODEL} (JSON consensus)`);
+
+  const allTransactions: ITransaction[] = [];
+
+  for (let i = 0; i < images.length; i++) {
+    const pageTransactions = await extractTransactionsFromPage(images[i], i + 1);
+    allTransactions.push(...pageTransactions);
+  }
+
+  console.log(`  [Vision] Total: ${allTransactions.length} transactions`);
+  return allTransactions;
 }
 
 /**
@@ -216,8 +398,9 @@ async function extractWithConsensus(
 function compareTransactions(
   extracted: ITransaction[],
   expected: ITransaction[]
-): { matches: number; total: number; errors: string[] } {
+): { matches: number; total: number; errors: string[]; variations: string[] } {
   const errors: string[] = [];
+  const variations: string[] = [];
   let matches = 0;
 
   for (let i = 0; i < expected.length; i++) {
@@ -234,6 +417,12 @@ function compareTransactions(
 
     if (dateMatch && amountMatch) {
       matches++;
+      // Track counterparty variations (date and amount match but name differs)
+      if (ext.counterparty !== exp.counterparty) {
+        variations.push(
+          `[${i}] "${exp.counterparty}" → "${ext.counterparty}"`
+        );
+      }
     } else {
       errors.push(
         `Mismatch at ${i}: expected ${exp.date}/${exp.amount}, got ${ext.date}/${ext.amount}`
@@ -245,7 +434,7 @@ function compareTransactions(
     errors.push(`Extra transactions: ${extracted.length - expected.length}`);
   }
 
-  return { matches, total: expected.length, errors };
+  return { matches, total: expected.length, errors, variations };
 }
 
 /**
@@ -273,62 +462,75 @@ function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: strin
     }
   }
 
-  return testCases;
+  return testCases.sort((a, b) => a.name.localeCompare(b.name));
 }
 
 // Tests
 
 tap.test('setup: ensure Docker containers are running', async () => {
   console.log('\n[Setup] Checking Docker containers...\n');
 
-  // Ensure MiniCPM is running
   const minicpmOk = await ensureMiniCpm();
   expect(minicpmOk).toBeTrue();
 
   console.log('\n[Setup] All containers ready!\n');
});
 
-tap.test('should have MiniCPM-V 4.5 model loaded', async () => {
+tap.test('should have MiniCPM-V model loaded', async () => {
   const response = await fetch(`${OLLAMA_URL}/api/tags`);
   const data = await response.json();
   const modelNames = data.models.map((m: { name: string }) => m.name);
-  expect(modelNames.some((name: string) => name.includes('minicpm-v4.5'))).toBeTrue();
+  expect(modelNames.some((name: string) => name.includes('minicpm'))).toBeTrue();
 });
 
-// Dynamic test for each PDF/JSON pair
 const testCases = findTestCases();
-console.log(`\nFound ${testCases.length} bank statement test cases (MiniCPM-V only)\n`);
+console.log(`\nFound ${testCases.length} bank statement test cases (MiniCPM-V)\n`);
+
+let passedCount = 0;
+let failedCount = 0;
 
 for (const testCase of testCases) {
-  tap.test(`should extract transactions from ${testCase.name}`, async () => {
-    // Load expected transactions
+  tap.test(`should extract: ${testCase.name}`, async () => {
     const expected: ITransaction[] = JSON.parse(fs.readFileSync(testCase.jsonPath, 'utf-8'));
     console.log(`\n=== ${testCase.name} ===`);
     console.log(`Expected: ${expected.length} transactions`);
 
-    // Convert PDF to images
-    console.log('Converting PDF to images...');
     const images = convertPdfToImages(testCase.pdfPath);
-    console.log(`Converted: ${images.length} pages\n`);
+    console.log(`  Pages: ${images.length}`);
 
-    // Extract with consensus (MiniCPM-V only)
-    const extracted = await extractWithConsensus(images);
-    console.log(`\nFinal: ${extracted.length} transactions`);
+    const extracted = await extractTransactions(images);
+    console.log(`  Extracted: ${extracted.length} transactions`);
 
-    // Compare results
     const result = compareTransactions(extracted, expected);
-    console.log(`Accuracy: ${result.matches}/${result.total}`);
+    const perfectMatch = result.matches === result.total && extracted.length === expected.length;
 
-    if (result.errors.length > 0) {
-      console.log('Errors:');
-      result.errors.forEach((e) => console.log(`  - ${e}`));
+    if (perfectMatch) {
+      passedCount++;
+      console.log(`  Result: PASS (${result.matches}/${result.total})`);
+    } else {
+      failedCount++;
+      console.log(`  Result: FAIL (${result.matches}/${result.total})`);
+      result.errors.slice(0, 10).forEach((e) => console.log(`    - ${e}`));
     }
 
-    // Assert high accuracy
-    const accuracy = result.matches / result.total;
-    expect(accuracy).toBeGreaterThan(0.95);
+    // Log counterparty variations (names that differ but date/amount matched)
+    if (result.variations.length > 0) {
+      console.log(`  Counterparty variations (${result.variations.length}):`);
+      result.variations.forEach((v) => console.log(`    ${v}`));
+    }
+
+    expect(result.matches).toEqual(result.total);
     expect(extracted.length).toEqual(expected.length);
   });
 }
 
+tap.test('summary', async () => {
+  const total = testCases.length;
+  console.log(`\n======================================================`);
+  console.log(`  Bank Statement Summary (${MODEL})`);
+  console.log(`======================================================`);
+  console.log(`  Method: JSON per-page + consensus`);
+  console.log(`  Passed: ${passedCount}/${total}`);
+  console.log(`  Failed: ${failedCount}/${total}`);
+  console.log(`======================================================\n`);
});
+
 export default tap.start();
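The sanitizer introduced above is what rescues almost-valid model output before JSON.parse. A small standalone sketch of its two amount fixes, applied in isolation to made-up inputs (not from the repo):

// Sketch: the two "amount" fixes from sanitizeJson, shown on their own.
let a = '{"amount": +93.80}';
a = a.replace(/"amount"\s*:\s*\+/g, '"amount": ');
// a is now '{"amount": 93.80}', which JSON.parse accepts.

let b = '{"amount": -1.000.00}';
b = b.replace(/"amount"\s*:\s*(-?)(\d{1,3})\.(\d{3})\.(\d{2})\b/g, '"amount": $1$2$3.$4');
// b is now '{"amount": -1000.00}': the thousands separator folded into one number.

console.log(JSON.parse(a).amount, JSON.parse(b).amount); // 93.8 -1000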
@@ -1,348 +0,0 @@
/**
 * Bank Statement extraction using Ministral 3 Vision (Direct)
 *
 * NO OCR pipeline needed - Ministral 3 has built-in vision encoder:
 * 1. Convert PDF to images
 * 2. Send images directly to Ministral 3 via Ollama
 * 3. Extract transactions as structured JSON
 */
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensureMinistral3 } from './helpers/docker.js';

const OLLAMA_URL = 'http://localhost:11434';
const VISION_MODEL = 'ministral-3:8b';

interface ITransaction {
  date: string;
  counterparty: string;
  amount: number;
}

/**
 * Convert PDF to PNG images using ImageMagick
 */
function convertPdfToImages(pdfPath: string): string[] {
  const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
  const outputPattern = path.join(tempDir, 'page-%d.png');

  try {
    execSync(
      `convert -density 200 -quality 90 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
      { stdio: 'pipe' }
    );

    const files = fs.readdirSync(tempDir).filter((f) => f.endsWith('.png')).sort();
    const images: string[] = [];

    for (const file of files) {
      const imagePath = path.join(tempDir, file);
      const imageData = fs.readFileSync(imagePath);
      images.push(imageData.toString('base64'));
    }

    return images;
  } finally {
    fs.rmSync(tempDir, { recursive: true, force: true });
  }
}

/**
 * Extract transactions from a single page image using Ministral 3 Vision
 */
async function extractTransactionsFromPage(image: string, pageNum: number): Promise<ITransaction[]> {
  console.log(`  [Vision] Processing page ${pageNum}`);

  // JSON schema for array of transactions
  const transactionSchema = {
    type: 'array',
    items: {
      type: 'object',
      properties: {
        date: { type: 'string', description: 'Transaction date in YYYY-MM-DD format' },
        counterparty: { type: 'string', description: 'Name of the other party' },
        amount: { type: 'number', description: 'Amount (negative for debits, positive for credits)' },
      },
      required: ['date', 'counterparty', 'amount'],
    },
  };
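
  // Passing this schema as Ollama's `format` option below constrains generation
  // to output that validates against it (structured outputs), rather than
  // relying on prompt wording alone.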

  const prompt = `Extract ALL bank transactions from this bank statement page.

For each transaction, extract:
- date: Transaction date in YYYY-MM-DD format
- counterparty: The name/description of the other party (merchant, payee, etc.)
- amount: The amount as a number (NEGATIVE for debits/expenses, POSITIVE for credits/income)

Return a JSON array of transactions. If no transactions visible, return empty array [].
Example: [{"date":"2021-06-01","counterparty":"AMAZON","amount":-50.00}]`;

  const response = await fetch(`${OLLAMA_URL}/api/chat`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: VISION_MODEL,
      messages: [
        {
          role: 'user',
          content: prompt,
          images: [image],
        },
      ],
      format: transactionSchema,
      stream: true,
      options: {
        num_predict: 4096, // Bank statements can have many transactions
        temperature: 0.0,
      },
    }),
  });

  if (!response.ok) {
    throw new Error(`Ollama API error: ${response.status}`);
  }

  const reader = response.body?.getReader();
  if (!reader) {
    throw new Error('No response body');
  }

  const decoder = new TextDecoder();
  let fullText = '';

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;

    const chunk = decoder.decode(value, { stream: true });
    const lines = chunk.split('\n').filter((l) => l.trim());

    for (const line of lines) {
      try {
        const json = JSON.parse(line);
        if (json.message?.content) {
          fullText += json.message.content;
        }
      } catch {
        // Skip invalid JSON lines
      }
    }
  }

  // Parse JSON response
  let jsonStr = fullText.trim();

  if (jsonStr.startsWith('```json')) jsonStr = jsonStr.slice(7);
  else if (jsonStr.startsWith('```')) jsonStr = jsonStr.slice(3);
  if (jsonStr.endsWith('```')) jsonStr = jsonStr.slice(0, -3);
  jsonStr = jsonStr.trim();

  // Find array boundaries
  const startIdx = jsonStr.indexOf('[');
  const endIdx = jsonStr.lastIndexOf(']') + 1;

  if (startIdx < 0 || endIdx <= startIdx) {
    console.log(`  [Page ${pageNum}] No transactions found`);
    return [];
  }

  try {
    const parsed = JSON.parse(jsonStr.substring(startIdx, endIdx));
    console.log(`  [Page ${pageNum}] Found ${parsed.length} transactions`);
    return parsed.map((t: { date?: string; counterparty?: string; amount?: number }) => ({
      date: t.date || '',
      counterparty: t.counterparty || '',
      amount: parseFloat(String(t.amount)) || 0,
    }));
  } catch (e) {
    console.log(`  [Page ${pageNum}] Parse error: ${e}`);
    return [];
  }
}

/**
 * Extract all transactions from all pages
 */
async function extractAllTransactions(images: string[]): Promise<ITransaction[]> {
  const allTransactions: ITransaction[] = [];

  for (let i = 0; i < images.length; i++) {
    const pageTransactions = await extractTransactionsFromPage(images[i], i + 1);
    allTransactions.push(...pageTransactions);
  }

  return allTransactions;
}

/**
 * Normalize date to YYYY-MM-DD
 */
function normalizeDate(dateStr: string): string {
  if (!dateStr) return '';
  if (/^\d{4}-\d{2}-\d{2}$/.test(dateStr)) return dateStr;

  // Handle DD/MM/YYYY or DD.MM.YYYY
  const match = dateStr.match(/^(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})$/);
  if (match) {
    return `${match[3]}-${match[2].padStart(2, '0')}-${match[1].padStart(2, '0')}`;
  }

  return dateStr;
}
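
// Examples: normalizeDate('01.06.2021') and normalizeDate('1/6/2021') both
// return '2021-06-01'; unrecognized formats are passed through unchanged.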

/**
 * Compare extracted transactions vs expected
 */
function compareTransactions(
  extracted: ITransaction[],
  expected: ITransaction[]
): { matchRate: number; matched: number; missed: number; extra: number; errors: string[] } {
  const errors: string[] = [];
  let matched = 0;

  // Normalize all dates
  const normalizedExtracted = extracted.map((t) => ({
    ...t,
    date: normalizeDate(t.date),
    counterparty: t.counterparty.toUpperCase().trim(),
  }));

  const normalizedExpected = expected.map((t) => ({
    ...t,
    date: normalizeDate(t.date),
    counterparty: t.counterparty.toUpperCase().trim(),
  }));

  // Try to match each expected transaction
  const matchedIndices = new Set<number>();

  for (const exp of normalizedExpected) {
    let found = false;

    for (let i = 0; i < normalizedExtracted.length; i++) {
      if (matchedIndices.has(i)) continue;

      const ext = normalizedExtracted[i];

      // Match by date + amount (counterparty names can vary)
      if (ext.date === exp.date && Math.abs(ext.amount - exp.amount) < 0.02) {
        matched++;
        matchedIndices.add(i);
        found = true;
        break;
      }
    }

    if (!found) {
      errors.push(`Missing: ${exp.date} | ${exp.counterparty} | ${exp.amount}`);
    }
  }

  const missed = expected.length - matched;
  const extra = extracted.length - matched;
  const matchRate = expected.length > 0 ? (matched / expected.length) * 100 : 0;

  return { matchRate, matched, missed, extra, errors };
}
|
|
||||||
/**
|
|
||||||
* Find test cases (PDF + JSON pairs in .nogit/)
|
|
||||||
*/
|
|
||||||
function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: string }> {
|
|
||||||
const testDir = path.join(process.cwd(), '.nogit');
|
|
||||||
if (!fs.existsSync(testDir)) return [];
|
|
||||||
|
|
||||||
const files = fs.readdirSync(testDir);
|
|
||||||
const testCases: Array<{ name: string; pdfPath: string; jsonPath: string }> = [];
|
|
||||||
|
|
||||||
for (const pdf of files.filter((f) => f.endsWith('.pdf'))) {
|
|
||||||
const baseName = pdf.replace('.pdf', '');
|
|
||||||
const jsonFile = `${baseName}.json`;
|
|
||||||
if (files.includes(jsonFile)) {
|
|
||||||
// Skip invoice files - only bank statements
|
|
||||||
if (!baseName.includes('invoice')) {
|
|
||||||
testCases.push({
|
|
||||||
name: baseName,
|
|
||||||
pdfPath: path.join(testDir, pdf),
|
|
||||||
jsonPath: path.join(testDir, jsonFile),
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return testCases.sort((a, b) => a.name.localeCompare(b.name));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tests
|
|
||||||
|
|
||||||
tap.test('setup: ensure Ministral 3 is running', async () => {
|
|
||||||
console.log('\n[Setup] Checking Ministral 3...\n');
|
|
||||||
const ok = await ensureMinistral3();
|
|
||||||
expect(ok).toBeTrue();
|
|
||||||
console.log('\n[Setup] Ready!\n');
|
|
||||||
});
|
|
||||||
|
|
||||||
const testCases = findTestCases();
|
|
||||||
console.log(`\nFound ${testCases.length} bank statement test cases (Ministral 3 Vision)\n`);
|
|
||||||
|
|
||||||
let totalMatched = 0;
|
|
||||||
let totalExpected = 0;
|
|
||||||
const times: number[] = [];
|
|
||||||
|
|
||||||
for (const testCase of testCases) {
|
|
||||||
tap.test(`should extract bank statement: ${testCase.name}`, async () => {
|
|
||||||
const expected: ITransaction[] = JSON.parse(fs.readFileSync(testCase.jsonPath, 'utf-8'));
|
|
||||||
console.log(`\n=== ${testCase.name} ===`);
|
|
||||||
console.log(`Expected: ${expected.length} transactions`);
|
|
||||||
|
|
||||||
const start = Date.now();
|
|
||||||
const images = convertPdfToImages(testCase.pdfPath);
|
|
||||||
console.log(` Pages: ${images.length}`);
|
|
||||||
|
|
||||||
const extracted = await extractAllTransactions(images);
|
|
||||||
const elapsed = Date.now() - start;
|
|
||||||
times.push(elapsed);
|
|
||||||
|
|
||||||
console.log(` Extracted: ${extracted.length} transactions`);
|
|
||||||
|
|
||||||
const result = compareTransactions(extracted, expected);
|
|
||||||
totalMatched += result.matched;
|
|
||||||
totalExpected += expected.length;
|
|
||||||
|
|
||||||
console.log(` Match rate: ${result.matchRate.toFixed(1)}% (${result.matched}/${expected.length})`);
|
|
||||||
console.log(` Missed: ${result.missed}, Extra: ${result.extra}`);
|
|
||||||
console.log(` Time: ${(elapsed / 1000).toFixed(1)}s`);
|
|
||||||
|
|
||||||
if (result.errors.length > 0 && result.errors.length <= 5) {
|
|
||||||
result.errors.forEach((e) => console.log(` - ${e}`));
|
|
||||||
} else if (result.errors.length > 5) {
|
|
||||||
console.log(` (${result.errors.length} missing transactions)`);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Consider it a pass if we match at least 70% of transactions
|
|
||||||
expect(result.matchRate).toBeGreaterThan(70);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
tap.test('summary', async () => {
|
|
||||||
const overallMatchRate = totalExpected > 0 ? (totalMatched / totalExpected) * 100 : 0;
|
|
||||||
const totalTime = times.reduce((a, b) => a + b, 0) / 1000;
|
|
||||||
const avgTime = times.length > 0 ? totalTime / times.length : 0;
|
|
||||||
|
|
||||||
console.log(`\n======================================================`);
|
|
||||||
console.log(` Bank Statement Extraction Summary (Ministral 3)`);
|
|
||||||
console.log(`======================================================`);
|
|
||||||
console.log(` Method: Ministral 3 8B Vision (Direct)`);
|
|
||||||
console.log(` Statements: ${testCases.length}`);
|
|
||||||
console.log(` Matched: ${totalMatched}/${totalExpected} transactions`);
|
|
||||||
console.log(` Match rate: ${overallMatchRate.toFixed(1)}%`);
|
|
||||||
console.log(`------------------------------------------------------`);
|
|
||||||
console.log(` Total time: ${totalTime.toFixed(1)}s`);
|
|
||||||
console.log(` Avg per stmt: ${avgTime.toFixed(1)}s`);
|
|
||||||
console.log(`======================================================\n`);
|
|
||||||
});
|
|
||||||
|
|
||||||
export default tap.start();
|
|
||||||
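Note on the matcher above: amounts are compared with a 0.02 tolerance rather than strict equality, which absorbs one-cent rounding drift from parseFloat on model output. A minimal standalone sketch of the same idea; the helper name amountsMatch is ours for illustration, the test file inlines this logic:

// Hypothetical helper mirroring the tolerance-based amount match used above.
function amountsMatch(a: number, b: number, toleranceEur = 0.02): boolean {
  // 19.99 parsed from "19,99 €" may come back as 19.990000000000002;
  // strict === would reject it, the tolerance accepts it.
  return Math.abs(a - b) < toleranceEur;
}

console.log(amountsMatch(19.99, 19.990000000000002)); // true
console.log(amountsMatch(19.99, 20.01));              // false (two cents apart)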
@@ -1,346 +0,0 @@
/**
 * Bank statement extraction test using PaddleOCR-VL Full Pipeline
 *
 * This tests the complete PaddleOCR-VL pipeline for bank statements:
 * 1. PP-DocLayoutV2 for layout detection
 * 2. PaddleOCR-VL for recognition (tables with proper structure)
 * 3. Structured Markdown output with tables
 * 4. MiniCPM extracts transactions from structured tables
 *
 * The structured Markdown has properly formatted tables,
 * making it much easier for MiniCPM to extract transaction data.
 */
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensurePaddleOcrVlFull, ensureMiniCpm } from './helpers/docker.js';

const PADDLEOCR_VL_URL = 'http://localhost:8000';
const OLLAMA_URL = 'http://localhost:11434';
const MINICPM_MODEL = 'minicpm-v:latest';

interface ITransaction {
  date: string;
  counterparty: string;
  amount: number;
}

/**
 * Convert PDF to PNG images using ImageMagick
 */
function convertPdfToImages(pdfPath: string): string[] {
  const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
  const outputPattern = path.join(tempDir, 'page-%d.png');

  try {
    execSync(
      `convert -density 300 -quality 100 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
      { stdio: 'pipe' }
    );

    const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.png')).sort();
    const images: string[] = [];

    for (const file of files) {
      const imagePath = path.join(tempDir, file);
      const imageData = fs.readFileSync(imagePath);
      images.push(imageData.toString('base64'));
    }

    return images;
  } finally {
    fs.rmSync(tempDir, { recursive: true, force: true });
  }
}

/**
 * Parse document using PaddleOCR-VL Full Pipeline (returns structured Markdown)
 */
async function parseDocument(imageBase64: string): Promise<string> {
  const response = await fetch(`${PADDLEOCR_VL_URL}/parse`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      image: imageBase64,
      output_format: 'markdown',
    }),
  });

  if (!response.ok) {
    const text = await response.text();
    throw new Error(`PaddleOCR-VL API error: ${response.status} - ${text}`);
  }

  const data = await response.json();

  if (!data.success) {
    throw new Error(`PaddleOCR-VL error: ${data.error}`);
  }

  return data.result?.markdown || '';
}

/**
 * Extract transactions from structured Markdown using MiniCPM
 */
async function extractTransactionsFromMarkdown(markdown: string): Promise<ITransaction[]> {
  console.log(`  [Extract] Processing ${markdown.length} chars of Markdown`);

  const prompt = `/nothink
Convert this bank statement to a JSON array of transactions.

Read the Amount values carefully:
- "- 21,47 €" means DEBIT, output as: -21.47
- "+ 1.000,00 €" means CREDIT, output as: 1000.00
- European format: comma = decimal point, dot = thousands

For each transaction output: {"date":"YYYY-MM-DD","counterparty":"NAME","amount":-21.47}

Return ONLY the JSON array, no explanation.

Document:
${markdown}`;

  const payload = {
    model: MINICPM_MODEL,
    prompt,
    stream: true,
    options: {
      num_predict: 16384,
      temperature: 0.1,
    },
  };

  const response = await fetch(`${OLLAMA_URL}/api/generate`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload),
  });

  if (!response.ok) {
    throw new Error(`Ollama API error: ${response.status}`);
  }

  const reader = response.body?.getReader();
  if (!reader) {
    throw new Error('No response body');
  }

  const decoder = new TextDecoder();
  let fullText = '';

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;

    const chunk = decoder.decode(value, { stream: true });
    const lines = chunk.split('\n').filter((l) => l.trim());

    for (const line of lines) {
      try {
        const json = JSON.parse(line);
        if (json.response) {
          fullText += json.response;
        }
      } catch {
        // Skip invalid JSON lines
      }
    }
  }

  // Extract JSON array from response
  const startIdx = fullText.indexOf('[');
  const endIdx = fullText.lastIndexOf(']') + 1;

  if (startIdx < 0 || endIdx <= startIdx) {
    throw new Error(`No JSON array found in response: ${fullText.substring(0, 200)}`);
  }

  const jsonStr = fullText.substring(startIdx, endIdx);
  return JSON.parse(jsonStr);
}

/**
 * Extract transactions from all pages of a bank statement
 */
async function extractAllTransactions(images: string[]): Promise<ITransaction[]> {
  const allTransactions: ITransaction[] = [];

  for (let i = 0; i < images.length; i++) {
    console.log(`  Processing page ${i + 1}/${images.length}...`);

    // Parse with full pipeline
    const markdown = await parseDocument(images[i]);
    console.log(`    [Parse] Got ${markdown.split('\n').length} lines of Markdown`);

    // Extract transactions
    try {
      const transactions = await extractTransactionsFromMarkdown(markdown);
      console.log(`    [Extracted] ${transactions.length} transactions`);
      allTransactions.push(...transactions);
    } catch (err) {
      console.log(`    [Error] ${err}`);
    }
  }

  return allTransactions;
}

/**
 * Compare transactions - find matching transaction in expected list
 */
function findMatchingTransaction(
  tx: ITransaction,
  expectedList: ITransaction[]
): ITransaction | undefined {
  return expectedList.find((exp) => {
    const dateMatch = tx.date === exp.date;
    const amountMatch = Math.abs(tx.amount - exp.amount) < 0.02;
    const counterpartyMatch =
      tx.counterparty?.toLowerCase().includes(exp.counterparty?.toLowerCase().slice(0, 10)) ||
      exp.counterparty?.toLowerCase().includes(tx.counterparty?.toLowerCase().slice(0, 10));
    return dateMatch && amountMatch && counterpartyMatch;
  });
}

/**
 * Calculate extraction accuracy
 */
function calculateAccuracy(
  extracted: ITransaction[],
  expected: ITransaction[]
): { matched: number; total: number; accuracy: number } {
  let matched = 0;
  const usedExpected = new Set<number>();

  for (const tx of extracted) {
    for (let i = 0; i < expected.length; i++) {
      if (usedExpected.has(i)) continue;

      const exp = expected[i];
      const dateMatch = tx.date === exp.date;
      const amountMatch = Math.abs(tx.amount - exp.amount) < 0.02;

      if (dateMatch && amountMatch) {
        matched++;
        usedExpected.add(i);
        break;
      }
    }
  }

  return {
    matched,
    total: expected.length,
    accuracy: expected.length > 0 ? (matched / expected.length) * 100 : 0,
  };
}

/**
 * Find all test cases (PDF + JSON pairs) in .nogit/bankstatements/
 */
function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: string }> {
  const testDir = path.join(process.cwd(), '.nogit/bankstatements');
  if (!fs.existsSync(testDir)) {
    return [];
  }

  const files = fs.readdirSync(testDir);
  const pdfFiles = files.filter((f) => f.endsWith('.pdf'));
  const testCases: Array<{ name: string; pdfPath: string; jsonPath: string }> = [];

  for (const pdf of pdfFiles) {
    const baseName = pdf.replace('.pdf', '');
    const jsonFile = `${baseName}.json`;
    if (files.includes(jsonFile)) {
      testCases.push({
        name: baseName,
        pdfPath: path.join(testDir, pdf),
        jsonPath: path.join(testDir, jsonFile),
      });
    }
  }

  testCases.sort((a, b) => a.name.localeCompare(b.name));
  return testCases;
}

// Tests

tap.test('setup: ensure Docker containers are running', async () => {
  console.log('\n[Setup] Checking Docker containers...\n');

  // Ensure PaddleOCR-VL Full Pipeline is running
  const paddleOk = await ensurePaddleOcrVlFull();
  expect(paddleOk).toBeTrue();

  // Ensure MiniCPM is running (for field extraction from Markdown)
  const minicpmOk = await ensureMiniCpm();
  expect(minicpmOk).toBeTrue();

  console.log('\n[Setup] All containers ready!\n');
});

// Dynamic test for each PDF/JSON pair
const testCases = findTestCases();
console.log(`\nFound ${testCases.length} bank statement test cases (PaddleOCR-VL Full Pipeline)\n`);

const results: Array<{ name: string; accuracy: number; matched: number; total: number }> = [];

for (const testCase of testCases) {
  tap.test(`should extract bank statement: ${testCase.name}`, async () => {
    // Load expected data
    const expected: ITransaction[] = JSON.parse(fs.readFileSync(testCase.jsonPath, 'utf-8'));
    console.log(`\n=== ${testCase.name} ===`);
    console.log(`Expected: ${expected.length} transactions`);

    const startTime = Date.now();

    // Convert PDF to images
    const images = convertPdfToImages(testCase.pdfPath);
    console.log(`  Pages: ${images.length}`);

    // Extract all transactions
    const extracted = await extractAllTransactions(images);

    const endTime = Date.now();
    const elapsedMs = endTime - startTime;

    // Calculate accuracy
    const accuracy = calculateAccuracy(extracted, expected);
    results.push({
      name: testCase.name,
      accuracy: accuracy.accuracy,
      matched: accuracy.matched,
      total: accuracy.total,
    });

    console.log(`  Extracted: ${extracted.length} transactions`);
    console.log(`  Matched: ${accuracy.matched}/${accuracy.total} (${accuracy.accuracy.toFixed(1)}%)`);
    console.log(`  Time: ${(elapsedMs / 1000).toFixed(1)}s`);

    // We expect at least 50% accuracy
    expect(accuracy.accuracy).toBeGreaterThan(50);
  });
}

tap.test('summary', async () => {
  const totalStatements = results.length;
  const avgAccuracy =
    results.length > 0 ? results.reduce((a, b) => a + b.accuracy, 0) / results.length : 0;
  const totalMatched = results.reduce((a, b) => a + b.matched, 0);
  const totalExpected = results.reduce((a, b) => a + b.total, 0);

  console.log(`\n======================================================`);
  console.log(`  Bank Statement Extraction Summary (PaddleOCR-VL Full)`);
  console.log(`======================================================`);
  console.log(`  Method: PaddleOCR-VL Full Pipeline -> MiniCPM`);
  console.log(`  Statements: ${totalStatements}`);
  console.log(`  Transactions: ${totalMatched}/${totalExpected} matched`);
  console.log(`  Avg accuracy: ${avgAccuracy.toFixed(1)}%`);
  console.log(`======================================================\n`);
});

export default tap.start();
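Note on the streaming loop above: Ollama's /api/generate with stream: true returns newline-delimited JSON, and the test accumulates the `response` fields by hand. The same pattern as a generic sketch; readNdjsonResponse is our illustrative name, the test files inline this loop:

// Sketch: accumulate the `response` fields from an NDJSON stream.
// Assumes each complete line is a JSON object like {"response":"...","done":false}.
async function readNdjsonResponse(body: ReadableStream<Uint8Array>): Promise<string> {
  const reader = body.getReader();
  const decoder = new TextDecoder();
  let full = '';
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    for (const line of decoder.decode(value, { stream: true }).split('\n')) {
      if (!line.trim()) continue;
      try {
        const json = JSON.parse(line);
        if (json.response) full += json.response;
      } catch {
        // A JSON line split across chunk boundaries fails to parse and is
        // dropped here - the test files accept the same limitation.
      }
    }
  }
  return full;
}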
345 test/test.bankstatements.qwen3vl.ts Normal file
@@ -0,0 +1,345 @@
/**
 * Bank statement extraction using Qwen3-VL 8B Vision (Direct)
 *
 * Multi-query approach:
 * 1. First ask how many transactions are on each page
 * 2. Then query each transaction individually
 * Single pass, no consensus voting.
 */
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensureMiniCpm } from './helpers/docker.js';

const OLLAMA_URL = 'http://localhost:11434';
const VISION_MODEL = 'qwen3-vl:8b';

interface ITransaction {
  date: string;
  counterparty: string;
  amount: number;
}

/**
 * Convert PDF to PNG images
 */
function convertPdfToImages(pdfPath: string): string[] {
  const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
  const outputPattern = path.join(tempDir, 'page-%d.png');

  try {
    execSync(
      `convert -density 150 -quality 90 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
      { stdio: 'pipe' }
    );

    const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.png')).sort();
    const images: string[] = [];

    for (const file of files) {
      const imagePath = path.join(tempDir, file);
      const imageData = fs.readFileSync(imagePath);
      images.push(imageData.toString('base64'));
    }

    return images;
  } finally {
    fs.rmSync(tempDir, { recursive: true, force: true });
  }
}

/**
 * Query Qwen3-VL with a simple prompt
 */
async function queryVision(image: string, prompt: string): Promise<string> {
  const response = await fetch(`${OLLAMA_URL}/api/chat`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: VISION_MODEL,
      messages: [{
        role: 'user',
        content: prompt,
        images: [image],
      }],
      stream: false,
      options: {
        num_predict: 500,
        temperature: 0.1,
      },
    }),
  });

  if (!response.ok) {
    throw new Error(`Ollama API error: ${response.status}`);
  }

  const data = await response.json();
  return (data.message?.content || '').trim();
}

/**
 * Count transactions on a page
 */
async function countTransactions(image: string, pageNum: number): Promise<number> {
  const response = await queryVision(image,
    `How many transaction rows are in this bank statement table?
Count only the data rows (with dates like "01.01.2024" and amounts like "- 50,00 €").
Do NOT count the header row or summary/total rows.
Answer with just the number, for example: 7`
  );

  console.log(`  [Page ${pageNum}] Count query response: "${response}"`);
  const match = response.match(/(\d+)/);
  const count = match ? parseInt(match[1], 10) : 0;
  console.log(`  [Page ${pageNum}] Parsed count: ${count}`);
  return count;
}

/**
 * Get a single transaction by index (logs immediately when complete)
 */
async function getTransaction(image: string, index: number, pageNum: number): Promise<ITransaction | null> {
  const response = await queryVision(image,
    `This is a bank statement. Look at transaction row #${index} in the table (counting from top, excluding headers).

Extract this transaction's details:
- Date in YYYY-MM-DD format
- Counterparty/description name
- Amount as number (negative for debits like "- 21,47 €" = -21.47, positive for credits like "+ 100,00 €" = 100.00)

Answer in format: DATE|COUNTERPARTY|AMOUNT
Example: 2024-01-15|Amazon|-25.99`
  );

  // Parse the response
  const lines = response.split('\n').filter(l => l.includes('|'));
  const line = lines[lines.length - 1] || response;
  const parts = line.split('|').map(p => p.trim());

  if (parts.length >= 3) {
    // Parse amount - handle various formats (strip currency symbols, normalize Unicode minus/dash)
    let amountStr = parts[2].replace(/[€$£\s]/g, '').replace('−', '-').replace('–', '-');
    // European format: comma is decimal (e.g. "1.234,56" -> "1234.56")
    if (amountStr.includes(',')) {
      amountStr = amountStr.replace(/\./g, '').replace(',', '.');
    }
    const amount = parseFloat(amountStr) || 0;

    const tx = {
      date: parts[0],
      counterparty: parts[1],
      amount: amount,
    };
    // Log immediately as this transaction completes
    console.log(`  [P${pageNum} Tx${index.toString().padStart(2, ' ')}] ${tx.date} | ${tx.counterparty.substring(0, 25).padEnd(25)} | ${tx.amount >= 0 ? '+' : ''}${tx.amount.toFixed(2)}`);
    return tx;
  }

  // Log raw response on parse failure
  console.log(`  [P${pageNum} Tx${index.toString().padStart(2, ' ')}] PARSE FAILED: "${response.replace(/\n/g, ' ').substring(0, 60)}..."`);
  return null;
}

/**
 * Extract transactions from a single page using multi-query approach
 */
async function extractTransactionsFromPage(image: string, pageNum: number): Promise<ITransaction[]> {
  // Step 1: Count transactions
  const count = await countTransactions(image, pageNum);

  if (count === 0) {
    return [];
  }

  // Step 2: Query each transaction (in batches, to avoid overwhelming the server)
  // Each transaction logs itself as it completes
  const transactions: ITransaction[] = [];
  const batchSize = 5;

  for (let start = 1; start <= count; start += batchSize) {
    const end = Math.min(start + batchSize - 1, count);
    const indices = Array.from({ length: end - start + 1 }, (_, i) => start + i);

    // Query batch in parallel - each logs as it completes
    const results = await Promise.all(
      indices.map(i => getTransaction(image, i, pageNum))
    );

    for (const tx of results) {
      if (tx) {
        transactions.push(tx);
      }
    }
  }

  console.log(`  [Page ${pageNum}] Complete: ${transactions.length}/${count} extracted`);
  return transactions;
}

/**
 * Extract all transactions from bank statement
 */
async function extractTransactions(images: string[]): Promise<ITransaction[]> {
  console.log(`  [Vision] Processing ${images.length} page(s) with Qwen3-VL (multi-query)`);

  const allTransactions: ITransaction[] = [];

  for (let i = 0; i < images.length; i++) {
    const pageTransactions = await extractTransactionsFromPage(images[i], i + 1);
    allTransactions.push(...pageTransactions);
  }

  console.log(`  [Vision] Total: ${allTransactions.length} transactions`);
  return allTransactions;
}

/**
 * Compare transactions
 */
function compareTransactions(
  extracted: ITransaction[],
  expected: ITransaction[]
): { matches: number; total: number; errors: string[] } {
  const errors: string[] = [];
  let matches = 0;

  for (let i = 0; i < expected.length; i++) {
    const exp = expected[i];
    const ext = extracted[i];

    if (!ext) {
      errors.push(`Missing transaction ${i}: ${exp.date} ${exp.counterparty}`);
      continue;
    }

    const dateMatch = ext.date === exp.date;
    const amountMatch = Math.abs(ext.amount - exp.amount) < 0.01;

    if (dateMatch && amountMatch) {
      matches++;
    } else {
      errors.push(`Mismatch at ${i}: expected ${exp.date}/${exp.amount}, got ${ext.date}/${ext.amount}`);
    }
  }

  if (extracted.length > expected.length) {
    errors.push(`Extra transactions: ${extracted.length - expected.length}`);
  }

  return { matches, total: expected.length, errors };
}

/**
 * Find test cases in .nogit/
 */
function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: string }> {
  const testDir = path.join(process.cwd(), '.nogit');
  if (!fs.existsSync(testDir)) return [];

  const files = fs.readdirSync(testDir);
  const testCases: Array<{ name: string; pdfPath: string; jsonPath: string }> = [];

  for (const pdf of files.filter((f: string) => f.endsWith('.pdf'))) {
    const baseName = pdf.replace('.pdf', '');
    const jsonFile = `${baseName}.json`;
    if (files.includes(jsonFile)) {
      testCases.push({
        name: baseName,
        pdfPath: path.join(testDir, pdf),
        jsonPath: path.join(testDir, jsonFile),
      });
    }
  }

  return testCases.sort((a, b) => a.name.localeCompare(b.name));
}

/**
 * Ensure Qwen3-VL model is available
 */
async function ensureQwen3Vl(): Promise<boolean> {
  try {
    const response = await fetch(`${OLLAMA_URL}/api/tags`);
    if (response.ok) {
      const data = await response.json();
      const models = data.models || [];
      if (models.some((m: { name: string }) => m.name === VISION_MODEL)) {
        console.log(`[Ollama] Model available: ${VISION_MODEL}`);
        return true;
      }
    }
  } catch {
    return false;
  }

  console.log(`[Ollama] Pulling ${VISION_MODEL}...`);
  const pullResponse = await fetch(`${OLLAMA_URL}/api/pull`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ name: VISION_MODEL, stream: false }),
  });

  return pullResponse.ok;
}

// Tests

tap.test('setup: ensure Qwen3-VL is running', async () => {
  console.log('\n[Setup] Checking Qwen3-VL 8B...\n');
  // ensureMiniCpm brings up the shared Ollama container that also serves Qwen3-VL
  const ollamaOk = await ensureMiniCpm();
  expect(ollamaOk).toBeTrue();
  const visionOk = await ensureQwen3Vl();
  expect(visionOk).toBeTrue();
  console.log('\n[Setup] Ready!\n');
});

const testCases = findTestCases();
console.log(`\nFound ${testCases.length} bank statement test cases (Qwen3-VL)\n`);

let passedCount = 0;
let failedCount = 0;

for (const testCase of testCases) {
  tap.test(`should extract: ${testCase.name}`, async () => {
    const expected: ITransaction[] = JSON.parse(fs.readFileSync(testCase.jsonPath, 'utf-8'));
    console.log(`\n=== ${testCase.name} ===`);
    console.log(`Expected: ${expected.length} transactions`);

    const images = convertPdfToImages(testCase.pdfPath);
    console.log(`  Pages: ${images.length}`);

    const extracted = await extractTransactions(images);
    console.log(`  Extracted: ${extracted.length} transactions`);

    const result = compareTransactions(extracted, expected);
    const accuracy = result.total > 0 ? result.matches / result.total : 0;

    if (accuracy >= 0.95 && extracted.length === expected.length) {
      passedCount++;
      console.log(`  Result: PASS (${result.matches}/${result.total})`);
    } else {
      failedCount++;
      console.log(`  Result: FAIL (${result.matches}/${result.total})`);
      result.errors.slice(0, 5).forEach((e) => console.log(`    - ${e}`));
    }

    expect(accuracy).toBeGreaterThan(0.95);
    expect(extracted.length).toEqual(expected.length);
  });
}

tap.test('summary', async () => {
  const total = testCases.length;
  console.log(`\n======================================================`);
  console.log(`  Bank Statement Summary (Qwen3-VL Vision)`);
  console.log(`======================================================`);
  console.log(`  Method: Multi-query (count then extract each)`);
  console.log(`  Passed: ${passedCount}/${total}`);
  console.log(`  Failed: ${failedCount}/${total}`);
  console.log(`======================================================\n`);
});

export default tap.start();
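Note on the per-page extraction above: transaction queries run in batches of five via Promise.all, so at most five requests are in flight at once. The same batching pattern as a generic sketch; mapInBatches is a hypothetical name, the test inlines the loop:

// Sketch: run an async mapper over items, at most `batchSize` in flight at once.
async function mapInBatches<T, R>(
  items: T[],
  batchSize: number,
  fn: (item: T) => Promise<R>,
): Promise<R[]> {
  const out: R[] = [];
  for (let start = 0; start < items.length; start += batchSize) {
    const batch = items.slice(start, start + batchSize);
    // Requests within a batch run concurrently; batches run sequentially.
    out.push(...(await Promise.all(batch.map(fn))));
  }
  return out;
}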
@@ -1,455 +0,0 @@
/**
 * Invoice extraction test using MiniCPM-V (visual) + PaddleOCR-VL (OCR augmentation)
 *
 * This is the combined approach that uses both models for best accuracy:
 * - MiniCPM-V for visual understanding
 * - PaddleOCR-VL for OCR text to augment prompts
 */
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensurePaddleOcrVl, ensureMiniCpm } from './helpers/docker.js';

const OLLAMA_URL = 'http://localhost:11434';
const MODEL = 'minicpm-v:latest';
const PADDLEOCR_VL_URL = 'http://localhost:8000';

interface IInvoice {
  invoice_number: string;
  invoice_date: string;
  vendor_name: string;
  currency: string;
  net_amount: number;
  vat_amount: number;
  total_amount: number;
}

/**
 * Extract OCR text from an image using PaddleOCR-VL (OpenAI-compatible API)
 */
async function extractOcrText(imageBase64: string): Promise<string> {
  try {
    const response = await fetch(`${PADDLEOCR_VL_URL}/v1/chat/completions`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        model: 'paddleocr-vl',
        messages: [{
          role: 'user',
          content: [
            { type: 'image_url', image_url: { url: `data:image/png;base64,${imageBase64}` } },
            { type: 'text', text: 'OCR:' }
          ]
        }],
        temperature: 0.0,
        max_tokens: 4096
      }),
    });

    if (!response.ok) return '';

    const data = await response.json();
    return data.choices?.[0]?.message?.content || '';
  } catch {
    // PaddleOCR-VL unavailable
  }
  return '';
}

/**
 * Build prompt with optional OCR text
 */
function buildPrompt(ocrText: string): string {
  const base = `/nothink
You are an invoice parser. Extract the following fields from this invoice:

1. invoice_number: The invoice/receipt number
2. invoice_date: Date in YYYY-MM-DD format
3. vendor_name: Company that issued the invoice
4. currency: EUR, USD, etc.
5. net_amount: Amount before tax (if shown)
6. vat_amount: Tax/VAT amount (if shown, 0 if reverse charge or no tax)
7. total_amount: Final amount due

Return ONLY valid JSON in this exact format:
{"invoice_number":"XXX","invoice_date":"YYYY-MM-DD","vendor_name":"Company Name","currency":"EUR","net_amount":100.00,"vat_amount":19.00,"total_amount":119.00}

If a field is not visible, use null for strings or 0 for numbers.
No explanation, just the JSON object.`;

  if (ocrText) {
    // Limit OCR text to prevent context overflow
    const maxOcrLength = 4000;
    const truncatedOcr = ocrText.length > maxOcrLength
      ? ocrText.substring(0, maxOcrLength) + '\n... (truncated)'
      : ocrText;

    return `${base}

OCR text extracted from the invoice (use for reference):
---
${truncatedOcr}
---

Cross-reference the image with the OCR text above for accuracy.`;
  }
  return base;
}

/**
 * Convert PDF to PNG images using ImageMagick
 */
function convertPdfToImages(pdfPath: string): string[] {
  const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
  const outputPattern = path.join(tempDir, 'page-%d.png');

  try {
    execSync(
      `convert -density 200 -quality 90 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
      { stdio: 'pipe' }
    );

    const files = fs.readdirSync(tempDir).filter((f) => f.endsWith('.png')).sort();
    const images: string[] = [];

    for (const file of files) {
      const imagePath = path.join(tempDir, file);
      const imageData = fs.readFileSync(imagePath);
      images.push(imageData.toString('base64'));
    }

    return images;
  } finally {
    fs.rmSync(tempDir, { recursive: true, force: true });
  }
}

/**
 * Single extraction pass
 */
async function extractOnce(images: string[], passNum: number, ocrText: string = ''): Promise<IInvoice> {
  const payload = {
    model: MODEL,
    prompt: buildPrompt(ocrText),
    images,
    stream: true,
    options: {
      num_predict: 2048,
      temperature: 0.1,
    },
  };

  const response = await fetch(`${OLLAMA_URL}/api/generate`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload),
  });

  if (!response.ok) {
    throw new Error(`Ollama API error: ${response.status}`);
  }

  const reader = response.body?.getReader();
  if (!reader) {
    throw new Error('No response body');
  }

  const decoder = new TextDecoder();
  let fullText = '';

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;

    const chunk = decoder.decode(value, { stream: true });
    const lines = chunk.split('\n').filter((l) => l.trim());

    for (const line of lines) {
      try {
        const json = JSON.parse(line);
        if (json.response) {
          fullText += json.response;
        }
      } catch {
        // Skip invalid JSON lines
      }
    }
  }

  // Extract JSON from response
  const startIdx = fullText.indexOf('{');
  const endIdx = fullText.lastIndexOf('}') + 1;

  if (startIdx < 0 || endIdx <= startIdx) {
    throw new Error(`No JSON object found in response: ${fullText.substring(0, 200)}`);
  }

  const jsonStr = fullText.substring(startIdx, endIdx);
  return JSON.parse(jsonStr);
}

/**
 * Create a hash of invoice for comparison (using key fields)
 */
function hashInvoice(invoice: IInvoice): string {
  return `${invoice.invoice_number}|${invoice.invoice_date}|${invoice.total_amount.toFixed(2)}`;
}

/**
 * Extract with majority voting - run until 2 passes match
 * Optimization: Run Pass 1, OCR, and Pass 2 (after OCR) in parallel
 */
async function extractWithConsensus(images: string[], invoiceName: string, maxPasses: number = 5): Promise<IInvoice> {
  const results: Array<{ invoice: IInvoice; hash: string }> = [];
  const hashCounts: Map<string, number> = new Map();

  const addResult = (invoice: IInvoice, passLabel: string): number => {
    const hash = hashInvoice(invoice);
    results.push({ invoice, hash });
    hashCounts.set(hash, (hashCounts.get(hash) || 0) + 1);
    console.log(`  [${passLabel}] ${invoice.invoice_number} | ${invoice.invoice_date} | ${invoice.total_amount} ${invoice.currency}`);
    return hashCounts.get(hash)!;
  };

  // OPTIMIZATION: Run Pass 1 (no OCR) in parallel with OCR -> Pass 2 (with OCR)
  let ocrText = '';
  const pass1Promise = extractOnce(images, 1, '').catch((err) => ({ error: err }));

  // OCR then immediately Pass 2
  const ocrThenPass2Promise = (async () => {
    ocrText = await extractOcrText(images[0]);
    if (ocrText) {
      console.log(`  [OCR] Extracted ${ocrText.split('\n').length} text lines`);
    }
    return extractOnce(images, 2, ocrText).catch((err) => ({ error: err }));
  })();

  // Wait for both to complete
  const [pass1Result, pass2Result] = await Promise.all([pass1Promise, ocrThenPass2Promise]);

  // Process Pass 1 result
  if ('error' in pass1Result) {
    console.log(`  [Pass 1] Error: ${(pass1Result as { error: unknown }).error}`);
  } else {
    const count = addResult(pass1Result as IInvoice, 'Pass 1');
    if (count >= 2) {
      console.log(`  [Consensus] Reached after parallel passes`);
      return pass1Result as IInvoice;
    }
  }

  // Process Pass 2 result
  if ('error' in pass2Result) {
    console.log(`  [Pass 2+OCR] Error: ${(pass2Result as { error: unknown }).error}`);
  } else {
    const count = addResult(pass2Result as IInvoice, 'Pass 2+OCR');
    if (count >= 2) {
      console.log(`  [Consensus] Reached after parallel passes`);
      return pass2Result as IInvoice;
    }
  }

  // Continue with passes 3+ using OCR text if no consensus yet
  for (let pass = 3; pass <= maxPasses; pass++) {
    try {
      const invoice = await extractOnce(images, pass, ocrText);
      const count = addResult(invoice, `Pass ${pass}+OCR`);

      if (count >= 2) {
        console.log(`  [Consensus] Reached after ${pass} passes`);
        return invoice;
      }
    } catch (err) {
      console.log(`  [Pass ${pass}] Error: ${err}`);
    }
  }

  // No consensus reached - return the most common result
  let bestHash = '';
  let bestCount = 0;
  for (const [hash, count] of hashCounts) {
    if (count > bestCount) {
      bestCount = count;
      bestHash = hash;
    }
  }

  if (!bestHash) {
    throw new Error(`No valid results for ${invoiceName}`);
  }

  const best = results.find((r) => r.hash === bestHash)!;
  console.log(`  [No consensus] Using most common result (${bestCount}/${maxPasses} passes)`);
  return best.invoice;
}

/**
 * Compare extracted invoice against expected
 */
function compareInvoice(
  extracted: IInvoice,
  expected: IInvoice
): { match: boolean; errors: string[] } {
  const errors: string[] = [];

  // Compare invoice number (normalize by removing spaces and case)
  const extNum = extracted.invoice_number?.replace(/\s/g, '').toLowerCase() || '';
  const expNum = expected.invoice_number?.replace(/\s/g, '').toLowerCase() || '';
  if (extNum !== expNum) {
    errors.push(`invoice_number: expected "${expected.invoice_number}", got "${extracted.invoice_number}"`);
  }

  // Compare date
  if (extracted.invoice_date !== expected.invoice_date) {
    errors.push(`invoice_date: expected "${expected.invoice_date}", got "${extracted.invoice_date}"`);
  }

  // Compare total amount (with tolerance)
  if (Math.abs(extracted.total_amount - expected.total_amount) > 0.02) {
    errors.push(`total_amount: expected ${expected.total_amount}, got ${extracted.total_amount}`);
  }

  // Compare currency
  if (extracted.currency?.toUpperCase() !== expected.currency?.toUpperCase()) {
    errors.push(`currency: expected "${expected.currency}", got "${extracted.currency}"`);
  }

  return { match: errors.length === 0, errors };
}

/**
 * Find all test cases (PDF + JSON pairs) in .nogit/invoices/
 * Priority invoices (like vodafone) run first for quick feedback
 */
function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: string }> {
  const testDir = path.join(process.cwd(), '.nogit/invoices');
  if (!fs.existsSync(testDir)) {
    return [];
  }

  const files = fs.readdirSync(testDir);
  const pdfFiles = files.filter((f) => f.endsWith('.pdf'));
  const testCases: Array<{ name: string; pdfPath: string; jsonPath: string }> = [];

  for (const pdf of pdfFiles) {
    const baseName = pdf.replace('.pdf', '');
    const jsonFile = `${baseName}.json`;
    if (files.includes(jsonFile)) {
      testCases.push({
        name: baseName,
        pdfPath: path.join(testDir, pdf),
        jsonPath: path.join(testDir, jsonFile),
      });
    }
  }

  // Sort with priority invoices first, then alphabetically
  const priorityPrefixes = ['vodafone'];
  testCases.sort((a, b) => {
    const aPriority = priorityPrefixes.findIndex((p) => a.name.startsWith(p));
    const bPriority = priorityPrefixes.findIndex((p) => b.name.startsWith(p));

    // Both have priority - sort by priority order
    if (aPriority >= 0 && bPriority >= 0) return aPriority - bPriority;
    // Only a has priority - a comes first
    if (aPriority >= 0) return -1;
    // Only b has priority - b comes first
    if (bPriority >= 0) return 1;
    // Neither has priority - alphabetical
    return a.name.localeCompare(b.name);
  });

  return testCases;
}

// Tests

tap.test('setup: ensure Docker containers are running', async () => {
  console.log('\n[Setup] Checking Docker containers...\n');

  // Ensure PaddleOCR-VL is running (auto-detects GPU/CPU)
  const paddleOk = await ensurePaddleOcrVl();
  expect(paddleOk).toBeTrue();

  // Ensure MiniCPM is running
  const minicpmOk = await ensureMiniCpm();
  expect(minicpmOk).toBeTrue();

  console.log('\n[Setup] All containers ready!\n');
});

tap.test('should have MiniCPM-V 4.5 model loaded', async () => {
  const response = await fetch(`${OLLAMA_URL}/api/tags`);
  const data = await response.json();
  const modelNames = data.models.map((m: { name: string }) => m.name);
  expect(modelNames.some((name: string) => name.includes('minicpm-v4.5'))).toBeTrue();
});

// Dynamic test for each PDF/JSON pair
const testCases = findTestCases();
console.log(`\nFound ${testCases.length} invoice test cases\n`);

let passedCount = 0;
let failedCount = 0;
const processingTimes: number[] = [];

for (const testCase of testCases) {
  tap.test(`should extract invoice: ${testCase.name}`, async () => {
    // Load expected data
    const expected: IInvoice = JSON.parse(fs.readFileSync(testCase.jsonPath, 'utf-8'));
    console.log(`\n=== ${testCase.name} ===`);
    console.log(`Expected: ${expected.invoice_number} | ${expected.invoice_date} | ${expected.total_amount} ${expected.currency}`);

    const startTime = Date.now();

    // Convert PDF to images
    const images = convertPdfToImages(testCase.pdfPath);
    console.log(`  Pages: ${images.length}`);

    // Extract with consensus voting
    const extracted = await extractWithConsensus(images, testCase.name);

    const endTime = Date.now();
    const elapsedMs = endTime - startTime;
    processingTimes.push(elapsedMs);

    // Compare results
    const result = compareInvoice(extracted, expected);

    if (result.match) {
      passedCount++;
      console.log(`  Result: MATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
    } else {
      failedCount++;
      console.log(`  Result: MISMATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
      result.errors.forEach((e) => console.log(`    - ${e}`));
    }

    // Assert match
    expect(result.match).toBeTrue();
  });
}

tap.test('summary', async () => {
  const totalInvoices = testCases.length;
  const accuracy = totalInvoices > 0 ? (passedCount / totalInvoices) * 100 : 0;
  const totalTimeMs = processingTimes.reduce((a, b) => a + b, 0);
  const avgTimeMs = processingTimes.length > 0 ? totalTimeMs / processingTimes.length : 0;
  const avgTimeSec = avgTimeMs / 1000;
  const totalTimeSec = totalTimeMs / 1000;

  console.log(`\n========================================`);
  console.log(`  Invoice Extraction Summary`);
  console.log(`========================================`);
  console.log(`  Passed: ${passedCount}/${totalInvoices}`);
  console.log(`  Failed: ${failedCount}/${totalInvoices}`);
  console.log(`  Accuracy: ${accuracy.toFixed(1)}%`);
  console.log(`----------------------------------------`);
  console.log(`  Total time: ${totalTimeSec.toFixed(1)}s`);
  console.log(`  Avg per inv: ${avgTimeSec.toFixed(1)}s`);
  console.log(`========================================\n`);
});

export default tap.start();
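Note on the consensus loop above: results are keyed on a hash of invoice number, date, and total, and extraction stops as soon as any hash is seen twice. Reduced to its core, the voting scheme looks like this; firstRepeated is our illustrative name, not part of the test file:

// Sketch: return the first value whose key repeats, else the most frequent one.
// Assumes `values` is non-empty, mirroring the "no valid results" throw above.
function firstRepeated<T>(values: T[], keyOf: (v: T) => string): T {
  const counts = new Map<string, { count: number; value: T }>();
  for (const v of values) {
    const key = keyOf(v);
    const entry = counts.get(key) ?? { count: 0, value: v };
    entry.count++;
    counts.set(key, entry);
    if (entry.count >= 2) return entry.value; // consensus: two passes agree
  }
  // No agreement - fall back to the most common result, as the test does.
  return [...counts.values()].sort((a, b) => b.count - a.count)[0].value;
}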
@@ -1,8 +1,10 @@
|
|||||||
/**
|
/**
|
||||||
* Invoice extraction test using MiniCPM-V only (visual extraction)
|
* Invoice extraction test using MiniCPM-V (visual extraction)
|
||||||
*
|
*
|
||||||
* This tests MiniCPM-V's ability to extract invoice data directly from images
|
* Consensus approach:
|
||||||
* without any OCR augmentation.
|
* 1. Pass 1: Fast JSON extraction
|
||||||
|
* 2. Pass 2: Confirm with thinking enabled
|
||||||
|
* 3. If mismatch: repeat until consensus or max attempts
|
||||||
*/
|
*/
|
||||||
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||||
import * as fs from 'fs';
|
import * as fs from 'fs';
|
||||||
@@ -12,7 +14,7 @@ import * as os from 'os';
|
|||||||
import { ensureMiniCpm } from './helpers/docker.js';
|
import { ensureMiniCpm } from './helpers/docker.js';
|
||||||
|
|
||||||
const OLLAMA_URL = 'http://localhost:11434';
|
const OLLAMA_URL = 'http://localhost:11434';
|
||||||
const MODEL = 'minicpm-v:latest';
|
const MODEL = 'openbmb/minicpm-v4.5:q8_0';
|
||||||
|
|
||||||
interface IInvoice {
|
interface IInvoice {
|
||||||
invoice_number: string;
|
invoice_number: string;
|
||||||
@@ -24,28 +26,6 @@ interface IInvoice {
|
|||||||
total_amount: number;
|
total_amount: number;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Build extraction prompt (MiniCPM-V only, no OCR augmentation)
|
|
||||||
*/
|
|
||||||
function buildPrompt(): string {
|
|
||||||
return `/nothink
|
|
||||||
You are an invoice parser. Extract the following fields from this invoice:
|
|
||||||
|
|
||||||
1. invoice_number: The invoice/receipt number
|
|
||||||
2. invoice_date: Date in YYYY-MM-DD format
|
|
||||||
3. vendor_name: Company that issued the invoice
|
|
||||||
4. currency: EUR, USD, etc.
|
|
||||||
5. net_amount: Amount before tax (if shown)
|
|
||||||
6. vat_amount: Tax/VAT amount (if shown, 0 if reverse charge or no tax)
|
|
||||||
7. total_amount: Final amount due
|
|
||||||
|
|
||||||
Return ONLY valid JSON in this exact format:
|
|
||||||
{"invoice_number":"XXX","invoice_date":"YYYY-MM-DD","vendor_name":"Company Name","currency":"EUR","net_amount":100.00,"vat_amount":19.00,"total_amount":119.00}
|
|
||||||
|
|
||||||
If a field is not visible, use null for strings or 0 for numbers.
|
|
||||||
No explanation, just the JSON object.`;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Convert PDF to PNG images using ImageMagick
|
* Convert PDF to PNG images using ImageMagick
|
||||||
*/
|
*/
|
||||||
@@ -55,7 +35,7 @@ function convertPdfToImages(pdfPath: string): string[] {
|
|||||||
|
|
||||||
try {
|
try {
|
||||||
execSync(
|
execSync(
|
||||||
`convert -density 200 -quality 90 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
|
`convert -density 300 -quality 95 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
|
||||||
{ stdio: 'pipe' }
|
{ stdio: 'pipe' }
|
||||||
);
|
);
|
||||||
|
|
||||||
@@ -74,123 +54,288 @@ function convertPdfToImages(pdfPath: string): string[] {
   }
 }
 
-/**
- * Single extraction pass with MiniCPM-V
- */
-async function extractOnce(images: string[], passNum: number): Promise<IInvoice> {
-  const payload = {
-    model: MODEL,
-    prompt: buildPrompt(),
-    images,
-    stream: true,
-    options: {
-      num_predict: 2048,
-      temperature: 0.1,
-    },
-  };
+const JSON_PROMPT = `Extract invoice data from this image. Return ONLY a JSON object with these exact fields:
+{
+  "invoice_number": "the invoice number (not VAT ID, not customer ID)",
+  "invoice_date": "YYYY-MM-DD format",
+  "vendor_name": "company that issued the invoice",
+  "currency": "EUR, USD, or GBP",
+  "net_amount": 0.00,
+  "vat_amount": 0.00,
+  "total_amount": 0.00
+}
+Return only the JSON, no explanation.`;
 
-  const response = await fetch(`${OLLAMA_URL}/api/generate`, {
+/**
+ * Query MiniCPM-V for JSON output (fast, no thinking)
+ */
+async function queryJsonFast(images: string[]): Promise<string> {
+  const response = await fetch(`${OLLAMA_URL}/api/chat`, {
     method: 'POST',
     headers: { 'Content-Type': 'application/json' },
-    body: JSON.stringify(payload),
+    body: JSON.stringify({
+      model: MODEL,
+      messages: [{
+        role: 'user',
+        content: JSON_PROMPT,
+        images: images,
+      }],
+      stream: false,
+      options: {
+        num_predict: 1000,
+        temperature: 0.1,
+      },
+    }),
   });
 
   if (!response.ok) {
     throw new Error(`Ollama API error: ${response.status}`);
   }
 
-  const reader = response.body?.getReader();
-  if (!reader) {
-    throw new Error('No response body');
+  const data = await response.json();
+  return (data.message?.content || '').trim();
+}
+
+/**
+ * Query MiniCPM-V for JSON output with thinking enabled (slower, more accurate)
+ */
+async function queryJsonWithThinking(images: string[]): Promise<string> {
+  const response = await fetch(`${OLLAMA_URL}/api/chat`, {
+    method: 'POST',
+    headers: { 'Content-Type': 'application/json' },
+    body: JSON.stringify({
+      model: MODEL,
+      messages: [{
+        role: 'user',
+        content: `Think carefully about this invoice image, then ${JSON_PROMPT}`,
+        images: images,
+      }],
+      stream: false,
+      options: {
+        num_predict: 2000,
+        temperature: 0.1,
+      },
+    }),
+  });
+
+  if (!response.ok) {
+    throw new Error(`Ollama API error: ${response.status}`);
   }
 
-  const decoder = new TextDecoder();
-  let fullText = '';
+  const data = await response.json();
+  return (data.message?.content || '').trim();
+}
 
-  while (true) {
-    const { done, value } = await reader.read();
-    if (done) break;
+/**
+ * Parse amount from string (handles European format)
+ */
+function parseAmount(s: string | number | undefined): number {
+  if (s === undefined || s === null) return 0;
+  if (typeof s === 'number') return s;
+  const match = s.match(/([\d.,]+)/);
+  if (!match) return 0;
+  const numStr = match[1];
+  // Handle European format: 1.234,56 → 1234.56
+  const normalized = numStr.includes(',') && numStr.indexOf(',') > numStr.lastIndexOf('.')
+    ? numStr.replace(/\./g, '').replace(',', '.')
+    : numStr.replace(/,/g, '');
+  return parseFloat(normalized) || 0;
+}
 
-    const chunk = decoder.decode(value, { stream: true });
-    const lines = chunk.split('\n').filter((l) => l.trim());
+/**
+ * Extract invoice number from potentially verbose response
+ */
+function extractInvoiceNumber(s: string | undefined): string {
+  if (!s) return '';
+  let clean = s.replace(/\*\*/g, '').replace(/`/g, '').trim();
+  const patterns = [
+    /\b([A-Z]{2,3}\d{10,})\b/i, // IEE2022006460244
+    /\b([A-Z]\d{8,})\b/i, // R0014359508
+    /\b(INV[-\s]?\d{4}[-\s]?\d+)\b/i, // INV-2024-001
+    /\b(\d{7,})\b/, // 1579087430
+  ];
+  for (const pattern of patterns) {
+    const match = clean.match(pattern);
+    if (match) return match[1];
+  }
+  return clean.replace(/[^A-Z0-9-]/gi, '').trim() || clean;
+}
 
-    for (const line of lines) {
+/**
+ * Extract date (YYYY-MM-DD) from response
+ */
+function extractDate(s: string | undefined): string {
+  if (!s) return '';
+  let clean = s.replace(/\*\*/g, '').replace(/`/g, '').trim();
+  const isoMatch = clean.match(/(\d{4}-\d{2}-\d{2})/);
+  if (isoMatch) return isoMatch[1];
+  // Try DD/MM/YYYY or DD.MM.YYYY
+  const dmyMatch = clean.match(/(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})/);
+  if (dmyMatch) {
+    return `${dmyMatch[3]}-${dmyMatch[2].padStart(2, '0')}-${dmyMatch[1].padStart(2, '0')}`;
+  }
+  return clean.replace(/[^\d-]/g, '').trim();
+}
+
+/**
+ * Extract currency
+ */
+function extractCurrency(s: string | undefined): string {
+  if (!s) return 'EUR';
+  const upper = s.toUpperCase();
+  if (upper.includes('EUR') || upper.includes('€')) return 'EUR';
+  if (upper.includes('USD') || upper.includes('$')) return 'USD';
+  if (upper.includes('GBP') || upper.includes('£')) return 'GBP';
+  return 'EUR';
+}
+
+/**
+ * Extract JSON from response (handles markdown code blocks)
+ */
+function extractJsonFromResponse(response: string): Record<string, unknown> | null {
+  // Try to find JSON in markdown code block
+  const codeBlockMatch = response.match(/```(?:json)?\s*([\s\S]*?)```/);
+  const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : response.trim();
+
+  try {
+    return JSON.parse(jsonStr);
+  } catch {
+    // Try to find JSON object pattern
+    const jsonMatch = jsonStr.match(/\{[\s\S]*\}/);
+    if (jsonMatch) {
       try {
-        const json = JSON.parse(line);
-        if (json.response) {
-          fullText += json.response;
-        }
+        return JSON.parse(jsonMatch[0]);
       } catch {
-        // Skip invalid JSON lines
+        return null;
       }
     }
+    return null;
   }
+}
 
-  // Extract JSON from response
-  const startIdx = fullText.indexOf('{');
-  const endIdx = fullText.lastIndexOf('}') + 1;
-
-  if (startIdx < 0 || endIdx <= startIdx) {
-    throw new Error(`No JSON object found in response: ${fullText.substring(0, 200)}`);
-  }
-
-  const jsonStr = fullText.substring(startIdx, endIdx);
-  return JSON.parse(jsonStr);
-}
-
 /**
- * Create a hash of invoice for comparison (using key fields)
+ * Parse JSON response into IInvoice
  */
-function hashInvoice(invoice: IInvoice): string {
-  return `${invoice.invoice_number}|${invoice.invoice_date}|${invoice.total_amount.toFixed(2)}`;
+function parseJsonToInvoice(response: string): IInvoice | null {
+  const parsed = extractJsonFromResponse(response);
+  if (!parsed) return null;
+
+  return {
+    invoice_number: extractInvoiceNumber(String(parsed.invoice_number || '')),
+    invoice_date: extractDate(String(parsed.invoice_date || '')),
+    vendor_name: String(parsed.vendor_name || '').replace(/\*\*/g, '').replace(/`/g, '').trim(),
+    currency: extractCurrency(String(parsed.currency || '')),
+    net_amount: parseAmount(parsed.net_amount as string | number),
+    vat_amount: parseAmount(parsed.vat_amount as string | number),
+    total_amount: parseAmount(parsed.total_amount as string | number),
+  };
 }
 
 /**
- * Extract with consensus voting using MiniCPM-V only
+ * Compare two invoices for consensus (key fields must match)
  */
-async function extractWithConsensus(images: string[], invoiceName: string, maxPasses: number = 5): Promise<IInvoice> {
-  const results: Array<{ invoice: IInvoice; hash: string }> = [];
-  const hashCounts: Map<string, number> = new Map();
+function invoicesMatch(a: IInvoice, b: IInvoice): boolean {
+  const numMatch = a.invoice_number.toLowerCase() === b.invoice_number.toLowerCase();
+  const dateMatch = a.invoice_date === b.invoice_date;
+  const totalMatch = Math.abs(a.total_amount - b.total_amount) < 0.02;
+  return numMatch && dateMatch && totalMatch;
+}
 
-  const addResult = (invoice: IInvoice, passLabel: string): number => {
-    const hash = hashInvoice(invoice);
-    results.push({ invoice, hash });
-    hashCounts.set(hash, (hashCounts.get(hash) || 0) + 1);
-    console.log(`  [${passLabel}] ${invoice.invoice_number} | ${invoice.invoice_date} | ${invoice.total_amount} ${invoice.currency}`);
-    return hashCounts.get(hash)!;
+/**
+ * Extract invoice data using consensus approach:
+ * 1. Pass 1: Fast JSON extraction
+ * 2. Pass 2: Confirm with thinking enabled
+ * 3. If mismatch: repeat until consensus or max 5 attempts
+ */
+async function extractInvoiceFromImages(images: string[]): Promise<IInvoice> {
+  console.log(`  [Vision] Processing ${images.length} page(s) with ${MODEL} (consensus)`);
+
+  const MAX_ATTEMPTS = 5;
+  let attempt = 0;
+
+  while (attempt < MAX_ATTEMPTS) {
+    attempt++;
+    console.log(`  [Attempt ${attempt}/${MAX_ATTEMPTS}]`);
+
+    // PASS 1: Fast JSON extraction
+    console.log(`  [Pass 1] Fast extraction...`);
+    const fastResponse = await queryJsonFast(images);
+    const fastInvoice = parseJsonToInvoice(fastResponse);
+
+    if (!fastInvoice) {
+      console.log(`  [Pass 1] JSON parsing failed, retrying...`);
+      continue;
+    }
+    console.log(`  [Pass 1] Result: ${fastInvoice.invoice_number} | ${fastInvoice.invoice_date} | ${fastInvoice.total_amount} ${fastInvoice.currency}`);
+
+    // PASS 2: Confirm with thinking
+    console.log(`  [Pass 2] Thinking confirmation...`);
+    const thinkResponse = await queryJsonWithThinking(images);
+    const thinkInvoice = parseJsonToInvoice(thinkResponse);
+
+    if (!thinkInvoice) {
+      console.log(`  [Pass 2] JSON parsing failed, retrying...`);
+      continue;
+    }
+    console.log(`  [Pass 2] Result: ${thinkInvoice.invoice_number} | ${thinkInvoice.invoice_date} | ${thinkInvoice.total_amount} ${thinkInvoice.currency}`);
+
+    // Check consensus
+    if (invoicesMatch(fastInvoice, thinkInvoice)) {
+      console.log(`  [Consensus] MATCH - using result`);
+      return thinkInvoice; // Prefer thinking result
+    }
+
+    console.log(`  [Consensus] MISMATCH - repeating...`);
+    console.log(`    Fast:  ${fastInvoice.invoice_number} | ${fastInvoice.invoice_date} | ${fastInvoice.total_amount}`);
+    console.log(`    Think: ${thinkInvoice.invoice_number} | ${thinkInvoice.invoice_date} | ${thinkInvoice.total_amount}`);
+  }
+
+  // Max attempts reached - do one final thinking pass and use that
+  console.log(`  [Final] Max attempts reached, using final thinking pass`);
+  const finalResponse = await queryJsonWithThinking(images);
+  const finalInvoice = parseJsonToInvoice(finalResponse);
+
+  if (finalInvoice) {
+    console.log(`  [Final] Result: ${finalInvoice.invoice_number} | ${finalInvoice.invoice_date} | ${finalInvoice.total_amount} ${finalInvoice.currency}`);
+    return finalInvoice;
+  }
+
+  // Return empty invoice if all else fails
+  console.log(`  [Final] All parsing failed, returning empty`);
+  return {
+    invoice_number: '',
+    invoice_date: '',
+    vendor_name: '',
+    currency: 'EUR',
+    net_amount: 0,
+    vat_amount: 0,
+    total_amount: 0,
+  };
+}
+
+/**
+ * Normalize date to YYYY-MM-DD
+ */
+function normalizeDate(dateStr: string | null): string {
+  if (!dateStr) return '';
+  if (/^\d{4}-\d{2}-\d{2}$/.test(dateStr)) return dateStr;
+
+  const monthMap: Record<string, string> = {
+    JAN: '01', FEB: '02', MAR: '03', APR: '04', MAY: '05', JUN: '06',
+    JUL: '07', AUG: '08', SEP: '09', OCT: '10', NOV: '11', DEC: '12',
   };
 
-  for (let pass = 1; pass <= maxPasses; pass++) {
-    try {
-      const invoice = await extractOnce(images, pass);
-      const count = addResult(invoice, `Pass ${pass}`);
-
-      if (count >= 2) {
-        console.log(`  [Consensus] Reached after ${pass} passes`);
-        return invoice;
-      }
-    } catch (err) {
-      console.log(`  [Pass ${pass}] Error: ${err}`);
-    }
+  let match = dateStr.match(/^(\d{1,2})-([A-Z]{3})-(\d{4})$/i);
+  if (match) {
+    return `${match[3]}-${monthMap[match[2].toUpperCase()] || '01'}-${match[1].padStart(2, '0')}`;
   }
 
-  // No consensus reached - return the most common result
-  let bestHash = '';
-  let bestCount = 0;
-  for (const [hash, count] of hashCounts) {
-    if (count > bestCount) {
-      bestCount = count;
-      bestHash = hash;
-    }
+  match = dateStr.match(/^(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})$/);
+  if (match) {
+    return `${match[3]}-${match[2].padStart(2, '0')}-${match[1].padStart(2, '0')}`;
   }
 
-  if (!bestHash) {
-    throw new Error(`No valid results for ${invoiceName}`);
-  }
-
-  const best = results.find((r) => r.hash === bestHash)!;
-  console.log(`  [No consensus] Using most common result (${bestCount}/${maxPasses} passes)`);
-  return best.invoice;
+  return dateStr;
 }
 
 /**
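The parsing helpers added in this hunk are pure functions, so they are easy to sanity-check outside the test run. A hedged sketch of the expected behaviour, derived directly from the implementations above:

```typescript
// Expected behaviour of the helpers above (values follow the regexes shown in the hunk).
parseAmount('1.234,56');       // 1234.56  (European format: dots stripped, comma becomes decimal point)
parseAmount('1,234.56');       // 1234.56  (US format: commas stripped)
parseAmount(42);               // 42       (numbers pass through unchanged)
extractDate('14/10/2021');     // '2021-10-14' (DD/MM/YYYY reordered to ISO)
extractDate('**2021-10-14**'); // '2021-10-14' (markdown emphasis stripped first)
extractCurrency('Total: €1.234,56'); // 'EUR' (symbol lookup)
```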
@@ -210,7 +355,7 @@ function compareInvoice(
   }
 
   // Compare date
-  if (extracted.invoice_date !== expected.invoice_date) {
+  if (normalizeDate(extracted.invoice_date) !== normalizeDate(expected.invoice_date)) {
     errors.push(`invoice_date: expected "${expected.invoice_date}", got "${extracted.invoice_date}"`);
   }
 
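Routing both sides through `normalizeDate` makes the comparison tolerant of the date formats that actually appear in the fixtures. Per the implementation above, all of the following collapse to the same ISO string:

```typescript
normalizeDate('2022-06-28');  // '2022-06-28' (already ISO, returned as-is)
normalizeDate('28-JUN-2022'); // '2022-06-28' (DD-MMM-YYYY via the month map)
normalizeDate('28.06.2022');  // '2022-06-28' (DD.MM.YYYY)
normalizeDate('28/06/2022');  // '2022-06-28' (DD/MM/YYYY)
```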
@@ -252,9 +397,7 @@ function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: string }> {
     }
   }
 
-  // Sort alphabetically
   testCases.sort((a, b) => a.name.localeCompare(b.name));
-
   return testCases;
 }
 
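`findTestCases` pairs each PDF in `.nogit/invoices/` with a JSON file of the same base name holding the expected fields. A hedged sketch of what one such fixture file would contain (the file name and all values here are illustrative, not from the repo):

```typescript
// Hypothetical fixture pair: .nogit/invoices/acme-001.pdf + .nogit/invoices/acme-001.json
// The JSON file deserializes into the IInvoice shape the tests compare against:
const expected: IInvoice = {
  invoice_number: 'INV-2024-001',
  invoice_date: '2024-03-14',
  vendor_name: 'Acme GmbH',
  currency: 'EUR',
  net_amount: 100.0,
  vat_amount: 19.0,
  total_amount: 119.0,
};
```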
@@ -262,24 +405,20 @@ function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: string }> {
 
 tap.test('setup: ensure Docker containers are running', async () => {
   console.log('\n[Setup] Checking Docker containers...\n');
 
-  // Ensure MiniCPM is running
   const minicpmOk = await ensureMiniCpm();
   expect(minicpmOk).toBeTrue();
 
   console.log('\n[Setup] All containers ready!\n');
 });
 
-tap.test('should have MiniCPM-V 4.5 model loaded', async () => {
+tap.test('should have MiniCPM-V model loaded', async () => {
   const response = await fetch(`${OLLAMA_URL}/api/tags`);
   const data = await response.json();
   const modelNames = data.models.map((m: { name: string }) => m.name);
-  expect(modelNames.some((name: string) => name.includes('minicpm-v4.5'))).toBeTrue();
+  expect(modelNames.some((name: string) => name.includes('minicpm'))).toBeTrue();
 });
 
-// Dynamic test for each PDF/JSON pair
 const testCases = findTestCases();
-console.log(`\nFound ${testCases.length} invoice test cases (MiniCPM-V only)\n`);
+console.log(`\nFound ${testCases.length} invoice test cases (MiniCPM-V)\n`);
 
 let passedCount = 0;
 let failedCount = 0;
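The loosened model check (`'minicpm'` instead of `'minicpm-v4.5'`) matches any MiniCPM tag Ollama reports. A hedged sketch of the same probe outside the test runner:

```typescript
// Same /api/tags probe the test uses, run standalone against a local Ollama.
const res = await fetch('http://localhost:11434/api/tags');
const tags = await res.json();
const names: string[] = tags.models.map((m: { name: string }) => m.name);
console.log(names.filter((n) => n.includes('minicpm'))); // e.g. ['minicpm-v4.5:latest'] - example output, not guaranteed
```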
@@ -287,25 +426,20 @@ const processingTimes: number[] = [];
 
 for (const testCase of testCases) {
   tap.test(`should extract invoice: ${testCase.name}`, async () => {
-    // Load expected data
     const expected: IInvoice = JSON.parse(fs.readFileSync(testCase.jsonPath, 'utf-8'));
     console.log(`\n=== ${testCase.name} ===`);
     console.log(`Expected: ${expected.invoice_number} | ${expected.invoice_date} | ${expected.total_amount} ${expected.currency}`);
 
     const startTime = Date.now();
 
-    // Convert PDF to images
     const images = convertPdfToImages(testCase.pdfPath);
     console.log(`  Pages: ${images.length}`);
 
-    // Extract with consensus voting (MiniCPM-V only)
-    const extracted = await extractWithConsensus(images, testCase.name);
+    const extracted = await extractInvoiceFromImages(images);
+    console.log(`  Extracted: ${extracted.invoice_number} | ${extracted.invoice_date} | ${extracted.total_amount} ${extracted.currency}`);
 
-    const endTime = Date.now();
-    const elapsedMs = endTime - startTime;
+    const elapsedMs = Date.now() - startTime;
     processingTimes.push(elapsedMs);
 
-    // Compare results
     const result = compareInvoice(extracted, expected);
 
     if (result.match) {
@@ -317,7 +451,6 @@ for (const testCase of testCases) {
       result.errors.forEach((e) => console.log(`    - ${e}`));
     }
 
-    // Assert match
     expect(result.match).toBeTrue();
   });
 }
@@ -326,18 +459,17 @@ tap.test('summary', async () => {
   const totalInvoices = testCases.length;
   const accuracy = totalInvoices > 0 ? (passedCount / totalInvoices) * 100 : 0;
   const totalTimeMs = processingTimes.reduce((a, b) => a + b, 0);
-  const avgTimeMs = processingTimes.length > 0 ? totalTimeMs / processingTimes.length : 0;
-  const avgTimeSec = avgTimeMs / 1000;
-  const totalTimeSec = totalTimeMs / 1000;
+  const avgTimeSec = processingTimes.length > 0 ? totalTimeMs / processingTimes.length / 1000 : 0;
 
   console.log(`\n========================================`);
-  console.log(`  Invoice Extraction Summary (MiniCPM)`);
+  console.log(`  Invoice Extraction Summary (${MODEL})`);
   console.log(`========================================`);
+  console.log(`  Method: Consensus (fast + thinking)`);
   console.log(`  Passed: ${passedCount}/${totalInvoices}`);
   console.log(`  Failed: ${failedCount}/${totalInvoices}`);
   console.log(`  Accuracy: ${accuracy.toFixed(1)}%`);
   console.log(`----------------------------------------`);
-  console.log(`  Total time: ${totalTimeSec.toFixed(1)}s`);
+  console.log(`  Total time: ${(totalTimeMs / 1000).toFixed(1)}s`);
   console.log(`  Avg per inv: ${avgTimeSec.toFixed(1)}s`);
   console.log(`========================================\n`);
 });
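One design note on the consensus gate this file now uses: `invoicesMatch` is deliberately loose on amounts, treating totals within 2 cents as equal so that minor rounding drift between the fast and thinking passes does not force a retry. A hedged illustration, using an invoice number and vendor that appear as examples elsewhere in these tests:

```typescript
const fast: IInvoice = {
  invoice_number: 'R0014359508', invoice_date: '2021-10-14', vendor_name: 'Hetzner Online GmbH',
  currency: 'EUR', net_amount: 42.0, vat_amount: 7.98, total_amount: 49.98,
};
const think: IInvoice = { ...fast, total_amount: 49.99 };
invoicesMatch(fast, think); // true: |49.98 - 49.99| < 0.02, and number/date are identical
```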
@@ -1,334 +0,0 @@
/**
 * Invoice extraction using Ministral 3 Vision (Direct)
 *
 * NO PaddleOCR needed - Ministral 3 has built-in vision encoder:
 * 1. Convert PDF to images
 * 2. Send images directly to Ministral 3 via Ollama
 * 3. Extract structured JSON with native schema support
 *
 * This is the simplest possible pipeline.
 */
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensureMinistral3 } from './helpers/docker.js';

const OLLAMA_URL = 'http://localhost:11434';
const VISION_MODEL = 'ministral-3:8b';

interface IInvoice {
  invoice_number: string;
  invoice_date: string;
  vendor_name: string;
  currency: string;
  net_amount: number;
  vat_amount: number;
  total_amount: number;
}

/**
 * Convert PDF to PNG images using ImageMagick
 */
function convertPdfToImages(pdfPath: string): string[] {
  const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
  const outputPattern = path.join(tempDir, 'page-%d.png');

  try {
    // High quality conversion: 300 DPI, max quality, sharpen for better OCR
    execSync(
      `convert -density 300 -quality 100 "${pdfPath}" -background white -alpha remove -sharpen 0x1 "${outputPattern}"`,
      { stdio: 'pipe' }
    );

    const files = fs.readdirSync(tempDir).filter((f) => f.endsWith('.png')).sort();
    const images: string[] = [];

    for (const file of files) {
      const imagePath = path.join(tempDir, file);
      const imageData = fs.readFileSync(imagePath);
      images.push(imageData.toString('base64'));
    }

    return images;
  } finally {
    fs.rmSync(tempDir, { recursive: true, force: true });
  }
}

/**
 * Extract invoice data directly from images using Ministral 3 Vision
 */
async function extractInvoiceFromImages(images: string[]): Promise<IInvoice> {
  console.log(`  [Vision] Processing ${images.length} page(s) with Ministral 3`);

  // JSON schema for structured output
  const invoiceSchema = {
    type: 'object',
    properties: {
      invoice_number: { type: 'string' },
      invoice_date: { type: 'string' },
      vendor_name: { type: 'string' },
      currency: { type: 'string' },
      net_amount: { type: 'number' },
      vat_amount: { type: 'number' },
      total_amount: { type: 'number' },
    },
    required: ['invoice_number', 'invoice_date', 'vendor_name', 'currency', 'net_amount', 'vat_amount', 'total_amount'],
  };

  const prompt = `You are an expert invoice data extraction system. Carefully analyze this invoice document and extract the following fields with high precision.

INVOICE NUMBER:
- Look for labels: "Invoice No", "Invoice #", "Invoice Number", "Rechnung Nr", "Rechnungsnummer", "Document No", "Bill No", "Reference"
- Usually alphanumeric, often starts with letters (e.g., R0014359508, INV-2024-001)
- Located near the top of the invoice

INVOICE DATE:
- Look for labels: "Invoice Date", "Date", "Datum", "Rechnungsdatum", "Issue Date", "Bill Date"
- Convert ANY date format to YYYY-MM-DD (e.g., 14/10/2021 → 2021-10-14, Oct 14, 2021 → 2021-10-14)
- Usually near the invoice number

VENDOR NAME:
- The company ISSUING the invoice (not the recipient)
- Found in letterhead, logo area, or header - typically the largest/most prominent company name
- Examples: "Hetzner Online GmbH", "Adobe Inc", "DigitalOcean LLC"

CURRENCY:
- Detect from symbols: € = EUR, $ = USD, £ = GBP
- Or from text: "EUR", "USD", "GBP"
- Default to EUR if unclear

AMOUNTS (Critical - read carefully!):
- total_amount: The FINAL amount due/payable - look for "Total", "Grand Total", "Amount Due", "Balance Due", "Gesamtbetrag", "Endbetrag"
- net_amount: Subtotal BEFORE tax - look for "Subtotal", "Net", "Netto", "excl. VAT"
- vat_amount: Tax amount - look for "VAT", "Tax", "MwSt", "USt", "19%", "20%"
- For multi-page invoices: the FINAL totals are usually on the LAST page

Return ONLY valid JSON with the extracted values.`;

  const response = await fetch(`${OLLAMA_URL}/api/chat`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: VISION_MODEL,
      messages: [
        {
          role: 'user',
          content: prompt,
          images: images, // Send all page images
        },
      ],
      format: invoiceSchema,
      stream: true,
      options: {
        num_predict: 1024,
        temperature: 0.0,
      },
    }),
  });

  if (!response.ok) {
    throw new Error(`Ollama API error: ${response.status}`);
  }

  const reader = response.body?.getReader();
  if (!reader) {
    throw new Error('No response body');
  }

  const decoder = new TextDecoder();
  let fullText = '';

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;

    const chunk = decoder.decode(value, { stream: true });
    const lines = chunk.split('\n').filter((l) => l.trim());

    for (const line of lines) {
      try {
        const json = JSON.parse(line);
        if (json.message?.content) {
          fullText += json.message.content;
        }
      } catch {
        // Skip invalid JSON lines
      }
    }
  }

  // Parse JSON response
  let jsonStr = fullText.trim();

  if (jsonStr.startsWith('```json')) jsonStr = jsonStr.slice(7);
  else if (jsonStr.startsWith('```')) jsonStr = jsonStr.slice(3);
  if (jsonStr.endsWith('```')) jsonStr = jsonStr.slice(0, -3);
  jsonStr = jsonStr.trim();

  const startIdx = jsonStr.indexOf('{');
  const endIdx = jsonStr.lastIndexOf('}') + 1;

  if (startIdx < 0 || endIdx <= startIdx) {
    throw new Error(`No JSON found: ${fullText.substring(0, 200)}`);
  }

  const parsed = JSON.parse(jsonStr.substring(startIdx, endIdx));

  return {
    invoice_number: parsed.invoice_number || null,
    invoice_date: parsed.invoice_date || null,
    vendor_name: parsed.vendor_name || null,
    currency: parsed.currency || 'EUR',
    net_amount: parseFloat(parsed.net_amount) || 0,
    vat_amount: parseFloat(parsed.vat_amount) || 0,
    total_amount: parseFloat(parsed.total_amount) || 0,
  };
}

/**
 * Normalize date to YYYY-MM-DD
 */
function normalizeDate(dateStr: string | null): string {
  if (!dateStr) return '';
  if (/^\d{4}-\d{2}-\d{2}$/.test(dateStr)) return dateStr;

  const monthMap: Record<string, string> = {
    JAN: '01', FEB: '02', MAR: '03', APR: '04', MAY: '05', JUN: '06',
    JUL: '07', AUG: '08', SEP: '09', OCT: '10', NOV: '11', DEC: '12',
  };

  let match = dateStr.match(/^(\d{1,2})-([A-Z]{3})-(\d{4})$/i);
  if (match) {
    return `${match[3]}-${monthMap[match[2].toUpperCase()] || '01'}-${match[1].padStart(2, '0')}`;
  }

  match = dateStr.match(/^(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})$/);
  if (match) {
    return `${match[3]}-${match[2].padStart(2, '0')}-${match[1].padStart(2, '0')}`;
  }

  return dateStr;
}

/**
 * Compare extracted vs expected
 */
function compareInvoice(extracted: IInvoice, expected: IInvoice): { match: boolean; errors: string[] } {
  const errors: string[] = [];

  const extNum = extracted.invoice_number?.replace(/\s/g, '').toLowerCase() || '';
  const expNum = expected.invoice_number?.replace(/\s/g, '').toLowerCase() || '';
  if (extNum !== expNum) {
    errors.push(`invoice_number: expected "${expected.invoice_number}", got "${extracted.invoice_number}"`);
  }

  if (normalizeDate(extracted.invoice_date) !== normalizeDate(expected.invoice_date)) {
    errors.push(`invoice_date: expected "${expected.invoice_date}", got "${extracted.invoice_date}"`);
  }

  if (Math.abs(extracted.total_amount - expected.total_amount) > 0.02) {
    errors.push(`total_amount: expected ${expected.total_amount}, got ${extracted.total_amount}`);
  }

  if (extracted.currency?.toUpperCase() !== expected.currency?.toUpperCase()) {
    errors.push(`currency: expected "${expected.currency}", got "${extracted.currency}"`);
  }

  return { match: errors.length === 0, errors };
}

/**
 * Find test cases
 */
function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: string }> {
  const testDir = path.join(process.cwd(), '.nogit/invoices');
  if (!fs.existsSync(testDir)) return [];

  const files = fs.readdirSync(testDir);
  const testCases: Array<{ name: string; pdfPath: string; jsonPath: string }> = [];

  for (const pdf of files.filter((f) => f.endsWith('.pdf'))) {
    const baseName = pdf.replace('.pdf', '');
    const jsonFile = `${baseName}.json`;
    if (files.includes(jsonFile)) {
      testCases.push({
        name: baseName,
        pdfPath: path.join(testDir, pdf),
        jsonPath: path.join(testDir, jsonFile),
      });
    }
  }

  return testCases.sort((a, b) => a.name.localeCompare(b.name));
}

// Tests

tap.test('setup: ensure Ministral 3 is running', async () => {
  console.log('\n[Setup] Checking Ministral 3...\n');
  const ok = await ensureMinistral3();
  expect(ok).toBeTrue();
  console.log('\n[Setup] Ready!\n');
});

const testCases = findTestCases();
console.log(`\nFound ${testCases.length} invoice test cases (Ministral 3 Vision Direct)\n`);

let passedCount = 0;
let failedCount = 0;
const times: number[] = [];

for (const testCase of testCases) {
  tap.test(`should extract invoice: ${testCase.name}`, async () => {
    const expected: IInvoice = JSON.parse(fs.readFileSync(testCase.jsonPath, 'utf-8'));
    console.log(`\n=== ${testCase.name} ===`);
    console.log(`Expected: ${expected.invoice_number} | ${expected.invoice_date} | ${expected.total_amount} ${expected.currency}`);

    const start = Date.now();
    const images = convertPdfToImages(testCase.pdfPath);
    console.log(`  Pages: ${images.length}`);

    const extracted = await extractInvoiceFromImages(images);
    console.log(`  Extracted: ${extracted.invoice_number} | ${extracted.invoice_date} | ${extracted.total_amount} ${extracted.currency}`);
    const elapsed = Date.now() - start;
    times.push(elapsed);

    const result = compareInvoice(extracted, expected);

    if (result.match) {
      passedCount++;
      console.log(`  Result: MATCH (${(elapsed / 1000).toFixed(1)}s)`);
    } else {
      failedCount++;
      console.log(`  Result: MISMATCH (${(elapsed / 1000).toFixed(1)}s)`);
      result.errors.forEach((e) => console.log(`    - ${e}`));
    }

    expect(result.match).toBeTrue();
  });
}

tap.test('summary', async () => {
  const total = testCases.length;
  const accuracy = total > 0 ? (passedCount / total) * 100 : 0;
  const totalTime = times.reduce((a, b) => a + b, 0) / 1000;
  const avgTime = times.length > 0 ? totalTime / times.length : 0;

  console.log(`\n======================================================`);
  console.log(`  Invoice Extraction Summary (Ministral 3 Vision)`);
  console.log(`======================================================`);
  console.log(`  Method: Ministral 3 8B Vision (Direct)`);
  console.log(`  Passed: ${passedCount}/${total}`);
  console.log(`  Failed: ${failedCount}/${total}`);
  console.log(`  Accuracy: ${accuracy.toFixed(1)}%`);
  console.log(`------------------------------------------------------`);
  console.log(`  Total time: ${totalTime.toFixed(1)}s`);
  console.log(`  Avg per inv: ${avgTime.toFixed(1)}s`);
  console.log(`======================================================\n`);
});

export default tap.start();
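The deleted Ministral test leaned on Ollama's structured-output support: passing a JSON schema in the `format` field of `/api/chat` constrains generation to schema-valid JSON. A minimal hedged sketch of that call shape (model name as in the deleted test; `pageBase64` is an assumed base64-encoded page image, and the trimmed-down schema is illustrative):

```typescript
const response = await fetch('http://localhost:11434/api/chat', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    model: 'ministral-3:8b',
    messages: [{ role: 'user', content: 'Extract the invoice fields.', images: [pageBase64] }],
    // Schema passed via `format` constrains the output to valid JSON of this shape.
    format: { type: 'object', properties: { total_amount: { type: 'number' } }, required: ['total_amount'] },
    stream: false,
    options: { temperature: 0.0 },
  }),
});
const data = await response.json();
const parsed = JSON.parse(data.message.content); // schema-constrained JSON
```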
@@ -1,490 +0,0 @@
/**
 * Invoice extraction test using PaddleOCR-VL Full Pipeline
 *
 * This tests the complete PaddleOCR-VL pipeline:
 * 1. PP-DocLayoutV2 for layout detection
 * 2. PaddleOCR-VL for recognition
 * 3. Structured HTML output (semantic tags with proper tables)
 * 4. Qwen2.5 extracts invoice fields from structured HTML
 *
 * HTML output is used instead of Markdown because:
 * - <table> tags are unambiguous (no parser variations)
 * - LLMs are heavily trained on web/HTML data
 * - Semantic tags (header, footer, section) provide clear structure
 */
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensurePaddleOcrVlFull, ensureQwen25 } from './helpers/docker.js';

const PADDLEOCR_VL_URL = 'http://localhost:8000';
const OLLAMA_URL = 'http://localhost:11434';
// Use Qwen2.5 for text-only JSON extraction (not MiniCPM which is vision-focused)
const TEXT_MODEL = 'qwen2.5:7b';

interface IInvoice {
  invoice_number: string;
  invoice_date: string;
  vendor_name: string;
  currency: string;
  net_amount: number;
  vat_amount: number;
  total_amount: number;
}

/**
 * Convert PDF to PNG images using ImageMagick
 */
function convertPdfToImages(pdfPath: string): string[] {
  const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
  const outputPattern = path.join(tempDir, 'page-%d.png');

  try {
    execSync(
      `convert -density 200 -quality 90 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
      { stdio: 'pipe' }
    );

    const files = fs.readdirSync(tempDir).filter((f) => f.endsWith('.png')).sort();
    const images: string[] = [];

    for (const file of files) {
      const imagePath = path.join(tempDir, file);
      const imageData = fs.readFileSync(imagePath);
      images.push(imageData.toString('base64'));
    }

    return images;
  } finally {
    fs.rmSync(tempDir, { recursive: true, force: true });
  }
}

/**
 * Parse document using PaddleOCR-VL Full Pipeline (returns structured HTML)
 */
async function parseDocument(imageBase64: string): Promise<string> {
  const response = await fetch(`${PADDLEOCR_VL_URL}/parse`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      image: imageBase64,
      output_format: 'html',
    }),
  });

  if (!response.ok) {
    const text = await response.text();
    throw new Error(`PaddleOCR-VL API error: ${response.status} - ${text}`);
  }

  const data = await response.json();

  if (!data.success) {
    throw new Error(`PaddleOCR-VL error: ${data.error}`);
  }

  return data.result?.html || '';
}

/**
 * Extract invoice fields using simple direct prompt
 * The OCR output has clearly labeled fields - just ask the LLM to read them
 */
async function extractInvoiceFromHtml(html: string): Promise<IInvoice> {
  // OCR output is already good - just truncate if too long
  const truncated = html.length > 32000 ? html.slice(0, 32000) : html;
  console.log(`  [Extract] ${truncated.length} chars of HTML`);

  // JSON schema for structured output
  const invoiceSchema = {
    type: 'object',
    properties: {
      invoice_number: { type: 'string' },
      invoice_date: { type: 'string' },
      vendor_name: { type: 'string' },
      currency: { type: 'string' },
      net_amount: { type: 'number' },
      vat_amount: { type: 'number' },
      total_amount: { type: 'number' },
    },
    required: ['invoice_number', 'invoice_date', 'vendor_name', 'currency', 'net_amount', 'vat_amount', 'total_amount'],
  };

  // Simple, direct prompt - the OCR output already has labeled fields
  const systemPrompt = `You read invoice HTML and extract labeled fields. Return JSON only.`;

  const userPrompt = `Extract from this invoice HTML:
- invoice_number: Find "Invoice no.", "Invoice #", "Invoice", "Rechnung", "Document No" and extract the value
- invoice_date: Find "Invoice date", "Date", "Datum" and convert to YYYY-MM-DD format
- vendor_name: The company name issuing the invoice (in header/letterhead)
- currency: EUR, USD, or GBP (look for € $ £ symbols or text)
- total_amount: Find "Total", "Grand Total", "Amount Due", "Gesamtbetrag" - the FINAL total amount
- net_amount: Amount before VAT/tax (Subtotal, Net)
- vat_amount: VAT/tax amount

HTML:
${truncated}

Return ONLY valid JSON: {"invoice_number":"...", "invoice_date":"YYYY-MM-DD", "vendor_name":"...", "currency":"EUR", "net_amount":0, "vat_amount":0, "total_amount":0}`;

  const response = await fetch(`${OLLAMA_URL}/api/chat`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: TEXT_MODEL,
      messages: [
        { role: 'system', content: systemPrompt },
        { role: 'user', content: userPrompt },
      ],
      format: invoiceSchema,
      stream: true,
      options: { num_predict: 512, temperature: 0.0 },
    }),
  });

  if (!response.ok) {
    throw new Error(`Ollama API error: ${response.status}`);
  }

  const reader = response.body?.getReader();
  if (!reader) {
    throw new Error('No response body');
  }

  const decoder = new TextDecoder();
  let fullText = '';

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;

    const chunk = decoder.decode(value, { stream: true });
    const lines = chunk.split('\n').filter((l) => l.trim());

    for (const line of lines) {
      try {
        const json = JSON.parse(line);
        if (json.message?.content) {
          fullText += json.message.content;
        } else if (json.response) {
          fullText += json.response;
        }
      } catch {
        // Skip invalid JSON lines
      }
    }
  }

  // Extract JSON from response
  let jsonStr = fullText.trim();

  // Remove markdown code block if present
  if (jsonStr.startsWith('```json')) {
    jsonStr = jsonStr.slice(7);
  } else if (jsonStr.startsWith('```')) {
    jsonStr = jsonStr.slice(3);
  }
  if (jsonStr.endsWith('```')) {
    jsonStr = jsonStr.slice(0, -3);
  }
  jsonStr = jsonStr.trim();

  // Find JSON object boundaries
  const startIdx = jsonStr.indexOf('{');
  const endIdx = jsonStr.lastIndexOf('}') + 1;

  if (startIdx < 0 || endIdx <= startIdx) {
    throw new Error(`No JSON object found in response: ${fullText.substring(0, 200)}`);
  }

  jsonStr = jsonStr.substring(startIdx, endIdx);

  let parsed;
  try {
    parsed = JSON.parse(jsonStr);
  } catch (e) {
    throw new Error(`Invalid JSON: ${jsonStr.substring(0, 200)}`);
  }

  // Normalize response to expected format
  return {
    invoice_number: parsed.invoice_number || null,
    invoice_date: parsed.invoice_date || null,
    vendor_name: parsed.vendor_name || null,
    currency: parsed.currency || 'EUR',
    net_amount: parseFloat(parsed.net_amount) || 0,
    vat_amount: parseFloat(parsed.vat_amount) || 0,
    total_amount: parseFloat(parsed.total_amount) || 0,
  };
}

/**
 * Single extraction pass: Parse with PaddleOCR-VL Full, extract with Qwen2.5 (text-only)
 * Processes ALL pages and concatenates HTML for multi-page invoice support
 */
async function extractOnce(images: string[], passNum: number): Promise<IInvoice> {
  // Parse ALL pages and concatenate HTML with page markers
  const htmlParts: string[] = [];

  for (let i = 0; i < images.length; i++) {
    const pageHtml = await parseDocument(images[i]);
    // Add page marker for context
    htmlParts.push(`<!-- Page ${i + 1} -->\n${pageHtml}`);
  }

  const fullHtml = htmlParts.join('\n\n');
  console.log(`  [Parse] Got ${fullHtml.split('\n').length} lines from ${images.length} page(s)`);

  // Extract invoice fields from HTML using text-only model (no images)
  return extractInvoiceFromHtml(fullHtml);
}

/**
 * Create a hash of invoice for comparison (using key fields)
 */
function hashInvoice(invoice: IInvoice): string {
  // Ensure total_amount is a number
  const amount = typeof invoice.total_amount === 'number'
    ? invoice.total_amount.toFixed(2)
    : String(invoice.total_amount || 0);
  return `${invoice.invoice_number}|${invoice.invoice_date}|${amount}`;
}

/**
 * Extract with consensus voting
 */
async function extractWithConsensus(images: string[], invoiceName: string, maxPasses: number = 5): Promise<IInvoice> {
  const results: Array<{ invoice: IInvoice; hash: string }> = [];
  const hashCounts: Map<string, number> = new Map();

  const addResult = (invoice: IInvoice, passLabel: string): number => {
    const hash = hashInvoice(invoice);
    results.push({ invoice, hash });
    hashCounts.set(hash, (hashCounts.get(hash) || 0) + 1);
    console.log(`  [${passLabel}] ${invoice.invoice_number} | ${invoice.invoice_date} | ${invoice.total_amount} ${invoice.currency}`);
    return hashCounts.get(hash)!;
  };

  for (let pass = 1; pass <= maxPasses; pass++) {
    try {
      const invoice = await extractOnce(images, pass);
      const count = addResult(invoice, `Pass ${pass}`);

      if (count >= 2) {
        console.log(`  [Consensus] Reached after ${pass} passes`);
        return invoice;
      }
    } catch (err) {
      console.log(`  [Pass ${pass}] Error: ${err}`);
    }
  }

  // No consensus reached - return the most common result
  let bestHash = '';
  let bestCount = 0;
  for (const [hash, count] of hashCounts) {
    if (count > bestCount) {
      bestCount = count;
      bestHash = hash;
    }
  }

  if (!bestHash) {
    throw new Error(`No valid results for ${invoiceName}`);
  }

  const best = results.find((r) => r.hash === bestHash)!;
  console.log(`  [No consensus] Using most common result (${bestCount}/${maxPasses} passes)`);
  return best.invoice;
}

/**
 * Normalize date to YYYY-MM-DD format
 */
function normalizeDate(dateStr: string | null): string {
  if (!dateStr) return '';

  // Already in correct format
  if (/^\d{4}-\d{2}-\d{2}$/.test(dateStr)) {
    return dateStr;
  }

  // Handle DD-MMM-YYYY format (e.g., "28-JUN-2022")
  const monthMap: Record<string, string> = {
    JAN: '01', FEB: '02', MAR: '03', APR: '04', MAY: '05', JUN: '06',
    JUL: '07', AUG: '08', SEP: '09', OCT: '10', NOV: '11', DEC: '12',
  };

  const match = dateStr.match(/^(\d{1,2})-([A-Z]{3})-(\d{4})$/i);
  if (match) {
    const day = match[1].padStart(2, '0');
    const month = monthMap[match[2].toUpperCase()] || '01';
    const year = match[3];
    return `${year}-${month}-${day}`;
  }

  // Handle DD/MM/YYYY or DD.MM.YYYY
  const match2 = dateStr.match(/^(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})$/);
  if (match2) {
    const day = match2[1].padStart(2, '0');
    const month = match2[2].padStart(2, '0');
    const year = match2[3];
    return `${year}-${month}-${day}`;
  }

  return dateStr;
}

/**
 * Compare extracted invoice against expected
 */
function compareInvoice(
  extracted: IInvoice,
  expected: IInvoice
): { match: boolean; errors: string[] } {
  const errors: string[] = [];

  // Compare invoice number (normalize by removing spaces and case)
  const extNum = extracted.invoice_number?.replace(/\s/g, '').toLowerCase() || '';
  const expNum = expected.invoice_number?.replace(/\s/g, '').toLowerCase() || '';
  if (extNum !== expNum) {
    errors.push(`invoice_number: expected "${expected.invoice_number}", got "${extracted.invoice_number}"`);
  }

  // Compare date (normalize format first)
  const extDate = normalizeDate(extracted.invoice_date);
  const expDate = normalizeDate(expected.invoice_date);
  if (extDate !== expDate) {
    errors.push(`invoice_date: expected "${expected.invoice_date}", got "${extracted.invoice_date}"`);
  }

  // Compare total amount (with tolerance)
  if (Math.abs(extracted.total_amount - expected.total_amount) > 0.02) {
    errors.push(`total_amount: expected ${expected.total_amount}, got ${extracted.total_amount}`);
  }

  // Compare currency
  if (extracted.currency?.toUpperCase() !== expected.currency?.toUpperCase()) {
    errors.push(`currency: expected "${expected.currency}", got "${extracted.currency}"`);
  }

  return { match: errors.length === 0, errors };
}

/**
 * Find all test cases (PDF + JSON pairs) in .nogit/invoices/
 */
function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: string }> {
  const testDir = path.join(process.cwd(), '.nogit/invoices');
  if (!fs.existsSync(testDir)) {
    return [];
  }

  const files = fs.readdirSync(testDir);
  const pdfFiles = files.filter((f) => f.endsWith('.pdf'));
  const testCases: Array<{ name: string; pdfPath: string; jsonPath: string }> = [];

  for (const pdf of pdfFiles) {
    const baseName = pdf.replace('.pdf', '');
    const jsonFile = `${baseName}.json`;
    if (files.includes(jsonFile)) {
      testCases.push({
        name: baseName,
        pdfPath: path.join(testDir, pdf),
        jsonPath: path.join(testDir, jsonFile),
      });
    }
  }

  // Sort alphabetically
  testCases.sort((a, b) => a.name.localeCompare(b.name));

  return testCases;
}

// Tests

tap.test('setup: ensure Docker containers are running', async () => {
  console.log('\n[Setup] Checking Docker containers...\n');

  // Ensure PaddleOCR-VL Full Pipeline is running
  const paddleOk = await ensurePaddleOcrVlFull();
  expect(paddleOk).toBeTrue();

  // Ensure Qwen2.5 is available (for text-only JSON extraction)
  const qwenOk = await ensureQwen25();
  expect(qwenOk).toBeTrue();

  console.log('\n[Setup] All containers ready!\n');
});

// Dynamic test for each PDF/JSON pair
const testCases = findTestCases();
console.log(`\nFound ${testCases.length} invoice test cases (PaddleOCR-VL Full Pipeline)\n`);

let passedCount = 0;
let failedCount = 0;
const processingTimes: number[] = [];

for (const testCase of testCases) {
  tap.test(`should extract invoice: ${testCase.name}`, async () => {
    // Load expected data
    const expected: IInvoice = JSON.parse(fs.readFileSync(testCase.jsonPath, 'utf-8'));
    console.log(`\n=== ${testCase.name} ===`);
    console.log(`Expected: ${expected.invoice_number} | ${expected.invoice_date} | ${expected.total_amount} ${expected.currency}`);

    const startTime = Date.now();

    // Convert PDF to images
    const images = convertPdfToImages(testCase.pdfPath);
    console.log(`  Pages: ${images.length}`);

    // Extract with consensus voting (PaddleOCR-VL Full -> MiniCPM)
    const extracted = await extractWithConsensus(images, testCase.name);

    const endTime = Date.now();
    const elapsedMs = endTime - startTime;
    processingTimes.push(elapsedMs);

    // Compare results
    const result = compareInvoice(extracted, expected);

    if (result.match) {
      passedCount++;
      console.log(`  Result: MATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
    } else {
      failedCount++;
      console.log(`  Result: MISMATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
      result.errors.forEach((e) => console.log(`    - ${e}`));
    }

    // Assert match
    expect(result.match).toBeTrue();
  });
}

tap.test('summary', async () => {
  const totalInvoices = testCases.length;
  const accuracy = totalInvoices > 0 ? (passedCount / totalInvoices) * 100 : 0;
  const totalTimeMs = processingTimes.reduce((a, b) => a + b, 0);
  const avgTimeMs = processingTimes.length > 0 ? totalTimeMs / processingTimes.length : 0;
  const avgTimeSec = avgTimeMs / 1000;
  const totalTimeSec = totalTimeMs / 1000;

  console.log(`\n======================================================`);
  console.log(`  Invoice Extraction Summary (PaddleOCR-VL Full)`);
  console.log(`======================================================`);
  console.log(`  Method: PaddleOCR-VL Full Pipeline (HTML) -> Qwen2.5 (text-only)`);
  console.log(`  Passed: ${passedCount}/${totalInvoices}`);
  console.log(`  Failed: ${failedCount}/${totalInvoices}`);
  console.log(`  Accuracy: ${accuracy.toFixed(1)}%`);
  console.log(`------------------------------------------------------`);
  console.log(`  Total time: ${totalTimeSec.toFixed(1)}s`);
  console.log(`  Avg per inv: ${avgTimeSec.toFixed(1)}s`);
  console.log(`======================================================\n`);
});

export default tap.start();
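For reference, the `/parse` contract the deleted test used is simple: POST a base64 page image plus an output format, get back `{ success, result: { html } }`. A hedged sketch of a single-page call against the container above (`pageBase64` is an assumed base64-encoded PNG page):

```typescript
const res = await fetch('http://localhost:8000/parse', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ image: pageBase64, output_format: 'html' }),
});
const data = await res.json();
if (!data.success) throw new Error(`PaddleOCR-VL error: ${data.error}`);
const html = data.result?.html || ''; // structured HTML for one page
```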
@@ -1,18 +1,15 @@
 /**
- * Invoice extraction using Qwen3-VL-8B Vision (Direct)
+ * Invoice extraction using Qwen3-VL 8B Vision (Direct)
  *
- * Qwen3-VL 8B is a capable vision-language model that fits in 15GB VRAM:
- * - Q4_K_M quantization (~5GB)
- * - Good balance of speed and accuracy
- *
- * Pipeline: PDF → Images → Qwen3-VL → JSON
+ * Multi-query approach: 5 parallel simple queries to avoid token exhaustion.
+ * Single pass, no consensus voting.
  */
 import { tap, expect } from '@git.zone/tstest/tapbundle';
 import * as fs from 'fs';
 import * as path from 'path';
 import { execSync } from 'child_process';
 import * as os from 'os';
-import { ensureQwen3Vl } from './helpers/docker.js';
+import { ensureMiniCpm } from './helpers/docker.js';
 
 const OLLAMA_URL = 'http://localhost:11434';
 const VISION_MODEL = 'qwen3-vl:8b';
@@ -57,25 +54,24 @@ function convertPdfToImages(pdfPath: string): string[] {
 }
 
 /**
- * Single extraction attempt
+ * Query Qwen3-VL for a single field
+ * Uses simple prompts to minimize thinking tokens
  */
-async function tryExtractOnce(images: string[], prompt: string): Promise<string> {
+async function queryField(images: string[], question: string): Promise<string> {
   const response = await fetch(`${OLLAMA_URL}/api/chat`, {
     method: 'POST',
     headers: { 'Content-Type': 'application/json' },
     body: JSON.stringify({
       model: VISION_MODEL,
-      messages: [
-        {
-          role: 'user',
-          content: prompt,
-          images: images,
-        },
-      ],
-      stream: true,
+      messages: [{
+        role: 'user',
+        content: `${question} Reply with just the value, nothing else.`,
+        images: images,
+      }],
+      stream: false,
       options: {
-        num_predict: 1024,
-        temperature: 0.1, // Slight randomness helps avoid stuck states
+        num_predict: 500,
+        temperature: 0.1,
       },
     }),
   });
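Switching to `stream: false` removes the NDJSON handling entirely: the whole completion arrives as one JSON document, so the client is a fetch plus a single `response.json()`, as the next hunk shows when it deletes the reader loop. A hedged sketch of the non-streaming call shape against the same endpoint:

```typescript
// Non-streaming Ollama chat call: one JSON object back, no chunked NDJSON reader needed.
const r = await fetch('http://localhost:11434/api/chat', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ model: 'qwen3-vl:8b', messages: [{ role: 'user', content: 'ping' }], stream: false }),
});
const data = await r.json();
const text = (data.message?.content || '').trim(); // same shape queryField returns
```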
@@ -84,126 +80,93 @@ async function tryExtractOnce(images: string[], prompt: string): Promise<string> {
     throw new Error(`Ollama API error: ${response.status}`);
   }
 
-  const reader = response.body?.getReader();
-  if (!reader) {
-    throw new Error('No response body');
-  }
-
-  const decoder = new TextDecoder();
-  let fullText = '';
-
-  while (true) {
-    const { done, value } = await reader.read();
-    if (done) break;
-
-    const chunk = decoder.decode(value, { stream: true });
-    const lines = chunk.split('\n').filter((l) => l.trim());
-
-    for (const line of lines) {
-      try {
-        const json = JSON.parse(line);
-        if (json.message?.content) {
-          fullText += json.message.content;
-        }
-      } catch {
-        // Skip invalid JSON lines
-      }
-    }
-  }
-
-  return fullText;
+  const data = await response.json();
+  return (data.message?.content || '').trim();
 }
 
 /**
- * Extract invoice data directly from images using Qwen3-VL Vision
- * Includes retry logic for empty responses
+ * Extract invoice data using multiple simple queries
+ * Each query asks for 1-2 fields to minimize thinking tokens
+ * (Qwen3's thinking mode uses all tokens on complex prompts)
  */
 async function extractInvoiceFromImages(images: string[]): Promise<IInvoice> {
-  console.log(`  [Vision] Processing ${images.length} page(s) with Qwen3-VL`);
+  console.log(`  [Vision] Processing ${images.length} page(s) with Qwen3-VL (multi-query)`);
 
-  // JSON schema for structured output - force the model to output valid JSON
-  const invoiceSchema = {
-    type: 'object',
-    properties: {
-      invoice_number: { type: 'string' },
-      invoice_date: { type: 'string' },
-      vendor_name: { type: 'string' },
-      currency: { type: 'string' },
-      net_amount: { type: 'number' },
-      vat_amount: { type: 'number' },
-      total_amount: { type: 'number' },
-    },
-    required: ['invoice_number', 'invoice_date', 'vendor_name', 'currency', 'net_amount', 'vat_amount', 'total_amount'],
-  };
+  // Query each field separately to avoid excessive thinking tokens
+  // Use explicit questions to avoid confusion between similar fields
+  // Log each result as it comes in (not waiting for all to complete)
+  const queryAndLog = async (name: string, question: string): Promise<string> => {
+    const result = await queryField(images, question);
+    console.log(`  [Query] ${name}: "${result}"`);
+    return result;
+  };
 
-  // Simple, direct prompt - don't overthink, just read the labeled fields
-  const prompt = `Extract invoice data from this image. Return JSON only.
-
-Find these fields:
-- invoice_number: The invoice/document number
-- invoice_date: Date in YYYY-MM-DD format
-- vendor_name: Company issuing the invoice
-- currency: EUR, USD, or GBP
-- net_amount: Amount before tax
-- vat_amount: Tax/VAT amount
-- total_amount: Final total amount
-
-Return: {"invoice_number":"...", "invoice_date":"YYYY-MM-DD", "vendor_name":"...", "currency":"EUR", "net_amount":0.00, "vat_amount":0.00, "total_amount":0.00}`;
-
-  // Retry logic for empty responses (model sometimes returns nothing)
-  const MAX_RETRIES = 3;
-  let fullText = '';
-
-  for (let attempt = 1; attempt <= MAX_RETRIES; attempt++) {
-    fullText = await tryExtractOnce(images, prompt);
-
-    if (fullText.trim().length > 0) {
-      console.log(`  [Attempt ${attempt}] Got ${fullText.length} chars`);
-      break;
-    }
-
-    console.log(`  [Attempt ${attempt}] Empty response, retrying...`);
-    // Small delay before retry
-    await new Promise((r) => setTimeout(r, 1000));
-  }
-
-  if (fullText.trim().length === 0) {
-    throw new Error(`Model returned empty response after ${MAX_RETRIES} attempts`);
-  }
-
-  // Parse JSON response
-  let jsonStr = fullText.trim();
-
-  if (jsonStr.startsWith('```json')) jsonStr = jsonStr.slice(7);
-  else if (jsonStr.startsWith('```')) jsonStr = jsonStr.slice(3);
-  if (jsonStr.endsWith('```')) jsonStr = jsonStr.slice(0, -3);
-  jsonStr = jsonStr.trim();
-
-  const startIdx = jsonStr.indexOf('{');
-  const endIdx = jsonStr.lastIndexOf('}') + 1;
-
-  if (startIdx < 0 || endIdx <= startIdx) {
-    throw new Error(`No JSON found in: ${fullText.substring(0, 500)}`);
-  }
-
-  const extractedJson = jsonStr.substring(startIdx, endIdx);
-  console.log(`  [Debug] Extracted JSON: ${extractedJson.substring(0, 200)}...`);
-
-  let parsed;
-  try {
-    parsed = JSON.parse(extractedJson);
-  } catch (e) {
-    throw new Error(`Invalid JSON: ${extractedJson.substring(0, 500)}`);
-  }
+  const [invoiceNum, invoiceDate, vendor, currency, totalAmount, netAmount, vatAmount] = await Promise.all([
+    queryAndLog('Invoice Number', 'What is the INVOICE NUMBER (not VAT number, not customer ID)? Look for "Invoice No", "Invoice #", "Rechnung Nr", "Facture". Just the number/code.'),
+    queryAndLog('Invoice Date  ', 'What is the INVOICE DATE (not due date, not delivery date)? The date the invoice was issued. Format: YYYY-MM-DD'),
+    queryAndLog('Vendor        ', 'What company ISSUED this invoice (the seller/vendor, not the buyer)? Look at the letterhead or "From" section.'),
+    queryAndLog('Currency      ', 'What CURRENCY is used? Look for € (EUR), $ (USD), or £ (GBP). Answer with 3-letter code: EUR, USD, or GBP'),
+    queryAndLog('Total Amount  ', 'What is the TOTAL AMOUNT INCLUDING TAX (the final amount to pay, with VAT/tax included)? Just the number, e.g. 24.99'),
+    queryAndLog('Net Amount    ', 'What is the NET AMOUNT (subtotal before VAT/tax)? Just the number, e.g. 20.99'),
+    queryAndLog('VAT Amount    ', 'What is the VAT/TAX AMOUNT? Just the number, e.g. 4.00'),
+  ]);
+
+  // Parse amount from string (handles European format)
+  const parseAmount = (s: string): number => {
+    if (!s) return 0;
+    // Extract number from the response
+    const match = s.match(/([\d.,]+)/);
+    if (!match) return 0;
+    const numStr = match[1];
+    // Handle European format: 1.234,56 → 1234.56
+    const normalized = numStr.includes(',') && numStr.indexOf(',') > numStr.lastIndexOf('.')
+      ? numStr.replace(/\./g, '').replace(',', '.')
+      : numStr.replace(/,/g, '');
+    return parseFloat(normalized) || 0;
+  };
+
+  // Extract invoice number from potentially verbose response
+  const extractInvoiceNumber = (s: string): string => {
+    let clean = s.replace(/\*\*/g, '').replace(/`/g, '').trim();
+    // Look for common invoice number patterns
+    const patterns = [
+      /\b([A-Z]{2,3}\d{10,})\b/i, // IEE2022006460244
+      /\b([A-Z]\d{8,})\b/i, // R0014359508
+      /\b(INV[-\s]?\d{4}[-\s]?\d+)\b/i, // INV-2024-001
+      /\b(\d{7,})\b/, // 1579087430
+    ];
+    for (const pattern of patterns) {
+      const match = clean.match(pattern);
+      if (match) return match[1];
+    }
+    return clean.replace(/[^A-Z0-9-]/gi, '').trim() || clean;
+  };
+
+  // Extract date (YYYY-MM-DD) from response
+  const extractDate = (s: string): string => {
+    let clean = s.replace(/\*\*/g, '').replace(/`/g, '').trim();
+    const isoMatch = clean.match(/(\d{4}-\d{2}-\d{2})/);
+    if (isoMatch) return isoMatch[1];
+    return clean.replace(/[^\d-]/g, '').trim();
+  };
+
+  // Extract currency
+  const extractCurrency = (s: string): string => {
+    const upper = s.toUpperCase();
+    if (upper.includes('EUR') || upper.includes('€')) return 'EUR';
+    if (upper.includes('USD') || upper.includes('$')) return 'USD';
+    if (upper.includes('GBP') || upper.includes('£')) return 'GBP';
+    return 'EUR';
+  };
 
   return {
-    invoice_number: parsed.invoice_number || null,
-    invoice_date: parsed.invoice_date || null,
-    vendor_name: parsed.vendor_name || null,
-    currency: parsed.currency || 'EUR',
-    net_amount: parseFloat(parsed.net_amount) || 0,
-    vat_amount: parseFloat(parsed.vat_amount) || 0,
-    total_amount: parseFloat(parsed.total_amount) || 0,
+    invoice_number: extractInvoiceNumber(invoiceNum),
+    invoice_date: extractDate(invoiceDate),
+    vendor_name: vendor.replace(/\*\*/g, '').replace(/`/g, '').trim() || '',
+    currency: extractCurrency(currency),
+    net_amount: parseAmount(netAmount),
+    vat_amount: parseAmount(vatAmount),
+    total_amount: parseAmount(totalAmount),
  };
 }
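A quick standalone check of the amount normalization introduced above. The helper is copied verbatim from the new parseAmount in the diff; the sample strings are illustrative, not taken from the test fixtures:

// parseAmount copied from the diff for a standalone sanity check.
const parseAmount = (s: string): number => {
  if (!s) return 0;
  const match = s.match(/([\d.,]+)/);
  if (!match) return 0;
  const numStr = match[1];
  // A comma after the last dot signals European formatting.
  const normalized = numStr.includes(',') && numStr.indexOf(',') > numStr.lastIndexOf('.')
    ? numStr.replace(/\./g, '').replace(',', '.')
    : numStr.replace(/,/g, '');
  return parseFloat(normalized) || 0;
};

console.log(parseAmount('1.234,56'));  // 1234.56 (European thousands + decimal comma)
console.log(parseAmount('1,234.56'));  // 1234.56 (US thousands + decimal point)
console.log(parseAmount('EUR 24,99')); // 24.99  (decimal comma, no thousands)
console.log(parseAmount('$ 24.99'));   // 24.99  (plain decimal point)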
@@ -284,12 +247,48 @@ function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: string }> {
   return testCases.sort((a, b) => a.name.localeCompare(b.name));
 }
 
+/**
+ * Ensure Qwen3-VL 8B model is available
+ */
+async function ensureQwen3Vl(): Promise<boolean> {
+  try {
+    const response = await fetch(`${OLLAMA_URL}/api/tags`);
+    if (response.ok) {
+      const data = await response.json();
+      const models = data.models || [];
+      if (models.some((m: { name: string }) => m.name === VISION_MODEL)) {
+        console.log(`[Ollama] Model already available: ${VISION_MODEL}`);
+        return true;
+      }
+    }
+  } catch {
+    console.log('[Ollama] Cannot check models');
+    return false;
+  }
+
+  console.log(`[Ollama] Pulling model: ${VISION_MODEL}...`);
+  const pullResponse = await fetch(`${OLLAMA_URL}/api/pull`, {
+    method: 'POST',
+    headers: { 'Content-Type': 'application/json' },
+    body: JSON.stringify({ name: VISION_MODEL, stream: false }),
+  });
+
+  return pullResponse.ok;
+}
+
 // Tests
 
 tap.test('setup: ensure Qwen3-VL is running', async () => {
-  console.log('\n[Setup] Checking Qwen3-VL 8B (~5GB)...\n');
-  const ok = await ensureQwen3Vl();
-  expect(ok).toBeTrue();
+  console.log('\n[Setup] Checking Qwen3-VL 8B...\n');
+
+  // Ensure Ollama service is running
+  const ollamaOk = await ensureMiniCpm();
+  expect(ollamaOk).toBeTrue();
+
+  // Ensure Qwen3-VL 8B model
+  const visionOk = await ensureQwen3Vl();
+  expect(visionOk).toBeTrue();
+
   console.log('\n[Setup] Ready!\n');
 });
 
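The new helper above treats a blocking pull as success based on the HTTP status alone. A stricter variant could re-check /api/tags after the pull; the sketch below is an illustration of that idea (hypothetical helper names, assuming the same Ollama endpoints used in the diff), not code from this commit:

// Sketch: verify a pulled model is actually listed before reporting success.
async function modelPresent(base: string, model: string): Promise<boolean> {
  const res = await fetch(`${base}/api/tags`);
  if (!res.ok) return false;
  const data = await res.json();
  return (data.models || []).some((m: { name: string }) => m.name === model);
}

async function pullAndVerify(base: string, model: string): Promise<boolean> {
  const pull = await fetch(`${base}/api/pull`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ name: model, stream: false }), // blocks until the pull finishes
  });
  if (!pull.ok) return false;
  // Confirm the tag landed rather than trusting the HTTP code alone.
  return modelPresent(base, model);
}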
@@ -339,7 +338,7 @@ tap.test('summary', async () => {
   console.log(`\n======================================================`);
   console.log(`  Invoice Extraction Summary (Qwen3-VL Vision)`);
   console.log(`======================================================`);
-  console.log(`  Method:   Qwen3-VL 8B (Direct Vision)`);
+  console.log(`  Method:   Multi-query (single pass)`);
   console.log(`  Passed:   ${passedCount}/${total}`);
   console.log(`  Failed:   ${failedCount}/${total}`);
   console.log(`  Accuracy: ${accuracy.toFixed(1)}%`);