Compare commits
31 Commits
| SHA1 |
|---|
| cf282b2437 |
| 77d57e80bd |
| b202e024a4 |
| 2210611f70 |
| d8bdb18841 |
| d384c1d79b |
| 6bd672da61 |
| 44d6dc3336 |
| d1ff95bd94 |
| 09770d3177 |
| 235aa1352b |
| 08728ada4d |
| b58bcabc76 |
| 6dbd06073b |
| ae28a64902 |
| 09ea7440e8 |
| 177e87d3b8 |
| 17ea7717eb |
| bd5bb5d874 |
| d91df70fff |
| d6c97a9625 |
| 76b21f1f7b |
| 4c368dfef9 |
| e76768da55 |
| 63d72a52c9 |
| 386122c8c7 |
| 7c8f10497e |
| 9f9ec0a671 |
| 3780105c6f |
| d237ad19f4 |
| 7652a2df52 |
Dockerfile_minicpm45v_cpu (file deleted, 27 lines)

@@ -1,27 +0,0 @@

```dockerfile
# MiniCPM-V 4.5 CPU Variant
# Vision-Language Model optimized for CPU-only inference
FROM ollama/ollama:latest

LABEL maintainer="Task Venture Capital GmbH <hello@task.vc>"
LABEL description="MiniCPM-V 4.5 Vision-Language Model - CPU optimized (GGUF)"
LABEL org.opencontainers.image.source="https://code.foss.global/host.today/ht-docker-ai"

# Environment configuration for CPU-only mode
ENV MODEL_NAME="minicpm-v"
ENV OLLAMA_HOST="0.0.0.0"
ENV OLLAMA_ORIGINS="*"
# Disable GPU usage for CPU-only variant
ENV CUDA_VISIBLE_DEVICES=""

# Copy and setup entrypoint
COPY image_support_files/minicpm45v_entrypoint.sh /usr/local/bin/docker-entrypoint.sh
RUN chmod +x /usr/local/bin/docker-entrypoint.sh

# Expose Ollama API port
EXPOSE 11434

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=120s --retries=3 \
    CMD curl -f http://localhost:11434/api/tags || exit 1

ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
```
Dockerfile_nanonets_vllm_gpu_VRAM10GB (new file, 34 lines)

@@ -0,0 +1,34 @@

```dockerfile
# Nanonets-OCR2-3B Vision Language Model
# Based on Qwen2.5-VL-3B, fine-tuned for document OCR (Oct 2025 release)
# Improvements over OCR-s: better semantic tagging, LaTeX equations, flowcharts
# ~12-16GB VRAM with 30K context, outputs structured markdown with semantic tags
#
# Build: docker build -f Dockerfile_nanonets_vllm_gpu_VRAM10GB -t nanonets-ocr .
# Run: docker run --gpus all -p 8000:8000 -v ht-huggingface-cache:/root/.cache/huggingface nanonets-ocr

FROM vllm/vllm-openai:latest

LABEL maintainer="Task Venture Capital GmbH <hello@task.vc>"
LABEL description="Nanonets-OCR2-3B - Document OCR optimized Vision Language Model"
LABEL org.opencontainers.image.source="https://code.foss.global/host.today/ht-docker-ai"

# Environment configuration
ENV MODEL_NAME="nanonets/Nanonets-OCR2-3B"
ENV HOST="0.0.0.0"
ENV PORT="8000"
ENV MAX_MODEL_LEN="30000"
ENV GPU_MEMORY_UTILIZATION="0.9"

# Expose OpenAI-compatible API port
EXPOSE 8000

# Health check - vLLM exposes /health endpoint
HEALTHCHECK --interval=30s --timeout=10s --start-period=120s --retries=5 \
    CMD curl -f http://localhost:8000/health || exit 1

# Start vLLM server with Nanonets-OCR2-3B model
CMD ["--model", "nanonets/Nanonets-OCR2-3B", \
     "--trust-remote-code", \
     "--max-model-len", "30000", \
     "--host", "0.0.0.0", \
     "--port", "8000"]
```
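For reference, the vLLM base image exposes the standard OpenAI-compatible chat API, so this container can be exercised with a single /v1/chat/completions request. A minimal TypeScript sketch (the prompt text and localhost mapping are illustrative; the optional `signal` parameter is used by a later example):

```typescript
// Sketch: OCR one page image via the container's OpenAI-compatible endpoint.
// Assumes the container is running with -p 8000:8000 as in the Run comment above.
import { readFile } from 'node:fs/promises';

export async function ocrPage(imagePath: string, signal?: AbortSignal): Promise<string> {
  const imageB64 = (await readFile(imagePath)).toString('base64');
  const res = await fetch('http://localhost:8000/v1/chat/completions', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: 'nanonets/Nanonets-OCR2-3B',
      messages: [{
        role: 'user',
        content: [
          { type: 'image_url', image_url: { url: `data:image/png;base64,${imageB64}` } },
          { type: 'text', text: 'Convert this document page to structured markdown.' },
        ],
      }],
      max_tokens: 4096,
    }),
    signal,
  });
  if (!res.ok) throw new Error(`OCR request failed: HTTP ${res.status}`);
  const data = await res.json();
  return data.choices[0].message.content as string;
}
```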
Dockerfile_paddleocr_vl_cpu (file deleted, 57 lines)

@@ -1,57 +0,0 @@

```dockerfile
# PaddleOCR-VL CPU Variant
# Vision-Language Model for document parsing using transformers (slower, no GPU required)
FROM python:3.11-slim-bookworm

LABEL maintainer="Task Venture Capital GmbH <hello@task.vc>"
LABEL description="PaddleOCR-VL 0.9B CPU - Vision-Language Model for document parsing"
LABEL org.opencontainers.image.source="https://code.foss.global/host.today/ht-docker-ai"

# Environment configuration
ENV PYTHONUNBUFFERED=1
ENV HF_HOME=/root/.cache/huggingface
ENV CUDA_VISIBLE_DEVICES=""
ENV SERVER_PORT=8000
ENV SERVER_HOST=0.0.0.0

# Set working directory
WORKDIR /app

# Install system dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    libgl1-mesa-glx \
    libglib2.0-0 \
    libgomp1 \
    curl \
    git \
    && rm -rf /var/lib/apt/lists/*

# Install Python dependencies
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir \
    torch==2.5.1 torchvision==0.20.1 --index-url https://download.pytorch.org/whl/cpu && \
    pip install --no-cache-dir \
    transformers \
    accelerate \
    safetensors \
    pillow \
    fastapi \
    uvicorn[standard] \
    python-multipart \
    httpx \
    protobuf \
    sentencepiece \
    einops

# Copy server files
COPY image_support_files/paddleocr_vl_server.py /app/paddleocr_vl_server.py
COPY image_support_files/paddleocr_vl_entrypoint.sh /usr/local/bin/paddleocr-vl-cpu-entrypoint.sh
RUN chmod +x /usr/local/bin/paddleocr-vl-cpu-entrypoint.sh

# Expose API port
EXPOSE 8000

# Health check (longer start-period for CPU + model download)
HEALTHCHECK --interval=30s --timeout=10s --start-period=600s --retries=3 \
    CMD curl -f http://localhost:8000/health || exit 1

ENTRYPOINT ["/usr/local/bin/paddleocr-vl-cpu-entrypoint.sh"]
```
PaddleOCR-VL full pipeline Dockerfile (file deleted, 90 lines)

@@ -1,90 +0,0 @@

```dockerfile
# PaddleOCR-VL Full Pipeline (PP-DocLayoutV2 + PaddleOCR-VL + Structured Output)
# Self-contained GPU image with complete document parsing pipeline
FROM nvidia/cuda:12.4.0-devel-ubuntu22.04

LABEL maintainer="Task Venture Capital GmbH <hello@task.vc>"
LABEL description="PaddleOCR-VL Full Pipeline - Layout Detection + VL Recognition + JSON/Markdown Output"
LABEL org.opencontainers.image.source="https://code.foss.global/host.today/ht-docker-ai"

# Environment configuration
ENV DEBIAN_FRONTEND=noninteractive
ENV PYTHONUNBUFFERED=1
ENV HF_HOME=/root/.cache/huggingface
ENV PADDLEOCR_HOME=/root/.paddleocr
ENV SERVER_PORT=8000
ENV SERVER_HOST=0.0.0.0
ENV VLM_PORT=8080

# Set working directory
WORKDIR /app

# Install system dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    python3.11 \
    python3.11-venv \
    python3.11-dev \
    python3-pip \
    libgl1-mesa-glx \
    libglib2.0-0 \
    libgomp1 \
    libsm6 \
    libxext6 \
    libxrender1 \
    curl \
    git \
    wget \
    && rm -rf /var/lib/apt/lists/* \
    && update-alternatives --install /usr/bin/python python /usr/bin/python3.11 1 \
    && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1

# Create and activate virtual environment
RUN python -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# Upgrade pip
RUN pip install --no-cache-dir --upgrade pip setuptools wheel

# Install PaddlePaddle GPU (CUDA 12.x)
RUN pip install --no-cache-dir \
    paddlepaddle-gpu==3.2.1 \
    --extra-index-url https://www.paddlepaddle.org.cn/packages/stable/cu126/

# Install PaddleOCR with doc-parser (includes PP-DocLayoutV2)
RUN pip install --no-cache-dir \
    "paddleocr[doc-parser]" \
    safetensors

# Install PyTorch with CUDA support
RUN pip install --no-cache-dir \
    torch==2.5.1 \
    torchvision \
    --index-url https://download.pytorch.org/whl/cu124

# Install transformers for PaddleOCR-VL inference (no vLLM - use local inference)
# PaddleOCR-VL requires transformers>=4.55.0 for use_kernel_forward_from_hub
RUN pip install --no-cache-dir \
    transformers>=4.55.0 \
    accelerate \
    hf-kernels

# Install our API server dependencies
RUN pip install --no-cache-dir \
    fastapi \
    uvicorn[standard] \
    python-multipart \
    httpx \
    pillow

# Copy server files
COPY image_support_files/paddleocr_vl_full_server.py /app/server.py
COPY image_support_files/paddleocr_vl_full_entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +x /usr/local/bin/entrypoint.sh

# Expose ports (8000 = API, 8080 = internal VLM server)
EXPOSE 8000

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=600s --retries=3 \
    CMD curl -f http://localhost:8000/health || exit 1

ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
```
Dockerfile_paddleocr_vl_gpu (file deleted, 71 lines)

@@ -1,71 +0,0 @@

```dockerfile
# PaddleOCR-VL GPU Variant (Transformers-based, not vLLM)
# Vision-Language Model for document parsing using transformers with CUDA
FROM nvidia/cuda:12.4.0-runtime-ubuntu22.04

LABEL maintainer="Task Venture Capital GmbH <hello@task.vc>"
LABEL description="PaddleOCR-VL 0.9B GPU - Vision-Language Model using transformers"
LABEL org.opencontainers.image.source="https://code.foss.global/host.today/ht-docker-ai"

# Environment configuration
ENV DEBIAN_FRONTEND=noninteractive
ENV PYTHONUNBUFFERED=1
ENV HF_HOME=/root/.cache/huggingface
ENV SERVER_PORT=8000
ENV SERVER_HOST=0.0.0.0

# Set working directory
WORKDIR /app

# Install system dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    python3.11 \
    python3.11-venv \
    python3.11-dev \
    python3-pip \
    libgl1-mesa-glx \
    libglib2.0-0 \
    libgomp1 \
    curl \
    git \
    && rm -rf /var/lib/apt/lists/* \
    && update-alternatives --install /usr/bin/python python /usr/bin/python3.11 1 \
    && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1

# Create and activate virtual environment
RUN python -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# Install PyTorch with CUDA support
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir \
    torch==2.5.1 \
    torchvision \
    --index-url https://download.pytorch.org/whl/cu124

# Install Python dependencies (transformers-based, not vLLM)
RUN pip install --no-cache-dir \
    transformers \
    accelerate \
    safetensors \
    pillow \
    fastapi \
    uvicorn[standard] \
    python-multipart \
    httpx \
    protobuf \
    sentencepiece \
    einops

# Copy server files (same as CPU variant - it auto-detects CUDA)
COPY image_support_files/paddleocr_vl_server.py /app/paddleocr_vl_server.py
COPY image_support_files/paddleocr_vl_entrypoint.sh /usr/local/bin/paddleocr-vl-entrypoint.sh
RUN chmod +x /usr/local/bin/paddleocr-vl-entrypoint.sh

# Expose API port
EXPOSE 8000

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=300s --retries=3 \
    CMD curl -f http://localhost:8000/health || exit 1

ENTRYPOINT ["/usr/local/bin/paddleocr-vl-entrypoint.sh"]
```
Dockerfile_qwen3vl_ollama_gpu_VRAM20GB (new file, 26 lines)

@@ -0,0 +1,26 @@

```dockerfile
# Qwen3-VL-30B-A3B Vision Language Model
# Q4_K_M quantization (~20GB model)
#
# Most powerful Qwen vision model:
# - 256K context (expandable to 1M)
# - Visual agent capabilities
# - Code generation from images
#
# Build: docker build -f Dockerfile_qwen3vl_ollama_gpu_VRAM20GB -t qwen3vl .
# Run: docker run --gpus all -p 11434:11434 -v ht-ollama-models:/root/.ollama qwen3vl

FROM ollama/ollama:latest

# Pre-pull the model during build (optional - can also pull at runtime)
# This makes the image larger but faster to start
# RUN ollama serve & sleep 5 && ollama pull qwen3-vl:30b-a3b && pkill ollama

# Expose Ollama API port
EXPOSE 11434

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
    CMD curl -f http://localhost:11434/api/tags || exit 1

# Start Ollama server
CMD ["serve"]
```
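Since this image wraps stock Ollama, any Ollama client works against port 11434 once the model has been pulled. A minimal sketch using the documented Ollama HTTP endpoints (the model tag matches the commented pull command above; the question text is illustrative):

```typescript
// Sketch: pull the model once (slow on first run), then ask a vision question.
export async function askQwen3Vl(imageBase64: string, question: string): Promise<string> {
  // stream: false makes /api/pull respond only when the pull has finished.
  await fetch('http://localhost:11434/api/pull', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ name: 'qwen3-vl:30b-a3b', stream: false }),
  });

  const res = await fetch('http://localhost:11434/api/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: 'qwen3-vl:30b-a3b',
      stream: false,
      // Ollama's chat API takes base64 images alongside the text content.
      messages: [{ role: 'user', content: question, images: [imageBase64] }],
    }),
  });
  const data = await res.json();
  return data.message.content as string;
}
```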
build-images.sh

@@ -13,46 +13,38 @@ NC='\033[0m' # No Color

```diff
 echo -e "${BLUE}Building ht-docker-ai images...${NC}"

-# Build GPU variant
+# Build MiniCPM-V 4.5 GPU variant
 echo -e "${GREEN}Building MiniCPM-V 4.5 GPU variant...${NC}"
 docker build \
-    -f Dockerfile_minicpm45v_gpu \
+    -f Dockerfile_minicpm45v_ollama_gpu_VRAM9GB \
     -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v \
-    -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v-gpu \
     -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:latest \
     .

-# Build CPU variant
-echo -e "${GREEN}Building MiniCPM-V 4.5 CPU variant...${NC}"
+# Build Qwen3-VL GPU variant
+echo -e "${GREEN}Building Qwen3-VL-30B-A3B GPU variant...${NC}"
 docker build \
-    -f Dockerfile_minicpm45v_cpu \
-    -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v-cpu \
+    -f Dockerfile_qwen3vl_ollama_gpu_VRAM20GB \
+    -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:qwen3vl \
     .

-# Build PaddleOCR-VL GPU variant
-echo -e "${GREEN}Building PaddleOCR-VL GPU variant...${NC}"
+# Build Nanonets-OCR GPU variant
+echo -e "${GREEN}Building Nanonets-OCR-s GPU variant...${NC}"
 docker build \
-    -f Dockerfile_paddleocr_vl_gpu \
-    -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl \
-    -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl-gpu \
-    .
-
-# Build PaddleOCR-VL CPU variant
-echo -e "${GREEN}Building PaddleOCR-VL CPU variant...${NC}"
-docker build \
-    -f Dockerfile_paddleocr_vl_cpu \
-    -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl-cpu \
+    -f Dockerfile_nanonets_vllm_gpu_VRAM10GB \
+    -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:nanonets-ocr \
     .

 echo -e "${GREEN}All images built successfully!${NC}"
 echo ""
 echo "Available images:"
-echo "  MiniCPM-V 4.5:"
-echo "    - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v (GPU)"
-echo "    - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v-cpu (CPU)"
-echo "    - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:latest (GPU)"
+echo "  MiniCPM-V 4.5 (Ollama, ~9GB VRAM):"
+echo "    - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v"
+echo "    - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:latest"
 echo ""
-echo "  PaddleOCR-VL (Vision-Language Model):"
-echo "    - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl (GPU/vLLM)"
-echo "    - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl-gpu (GPU/vLLM)"
-echo "    - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl-cpu (CPU)"
+echo "  Qwen3-VL-30B-A3B (Ollama, ~20GB VRAM):"
+echo "    - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:qwen3vl"
+echo ""
+echo "  Nanonets-OCR-s (vLLM, ~10GB VRAM):"
+echo "    - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:nanonets-ocr"
```
changelog.md (+119 lines)

@@ -1,5 +1,124 @@

# Changelog

## 2026-01-20 - 1.15.0 - feat(tests)
integrate SmartAi/DualAgentOrchestrator into extraction tests and add JSON self-validation

- Integrate SmartAi and DualAgentOrchestrator into bankstatement and invoice tests to perform structured extraction with streaming
- Register and use JsonValidatorTool to validate outputs (json.validate) and enforce validation before task completion
- Add tryExtractJson parsing fallback, improved extraction prompts, retries and clearer parsing/logging
- Initialize and teardown SmartAi and orchestrator in test setup/summary, and enable onToken streaming handlers for real-time output
- Bump devDependencies: @push.rocks/smartagent to ^1.3.0 and @push.rocks/smartai to ^0.12.0
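As a rough illustration of the self-validation step this entry describes (this is not the @push.rocks/smartagent API, just the contract a json.validate-style tool enforces before a task may be marked complete):

```typescript
// Hypothetical sketch: parse the model's output and check required keys,
// returning errors the agent must fix before the task counts as done.
interface ValidationResult {
  valid: boolean;
  errors: string[];
}

export function validateExtraction(raw: string, requiredKeys: string[]): ValidationResult {
  let parsed: Record<string, unknown>;
  try {
    parsed = JSON.parse(raw);
  } catch (err) {
    return { valid: false, errors: [`not parseable JSON: ${(err as Error).message}`] };
  }
  const errors = requiredKeys
    .filter((key) => !(key in parsed))
    .map((key) => `missing required key: ${key}`);
  return { valid: errors.length === 0, errors };
}
```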
## 2026-01-20 - 1.14.3 - fix(repo)
no changes detected in the diff; no files modified and no release required

- Diff contained no changes
- No files were added, removed, or modified
- No code, dependency, or documentation updates to release

## 2026-01-19 - 1.14.2 - fix(readme)
update README to document Nanonets-OCR2-3B (replaces Nanonets-OCR-s), adjust VRAM and context defaults, expand feature docs, and update examples/test command

- Renamed Nanonets-OCR-s -> Nanonets-OCR2-3B throughout README and examples
- Updated Nanonets VRAM guidance from ~10GB to ~12-16GB and documented 30K context
- Changed documented MAX_MODEL_LEN default from 8192 to 30000
- Updated example model identifiers (model strings and curl/example snippets) to nanonets/Nanonets-OCR2-3B
- Added MiniCPM and Qwen feature bullets (multilingual, multi-image, flowchart support, expanded context notes)
- Replaced README test command from ./test-images.sh to pnpm test
## 2026-01-19 - 1.14.1 - fix(extraction)
improve JSON extraction prompts and model options for invoice and bank statement tests

- Refactor JSON extraction prompts to be sent after the document text and add explicit 'WHERE TO FIND DATA' and 'RULES' sections for clearer extraction guidance
- Change chat message flow to: send document, assistant acknowledgement, then the JSON extraction prompt (avoids concatenating large prompts into one message)
- Add model options (num_ctx: 32768, temperature: 0) to give larger context windows and deterministic JSON output
- Simplify logging to avoid printing full prompt contents; log document and prompt lengths instead
- Increase timeouts for large documents to 600000ms (10 minutes) where applicable
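Concretely, the reworked flow sends three messages and pins the sampling options. A sketch of the shape of that Ollama /api/chat call (the model tag and acknowledgement text are placeholders):

```typescript
// Sketch: document first, assistant acknowledgement, then the extraction
// prompt, instead of concatenating everything into one giant message.
export async function extractFromDocument(documentText: string, extractionPrompt: string) {
  // Log lengths only, not the full prompt contents.
  console.log(`document: ${documentText.length} chars, prompt: ${extractionPrompt.length} chars`);
  const res = await fetch('http://localhost:11434/api/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: 'gpt-oss:20b', // placeholder tag
      stream: false,
      messages: [
        { role: 'user', content: documentText },
        { role: 'assistant', content: 'Document received.' },
        { role: 'user', content: extractionPrompt },
      ],
      options: {
        num_ctx: 32768,  // larger context window so the document fits
        temperature: 0,  // deterministic output for JSON extraction
      },
    }),
  });
  const data = await res.json();
  return data.message.content as string;
}
```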
## 2026-01-19 - 1.14.0 - feat(docker-images)
add vLLM-based Nanonets-OCR2-3B image, Qwen3-VL Ollama image and refactor build/docs/tests to use new runtime/layout

- Add new Dockerfiles for Nanonets (Dockerfile_nanonets_vllm_gpu_VRAM10GB), Qwen3 (Dockerfile_qwen3vl_ollama_gpu_VRAM20GB) and a clarified MiniCPM Ollama variant (Dockerfile_minicpm45v_ollama_gpu_VRAM9GB); remove older, redundant Dockerfiles.
- Update build-images.sh to build the new image tags (minicpm45v, qwen3vl, nanonets-ocr) and adjust messaging/targets accordingly.
- Documentation overhaul: readme.md and readme.hints.md updated to reflect vLLM vs Ollama runtimes, corrected ports/VRAM estimates, volume recommendations, and API endpoint details.
- Tests updated to target the new model ID (nanonets/Nanonets-OCR2-3B), to process one page per batch, and to include a 10-minute AbortSignal timeout for OCR requests.
- Added focused extraction test suites (test/test.invoices.extraction.ts and test/test.invoices.failed.ts) for faster iteration and debugging of invoice extraction.
- Bump devDependencies: @git.zone/tsrun -> ^2.0.1 and @git.zone/tstest -> ^3.1.5.
- Misc: test helper references and docker compose/test port mapping fixed (nanonets uses 8000), and various README sections cleaned and reorganized.
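The one-page-per-batch OCR loop with the 10-minute cap might look like this sketch, reusing the illustrative `ocrPage` helper shown after the Nanonets Dockerfile above:

```typescript
// Sketch: OCR pages sequentially, one request per page, aborting any
// request that exceeds 10 minutes instead of hanging the whole suite.
// AbortSignal.timeout requires Node.js >= 17.3.
import { ocrPage } from './ocr-helper.js'; // hypothetical module path

export async function ocrAllPages(pagePaths: string[]): Promise<string[]> {
  const markdownPages: string[] = [];
  for (const [i, path] of pagePaths.entries()) {
    const md = await ocrPage(path, AbortSignal.timeout(600_000));
    console.log(`OCR page ${i + 1}/${pagePaths.length}: ${md.length} chars`);
    markdownPages.push(md);
  }
  return markdownPages;
}
```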
## 2026-01-18 - 1.13.2 - fix(tests)
stabilize OCR extraction tests and manage GPU containers

- Add stopAllGpuContainers() and call it before starting GPU images to free GPU memory.
- Remove PaddleOCR-VL image configs and associated ensure helpers from docker test helper to simplify images list.
- Split invoice/bankstatement tests into two sequential stages: Stage 1 runs Nanonets OCR to produce markdown files, Stage 2 stops Nanonets and runs model extraction from saved markdown (avoids GPU contention).
- Introduce temporary markdown directory handling and cleanup; add stopNanonets() and container running checks in tests.
- Switch bank statement extraction model from qwen3:8b to gpt-oss:20b; add request timeout and improved logging/console output across tests.
- Refactor extractWithConsensus and extraction functions to accept document identifiers, improve error messages and JSON extraction robustness.

## 2026-01-18 - 1.13.1 - fix(image_support_files)
remove PaddleOCR-VL server scripts from image_support_files

- Deleted files: image_support_files/paddleocr_vl_full_server.py (approx. 636 lines) and image_support_files/paddleocr_vl_server.py (approx. 465 lines)
- Cleanup/removal of legacy PaddleOCR-VL FastAPI server implementations; may affect users who relied on these local scripts
## 2026-01-18 - 1.13.0 - feat(tests)
revamp tests and remove legacy Dockerfiles: adopt JSON/consensus workflows, switch MiniCPM model, and delete deprecated Docker/test variants

- Removed multiple Dockerfiles and related entrypoints for MiniCPM and PaddleOCR-VL (cpu/gpu/full), cleaning up legacy image recipes.
- Pruned many older test files (combined, ministral3, paddleocr-vl, and several invoice/test variants) to consolidate the test suite.
- Updated bank statement MiniCPM test: now uses MODEL='openbmb/minicpm-v4.5:q8_0', JSON per-page extraction prompt, consensus retry logic, expanded logging, and stricter result matching.
- Updated invoice MiniCPM test: switched to a consensus flow (fast JSON pass + thinking pass), increased PDF conversion quality, endpoints migrated to chat-style API calls with image-in-message payloads, and improved finalization logic.
- API usage changed from /api/generate to /api/chat with message-based payloads and embedded images; CI and local test runners will need model availability and possible pipeline adjustments.
## 2026-01-18 - 1.12.0 - feat(tests)
switch vision tests to multi-query extraction (count then per-row/field queries) and add logging/summaries

- Replace streaming + consensus pipeline with multi-query approach: count rows per page, then query each transaction/field individually (batched parallel queries).
- Introduce unified helpers (queryVision / queryField / getTransaction / countTransactions) and simplify Ollama requests (stream:false, reduced num_predict, /no_think prompts).
- Improve parsing and normalization for amounts (European formats), invoice numbers, dates and currency extraction.
- Adjust model checks to look for generic 'minicpm' and update test names/messages; add pass/fail counters and a summary test output.
- Remove previous consensus voting and streaming JSON accumulation logic, and add immediate per-transaction logging and batching.
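The count-then-query pattern described in this entry, in sketch form (queryVision is an assumed helper that sends one short prompt plus the page image to the model and returns its text reply):

```typescript
// Assumed helper: one image + one short prompt in, raw model text out.
declare function queryVision(imageBase64: string, prompt: string): Promise<string>;

// Sketch: count the rows first, then fetch each row with its own small
// query; the per-row queries run in parallel as one batch.
export async function extractRows(pageImage: string): Promise<string[]> {
  const countReply = await queryVision(
    pageImage,
    'How many transaction rows are on this page? Reply with a number only. /no_think',
  );
  const count = parseInt(countReply.trim(), 10) || 0;

  return Promise.all(
    Array.from({ length: count }, (_, i) =>
      queryVision(pageImage, `Return transaction ${i + 1} as "date|description|amount". /no_think`),
    ),
  );
}
```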
## 2026-01-18 - 1.11.0 - feat(vision)
process pages separately and make Qwen3-VL vision extraction more robust; add per-page parsing, safer JSON handling, reduced token usage, and multi-query invoice extraction

- Bank statements: split extraction into extractTransactionsFromPage and sequentially process pages to avoid thinking-token exhaustion
- Bank statements: reduced num_predict from 8000 to 4000, send single image per request, added per-page logging and non-throwing handling for empty or non-JSON responses
- Bank statements: catch JSON.parse errors and return empty array instead of throwing
- Invoices: introduced queryField to request single values and perform multiple simple queries (reduces model thinking usage)
- Invoices: reduced num_predict for invoice queries from 4000 to 500 and parse amounts robustly (handles European formats like 1.234,56)
- Invoices: normalize currency to uppercase 3-letter code, return safe defaults (empty strings / 0) instead of nulls, and parse net/vat/total with fallbacks
- General: simplified Ollama API error messages to avoid including response body content in thrown errors
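The European-format amount handling can be sketched as a small normalizer (the exact heuristics in the tests may differ; this shows the ',' vs '.' disambiguation and the safe default):

```typescript
// Sketch: accept "1.234,56" (European) and "1,234.56" (US) alike,
// returning a safe 0 instead of NaN when parsing fails.
export function parseAmount(raw: string): number {
  let s = raw.replace(/[^\d.,-]/g, ''); // strip currency symbols and spaces
  const lastComma = s.lastIndexOf(',');
  const lastDot = s.lastIndexOf('.');
  if (lastComma > lastDot) {
    // European style: dots are thousands separators, comma is the decimal mark.
    s = s.replace(/\./g, '').replace(',', '.');
  } else {
    // US style: commas are thousands separators.
    s = s.replace(/,/g, '');
  }
  const value = parseFloat(s);
  return Number.isFinite(value) ? value : 0;
}
```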
## 2026-01-18 - 1.10.1 - fix(tests)
improve Qwen3-VL invoice extraction test by switching to non-stream API, adding model availability/pull checks, simplifying response parsing, and tightening model options

- Replaced streaming reader logic with direct JSON parsing of the /api/chat response
- Added ensureQwen3Vl() to check and pull the Qwen3-VL:8b model from Ollama
- Switched to ensureMiniCpm() to verify Ollama service is running before model checks
- Use /no_think prompt for direct JSON output and set temperature to 0.0 and num_predict to 512
- Removed retry loop and streaming parsing; improved error messages to include response body
- Updated logging and test setup messages for clarity
## 2026-01-18 - 1.10.0 - feat(vision)
add Qwen3-VL vision model support with Dockerfile and tests; improve invoice OCR conversion and prompts; simplify extraction flow by removing consensus voting

- Add Dockerfile_qwen3vl to provide an Ollama-based image for Qwen3-VL and expose the Ollama API on port 11434
- Introduce test/test.invoices.qwen3vl.ts and ensureQwen3Vl() helper to pull and test qwen3-vl:8b
- Improve PDF->PNG conversion and prompt in ministral3 tests (higher DPI, max quality, sharpen) and increase num_predict from 512 to 1024
- Simplify extraction pipeline: remove consensus voting, log single-pass results, and simplify OCR HTML sanitization/truncation logic
## 2026-01-18 - 1.9.0 - feat(tests)
add Ministral 3 vision tests and improve invoice extraction pipeline to use Ollama chat schema, sanitization, and multi-page support

- Add new vision-based test suites for Ministral 3: test/test.invoices.ministral3.ts and test/test.bankstatements.ministral3.ts (model ministral-3:8b).
- Introduce ensureMinistral3() helper to start/check Ollama/MiniCPM model in test/helpers/docker.ts.
- Switch invoice extraction to use Ollama /api/chat with a JSON schema (format) and streaming support (reads message.content).
- Improve HTML handling: sanitizeHtml() to remove OCR artifacts, concatenate multi-page HTML with page markers, and increase truncation limits.
- Enhance response parsing: strip Markdown code fences, robustly locate JSON object boundaries, and provide clearer JSON parse errors.
- Add PDF->PNG conversion (ImageMagick) and direct image-based extraction flow for vision model tests.
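The response-parsing step this entry describes can be sketched like this: cutting the reply down to the outermost braces also discards any markdown fences or prose the model wrapped around the JSON.

```typescript
// Sketch: locate the outermost JSON object in a model reply and parse it,
// throwing a clearer error than a bare JSON.parse failure would give.
export function tryExtractJson(response: string): unknown {
  const start = response.indexOf('{');
  const end = response.lastIndexOf('}');
  if (start === -1 || end <= start) {
    throw new Error(`no JSON object found in model response (${response.length} chars)`);
  }
  return JSON.parse(response.slice(start, end + 1));
}
```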
## 2026-01-18 - 1.8.0 - feat(paddleocr-vl)
add structured HTML output and table parsing for PaddleOCR-VL, update API, tests, and README

image_support_files/paddleocr_vl_entrypoint.sh (file deleted, 19 lines)

@@ -1,19 +0,0 @@

```bash
#!/bin/bash
set -e

echo "==================================="
echo "PaddleOCR-VL Server (CPU)"
echo "==================================="

HOST="${SERVER_HOST:-0.0.0.0}"
PORT="${SERVER_PORT:-8000}"

echo "Host: ${HOST}"
echo "Port: ${PORT}"
echo "Device: CPU (no GPU)"
echo ""

echo "Starting PaddleOCR-VL CPU server..."
echo "==================================="

exec python /app/paddleocr_vl_server.py
```
image_support_files/paddleocr_vl_full_entrypoint.sh (file deleted, 12 lines)

@@ -1,12 +0,0 @@

```bash
#!/bin/bash
set -e

echo "Starting PaddleOCR-VL Full Pipeline Server (Transformers backend)..."

# Environment
SERVER_PORT=${SERVER_PORT:-8000}
SERVER_HOST=${SERVER_HOST:-0.0.0.0}

# Start our API server directly (no vLLM - uses local transformers inference)
echo "Starting API server on port $SERVER_PORT..."
exec python /app/server.py
```
image_support_files/paddleocr_vl_full_server.py (file deleted, 636 lines)

@@ -1,636 +0,0 @@

```python
#!/usr/bin/env python3
"""
PaddleOCR-VL Full Pipeline API Server (Transformers backend)

Provides REST API for document parsing using:
- PP-DocLayoutV2 for layout detection
- PaddleOCR-VL (transformers) for recognition
- Structured JSON/Markdown output
"""

import os
import io
import re
import base64
import logging
import tempfile
import time
import json
from typing import Optional, List, Union
from pathlib import Path

from fastapi import FastAPI, HTTPException, UploadFile, File, Form
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from PIL import Image
import torch

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Environment configuration
SERVER_HOST = os.environ.get('SERVER_HOST', '0.0.0.0')
SERVER_PORT = int(os.environ.get('SERVER_PORT', '8000'))
MODEL_NAME = "PaddlePaddle/PaddleOCR-VL"

# Device configuration
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
logger.info(f"Using device: {DEVICE}")

# Task prompts
TASK_PROMPTS = {
    "ocr": "OCR:",
    "table": "Table Recognition:",
    "formula": "Formula Recognition:",
    "chart": "Chart Recognition:",
}

# Initialize FastAPI app
app = FastAPI(
    title="PaddleOCR-VL Full Pipeline Server",
    description="Document parsing with PP-DocLayoutV2 + PaddleOCR-VL (transformers)",
    version="1.0.0"
)

# Global model instances
vl_model = None
vl_processor = None
layout_model = None


def load_vl_model():
    """Load the PaddleOCR-VL model for element recognition"""
    global vl_model, vl_processor

    if vl_model is not None:
        return

    logger.info(f"Loading PaddleOCR-VL model: {MODEL_NAME}")
    from transformers import AutoModelForCausalLM, AutoProcessor

    vl_processor = AutoProcessor.from_pretrained(MODEL_NAME, trust_remote_code=True)

    if DEVICE == "cuda":
        vl_model = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            trust_remote_code=True,
            torch_dtype=torch.bfloat16,
        ).to(DEVICE).eval()
    else:
        vl_model = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            trust_remote_code=True,
            torch_dtype=torch.float32,
            low_cpu_mem_usage=True,
        ).eval()

    logger.info("PaddleOCR-VL model loaded successfully")


def load_layout_model():
    """Load the LayoutDetection model for layout detection"""
    global layout_model

    if layout_model is not None:
        return

    try:
        logger.info("Loading LayoutDetection model (PP-DocLayout_plus-L)...")
        from paddleocr import LayoutDetection

        layout_model = LayoutDetection()
        logger.info("LayoutDetection model loaded successfully")
    except Exception as e:
        logger.warning(f"Could not load LayoutDetection: {e}")
        logger.info("Falling back to VL-only mode (no layout detection)")


def recognize_element(image: Image.Image, task: str = "ocr") -> str:
    """Recognize a single element using PaddleOCR-VL"""
    load_vl_model()

    prompt = TASK_PROMPTS.get(task, TASK_PROMPTS["ocr"])

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image},
                {"type": "text", "text": prompt},
            ]
        }
    ]

    inputs = vl_processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )

    if DEVICE == "cuda":
        inputs = {k: v.to(DEVICE) for k, v in inputs.items()}

    with torch.inference_mode():
        outputs = vl_model.generate(
            **inputs,
            max_new_tokens=4096,
            do_sample=False,
            use_cache=True
        )

    response = vl_processor.batch_decode(outputs, skip_special_tokens=True)[0]

    # Extract only the assistant's response content
    # The response format is: "User: <prompt>\nAssistant: <content>"
    # We want to extract just the content after "Assistant:"
    if "Assistant:" in response:
        parts = response.split("Assistant:")
        if len(parts) > 1:
            response = parts[-1].strip()
    elif "assistant:" in response.lower():
        # Case-insensitive fallback
        import re
        match = re.split(r'[Aa]ssistant:', response)
        if len(match) > 1:
            response = match[-1].strip()

    return response


def detect_layout(image: Image.Image) -> List[dict]:
    """Detect layout regions in the image"""
    load_layout_model()

    if layout_model is None:
        # No layout model - return a single region covering the whole image
        return [{
            "type": "text",
            "bbox": [0, 0, image.width, image.height],
            "score": 1.0
        }]

    # Save image to temp file
    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
        image.save(tmp.name, "PNG")
        tmp_path = tmp.name

    try:
        results = layout_model.predict(tmp_path)
        regions = []

        for res in results:
            # LayoutDetection returns boxes in 'boxes' key
            for box in res.get("boxes", []):
                coord = box.get("coordinate", [0, 0, image.width, image.height])
                # Convert numpy floats to regular floats
                bbox = [float(c) for c in coord]
                regions.append({
                    "type": box.get("label", "text"),
                    "bbox": bbox,
                    "score": float(box.get("score", 1.0))
                })

        # Sort regions by vertical position (top to bottom)
        regions.sort(key=lambda r: r["bbox"][1])

        return regions if regions else [{
            "type": "text",
            "bbox": [0, 0, image.width, image.height],
            "score": 1.0
        }]

    finally:
        os.unlink(tmp_path)


def process_document(image: Image.Image) -> dict:
    """Process a document through the full pipeline"""
    logger.info(f"Processing document: {image.size}")

    # Step 1: Detect layout
    regions = detect_layout(image)
    logger.info(f"Detected {len(regions)} layout regions")

    # Step 2: Recognize each region
    blocks = []
    for i, region in enumerate(regions):
        region_type = region["type"].lower()
        bbox = region["bbox"]

        # Crop region from image
        x1, y1, x2, y2 = [int(c) for c in bbox]
        region_image = image.crop((x1, y1, x2, y2))

        # Determine task based on region type
        if "table" in region_type:
            task = "table"
        elif "formula" in region_type or "math" in region_type:
            task = "formula"
        elif "chart" in region_type or "figure" in region_type:
            task = "chart"
        else:
            task = "ocr"

        # Recognize the region
        try:
            content = recognize_element(region_image, task)
            blocks.append({
                "index": i,
                "type": region_type,
                "bbox": bbox,
                "content": content,
                "task": task
            })
            logger.info(f"  Region {i} ({region_type}): {len(content)} chars")
        except Exception as e:
            logger.error(f"  Region {i} error: {e}")
            blocks.append({
                "index": i,
                "type": region_type,
                "bbox": bbox,
                "content": "",
                "error": str(e)
            })

    return {"blocks": blocks, "image_size": list(image.size)}


def result_to_markdown(result: dict) -> str:
    """Convert result to Markdown format with structural hints for LLM processing.

    Adds positional and type-based formatting to help downstream LLMs
    understand document structure:
    - Tables are marked with **[TABLE]** prefix
    - Header zone content (top 15%) is bolded
    - Footer zone content (bottom 15%) is separated with horizontal rule
    - Titles are formatted as # headers
    - Figures/charts are marked with *[Figure: ...]*
    """
    lines = []
    image_height = result.get("image_size", [0, 1000])[1]

    for block in result.get("blocks", []):
        block_type = block.get("type", "text").lower()
        content = block.get("content", "").strip()
        bbox = block.get("bbox", [])

        if not content:
            continue

        # Determine position zone (top 15%, middle, bottom 15%)
        y_pos = bbox[1] if bbox and len(bbox) > 1 else 0
        y_end = bbox[3] if bbox and len(bbox) > 3 else y_pos
        is_header_zone = y_pos < image_height * 0.15
        is_footer_zone = y_end > image_height * 0.85

        # Format based on type and position
        if "table" in block_type:
            lines.append(f"\n**[TABLE]**\n{content}\n")
        elif "title" in block_type:
            lines.append(f"# {content}")
        elif "formula" in block_type or "math" in block_type:
            lines.append(f"\n$$\n{content}\n$$\n")
        elif "figure" in block_type or "chart" in block_type:
            lines.append(f"*[Figure: {content}]*")
        elif is_header_zone:
            lines.append(f"**{content}**")
        elif is_footer_zone:
            lines.append(f"---\n{content}")
        else:
            lines.append(content)

    return "\n\n".join(lines)


def parse_markdown_table(content: str) -> str:
    """Convert table content to HTML table.

    Handles:
    - PaddleOCR-VL format: <fcel>cell<lcel>cell<nl> (detected by <fcel> tags)
    - Pipe-delimited tables: | Header | Header |
    - Separator rows: |---|---|
    - Returns HTML <table> structure
    """
    content_stripped = content.strip()

    # Check for PaddleOCR-VL table format (<fcel>, <lcel>, <ecel>, <nl>)
    if '<fcel>' in content_stripped or '<nl>' in content_stripped:
        return parse_paddleocr_table(content_stripped)

    lines = content_stripped.split('\n')
    if not lines:
        return f'<pre>{content}</pre>'

    # Check if it looks like a markdown table
    if not any('|' in line for line in lines):
        return f'<pre>{content}</pre>'

    html_rows = []
    is_header = True

    for line in lines:
        line = line.strip()
        if not line or line.startswith('|') == False and '|' not in line:
            continue

        # Skip separator rows (|---|---|)
        if re.match(r'^[\|\s\-:]+$', line):
            is_header = False
            continue

        # Parse cells
        cells = [c.strip() for c in line.split('|')]
        cells = [c for c in cells if c]  # Remove empty from edges

        if is_header:
            row = '<tr>' + ''.join(f'<th>{c}</th>' for c in cells) + '</tr>'
            html_rows.append(f'<thead>{row}</thead>')
            is_header = False
        else:
            row = '<tr>' + ''.join(f'<td>{c}</td>' for c in cells) + '</tr>'
            html_rows.append(row)

    if html_rows:
        # Wrap body rows in tbody
        header = html_rows[0] if '<thead>' in html_rows[0] else ''
        body_rows = [r for r in html_rows if '<thead>' not in r]
        body = f'<tbody>{"".join(body_rows)}</tbody>' if body_rows else ''
        return f'<table>{header}{body}</table>'

    return f'<pre>{content}</pre>'


def parse_paddleocr_table(content: str) -> str:
    """Convert PaddleOCR-VL table format to HTML table.

    PaddleOCR-VL uses:
    - <fcel> = first cell in a row
    - <lcel> = subsequent cells
    - <ecel> = empty cell
    - <nl> = row separator (newline)

    Example input:
    <fcel>Header1<lcel>Header2<nl><fcel>Value1<lcel>Value2<nl>
    """
    # Split into rows by <nl>
    rows_raw = re.split(r'<nl>', content)
    html_rows = []
    is_first_row = True

    for row_content in rows_raw:
        row_content = row_content.strip()
        if not row_content:
            continue

        # Extract cells: split by <fcel>, <lcel>, or <ecel>
        # Each cell is the text between these markers
        cells = []

        # Pattern to match cell markers and capture content
        # Content is everything between markers
        parts = re.split(r'<fcel>|<lcel>|<ecel>', row_content)
        for part in parts:
            part = part.strip()
            if part:
                cells.append(part)

        if not cells:
            continue

        # First row is header
        if is_first_row:
            row_html = '<tr>' + ''.join(f'<th>{c}</th>' for c in cells) + '</tr>'
            html_rows.append(f'<thead>{row_html}</thead>')
            is_first_row = False
        else:
            row_html = '<tr>' + ''.join(f'<td>{c}</td>' for c in cells) + '</tr>'
            html_rows.append(row_html)

    if html_rows:
        header = html_rows[0] if '<thead>' in html_rows[0] else ''
        body_rows = [r for r in html_rows if '<thead>' not in r]
        body = f'<tbody>{"".join(body_rows)}</tbody>' if body_rows else ''
        return f'<table>{header}{body}</table>'

    return f'<pre>{content}</pre>'


def result_to_html(result: dict) -> str:
    """Convert result to semantic HTML for optimal LLM processing.

    Uses semantic HTML5 tags with position metadata as data-* attributes.
    Markdown tables are converted to proper HTML <table> tags for
    unambiguous parsing by downstream LLMs.
    """
    parts = []
    image_height = result.get("image_size", [0, 1000])[1]

    parts.append('<!DOCTYPE html><html><body>')

    for block in result.get("blocks", []):
        block_type = block.get("type", "text").lower()
        content = block.get("content", "").strip()
        bbox = block.get("bbox", [])

        if not content:
            continue

        # Position metadata
        y_pos = bbox[1] / image_height if bbox and len(bbox) > 1 else 0
        data_attrs = f'data-type="{block_type}" data-y="{y_pos:.2f}"'

        # Format based on type
        if "table" in block_type:
            table_html = parse_markdown_table(content)
            parts.append(f'<section {data_attrs} class="table-region">{table_html}</section>')
        elif "title" in block_type:
            parts.append(f'<h1 {data_attrs}>{content}</h1>')
        elif "formula" in block_type or "math" in block_type:
            parts.append(f'<div {data_attrs} class="formula"><code>{content}</code></div>')
        elif "figure" in block_type or "chart" in block_type:
            parts.append(f'<figure {data_attrs}><figcaption>{content}</figcaption></figure>')
        elif y_pos < 0.15:
            parts.append(f'<header {data_attrs}><strong>{content}</strong></header>')
        elif y_pos > 0.85:
            parts.append(f'<footer {data_attrs}>{content}</footer>')
        else:
            parts.append(f'<p {data_attrs}>{content}</p>')

    parts.append('</body></html>')
    return '\n'.join(parts)


# Request/Response models
class ParseRequest(BaseModel):
    image: str  # base64 encoded image
    output_format: Optional[str] = "json"


class ParseResponse(BaseModel):
    success: bool
    format: str
    result: Union[dict, str]
    processing_time: float
    error: Optional[str] = None


def decode_image(image_source: str) -> Image.Image:
    """Decode image from base64 or data URL"""
    if image_source.startswith("data:"):
        header, data = image_source.split(",", 1)
        image_data = base64.b64decode(data)
    else:
        image_data = base64.b64decode(image_source)

    return Image.open(io.BytesIO(image_data)).convert("RGB")


@app.on_event("startup")
async def startup_event():
    """Pre-load models on startup"""
    logger.info("Starting PaddleOCR-VL Full Pipeline Server...")
    try:
        load_vl_model()
        load_layout_model()
        logger.info("Models loaded successfully")
    except Exception as e:
        logger.error(f"Failed to pre-load models: {e}")


@app.get("/health")
async def health_check():
    """Health check endpoint"""
    return {
        "status": "healthy" if vl_model is not None else "loading",
        "service": "PaddleOCR-VL Full Pipeline (Transformers)",
        "device": DEVICE,
        "vl_model_loaded": vl_model is not None,
        "layout_model_loaded": layout_model is not None
    }


@app.get("/formats")
async def supported_formats():
    """List supported output formats"""
    return {
        "output_formats": ["json", "markdown", "html"],
        "image_formats": ["PNG", "JPEG", "WebP", "BMP", "GIF", "TIFF"],
        "capabilities": [
            "Layout detection (PP-DocLayoutV2)",
            "Text recognition (OCR)",
            "Table recognition",
            "Formula recognition (LaTeX)",
            "Chart recognition",
            "Multi-language support (109 languages)"
        ]
    }


@app.post("/parse", response_model=ParseResponse)
async def parse_document_endpoint(request: ParseRequest):
    """Parse a document image and return structured output"""
    try:
        start_time = time.time()

        image = decode_image(request.image)
        result = process_document(image)

        if request.output_format == "markdown":
            markdown = result_to_markdown(result)
            output = {"markdown": markdown}
        elif request.output_format == "html":
            html = result_to_html(result)
            output = {"html": html}
        else:
            output = result

        elapsed = time.time() - start_time
        logger.info(f"Processing complete in {elapsed:.2f}s")

        return ParseResponse(
            success=True,
            format=request.output_format,
            result=output,
            processing_time=elapsed
        )

    except Exception as e:
        logger.error(f"Error processing document: {e}", exc_info=True)
        return ParseResponse(
            success=False,
            format=request.output_format,
            result={},
            processing_time=0,
            error=str(e)
        )


@app.post("/v1/chat/completions")
async def chat_completions(request: dict):
    """OpenAI-compatible chat completions endpoint"""
    try:
        messages = request.get("messages", [])
        output_format = request.get("output_format", "json")

        # Find user message with image
        image = None
        for msg in reversed(messages):
            if msg.get("role") == "user":
                content = msg.get("content", [])
                if isinstance(content, list):
                    for item in content:
                        if item.get("type") == "image_url":
                            url = item.get("image_url", {}).get("url", "")
                            image = decode_image(url)
                            break
                break

        if image is None:
            raise HTTPException(status_code=400, detail="No image provided")

        start_time = time.time()
        result = process_document(image)

        if output_format == "markdown":
            content = result_to_markdown(result)
        elif output_format == "html":
            content = result_to_html(result)
        else:
            content = json.dumps(result, ensure_ascii=False, indent=2)

        elapsed = time.time() - start_time

        return {
            "id": f"chatcmpl-{int(time.time()*1000)}",
            "object": "chat.completion",
            "created": int(time.time()),
            "model": "paddleocr-vl-full",
            "choices": [{
                "index": 0,
                "message": {"role": "assistant", "content": content},
                "finish_reason": "stop"
            }],
            "usage": {
                "prompt_tokens": 100,
                "completion_tokens": len(content) // 4,
                "total_tokens": 100 + len(content) // 4
            },
            "processing_time": elapsed
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error in chat completions: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))


if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host=SERVER_HOST, port=SERVER_PORT)
```
image_support_files/paddleocr_vl_server.py (file deleted, 465 lines)

@@ -1,465 +0,0 @@

```python
#!/usr/bin/env python3
"""
PaddleOCR-VL FastAPI Server (CPU variant)
Provides OpenAI-compatible REST API for document parsing using PaddleOCR-VL
"""

import os
import io
import base64
import logging
import time
from typing import Optional, List, Any, Dict, Union

from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse
from pydantic import BaseModel
import torch
from PIL import Image

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Environment configuration
SERVER_HOST = os.environ.get('SERVER_HOST', '0.0.0.0')
SERVER_PORT = int(os.environ.get('SERVER_PORT', '8000'))
MODEL_NAME = os.environ.get('MODEL_NAME', 'PaddlePaddle/PaddleOCR-VL')

# Device configuration
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
logger.info(f"Using device: {DEVICE}")

# Task prompts for PaddleOCR-VL
TASK_PROMPTS = {
    "ocr": "OCR:",
    "table": "Table Recognition:",
    "formula": "Formula Recognition:",
    "chart": "Chart Recognition:",
}

# Initialize FastAPI app
app = FastAPI(
    title="PaddleOCR-VL Server",
    description="OpenAI-compatible REST API for document parsing using PaddleOCR-VL",
    version="1.0.0"
)

# Global model instances
model = None
processor = None


# Request/Response models (OpenAI-compatible)
class ImageUrl(BaseModel):
    url: str


class ContentItem(BaseModel):
    type: str
    text: Optional[str] = None
    image_url: Optional[ImageUrl] = None


class Message(BaseModel):
    role: str
    content: Union[str, List[ContentItem]]


class ChatCompletionRequest(BaseModel):
    model: str = "paddleocr-vl"
    messages: List[Message]
    temperature: Optional[float] = 0.0
    max_tokens: Optional[int] = 4096


class Choice(BaseModel):
    index: int
    message: Message
    finish_reason: str


class Usage(BaseModel):
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int


class ChatCompletionResponse(BaseModel):
    id: str
    object: str = "chat.completion"
    created: int
    model: str
    choices: List[Choice]
    usage: Usage


class HealthResponse(BaseModel):
    status: str
    model: str
    device: str


def load_model():
    """Load the PaddleOCR-VL model and processor"""
    global model, processor

    if model is not None:
        return

    logger.info(f"Loading PaddleOCR-VL model: {MODEL_NAME}")

    from transformers import AutoModelForCausalLM, AutoProcessor

    # Load processor
    processor = AutoProcessor.from_pretrained(MODEL_NAME, trust_remote_code=True)

    # Load model with appropriate settings for CPU/GPU
    if DEVICE == "cuda":
        model = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            trust_remote_code=True,
            torch_dtype=torch.bfloat16,
        ).to(DEVICE).eval()
    else:
        # CPU mode - use float32 for compatibility
        model = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            trust_remote_code=True,
            torch_dtype=torch.float32,
            low_cpu_mem_usage=True,
        ).eval()

    logger.info("PaddleOCR-VL model loaded successfully")


def optimize_image_resolution(image: Image.Image, max_size: int = 2048, min_size: int = 1080) -> Image.Image:
    """
    Optimize image resolution for PaddleOCR-VL.

    Best results are achieved with images in the 1080p-2K range.
    - Images larger than max_size are scaled down
    - Very small images are scaled up to min_size
    """
    width, height = image.size
    max_dim = max(width, height)
    min_dim = min(width, height)

    # Scale down if too large (4K+ images often miss text)
    if max_dim > max_size:
        scale = max_size / max_dim
        new_width = int(width * scale)
        new_height = int(height * scale)
        logger.info(f"Scaling down image from {width}x{height} to {new_width}x{new_height}")
        image = image.resize((new_width, new_height), Image.Resampling.LANCZOS)
    # Scale up if too small
    elif max_dim < min_size and min_dim < min_size:
        scale = min_size / max_dim
        new_width = int(width * scale)
        new_height = int(height * scale)
        logger.info(f"Scaling up image from {width}x{height} to {new_width}x{new_height}")
        image = image.resize((new_width, new_height), Image.Resampling.LANCZOS)
    else:
        logger.info(f"Image size {width}x{height} is optimal, no scaling needed")

    return image


def decode_image(image_source: str, optimize: bool = True) -> Image.Image:
    """
    Decode image from various sources.

    Supported formats:
    - Base64 data URL: data:image/png;base64,... or data:image/jpeg;base64,...
    - HTTP/HTTPS URL: https://example.com/image.png
    - Raw base64 string
    - Local file path

    Supported image types: PNG, JPEG, WebP, BMP, GIF, TIFF
    """
    image = None

    if image_source.startswith("data:"):
        # Base64 encoded image with MIME type header
        # Supports: data:image/png;base64,... data:image/jpeg;base64,... etc.
        header, data = image_source.split(",", 1)
        image_data = base64.b64decode(data)
        image = Image.open(io.BytesIO(image_data)).convert("RGB")
        logger.debug(f"Decoded base64 image with header: {header}")
    elif image_source.startswith("http://") or image_source.startswith("https://"):
        # URL - fetch image
        import httpx
        response = httpx.get(image_source, timeout=30.0)
        response.raise_for_status()
        image = Image.open(io.BytesIO(response.content)).convert("RGB")
        logger.debug(f"Fetched image from URL: {image_source[:50]}...")
    else:
        # Assume it's a file path or raw base64
        try:
            image_data = base64.b64decode(image_source)
            image = Image.open(io.BytesIO(image_data)).convert("RGB")
            logger.debug("Decoded raw base64 image")
        except:
            # Try as file path
            image = Image.open(image_source).convert("RGB")
            logger.debug(f"Loaded image from file: {image_source}")

    # Optimize resolution for best OCR results
    if optimize:
        image = optimize_image_resolution(image)

    return image


def extract_image_and_text(content: Union[str, List[ContentItem]]) -> tuple:
    """Extract image and text prompt from message content"""
    if isinstance(content, str):
        return None, content

    image = None
    text = ""

    for item in content:
        if item.type == "image_url" and item.image_url:
            image = decode_image(item.image_url.url)
        elif item.type == "text" and item.text:
            text = item.text

    return image, text


def generate_response(image: Image.Image, prompt: str, max_tokens: int = 4096) -> str:
    """Generate response using PaddleOCR-VL"""
    load_model()

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image},
                {"type": "text", "text": prompt},
            ]
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )

    if DEVICE == "cuda":
        inputs = {k: v.to(DEVICE) for k, v in inputs.items()}

    with torch.inference_mode():
        outputs = model.generate(
            **inputs,
            max_new_tokens=max_tokens,
            do_sample=False,
            use_cache=True
        )

    response = processor.batch_decode(outputs, skip_special_tokens=True)[0]

    # Extract the assistant's response (after the prompt)
    if "assistant" in response.lower():
        parts = response.split("assistant")
        if len(parts) > 1:
            response = parts[-1].strip()

    return response


@app.on_event("startup")
async def startup_event():
    """Pre-load the model on startup"""
    logger.info("Pre-loading PaddleOCR-VL model...")
    try:
        load_model()
        logger.info("Model pre-loaded successfully")
    except Exception as e:
        logger.error(f"Failed to pre-load model: {e}")
        # Don't fail startup - model will be loaded on first request


@app.get("/health", response_model=HealthResponse)
async def health_check():
    """Health check endpoint"""
    return HealthResponse(
        status="healthy" if model is not None else "loading",
        model=MODEL_NAME,
        device=DEVICE
    )


@app.get("/formats")
async def supported_formats():
    """List supported image formats and input methods"""
    return {
        "image_formats": {
            "supported": ["PNG", "JPEG", "WebP", "BMP", "GIF", "TIFF"],
            "recommended": ["PNG", "JPEG"],
            "mime_types": [
                "image/png",
                "image/jpeg",
                "image/webp",
                "image/bmp",
                "image/gif",
                "image/tiff"
            ]
        },
        "input_methods": {
            "base64_data_url": {
                "description": "Base64 encoded image with MIME type header",
                "example": "data:image/png;base64,iVBORw0KGgo..."
            },
            "http_url": {
                "description": "Direct HTTP/HTTPS URL to image",
                "example": "https://example.com/image.png"
            },
            "raw_base64": {
                "description": "Raw base64 string without header",
                "example": "iVBORw0KGgo..."
            }
        },
        "resolution": {
            "optimal_range": "1080p to 2K (1080-2048 pixels on longest side)",
            "auto_scaling": True,
            "note": "Images are automatically scaled to optimal range. 4K+ images are scaled down for better accuracy."
        },
        "task_prompts": TASK_PROMPTS
    }


@app.get("/v1/models")
async def list_models():
    """List available models (OpenAI-compatible)"""
    return {
        "object": "list",
        "data": [
            {
                "id": "paddleocr-vl",
                "object": "model",
                "created": int(time.time()),
                "owned_by": "paddlepaddle"
            }
        ]
    }


@app.post("/v1/chat/completions", response_model=ChatCompletionResponse)
async def chat_completions(request: ChatCompletionRequest):
    """
    OpenAI-compatible chat completions endpoint for PaddleOCR-VL

    Supports tasks:
    - "OCR:" - Text recognition
    - "Table Recognition:" - Table extraction
    - "Formula Recognition:" - Formula extraction
    - "Chart Recognition:" - Chart extraction
    """
    try:
        # Get the last user message
        user_message = None
        for msg in reversed(request.messages):
            if msg.role == "user":
                user_message = msg
                break

        if not user_message:
            raise HTTPException(status_code=400, detail="No user message found")

        # Extract image and prompt
        image, prompt = extract_image_and_text(user_message.content)

        if image is None:
            raise HTTPException(status_code=400, detail="No image provided in message")

        # Default to OCR if no specific prompt
```
if not prompt or prompt.strip() == "":
|
||||
prompt = "OCR:"
|
||||
|
||||
logger.info(f"Processing request with prompt: {prompt[:50]}...")
|
||||
|
||||
# Generate response
|
||||
start_time = time.time()
|
||||
response_text = generate_response(image, prompt, request.max_tokens or 4096)
|
||||
elapsed = time.time() - start_time
|
||||
|
||||
logger.info(f"Generated response in {elapsed:.2f}s ({len(response_text)} chars)")
|
||||
|
||||
# Build OpenAI-compatible response
|
||||
return ChatCompletionResponse(
|
||||
id=f"chatcmpl-{int(time.time()*1000)}",
|
||||
created=int(time.time()),
|
||||
model=request.model,
|
||||
choices=[
|
||||
Choice(
|
||||
index=0,
|
||||
message=Message(role="assistant", content=response_text),
|
||||
finish_reason="stop"
|
||||
)
|
||||
],
|
||||
usage=Usage(
|
||||
prompt_tokens=100, # Approximate
|
||||
completion_tokens=len(response_text) // 4,
|
||||
total_tokens=100 + len(response_text) // 4
|
||||
)
|
||||
)
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing request: {e}")
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
# Legacy endpoint for compatibility with old PaddleOCR API
|
||||
class LegacyOCRRequest(BaseModel):
|
||||
image: str
|
||||
task: Optional[str] = "ocr"
|
||||
|
||||
|
||||
class LegacyOCRResponse(BaseModel):
|
||||
success: bool
|
||||
result: str
|
||||
task: str
|
||||
error: Optional[str] = None
|
||||
|
||||
|
||||
@app.post("/ocr", response_model=LegacyOCRResponse)
|
||||
async def legacy_ocr(request: LegacyOCRRequest):
|
||||
"""
|
||||
Legacy OCR endpoint for backwards compatibility
|
||||
|
||||
Tasks: ocr, table, formula, chart
|
||||
"""
|
||||
try:
|
||||
image = decode_image(request.image)
|
||||
prompt = TASK_PROMPTS.get(request.task, TASK_PROMPTS["ocr"])
|
||||
|
||||
result = generate_response(image, prompt)
|
||||
|
||||
return LegacyOCRResponse(
|
||||
success=True,
|
||||
result=result,
|
||||
task=request.task
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Legacy OCR error: {e}")
|
||||
return LegacyOCRResponse(
|
||||
success=False,
|
||||
result="",
|
||||
task=request.task,
|
||||
error=str(e)
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import uvicorn
|
||||
uvicorn.run(app, host=SERVER_HOST, port=SERVER_PORT)
|
||||
@@ -1,6 +1,6 @@
{
  "name": "@host.today/ht-docker-ai",
  "version": "1.8.0",
  "version": "1.15.0",
  "type": "module",
  "private": false,
  "description": "Docker images for AI vision-language models including MiniCPM-V 4.5",
@@ -13,8 +13,10 @@
    "test": "tstest test/ --verbose"
  },
  "devDependencies": {
    "@git.zone/tsrun": "^1.3.3",
    "@git.zone/tstest": "^1.0.90"
    "@git.zone/tsrun": "^2.0.1",
    "@git.zone/tstest": "^3.1.5",
    "@push.rocks/smartagent": "^1.3.0",
    "@push.rocks/smartai": "^0.12.0"
  },
  "repository": {
    "type": "git",

2013
pnpm-lock.yaml
generated
File diff suppressed because it is too large
209
readme.hints.md
@@ -2,12 +2,18 @@

## Architecture

This project uses **Ollama** as the runtime framework for serving AI models. This provides:
This project uses **Ollama** and **vLLM** as runtime frameworks for serving AI models:

### Ollama-based Images (MiniCPM-V, Qwen3-VL)
- Automatic model download and caching
- Unified REST API (compatible with OpenAI format)
- Built-in quantization support
- GPU/CPU auto-detection
- GPU auto-detection

### vLLM-based Images (Nanonets-OCR)
- High-performance inference server
- OpenAI-compatible API
- Optimized for VLM workloads

## Model Details

@@ -24,18 +30,24 @@ This project uses **Ollama** as the runtime framework for serving AI models. This provides:
|------|---------------|
| Full precision (bf16) | 18GB |
| int4 quantized | 9GB |
| GGUF (CPU) | 8GB RAM |

## Container Startup Flow

### Ollama-based containers
1. `docker-entrypoint.sh` starts Ollama server in background
2. Waits for server to be ready
3. Checks if model already exists in volume
4. Pulls model if not present
5. Keeps container running

### vLLM-based containers
1. vLLM server starts with model auto-download
2. Health check endpoint available at `/health`
3. OpenAI-compatible API at `/v1/chat/completions`

## Volume Persistence

### Ollama volumes
Mount `/root/.ollama` to persist downloaded models:

```bash
@@ -44,9 +56,16 @@ Mount `/root/.ollama` to persist downloaded models:

Without this volume, the model will be re-downloaded on each container start (~5GB download).

### vLLM/HuggingFace volumes
Mount `/root/.cache/huggingface` for model caching:

```bash
-v hf-cache:/root/.cache/huggingface
```

## API Endpoints

All endpoints follow the Ollama API specification:
### Ollama API (MiniCPM-V, Qwen3-VL)

| Endpoint | Method | Description |
|----------|--------|-------------|
@@ -56,113 +75,137 @@ All endpoints follow the Ollama API specification:
| `/api/pull` | POST | Pull a model |
| `/api/show` | POST | Show model info |

## GPU Detection
### vLLM API (Nanonets-OCR)

The GPU variant uses Ollama's automatic GPU detection. For CPU-only mode, we set:

```dockerfile
ENV CUDA_VISIBLE_DEVICES=""
```

This forces Ollama to use CPU inference even if GPU is available.
| Endpoint | Method | Description |
|----------|--------|-------------|
| `/health` | GET | Health check |
| `/v1/models` | GET | List available models |
| `/v1/chat/completions` | POST | OpenAI-compatible chat completions |

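Because vLLM loads the model before `/health` starts answering (which can take minutes on first start), client code typically polls the endpoint before sending requests. A minimal TypeScript sketch, assuming the default port; the helper name and timings are illustrative, not part of the repo:

```typescript
// Poll /health until the vLLM server is reachable and healthy (names illustrative).
async function waitForVllm(baseUrl = 'http://localhost:8000', timeoutMs = 300_000): Promise<void> {
  const deadline = Date.now() + timeoutMs;
  while (Date.now() < deadline) {
    try {
      const res = await fetch(`${baseUrl}/health`, { signal: AbortSignal.timeout(5_000) });
      if (res.ok) return; // Server is up and the model is loaded
    } catch {
      // Not reachable yet; keep polling
    }
    await new Promise((resolve) => setTimeout(resolve, 2_000));
  }
  throw new Error('vLLM did not become healthy in time');
}
```
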
## Health Checks

Both variants include Docker health checks:
All containers include Docker health checks:

```dockerfile
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
    CMD curl -f http://localhost:11434/api/tags || exit 1
```

CPU variant has longer `start-period` (120s) due to slower startup.
---

## PaddleOCR-VL (Recommended)
## Nanonets-OCR-s

### Overview

PaddleOCR-VL is a 0.9B parameter Vision-Language Model specifically optimized for document parsing. It replaces the older PP-Structure approach with native VLM understanding.
Nanonets-OCR-s is a Qwen2.5-VL-3B model fine-tuned specifically for document OCR tasks. It outputs structured markdown with semantic tags.

**Key advantages over PP-Structure:**
- Native table understanding (no HTML parsing needed)
- 109 language support
- Better handling of complex multi-row tables
- Structured Markdown/JSON output
**Key features:**
- Based on Qwen2.5-VL-3B (~4B parameters)
- Fine-tuned for document OCR
- Outputs markdown with semantic HTML tags
- ~10GB VRAM

### Docker Images

| Tag | Description |
|-----|-------------|
| `paddleocr-vl` | GPU variant using vLLM (recommended) |
| `paddleocr-vl-cpu` | CPU variant using transformers |
| `nanonets-ocr` | GPU variant using vLLM (OpenAI-compatible API) |

### API Endpoints (OpenAI-compatible)
### API Endpoints (OpenAI-compatible via vLLM)

| Endpoint | Method | Description |
|----------|--------|-------------|
| `/health` | GET | Health check with model info |
| `/health` | GET | Health check |
| `/v1/models` | GET | List available models |
| `/v1/chat/completions` | POST | OpenAI-compatible chat completions |
| `/ocr` | POST | Legacy OCR endpoint |

### Request/Response Format

**POST /v1/chat/completions (OpenAI-compatible)**
```json
{
  "model": "paddleocr-vl",
  "model": "nanonets/Nanonets-OCR-s",
  "messages": [
    {
      "role": "user",
      "content": [
        {"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}},
        {"type": "text", "text": "Table Recognition:"}
        {"type": "text", "text": "Extract the text from the above document..."}
      ]
    }
  ],
  "temperature": 0.0,
  "max_tokens": 8192
  "max_tokens": 4096
}
```

**Task Prompts:**
- `"OCR:"` - Text recognition
- `"Table Recognition:"` - Table extraction (returns markdown)
- `"Formula Recognition:"` - Formula extraction
- `"Chart Recognition:"` - Chart extraction
### Nanonets OCR Prompt

**Response**
```json
{
  "id": "chatcmpl-...",
  "object": "chat.completion",
  "choices": [
    {
      "index": 0,
      "message": {
        "role": "assistant",
        "content": "| Date | Description | Amount |\n|---|---|---|\n| 2021-06-01 | GITLAB INC | -119.96 |"
      },
      "finish_reason": "stop"
    }
  ]
}
```

The model is designed to work with a specific prompt format:
```
Extract the text from the above document as if you were reading it naturally.
Return the tables in html format.
Return the equations in LaTeX representation.
If there is an image in the document and image caption is not present, add a small description inside <img></img> tag.
Watermarks should be wrapped in brackets. Ex: <watermark>OFFICIAL COPY</watermark>.
Page numbers should be wrapped in brackets. Ex: <page_number>14</page_number>.
```

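Since these wrappers are plain tags in the returned markdown, downstream code can strip or route them with simple matching. A minimal TypeScript sketch (the helper name is illustrative, not part of the repo):

```typescript
// Remove watermark and page-number wrappers from Nanonets output
// before handing the markdown to downstream extraction (name illustrative).
function stripSemanticTags(markdown: string): string {
  return markdown
    .replace(/<watermark>.*?<\/watermark>/gs, '')
    .replace(/<page_number>.*?<\/page_number>/gs, '')
    .trim();
}
```
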
### Environment Variables

| Variable | Default | Description |
|----------|---------|-------------|
| `MODEL_NAME` | `PaddlePaddle/PaddleOCR-VL` | Model to load |
| `HOST` | `0.0.0.0` | Server host |
| `PORT` | `8000` | Server port |
| `MAX_BATCHED_TOKENS` | `16384` | vLLM max batch tokens |
| `GPU_MEMORY_UTILIZATION` | `0.9` | GPU memory usage (0-1) |

### Performance

- **GPU (vLLM)**: ~2-5 seconds per page
- **CPU**: ~30-60 seconds per page
- **GPU (vLLM)**: ~3-8 seconds per page
- **VRAM usage**: ~10GB

### Two-Stage Pipeline (Nanonets + Qwen3)

The Nanonets tests use a two-stage pipeline:
1. **Stage 1**: Nanonets-OCR-s converts images to markdown (via vLLM on port 8000)
2. **Stage 2**: Qwen3 8B extracts structured JSON from markdown (via Ollama on port 11434)

**GPU Limitation**: Both vLLM and Ollama require significant GPU memory. On a single GPU system:
- Running both simultaneously causes memory contention
- For single GPU: Run services sequentially (stop Nanonets before Qwen3)
- For multi-GPU: Assign each service to a different GPU

**Sequential Execution**:
```bash
# Step 1: Run Nanonets OCR (converts to markdown)
docker start nanonets-test
# ... perform OCR ...
docker stop nanonets-test

# Step 2: Run Qwen3 extraction (from markdown)
docker start minicpm-test
# ... extract JSON ...
```

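A minimal TypeScript sketch of the two call stages, assuming both services are reachable on their default ports (function names, prompts, and the exact model tags are illustrative):

```typescript
// Stage 1: image -> markdown via Nanonets (vLLM, OpenAI-compatible endpoint)
async function ocrToMarkdown(imageBase64: string): Promise<string> {
  const res = await fetch('http://localhost:8000/v1/chat/completions', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: 'nanonets/Nanonets-OCR-s',
      messages: [{
        role: 'user',
        content: [
          { type: 'image_url', image_url: { url: `data:image/png;base64,${imageBase64}` } },
          { type: 'text', text: 'Extract the text from the above document as if you were reading it naturally.' },
        ],
      }],
      temperature: 0.0,
      max_tokens: 4096,
    }),
  });
  const data = await res.json();
  return data.choices?.[0]?.message?.content ?? '';
}

// Stage 2: markdown -> structured JSON via Qwen3 (Ollama, non-streaming)
async function markdownToJson(markdown: string): Promise<unknown> {
  const res = await fetch('http://localhost:11434/api/generate', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: 'qwen3:8b',
      prompt: `Extract all transactions from this table as a JSON array:\n${markdown}`,
      stream: false,
      format: 'json',
    }),
  });
  const data = await res.json();
  return JSON.parse(data.response);
}
```
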
---

## Multi-Pass Extraction Strategy

The bank statement extraction uses a dual-VLM consensus approach:

### Architecture: Dual-VLM Consensus

| VLM | Model | Purpose |
|-----|-------|---------|
| **MiniCPM-V 4.5** | 8B params | Primary visual extraction |
| **Nanonets-OCR-s** | ~4B params | Document OCR with semantic output |

### Extraction Strategy

1. **Pass 1**: MiniCPM-V visual extraction (images → JSON)
2. **Pass 2**: Nanonets-OCR semantic extraction (images → markdown → JSON)
3. **Consensus**: If Pass 1 == Pass 2 → Done (fast path)
4. **Pass 3+**: MiniCPM-V visual if no consensus

### Why Dual-VLM Works

- **Different architectures**: Two independent models cross-check each other
- **Specialized strengths**: Nanonets-OCR-s optimized for document structure, MiniCPM-V for general vision
- **No structure loss**: Both VLMs see the original images directly
- **Fast consensus**: Most documents complete in 2 passes when VLMs agree

---

@@ -170,7 +213,7 @@ PaddleOCR-VL is a 0.9B parameter Vision-Language Model specifically optimized for document parsing.

To add a new model variant:

1. Create `Dockerfile_<modelname>`
1. Create `Dockerfile_<modelname>_<runtime>_<hardware>_VRAM<size>`
2. Set `MODEL_NAME` environment variable
3. Update `build-images.sh` with new build target
4. Add documentation to `readme.md`
@@ -188,8 +231,8 @@ The model download is ~5GB and may take several minutes.

### Out of memory

- GPU: Use int4 quantized version or add more VRAM
- CPU: Increase container memory limit: `--memory=16g`
- GPU: Use a lighter model variant or upgrade VRAM
- Add more GPU memory: Consider multi-GPU setup

### API not responding

@@ -207,41 +250,6 @@ npmci docker build
npmci docker push code.foss.global
```

## Multi-Pass Extraction Strategy

The bank statement extraction uses a dual-VLM consensus approach:

### Architecture: Dual-VLM Consensus

| VLM | Model | Purpose |
|-----|-------|---------|
| **MiniCPM-V 4.5** | 8B params | Primary visual extraction |
| **PaddleOCR-VL** | 0.9B params | Table-specialized extraction |

### Extraction Strategy

1. **Pass 1**: MiniCPM-V visual extraction (images → JSON)
2. **Pass 2**: PaddleOCR-VL table recognition (images → markdown → JSON)
3. **Consensus**: If Pass 1 == Pass 2 → Done (fast path)
4. **Pass 3+**: MiniCPM-V visual if no consensus

### Why Dual-VLM Works

- **Different architectures**: Two independent models cross-check each other
- **Specialized strengths**: PaddleOCR-VL optimized for tables, MiniCPM-V for general vision
- **No structure loss**: Both VLMs see the original images directly
- **Fast consensus**: Most documents complete in 2 passes when VLMs agree

### Comparison vs Old PP-Structure Approach

| Approach | Bank Statement Result | Issue |
|----------|----------------------|-------|
| MiniCPM-V Visual | 28 transactions ✓ | - |
| PP-Structure HTML + Visual | 13 transactions ✗ | HTML merged rows incorrectly |
| PaddleOCR-VL Table | 28 transactions ✓ | Native table understanding |

**Key insight**: PP-Structure's HTML output loses structure for complex tables. PaddleOCR-VL's native VLM approach maintains table integrity.

---

## Related Resources
@@ -249,3 +257,4 @@ The bank statement extraction uses a dual-VLM consensus approach:

- [Ollama Documentation](https://ollama.ai/docs)
- [MiniCPM-V GitHub](https://github.com/OpenBMB/MiniCPM-V)
- [Ollama API Reference](https://github.com/ollama/ollama/blob/main/docs/api.md)
- [Nanonets-OCR-s on HuggingFace](https://huggingface.co/nanonets/Nanonets-OCR-s)

268
readme.md
@@ -1,40 +1,52 @@

# @host.today/ht-docker-ai 🚀

Production-ready Docker images for state-of-the-art AI Vision-Language Models. Run powerful multimodal AI locally with GPU acceleration or CPU fallback—no cloud API keys required.
Production-ready Docker images for state-of-the-art AI Vision-Language Models. Run powerful multimodal AI locally with GPU acceleration—**no cloud API keys required**.

> 🔥 **Three VLMs, one registry.** From high-performance document OCR to GPT-4o-level vision understanding—pick the right tool for your task.

## Issue Reporting and Security

For reporting bugs, issues, or security vulnerabilities, please visit [community.foss.global/](https://community.foss.global/). This is the central community hub for all issue reporting. Developers who sign and comply with our contribution agreement and go through identification can also get a [code.foss.global/](https://code.foss.global/) account to submit Pull Requests directly.

---

## 🎯 What's Included

| Model | Parameters | Best For | API |
|-------|-----------|----------|-----|
| **MiniCPM-V 4.5** | 8B | General vision understanding, image analysis, multi-image | Ollama-compatible |
| **PaddleOCR-VL** | 0.9B | Document parsing, table extraction, OCR | OpenAI-compatible |
| Model | Parameters | Best For | API | Port | VRAM |
|-------|-----------|----------|-----|------|------|
| **MiniCPM-V 4.5** | 8B | General vision understanding, multi-image analysis | Ollama-compatible | 11434 | ~9GB |
| **Nanonets-OCR2-3B** | ~3B | Document OCR with semantic markdown, LaTeX, flowcharts | OpenAI-compatible | 8000 | ~12-16GB |
| **Qwen3-VL-30B** | 30B (A3B) | Advanced visual agents, code generation from images | Ollama-compatible | 11434 | ~20GB |

## 📦 Available Images
---

## 📦 Quick Reference: All Available Images

```
code.foss.global/host.today/ht-docker-ai:<tag>
```

| Tag | Model | Hardware | Port |
|-----|-------|----------|------|
| `minicpm45v` / `latest` | MiniCPM-V 4.5 | NVIDIA GPU (9-18GB VRAM) | 11434 |
| `minicpm45v-cpu` | MiniCPM-V 4.5 | CPU only (8GB+ RAM) | 11434 |
| `paddleocr-vl` / `paddleocr-vl-gpu` | PaddleOCR-VL | NVIDIA GPU | 8000 |
| `paddleocr-vl-cpu` | PaddleOCR-VL | CPU only | 8000 |
| Tag | Model | Runtime | Port | VRAM |
|-----|-------|---------|------|------|
| `minicpm45v` / `latest` | MiniCPM-V 4.5 | Ollama | 11434 | ~9GB |
| `nanonets-ocr` | Nanonets-OCR2-3B | vLLM | 8000 | ~12-16GB |
| `qwen3vl` | Qwen3-VL-30B-A3B | Ollama | 11434 | ~20GB |

---

## 🖼️ MiniCPM-V 4.5

A GPT-4o level multimodal LLM from OpenBMB—handles image understanding, OCR, multi-image analysis, and visual reasoning across 30+ languages.
A GPT-4o level multimodal LLM from OpenBMB—handles image understanding, OCR, multi-image analysis, and visual reasoning across **30+ languages**.

### ✨ Key Features

- 🌍 **Multilingual:** 30+ languages supported
- 🖼️ **Multi-image:** Analyze multiple images in one request
- 📊 **Versatile:** Charts, documents, photos, diagrams
- ⚡ **Efficient:** Runs on consumer GPUs (9GB VRAM)

### Quick Start

**GPU (Recommended):**
```bash
docker run -d \
  --name minicpm \
@@ -44,15 +56,6 @@ docker run -d \
  code.foss.global/host.today/ht-docker-ai:minicpm45v
```

**CPU Only:**
```bash
docker run -d \
  --name minicpm \
  -p 11434:11434 \
  -v ollama-data:/root/.ollama \
  code.foss.global/host.today/ht-docker-ai:minicpm45v-cpu
```

> 💡 **Pro tip:** Mount the volume to persist downloaded models (~5GB). Without it, models re-download on every container start.

### API Examples
@@ -85,110 +88,131 @@ curl http://localhost:11434/api/chat -d '{

### Hardware Requirements

| Variant | VRAM/RAM | Notes |
|---------|----------|-------|
| GPU (int4 quantized) | 9GB VRAM | Recommended for most use cases |
| GPU (full precision) | 18GB VRAM | Maximum quality |
| CPU (GGUF) | 8GB+ RAM | Slower but accessible |
| Mode | VRAM Required |
|------|---------------|
| int4 quantized | ~9GB |
| Full precision (bf16) | ~18GB |

---

## 📄 PaddleOCR-VL
## 🔍 Nanonets-OCR2-3B

A specialized 0.9B Vision-Language Model optimized for document parsing. Native support for tables, formulas, charts, and text extraction in 109 languages.
The **latest Nanonets document OCR model** (October 2025 release)—based on Qwen2.5-VL-3B, fine-tuned specifically for document extraction with significant improvements over the original OCR-s.

### ✨ Key Features

- 📝 **Semantic output:** Tables → HTML, equations → LaTeX, flowcharts → structured markup
- 🌍 **Multilingual:** Inherits Qwen's broad language support
- 📄 **30K context:** Handle large, multi-page documents
- 🔌 **OpenAI-compatible:** Drop-in replacement for existing pipelines
- 🎯 **Improved accuracy:** Better semantic tagging and LaTeX equation extraction vs. OCR-s

### Quick Start

**GPU:**
```bash
docker run -d \
  --name paddleocr \
  --name nanonets \
  --gpus all \
  -p 8000:8000 \
  -v hf-cache:/root/.cache/huggingface \
  code.foss.global/host.today/ht-docker-ai:paddleocr-vl
  code.foss.global/host.today/ht-docker-ai:nanonets-ocr
```

**CPU:**
```bash
docker run -d \
  --name paddleocr \
  -p 8000:8000 \
  -v hf-cache:/root/.cache/huggingface \
  code.foss.global/host.today/ht-docker-ai:paddleocr-vl-cpu
```

### OpenAI-Compatible API

PaddleOCR-VL exposes a fully OpenAI-compatible `/v1/chat/completions` endpoint:
### API Usage

```bash
curl http://localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "paddleocr-vl",
    "model": "nanonets/Nanonets-OCR2-3B",
    "messages": [{
      "role": "user",
      "content": [
        {"type": "image_url", "image_url": {"url": "data:image/png;base64,<base64>"}},
        {"type": "text", "text": "Table Recognition:"}
        {"type": "text", "text": "Extract the text from the above document as if you were reading it naturally. Return the tables in html format. Return the equations in LaTeX representation."}
      ]
    }],
    "max_tokens": 8192
    "temperature": 0.0,
    "max_tokens": 4096
  }'
```

### Task Prompts
### Output Format

| Prompt | Output | Use Case |
|--------|--------|----------|
| `OCR:` | Plain text | General text extraction |
| `Table Recognition:` | Markdown table | Invoices, bank statements, spreadsheets |
| `Formula Recognition:` | LaTeX | Math equations, scientific notation |
| `Chart Recognition:` | Description | Graphs and visualizations |
Nanonets-OCR2-3B returns markdown with semantic tags:

### API Endpoints
| Element | Output Format |
|---------|---------------|
| Tables | `<table>...</table>` (HTML) |
| Equations | `$...$` (LaTeX) |
| Images | `<img>description</img>` |
| Watermarks | `<watermark>OFFICIAL COPY</watermark>` |
| Page numbers | `<page_number>14</page_number>` |
| Flowcharts | Structured markup |

| Endpoint | Method | Description |
|----------|--------|-------------|
| `/health` | GET | Health check with model/device info |
| `/formats` | GET | Supported image formats and input methods |
| `/v1/models` | GET | List available models |
| `/v1/chat/completions` | POST | OpenAI-compatible chat completions |
| `/ocr` | POST | Legacy OCR endpoint |
### Hardware Requirements

### Image Input Methods
| Config | Value |
|--------|-------|
| VRAM (30K context, default) | ~12-16GB |
| Speed | ~3-8 seconds per page |

PaddleOCR-VL accepts images in multiple formats:
---

```javascript
// Base64 data URL
"data:image/png;base64,iVBORw0KGgo..."
## 🧠 Qwen3-VL-30B-A3B

// HTTP URL
"https://example.com/document.png"
The **most powerful** Qwen vision model—30B parameters with 3B active (MoE architecture). Handles complex visual reasoning, code generation from screenshots, and visual agent capabilities.

// Raw base64
"iVBORw0KGgo..."
```
### ✨ Key Features

- 🚀 **256K context** (expandable to 1M tokens!)
- 🤖 **Visual agent capabilities** — can plan and execute multi-step tasks
- 💻 **Code generation from images** — screenshot → working code
- 🎯 **State-of-the-art** visual reasoning

### Quick Start

```bash
docker run -d \
  --name qwen3vl \
  --gpus all \
  -p 11434:11434 \
  -v ollama-data:/root/.ollama \
  code.foss.global/host.today/ht-docker-ai:qwen3vl
```

**Supported formats:** PNG, JPEG, WebP, BMP, GIF, TIFF
Then pull the model (one-time, ~20GB):
```bash
docker exec qwen3vl ollama pull qwen3-vl:30b-a3b
```

**Optimal resolution:** 1080p–2K. Images are automatically scaled for best results.
### API Usage

### Performance
```bash
curl http://localhost:11434/api/chat -d '{
  "model": "qwen3-vl:30b-a3b",
  "messages": [{
    "role": "user",
    "content": "Analyze this screenshot and write the code to recreate this UI",
    "images": ["<base64-encoded-image>"]
  }]
}'
```

| Mode | Speed per Page |
|------|----------------|
| GPU (CUDA) | 2–5 seconds |
| CPU | 30–60 seconds |
### Hardware Requirements

| Requirement | Value |
|-------------|-------|
| VRAM | ~20GB (Q4_K_M quantization) |
| Context | 256K tokens default |

---

## 🐳 Docker Compose

Run multiple VLMs together for maximum flexibility:

```yaml
version: '3.8'
services:
  # General vision tasks
  minicpm:
@@ -206,9 +230,9 @@ services:
          capabilities: [gpu]
    restart: unless-stopped

  # Document parsing / OCR
  paddleocr:
    image: code.foss.global/host.today/ht-docker-ai:paddleocr-vl
  # Document OCR with semantic output
  nanonets:
    image: code.foss.global/host.today/ht-docker-ai:nanonets-ocr
    ports:
      - "8000:8000"
    volumes:
@@ -231,7 +255,7 @@ volumes:

## ⚙️ Environment Variables

### MiniCPM-V 4.5
### MiniCPM-V 4.5 & Qwen3-VL (Ollama-based)

| Variable | Default | Description |
|----------|---------|-------------|
@@ -239,13 +263,47 @@ volumes:
| `OLLAMA_HOST` | `0.0.0.0` | API bind address |
| `OLLAMA_ORIGINS` | `*` | Allowed CORS origins |

### PaddleOCR-VL
### Nanonets-OCR (vLLM-based)

| Variable | Default | Description |
|----------|---------|-------------|
| `MODEL_NAME` | `PaddlePaddle/PaddleOCR-VL` | HuggingFace model ID |
| `SERVER_HOST` | `0.0.0.0` | API bind address |
| `SERVER_PORT` | `8000` | API port |
| `MODEL_NAME` | `nanonets/Nanonets-OCR2-3B` | HuggingFace model ID |
| `HOST` | `0.0.0.0` | API bind address |
| `PORT` | `8000` | API port |
| `MAX_MODEL_LEN` | `30000` | Maximum sequence length |
| `GPU_MEMORY_UTILIZATION` | `0.9` | GPU memory usage (0-1) |

---

## 🏗️ Architecture Notes

### Dual-VLM Consensus Strategy

For production document extraction, consider using multiple models together:

1. **Pass 1:** MiniCPM-V visual extraction (images → JSON)
2. **Pass 2:** Nanonets-OCR semantic extraction (images → markdown → JSON)
3. **Consensus:** If results match → Done (fast path)
4. **Pass 3+:** Additional visual passes if needed

This dual-VLM approach catches extraction errors that single models miss.

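A minimal TypeScript sketch of the consensus check in steps 3–4, mirroring the hash comparison this repo's tests use (names illustrative; rows are keyed by date and amount):

```typescript
interface Transaction {
  date: string;
  counterparty: string;
  amount: number;
}

// Order-independent fingerprint of one extraction pass.
function fingerprint(rows: Transaction[]): string {
  return rows
    .map((r) => `${r.date}|${r.amount.toFixed(2)}`)
    .sort()
    .join(';');
}

// Two passes reach consensus when their fingerprints match (the fast path).
function passesAgree(a: Transaction[], b: Transaction[]): boolean {
  return fingerprint(a) === fingerprint(b);
}
```
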
### Why Multi-Model Works

- **Different architectures:** Independent models cross-validate each other
- **Specialized strengths:** Nanonets-OCR2-3B excels at document structure; MiniCPM-V handles general vision
- **Native processing:** All VLMs see original images—no intermediate structure loss

### Model Selection Guide

| Task | Recommended Model |
|------|-------------------|
| General image understanding | MiniCPM-V 4.5 |
| Document OCR with structure preservation | Nanonets-OCR2-3B |
| Complex visual reasoning / code generation | Qwen3-VL-30B |
| Multi-image analysis | MiniCPM-V 4.5 |
| Visual agent tasks | Qwen3-VL-30B |
| Large documents (30K+ tokens) | Nanonets-OCR2-3B |

---

@@ -260,42 +318,21 @@ cd ht-docker-ai
./build-images.sh

# Run tests
./test-images.sh
pnpm test
```

---

## 🏗️ Architecture Notes

### Dual-VLM Consensus Strategy

For production document extraction, consider using both models together:

1. **Pass 1:** MiniCPM-V visual extraction (images → JSON)
2. **Pass 2:** PaddleOCR-VL table recognition (images → markdown → JSON)
3. **Consensus:** If results match → Done (fast path)
4. **Pass 3+:** Additional visual passes if needed

This dual-VLM approach catches extraction errors that single models miss.

### Why This Works

- **Different architectures:** Two independent models cross-validate each other
- **Specialized strengths:** PaddleOCR-VL excels at tables; MiniCPM-V handles general vision
- **Native processing:** Both VLMs see original images—no intermediate HTML/structure loss

---

## 🔍 Troubleshooting

### Model download hangs
```bash
docker logs -f <container-name>
```
Model downloads can take several minutes (~5GB for MiniCPM-V).
Model downloads can take several minutes (~5GB for MiniCPM-V, ~20GB for Qwen3-VL).

### Out of memory
- **GPU:** Use the CPU variant or upgrade VRAM
- **GPU:** Use a lighter model variant or upgrade VRAM
- **CPU:** Increase container memory: `--memory=16g`

### API not responding
@@ -315,6 +352,13 @@ sudo nvidia-ctk runtime configure --runtime=docker
sudo systemctl restart docker
```

### GPU Memory Contention (Multi-Model)

When running multiple VLMs on a single GPU:
- vLLM and Ollama both need significant GPU memory
- **Single GPU:** Run services sequentially (stop one before starting another)
- **Multi-GPU:** Assign each service to a different GPU via `CUDA_VISIBLE_DEVICES`

---

## License and Legal Information

@@ -2,11 +2,8 @@ import { execSync } from 'child_process';

// Project container names (only manage these)
const PROJECT_CONTAINERS = [
  'paddleocr-vl-test',
  'paddleocr-vl-gpu-test',
  'paddleocr-vl-cpu-test',
  'paddleocr-vl-full-test',
  'minicpm-test',
  'nanonets-test',
];

// Image configurations
@@ -23,30 +20,6 @@ export interface IImageConfig {
}

export const IMAGES = {
  paddleocrVlGpu: {
    name: 'paddleocr-vl-gpu',
    dockerfile: 'Dockerfile_paddleocr_vl_gpu',
    buildContext: '.',
    containerName: 'paddleocr-vl-test',
    ports: ['8000:8000'],
    volumes: ['ht-huggingface-cache:/root/.cache/huggingface'],
    gpus: true,
    healthEndpoint: 'http://localhost:8000/health',
    healthTimeout: 300000, // 5 minutes for model loading
  } as IImageConfig,

  paddleocrVlCpu: {
    name: 'paddleocr-vl-cpu',
    dockerfile: 'Dockerfile_paddleocr_vl_cpu',
    buildContext: '.',
    containerName: 'paddleocr-vl-test',
    ports: ['8000:8000'],
    volumes: ['ht-huggingface-cache:/root/.cache/huggingface'],
    gpus: false,
    healthEndpoint: 'http://localhost:8000/health',
    healthTimeout: 300000,
  } as IImageConfig,

  minicpm: {
    name: 'minicpm45v',
    dockerfile: 'Dockerfile_minicpm45v_gpu',
@@ -59,20 +32,17 @@ export const IMAGES = {
    healthTimeout: 120000,
  } as IImageConfig,

  // Full PaddleOCR-VL pipeline with PP-DocLayoutV2 + structured JSON output
  paddleocrVlFull: {
    name: 'paddleocr-vl-full',
    dockerfile: 'Dockerfile_paddleocr_vl_full',
  // Nanonets-OCR2-3B - Document OCR optimized VLM (Qwen2.5-VL-3B fine-tuned, Oct 2025)
  nanonetsOcr: {
    name: 'nanonets-ocr',
    dockerfile: 'Dockerfile_nanonets_vllm_gpu_VRAM10GB',
    buildContext: '.',
    containerName: 'paddleocr-vl-full-test',
    containerName: 'nanonets-test',
    ports: ['8000:8000'],
    volumes: [
      'ht-huggingface-cache:/root/.cache/huggingface',
      'ht-paddleocr-cache:/root/.paddleocr',
    ],
    volumes: ['ht-huggingface-cache:/root/.cache/huggingface'],
    gpus: true,
    healthEndpoint: 'http://localhost:8000/health',
    healthTimeout: 600000, // 10 minutes for model loading (vLLM + PP-DocLayoutV2)
    healthTimeout: 300000, // 5 minutes for model loading
  } as IImageConfig,
};

@@ -126,7 +96,7 @@ export function removeContainer(containerName: string): void {
}

/**
 * Stop all project containers that conflict with the required one
 * Stop all project containers that conflict with the required one (port-based)
 */
export function stopConflictingContainers(requiredContainer: string, requiredPort: string): void {
  // Stop project containers using the same port
@@ -144,6 +114,24 @@ export function stopConflictingContainers(requiredContainer: string, requiredPort: string): void {
  }
}

/**
 * Stop all GPU-consuming project containers (for GPU memory management)
 * This ensures GPU memory is freed before starting a new GPU service
 */
export function stopAllGpuContainers(exceptContainer?: string): void {
  for (const container of PROJECT_CONTAINERS) {
    if (container === exceptContainer) continue;

    if (isContainerRunning(container)) {
      console.log(`[Docker] Stopping GPU container: ${container}`);
      exec(`docker stop ${container}`, true);
      // Give the GPU a moment to free memory
    }
  }
  // Brief pause to allow GPU memory to be released
  execSync('sleep 2');
}

/**
 * Build a Docker image
 */
@@ -220,6 +208,11 @@ export async function ensureService(config: IImageConfig): Promise<boolean> {
    buildImage(config);
  }

  // For GPU services, stop ALL other GPU containers to free GPU memory
  if (config.gpus) {
    stopAllGpuContainers(config.containerName);
  }

  // Stop conflicting containers on the same port
  const mainPort = config.ports[0];
  stopConflictingContainers(config.containerName, mainPort);
@@ -240,21 +233,7 @@ export async function ensureService(config: IImageConfig): Promise<boolean> {
}

/**
 * Ensure PaddleOCR-VL GPU service is running
 */
export async function ensurePaddleOcrVlGpu(): Promise<boolean> {
  return ensureService(IMAGES.paddleocrVlGpu);
}

/**
 * Ensure PaddleOCR-VL CPU service is running
 */
export async function ensurePaddleOcrVlCpu(): Promise<boolean> {
  return ensureService(IMAGES.paddleocrVlCpu);
}

/**
 * Ensure MiniCPM service is running
 * Ensure MiniCPM service is running (Ollama with GPU)
 */
export async function ensureMiniCpm(): Promise<boolean> {
  return ensureService(IMAGES.minicpm);
@@ -272,30 +251,6 @@ export function isGpuAvailable(): boolean {
  }
}

/**
 * Ensure PaddleOCR-VL service (auto-detect GPU/CPU)
 */
export async function ensurePaddleOcrVl(): Promise<boolean> {
  if (isGpuAvailable()) {
    console.log('[Docker] GPU detected, using GPU image');
    return ensurePaddleOcrVlGpu();
  } else {
    console.log('[Docker] No GPU detected, using CPU image');
    return ensurePaddleOcrVlCpu();
  }
}

/**
 * Ensure PaddleOCR-VL Full Pipeline service (PP-DocLayoutV2 + structured output)
 * This is the recommended service for production use - outputs structured JSON/Markdown
 */
export async function ensurePaddleOcrVlFull(): Promise<boolean> {
  if (!isGpuAvailable()) {
    console.log('[Docker] WARNING: Full pipeline requires GPU, but none detected');
  }
  return ensureService(IMAGES.paddleocrVlFull);
}

/**
 * Ensure an Ollama model is pulled and available
 * Uses the MiniCPM container (which runs Ollama) to pull the model
@@ -311,9 +266,8 @@ export async function ensureOllamaModel(modelName: string): Promise<boolean> {
  if (response.ok) {
    const data = await response.json();
    const models = data.models || [];
    const exists = models.some((m: { name: string }) =>
      m.name === modelName || m.name.startsWith(modelName.split(':')[0])
    );
    // Exact match required - don't match on prefix
    const exists = models.some((m: { name: string }) => m.name === modelName);

    if (exists) {
      console.log(`[Ollama] Model already available: ${modelName}`);
@@ -358,3 +312,40 @@ export async function ensureQwen25(): Promise<boolean> {
  // Then ensure the Qwen2.5 model is pulled
  return ensureOllamaModel('qwen2.5:7b');
}

/**
 * Ensure Ministral 3 8B model is available (for structured JSON extraction)
 * Ministral 3 has native JSON output support and OCR-style document extraction
 */
export async function ensureMinistral3(): Promise<boolean> {
  // First ensure the Ollama service (MiniCPM container) is running
  const ollamaOk = await ensureMiniCpm();
  if (!ollamaOk) return false;

  // Then ensure the Ministral 3 8B model is pulled
  return ensureOllamaModel('ministral-3:8b');
}

/**
 * Ensure Qwen3-VL 8B model is available (vision-language model)
 * Q4_K_M quantization (~5GB) - fits in 15GB VRAM with room to spare
 */
export async function ensureQwen3Vl(): Promise<boolean> {
  // First ensure the Ollama service is running
  const ollamaOk = await ensureMiniCpm();
  if (!ollamaOk) return false;

  // Then ensure Qwen3-VL 8B is pulled
  return ensureOllamaModel('qwen3-vl:8b');
}

/**
 * Ensure Nanonets-OCR2-3B service is running (via vLLM)
 * Document OCR optimized VLM based on Qwen2.5-VL-3B (Oct 2025 release)
 */
export async function ensureNanonetsOcr(): Promise<boolean> {
  if (!isGpuAvailable()) {
    console.log('[Docker] WARNING: Nanonets-OCR2-3B requires GPU, but none detected');
  }
  return ensureService(IMAGES.nanonetsOcr);
}

@@ -1,549 +0,0 @@
/**
 * Bank statement extraction test using MiniCPM-V (visual) + PaddleOCR-VL (table recognition)
 *
 * This is the combined/dual-VLM approach that uses both models for consensus:
 * - MiniCPM-V for visual extraction
 * - PaddleOCR-VL for table recognition
 */
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensurePaddleOcrVl, ensureMiniCpm } from './helpers/docker.js';

// Service URLs
const OLLAMA_URL = 'http://localhost:11434';
const PADDLEOCR_VL_URL = 'http://localhost:8000';

// Models
const MINICPM_MODEL = 'minicpm-v:latest';
const PADDLEOCR_VL_MODEL = 'paddleocr-vl';

// Prompt for MiniCPM-V visual extraction
const MINICPM_EXTRACT_PROMPT = `/nothink
You are a bank statement parser. Extract EVERY transaction from the table.

Read the Amount column carefully:
- "- 21,47 €" means DEBIT, output as: -21.47
- "+ 1.000,00 €" means CREDIT, output as: 1000.00
- European format: comma = decimal point

For each row output: {"date":"YYYY-MM-DD","counterparty":"NAME","amount":-21.47}

Do not skip any rows. Return ONLY the JSON array, no explanation.`;

// Prompt for PaddleOCR-VL table extraction
const PADDLEOCR_VL_TABLE_PROMPT = `Table Recognition:`;

// Post-processing prompt to convert PaddleOCR-VL output to JSON
const PADDLEOCR_VL_CONVERT_PROMPT = `/nothink
Convert the following bank statement table data to JSON.

Read the Amount values carefully:
- "- 21,47 €" means DEBIT, output as: -21.47
- "+ 1.000,00 €" means CREDIT, output as: 1000.00
- European format: comma = decimal point

For each transaction output: {"date":"YYYY-MM-DD","counterparty":"NAME","amount":-21.47}

Return ONLY the JSON array, no explanation.

Table data:
---
{TABLE_DATA}
---`;

interface ITransaction {
  date: string;
  counterparty: string;
  amount: number;
}

/**
 * Convert PDF to PNG images using ImageMagick
 */
function convertPdfToImages(pdfPath: string): string[] {
  const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
  const outputPattern = path.join(tempDir, 'page-%d.png');

  try {
    execSync(
      `convert -density 300 -quality 100 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
      { stdio: 'pipe' }
    );

    const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.png')).sort();
    const images: string[] = [];

    for (const file of files) {
      const imagePath = path.join(tempDir, file);
      const imageData = fs.readFileSync(imagePath);
      images.push(imageData.toString('base64'));
    }

    return images;
  } finally {
    fs.rmSync(tempDir, { recursive: true, force: true });
  }
}

/**
 * Extract using MiniCPM-V via Ollama
 */
async function extractWithMiniCPM(images: string[], passLabel: string): Promise<ITransaction[]> {
  const payload = {
    model: MINICPM_MODEL,
    prompt: MINICPM_EXTRACT_PROMPT,
    images,
    stream: true,
    options: {
      num_predict: 16384,
      temperature: 0.1,
    },
  };

  const response = await fetch(`${OLLAMA_URL}/api/generate`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload),
  });

  if (!response.ok) {
    throw new Error(`Ollama API error: ${response.status}`);
  }

  const reader = response.body?.getReader();
  if (!reader) {
    throw new Error('No response body');
  }

  const decoder = new TextDecoder();
  let fullText = '';
  let lineBuffer = '';

  console.log(`[${passLabel}] Extracting with MiniCPM-V...`);

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;

    const chunk = decoder.decode(value, { stream: true });
    const lines = chunk.split('\n').filter((l) => l.trim());

    for (const line of lines) {
      try {
        const json = JSON.parse(line);
        if (json.response) {
          fullText += json.response;
          lineBuffer += json.response;

          if (lineBuffer.includes('\n')) {
            const parts = lineBuffer.split('\n');
            for (let i = 0; i < parts.length - 1; i++) {
              console.log(parts[i]);
            }
            lineBuffer = parts[parts.length - 1];
          }
        }
      } catch {
        // Skip invalid JSON lines
      }
    }
  }

  if (lineBuffer) {
    console.log(lineBuffer);
  }
  console.log('');

  const startIdx = fullText.indexOf('[');
  const endIdx = fullText.lastIndexOf(']') + 1;

  if (startIdx < 0 || endIdx <= startIdx) {
    throw new Error('No JSON array found in response');
  }

  return JSON.parse(fullText.substring(startIdx, endIdx));
}

/**
 * Extract table using PaddleOCR-VL via OpenAI-compatible API
 */
async function extractTableWithPaddleOCRVL(imageBase64: string): Promise<string> {
  const payload = {
    model: PADDLEOCR_VL_MODEL,
    messages: [
      {
        role: 'user',
        content: [
          {
            type: 'image_url',
            image_url: { url: `data:image/png;base64,${imageBase64}` },
          },
          {
            type: 'text',
            text: PADDLEOCR_VL_TABLE_PROMPT,
          },
        ],
      },
    ],
    temperature: 0.0,
    max_tokens: 8192,
  };

  const response = await fetch(`${PADDLEOCR_VL_URL}/v1/chat/completions`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload),
  });

  if (!response.ok) {
    const text = await response.text();
    throw new Error(`PaddleOCR-VL API error: ${response.status} - ${text}`);
  }

  const data = await response.json();
  return data.choices?.[0]?.message?.content || '';
}

/**
 * Convert PaddleOCR-VL table output to transactions using MiniCPM-V
 */
async function convertTableToTransactions(
  tableData: string,
  passLabel: string
): Promise<ITransaction[]> {
  const prompt = PADDLEOCR_VL_CONVERT_PROMPT.replace('{TABLE_DATA}', tableData);

  const payload = {
    model: MINICPM_MODEL,
    prompt,
    stream: true,
    options: {
      num_predict: 16384,
      temperature: 0.1,
    },
  };

  const response = await fetch(`${OLLAMA_URL}/api/generate`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload),
  });

  if (!response.ok) {
    throw new Error(`Ollama API error: ${response.status}`);
  }

  const reader = response.body?.getReader();
  if (!reader) {
    throw new Error('No response body');
  }

  const decoder = new TextDecoder();
  let fullText = '';

  console.log(`[${passLabel}] Converting table data to JSON...`);

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;

    const chunk = decoder.decode(value, { stream: true });
    const lines = chunk.split('\n').filter((l) => l.trim());

    for (const line of lines) {
      try {
        const json = JSON.parse(line);
        if (json.response) {
          fullText += json.response;
        }
      } catch {
        // Skip invalid JSON lines
      }
    }
  }

  const startIdx = fullText.indexOf('[');
  const endIdx = fullText.lastIndexOf(']') + 1;

  if (startIdx < 0 || endIdx <= startIdx) {
    throw new Error('No JSON array found in response');
  }

  return JSON.parse(fullText.substring(startIdx, endIdx));
}

/**
 * Extract using PaddleOCR-VL (table recognition) + conversion
 */
async function extractWithPaddleOCRVL(
  images: string[],
  passLabel: string
): Promise<ITransaction[]> {
  console.log(`[${passLabel}] Extracting tables with PaddleOCR-VL...`);

  // Extract table data from each page
  const tableDataParts: string[] = [];
  for (let i = 0; i < images.length; i++) {
    console.log(`[${passLabel}] Processing page ${i + 1}/${images.length}...`);
    const tableData = await extractTableWithPaddleOCRVL(images[i]);
    if (tableData.trim()) {
      tableDataParts.push(`--- Page ${i + 1} ---\n${tableData}`);
    }
  }

  const combinedTableData = tableDataParts.join('\n\n');
  console.log(`[${passLabel}] Got ${combinedTableData.length} chars of table data`);

  // Convert to transactions
  return convertTableToTransactions(combinedTableData, passLabel);
}

/**
 * Create a hash of transactions for comparison
 */
function hashTransactions(transactions: ITransaction[]): string {
  return transactions
    .map((t) => `${t.date}|${t.amount.toFixed(2)}`)
    .sort()
    .join(';');
}

/**
 * Check if PaddleOCR-VL service is available
 */
async function isPaddleOCRVLAvailable(): Promise<boolean> {
  try {
    const response = await fetch(`${PADDLEOCR_VL_URL}/health`, {
      method: 'GET',
      signal: AbortSignal.timeout(5000),
    });
    return response.ok;
  } catch {
    return false;
  }
}

/**
 * Extract with dual-VLM consensus
 * Strategy:
 * Pass 1 = MiniCPM-V visual extraction
 * Pass 2 = PaddleOCR-VL table recognition (if available)
 * Pass 3+ = MiniCPM-V visual (fallback)
 */
async function extractWithConsensus(
  images: string[],
  maxPasses: number = 5
): Promise<ITransaction[]> {
  const results: Array<{ transactions: ITransaction[]; hash: string }> = [];
  const hashCounts: Map<string, number> = new Map();

  const addResult = (transactions: ITransaction[], passLabel: string): number => {
    const hash = hashTransactions(transactions);
    results.push({ transactions, hash });
    hashCounts.set(hash, (hashCounts.get(hash) || 0) + 1);
    console.log(
      `[${passLabel}] Got ${transactions.length} transactions (hash: ${hash.substring(0, 20)}...)`
    );
    return hashCounts.get(hash)!;
  };

  // Check if PaddleOCR-VL is available
  const paddleOCRVLAvailable = await isPaddleOCRVLAvailable();
  if (paddleOCRVLAvailable) {
    console.log('[Setup] PaddleOCR-VL service available - using dual-VLM consensus');
  } else {
    console.log('[Setup] PaddleOCR-VL not available - using MiniCPM-V only');
  }

  // Pass 1: MiniCPM-V visual extraction
  try {
    const pass1Result = await extractWithMiniCPM(images, 'Pass 1 MiniCPM-V');
    addResult(pass1Result, 'Pass 1 MiniCPM-V');
  } catch (err) {
    console.log(`[Pass 1] Error: ${err}`);
  }

  // Pass 2: PaddleOCR-VL table recognition (if available)
  if (paddleOCRVLAvailable) {
    try {
      const pass2Result = await extractWithPaddleOCRVL(images, 'Pass 2 PaddleOCR-VL');
      const count = addResult(pass2Result, 'Pass 2 PaddleOCR-VL');
      if (count >= 2) {
        console.log('[Consensus] MiniCPM-V and PaddleOCR-VL extractions match!');
        return pass2Result;
      }
    } catch (err) {
      console.log(`[Pass 2 PaddleOCR-VL] Error: ${err}`);
    }
  }

  // Pass 3+: Continue with MiniCPM-V visual passes
  const startPass = paddleOCRVLAvailable ? 3 : 2;
  for (let pass = startPass; pass <= maxPasses; pass++) {
    try {
      const transactions = await extractWithMiniCPM(images, `Pass ${pass} MiniCPM-V`);
      const count = addResult(transactions, `Pass ${pass} MiniCPM-V`);

      if (count >= 2) {
        console.log(`[Consensus] Reached after ${pass} passes`);
        return transactions;
      }

      console.log(`[Pass ${pass}] No consensus yet, trying again...`);
    } catch (err) {
      console.log(`[Pass ${pass}] Error: ${err}`);
    }
  }

  // No consensus reached - return the most common result
  let bestHash = '';
  let bestCount = 0;
  for (const [hash, count] of hashCounts) {
    if (count > bestCount) {
      bestCount = count;
      bestHash = hash;
    }
  }

  if (!bestHash) {
    throw new Error('No valid results obtained');
  }

  const best = results.find((r) => r.hash === bestHash)!;
  console.log(`[No consensus] Using most common result (${bestCount}/${maxPasses} passes)`);
  return best.transactions;
}

/**
 * Compare extracted transactions against expected
 */
function compareTransactions(
  extracted: ITransaction[],
  expected: ITransaction[]
): { matches: number; total: number; errors: string[] } {
  const errors: string[] = [];
  let matches = 0;

  for (let i = 0; i < expected.length; i++) {
    const exp = expected[i];
    const ext = extracted[i];

    if (!ext) {
      errors.push(`Missing transaction ${i}: ${exp.date} ${exp.counterparty}`);
      continue;
    }

    const dateMatch = ext.date === exp.date;
    const amountMatch = Math.abs(ext.amount - exp.amount) < 0.01;

    if (dateMatch && amountMatch) {
      matches++;
    } else {
      errors.push(
        `Mismatch at ${i}: expected ${exp.date}/${exp.amount}, got ${ext.date}/${ext.amount}`
      );
    }
  }

  if (extracted.length > expected.length) {
    errors.push(`Extra transactions: ${extracted.length - expected.length}`);
  }

  return { matches, total: expected.length, errors };
}

/**
 * Find all test cases (PDF + JSON pairs) in .nogit/
 */
function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: string }> {
  const testDir = path.join(process.cwd(), '.nogit');
  if (!fs.existsSync(testDir)) {
    return [];
  }

  const files = fs.readdirSync(testDir);
  const pdfFiles = files.filter((f: string) => f.endsWith('.pdf'));
  const testCases: Array<{ name: string; pdfPath: string; jsonPath: string }> = [];

  for (const pdf of pdfFiles) {
    const baseName = pdf.replace('.pdf', '');
    const jsonFile = `${baseName}.json`;
    if (files.includes(jsonFile)) {
      testCases.push({
        name: baseName,
        pdfPath: path.join(testDir, pdf),
        jsonPath: path.join(testDir, jsonFile),
      });
    }
  }

  return testCases;
}

// Tests

tap.test('setup: ensure Docker containers are running', async () => {
  console.log('\n[Setup] Checking Docker containers...\n');

  // Ensure PaddleOCR-VL is running (auto-detects GPU/CPU)
  const paddleOk = await ensurePaddleOcrVl();
  expect(paddleOk).toBeTrue();

  // Ensure MiniCPM is running
  const minicpmOk = await ensureMiniCpm();
  expect(minicpmOk).toBeTrue();

  console.log('\n[Setup] All containers ready!\n');
});

tap.test('should have MiniCPM-V 4.5 model loaded', async () => {
  const response = await fetch(`${OLLAMA_URL}/api/tags`);
  const data = await response.json();
  const modelNames = data.models.map((m: { name: string }) => m.name);
  expect(modelNames.some((name: string) => name.includes('minicpm-v4.5'))).toBeTrue();
});

tap.test('should check PaddleOCR-VL availability', async () => {
  const available = await isPaddleOCRVLAvailable();
  console.log(`PaddleOCR-VL available: ${available}`);
  expect(available).toBeTrue();
});

// Dynamic test for each PDF/JSON pair
const testCases = findTestCases();
for (const testCase of testCases) {
  tap.test(`should extract transactions from ${testCase.name}`, async () => {
    // Load expected transactions
|
||||
const expected: ITransaction[] = JSON.parse(fs.readFileSync(testCase.jsonPath, 'utf-8'));
|
||||
console.log(`\n=== ${testCase.name} ===`);
|
||||
console.log(`Expected: ${expected.length} transactions`);
|
||||
|
||||
// Convert PDF to images
|
||||
console.log('Converting PDF to images...');
|
||||
const images = convertPdfToImages(testCase.pdfPath);
|
||||
console.log(`Converted: ${images.length} pages\n`);
|
||||
|
||||
// Extract with dual-VLM consensus
|
||||
const extracted = await extractWithConsensus(images);
|
||||
console.log(`\nFinal: ${extracted.length} transactions`);
|
||||
|
||||
// Compare results
|
||||
const result = compareTransactions(extracted, expected);
|
||||
console.log(`Accuracy: ${result.matches}/${result.total}`);
|
||||
|
||||
if (result.errors.length > 0) {
|
||||
console.log('Errors:');
|
||||
result.errors.forEach((e) => console.log(` - ${e}`));
|
||||
}
|
||||
|
||||
// Assert high accuracy
|
||||
const accuracy = result.matches / result.total;
|
||||
expect(accuracy).toBeGreaterThan(0.95);
|
||||
expect(extracted.length).toEqual(expected.length);
|
||||
});
|
||||
}
|
||||
|
||||
export default tap.start();
|
||||
@@ -1,8 +1,10 @@
/**
 * Bank statement extraction test using MiniCPM-V only (visual extraction)
 * Bank statement extraction using MiniCPM-V via smartagent DualAgentOrchestrator
 *
 * This tests MiniCPM-V's ability to extract bank transactions directly from images
 * without any OCR augmentation.
 * Uses vision-capable orchestrator with JsonValidatorTool for self-validation:
 * 1. Process each page with the orchestrator
 * 2. Driver extracts transactions and validates JSON before completing
 * 3. Streaming output during extraction
 */
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
@@ -10,25 +12,11 @@ import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensureMiniCpm } from './helpers/docker.js';
import { SmartAi } from '@push.rocks/smartai';
import { DualAgentOrchestrator, JsonValidatorTool } from '@push.rocks/smartagent';

// Service URL
const OLLAMA_URL = 'http://localhost:11434';

// Model
const MINICPM_MODEL = 'minicpm-v:latest';

// Prompt for MiniCPM-V visual extraction
const MINICPM_EXTRACT_PROMPT = `/nothink
You are a bank statement parser. Extract EVERY transaction from the table.

Read the Amount column carefully:
- "- 21,47 €" means DEBIT, output as: -21.47
- "+ 1.000,00 €" means CREDIT, output as: 1000.00
- European format: comma = decimal point

For each row output: {"date":"YYYY-MM-DD","counterparty":"NAME","amount":-21.47}

Do not skip any rows. Return ONLY the JSON array, no explanation.`;
const MODEL = 'openbmb/minicpm-v4.5:q8_0';

interface ITransaction {
  date: string;
@@ -36,6 +24,10 @@ interface ITransaction {
  amount: number;
}

// SmartAi instance and orchestrator (initialized in setup)
let smartAi: SmartAi;
let orchestrator: DualAgentOrchestrator;

/**
 * Convert PDF to PNG images using ImageMagick
 */
@@ -64,150 +56,155 @@ function convertPdfToImages(pdfPath: string): string[] {
  }
}

const EXTRACTION_PROMPT = `Extract ALL transactions from this bank statement page as a JSON array.

IMPORTANT RULES:
1. Each transaction has: date, counterparty (description), and an amount
2. Amount is NEGATIVE for money going OUT (debits, payments, withdrawals)
3. Amount is POSITIVE for money coming IN (credits, deposits, refunds)
4. Date format: YYYY-MM-DD
5. Do NOT include: opening balance, closing balance, subtotals, headers, or summary rows
6. Only include actual transactions with a specific date and amount

Before completing, validate your JSON output:

<tool_call>
<tool>json</tool>
<action>validate</action>
<params>{"jsonString": "YOUR_JSON_ARRAY_HERE"}</params>
</tool_call>

Output format (must be a valid JSON array):
[
  {"date": "2021-06-01", "counterparty": "COMPANY NAME", "amount": -25.99},
  {"date": "2021-06-02", "counterparty": "DEPOSIT FROM", "amount": 100.00}
]

Only complete after validation passes. Output the final JSON array in <task_complete> tags.`;

/**
 * Extract using MiniCPM-V via Ollama
 * Parse amount from various formats
 */
async function extractWithMiniCPM(images: string[], passLabel: string): Promise<ITransaction[]> {
  const payload = {
    model: MINICPM_MODEL,
    prompt: MINICPM_EXTRACT_PROMPT,
    images,
    stream: true,
    options: {
      num_predict: 16384,
      temperature: 0.1,
    },
  };
function parseAmount(value: unknown): number {
  if (typeof value === 'number') return value;
  if (typeof value !== 'string') return 0;

  const response = await fetch(`${OLLAMA_URL}/api/generate`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload),
  });

  if (!response.ok) {
    throw new Error(`Ollama API error: ${response.status}`);
  let s = value.replace(/[€$£\s]/g, '').replace('−', '-').replace('–', '-');
  // European format: comma is decimal
  if (s.includes(',') && s.indexOf(',') > s.lastIndexOf('.')) {
    s = s.replace(/\./g, '').replace(',', '.');
  } else {
    s = s.replace(/,/g, '');
  }
  return parseFloat(s) || 0;
}

  const reader = response.body?.getReader();
  if (!reader) {
    throw new Error('No response body');
  }

  const decoder = new TextDecoder();
  let fullText = '';
  let lineBuffer = '';

  console.log(`[${passLabel}] Extracting with MiniCPM-V...`);

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;

    const chunk = decoder.decode(value, { stream: true });
    const lines = chunk.split('\n').filter((l) => l.trim());

    for (const line of lines) {
/**
 * Extract JSON from response (handles markdown code blocks and task_complete tags)
 */
function extractJsonFromResponse(response: string): unknown[] | null {
  // Try to find JSON in task_complete tags
  const completeMatch = response.match(/<task_complete>([\s\S]*?)<\/task_complete>/);
  if (completeMatch) {
    const content = completeMatch[1].trim();
    // Try to find JSON in the content
    const codeBlockMatch = content.match(/```(?:json)?\s*([\s\S]*?)```/);
    const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : content;
      try {
        const json = JSON.parse(line);
        if (json.response) {
          fullText += json.response;
          lineBuffer += json.response;

          if (lineBuffer.includes('\n')) {
            const parts = lineBuffer.split('\n');
            for (let i = 0; i < parts.length - 1; i++) {
              console.log(parts[i]);
            }
            lineBuffer = parts[parts.length - 1];
          }
        }
    const parsed = JSON.parse(jsonStr);
    if (Array.isArray(parsed)) return parsed;
      } catch {
        // Skip invalid JSON lines
      }
    }
  }

  if (lineBuffer) {
    console.log(lineBuffer);
  }
  console.log('');

  const startIdx = fullText.indexOf('[');
  const endIdx = fullText.lastIndexOf(']') + 1;

  if (startIdx < 0 || endIdx <= startIdx) {
    throw new Error('No JSON array found in response');
  }

  return JSON.parse(fullText.substring(startIdx, endIdx));
}

/**
 * Create a hash of transactions for comparison
 */
function hashTransactions(transactions: ITransaction[]): string {
  return transactions
    .map((t) => `${t.date}|${t.amount.toFixed(2)}`)
    .sort()
    .join(';');
}
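
// Worked illustration (hypothetical values, not part of the original test): because the
// hash sorts its entries, two passes that list the same transactions in different order
// still agree. Both of these calls yield '2021-06-01|-21.47;2021-06-02|100.00':
//   hashTransactions([{ date: '2021-06-02', counterparty: 'B', amount: 100 },
//                     { date: '2021-06-01', counterparty: 'A', amount: -21.47 }])
//   hashTransactions([{ date: '2021-06-01', counterparty: 'A', amount: -21.47 },
//                     { date: '2021-06-02', counterparty: 'B', amount: 100 }])
// Note that counterparty is deliberately excluded, so passes that disagree only on
// name spelling still count toward consensus.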

/**
 * Extract with consensus voting using MiniCPM-V only
 */
async function extractWithConsensus(
  images: string[],
  maxPasses: number = 5
): Promise<ITransaction[]> {
  const results: Array<{ transactions: ITransaction[]; hash: string }> = [];
  const hashCounts: Map<string, number> = new Map();

  const addResult = (transactions: ITransaction[], passLabel: string): number => {
    const hash = hashTransactions(transactions);
    results.push({ transactions, hash });
    hashCounts.set(hash, (hashCounts.get(hash) || 0) + 1);
    console.log(
      `[${passLabel}] Got ${transactions.length} transactions (hash: ${hash.substring(0, 20)}...)`
    );
    return hashCounts.get(hash)!;
  };

  console.log('[Setup] Using MiniCPM-V only');

  for (let pass = 1; pass <= maxPasses; pass++) {
  // Try to find JSON array pattern
  const jsonMatch = jsonStr.match(/\[[\s\S]*\]/);
  if (jsonMatch) {
    try {
      const transactions = await extractWithMiniCPM(images, `Pass ${pass} MiniCPM-V`);
      const count = addResult(transactions, `Pass ${pass} MiniCPM-V`);
      const parsed = JSON.parse(jsonMatch[0]);
      if (Array.isArray(parsed)) return parsed;
    } catch {
      return null;
    }
  }
  }
}

  // Try to find JSON in markdown code block
  const codeBlockMatch = response.match(/```(?:json)?\s*([\s\S]*?)```/);
  const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : response.trim();

  try {
    const parsed = JSON.parse(jsonStr);
    if (Array.isArray(parsed)) return parsed;
  } catch {
    // Try to find JSON array pattern
    const jsonMatch = jsonStr.match(/\[[\s\S]*\]/);
    if (jsonMatch) {
      try {
        const parsed = JSON.parse(jsonMatch[0]);
        if (Array.isArray(parsed)) return parsed;
      } catch {
        return null;
      }
    }
  }
  return null;
}

/**
 * Parse JSON response into transactions
 */
function parseJsonToTransactions(response: string): ITransaction[] {
  const parsed = extractJsonFromResponse(response);
  if (!parsed || !Array.isArray(parsed)) return [];

  return parsed.map((tx: any) => ({
    date: String(tx.date || ''),
    counterparty: String(tx.counterparty || tx.description || ''),
    amount: parseAmount(tx.amount),
  }));
}

/**
 * Extract transactions from a single page using smartagent orchestrator
 */
async function extractTransactionsFromPage(image: string, pageNum: number): Promise<ITransaction[]> {
  console.log(`\n  ======== Page ${pageNum} ========`);

  const startTime = Date.now();

  const result = await orchestrator.run(EXTRACTION_PROMPT, { images: [image] });

  const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
  console.log(`\n  [Page ${pageNum}] Completed in ${elapsed}s (${result.iterations} iterations, status: ${result.status})`);

  const transactions = parseJsonToTransactions(result.result);

  console.log(`  [Page ${pageNum}] Extracted ${transactions.length} transactions:`);
  for (let i = 0; i < Math.min(transactions.length, 10); i++) {
    const tx = transactions[i];
    console.log(`    ${(i + 1).toString().padStart(2)}. ${tx.date} | ${tx.counterparty.substring(0, 30).padEnd(30)} | ${tx.amount >= 0 ? '+' : ''}${tx.amount.toFixed(2)}`);
  }
  if (transactions.length > 10) {
    console.log(`    ... and ${transactions.length - 10} more transactions`);
  }

      if (count >= 2) {
        console.log(`[Consensus] Reached after ${pass} passes`);
        return transactions;
      }

/**
 * Extract all transactions from bank statement
 */
async function extractTransactions(images: string[]): Promise<ITransaction[]> {
  console.log(`  [Vision] Processing ${images.length} page(s) with smartagent DualAgentOrchestrator`);

  const allTransactions: ITransaction[] = [];

  for (let i = 0; i < images.length; i++) {
    const pageTransactions = await extractTransactionsFromPage(images[i], i + 1);
    allTransactions.push(...pageTransactions);
  }

      console.log(`[Pass ${pass}] No consensus yet, trying again...`);
    } catch (err) {
      console.log(`[Pass ${pass}] Error: ${err}`);
    }
  }

  // No consensus reached - return the most common result
  let bestHash = '';
  let bestCount = 0;
  for (const [hash, count] of hashCounts) {
    if (count > bestCount) {
      bestCount = count;
      bestHash = hash;
    }
  }

  if (!bestHash) {
    throw new Error('No valid results obtained');
  }

  const best = results.find((r) => r.hash === bestHash)!;
  console.log(`[No consensus] Using most common result (${bestCount}/${maxPasses} passes)`);
  return best.transactions;
  console.log(`  [Vision] Total: ${allTransactions.length} transactions`);
  return allTransactions;
}

/**
@@ -216,8 +213,9 @@ async function extractWithConsensus(
function compareTransactions(
  extracted: ITransaction[],
  expected: ITransaction[]
): { matches: number; total: number; errors: string[] } {
): { matches: number; total: number; errors: string[]; variations: string[] } {
  const errors: string[] = [];
  const variations: string[] = [];
  let matches = 0;

  for (let i = 0; i < expected.length; i++) {
@@ -234,6 +232,12 @@ function compareTransactions(

    if (dateMatch && amountMatch) {
      matches++;
      // Track counterparty variations (date and amount match but name differs)
      if (ext.counterparty !== exp.counterparty) {
        variations.push(
          `[${i}] "${exp.counterparty}" → "${ext.counterparty}"`
        );
      }
    } else {
      errors.push(
        `Mismatch at ${i}: expected ${exp.date}/${exp.amount}, got ${ext.date}/${ext.amount}`
@@ -245,7 +249,7 @@
    errors.push(`Extra transactions: ${extracted.length - expected.length}`);
  }

  return { matches, total: expected.length, errors };
  return { matches, total: expected.length, errors, variations };
}

/**
@@ -273,62 +277,160 @@ function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: strin
  }
}

  return testCases;
  return testCases.sort((a, b) => a.name.localeCompare(b.name));
}

// Tests

tap.test('setup: ensure Docker containers are running', async () => {
  console.log('\n[Setup] Checking Docker containers...\n');

  // Ensure MiniCPM is running
  const minicpmOk = await ensureMiniCpm();
  expect(minicpmOk).toBeTrue();

  console.log('\n[Setup] All containers ready!\n');
});

tap.test('should have MiniCPM-V 4.5 model loaded', async () => {
tap.test('setup: initialize smartagent orchestrator', async () => {
  console.log('[Setup] Initializing SmartAi and DualAgentOrchestrator...');

  smartAi = new SmartAi({
    ollama: {
      baseUrl: OLLAMA_URL,
      model: MODEL,
      defaultOptions: {
        num_ctx: 32768,
        num_predict: 4000,
        temperature: 0.1,
      },
      defaultTimeout: 300000, // 5 minutes for vision tasks
    },
  });

  await smartAi.start();

  orchestrator = new DualAgentOrchestrator({
    smartAiInstance: smartAi,
    defaultProvider: 'ollama',
    guardianPolicyPrompt: `You are a Guardian agent overseeing bank statement extraction tasks.

APPROVE all tool calls that:
- Use the json.validate action to verify JSON output
- Are reasonable attempts to complete the extraction task

REJECT tool calls that:
- Attempt to access external resources
- Try to execute arbitrary code
- Are clearly unrelated to bank statement extraction`,
    driverSystemMessage: `You are an AI assistant that extracts bank transactions from statement images.

Your task is to analyze bank statement images and extract transaction data.
You have access to a json.validate tool to verify your JSON output.

IMPORTANT: Always validate your JSON before completing the task.

## Tool Usage Format
When you need to validate JSON, output:

<tool_call>
<tool>json</tool>
<action>validate</action>
<params>{"jsonString": "YOUR_JSON_ARRAY"}</params>
</tool_call>

## Completion Format
After validation passes, complete the task:

<task_complete>
[{"date": "YYYY-MM-DD", "counterparty": "...", "amount": -123.45}, ...]
</task_complete>`,
    maxIterations: 5,
    maxConsecutiveRejections: 3,
    onToken: (token, source) => {
      if (source === 'driver') {
        process.stdout.write(token);
      }
    },
    onProgress: (event) => {
      if (event.logLevel === 'error') {
        console.error(event.logMessage);
      }
    },
  });

  // Register the JsonValidatorTool
  orchestrator.registerTool(new JsonValidatorTool());

  await orchestrator.start();
  console.log('[Setup] Orchestrator initialized!\n');
});

tap.test('should have MiniCPM-V model loaded', async () => {
  const response = await fetch(`${OLLAMA_URL}/api/tags`);
  const data = await response.json();
  const modelNames = data.models.map((m: { name: string }) => m.name);
  expect(modelNames.some((name: string) => name.includes('minicpm-v4.5'))).toBeTrue();
  expect(modelNames.some((name: string) => name.includes('minicpm'))).toBeTrue();
});

// Dynamic test for each PDF/JSON pair
const testCases = findTestCases();
console.log(`\nFound ${testCases.length} bank statement test cases (MiniCPM-V only)\n`);
console.log(`\nFound ${testCases.length} bank statement test cases (smartagent + MiniCPM-V)\n`);

let passedCount = 0;
let failedCount = 0;

for (const testCase of testCases) {
  tap.test(`should extract transactions from ${testCase.name}`, async () => {
    // Load expected transactions
  tap.test(`should extract: ${testCase.name}`, async () => {
    const expected: ITransaction[] = JSON.parse(fs.readFileSync(testCase.jsonPath, 'utf-8'));
    console.log(`\n=== ${testCase.name} ===`);
    console.log(`Expected: ${expected.length} transactions`);

    // Convert PDF to images
    console.log('Converting PDF to images...');
    const images = convertPdfToImages(testCase.pdfPath);
    console.log(`Converted: ${images.length} pages\n`);
    console.log(`  Pages: ${images.length}`);

    // Extract with consensus (MiniCPM-V only)
    const extracted = await extractWithConsensus(images);
    console.log(`\nFinal: ${extracted.length} transactions`);
    const extracted = await extractTransactions(images);
    console.log(`  Extracted: ${extracted.length} transactions`);

    // Compare results
    const result = compareTransactions(extracted, expected);
    console.log(`Accuracy: ${result.matches}/${result.total}`);
    const perfectMatch = result.matches === result.total && extracted.length === expected.length;

    if (result.errors.length > 0) {
      console.log('Errors:');
      result.errors.forEach((e) => console.log(`  - ${e}`));
    if (perfectMatch) {
      passedCount++;
      console.log(`  Result: PASS (${result.matches}/${result.total})`);
    } else {
      failedCount++;
      console.log(`  Result: FAIL (${result.matches}/${result.total})`);
      result.errors.slice(0, 10).forEach((e) => console.log(`    - ${e}`));
    }

    // Assert high accuracy
    const accuracy = result.matches / result.total;
    expect(accuracy).toBeGreaterThan(0.95);
    // Log counterparty variations (names that differ but date/amount matched)
    if (result.variations.length > 0) {
      console.log(`  Counterparty variations (${result.variations.length}):`);
      result.variations.slice(0, 5).forEach((v) => console.log(`    ${v}`));
      if (result.variations.length > 5) {
        console.log(`    ... and ${result.variations.length - 5} more variations`);
      }
    }

    expect(result.matches).toEqual(result.total);
    expect(extracted.length).toEqual(expected.length);
  });
}

tap.test('cleanup: stop orchestrator', async () => {
  if (orchestrator) {
    await orchestrator.stop();
  }
  console.log('[Cleanup] Orchestrator stopped');
});

tap.test('summary', async () => {
  const total = testCases.length;
  console.log(`\n======================================================`);
  console.log(`  Bank Statement Summary`);
  console.log(`  (smartagent + ${MODEL})`);
  console.log(`======================================================`);
  console.log(`  Method: DualAgentOrchestrator with vision`);
  console.log(`  Passed: ${passedCount}/${total}`);
  console.log(`  Failed: ${failedCount}/${total}`);
  console.log(`======================================================\n`);
});

export default tap.start();
752
test/test.bankstatements.nanonets.ts
Normal file
@@ -0,0 +1,752 @@
/**
 * Bank statement extraction using Nanonets-OCR2-3B + GPT-OSS 20B (sequential two-stage pipeline)
 *
 * Stage 1: Nanonets-OCR2-3B converts ALL document pages to markdown (stop after completion)
 * Stage 2: GPT-OSS 20B extracts structured JSON from saved markdown (after Nanonets stops)
 *
 * This approach avoids GPU contention by running services sequentially.
 */
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensureNanonetsOcr, ensureMiniCpm, isContainerRunning } from './helpers/docker.js';
import { SmartAi } from '@push.rocks/smartai';
import { DualAgentOrchestrator, JsonValidatorTool } from '@push.rocks/smartagent';

const NANONETS_URL = 'http://localhost:8000/v1';
const NANONETS_MODEL = 'nanonets/Nanonets-OCR2-3B';

const OLLAMA_URL = 'http://localhost:11434';
const EXTRACTION_MODEL = 'gpt-oss:20b';

// Temp directory for storing markdown between stages
const TEMP_MD_DIR = path.join(os.tmpdir(), 'nanonets-markdown');

// SmartAi instance for Ollama with optimized settings
const smartAi = new SmartAi({
  ollama: {
    baseUrl: OLLAMA_URL,
    model: EXTRACTION_MODEL,
    defaultOptions: {
      num_ctx: 32768, // Larger context for long statements + thinking
      temperature: 0, // Deterministic for JSON extraction
    },
    defaultTimeout: 600000, // 10 minute timeout for large documents
  },
});

// DualAgentOrchestrator for structured task execution
let orchestrator: DualAgentOrchestrator;

interface ITransaction {
  date: string;
  counterparty: string;
  amount: number;
}

interface IImageData {
  base64: string;
  width: number;
  height: number;
  pageNum: number;
}

interface ITestCase {
  name: string;
  pdfPath: string;
  jsonPath: string;
  markdownPath?: string;
  images?: IImageData[];
}

// Nanonets-specific prompt for document OCR to markdown
const NANONETS_OCR_PROMPT = `Extract the text from the above document as if you were reading it naturally.
Return the tables in html format.
Return the equations in LaTeX representation.
If there is an image in the document and image caption is not present, add a small description inside <img></img> tag.
Watermarks should be wrapped in brackets. Ex: <watermark>OFFICIAL COPY</watermark>.
Page numbers should be wrapped in brackets. Ex: <page_number>14</page_number>.`;

// JSON extraction prompt for GPT-OSS 20B (sent AFTER the statement text is provided)
const JSON_EXTRACTION_PROMPT = `Extract ALL transactions from the bank statement. Return ONLY valid JSON array.

WHERE TO FIND DATA:
- Transactions are typically in TABLES with columns: Date, Description/Counterparty, Debit, Credit, Balance
- Look for rows with actual money movements, NOT header rows or summary totals

RULES:
1. date: Convert to YYYY-MM-DD format
2. counterparty: The name/description of who the money went to/from
3. amount: NEGATIVE for debits/withdrawals, POSITIVE for credits/deposits
4. Only include actual transactions, NOT opening/closing balances

JSON array only:
[{"date":"YYYY-MM-DD","counterparty":"NAME","amount":-25.99}]`;

// Constants for smart batching
const MAX_VISUAL_TOKENS = 28000; // ~32K context minus prompt/output headroom
const PATCH_SIZE = 14; // Qwen2.5-VL uses 14x14 patches

/**
 * Estimate visual tokens for an image based on dimensions
 */
function estimateVisualTokens(width: number, height: number): number {
  return Math.ceil((width * height) / (PATCH_SIZE * PATCH_SIZE));
}
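
// Worked example (illustrative numbers; the actual render size depends on the PDF):
// a 150 DPI A4 page of roughly 1240x1754 px estimates to
//   Math.ceil((1240 * 1754) / (14 * 14)) === 11097
// visual tokens, comfortably under MAX_VISUAL_TOKENS for a single-page batch.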

/**
 * Process images one page at a time for reliability
 */
function batchImages(images: IImageData[]): IImageData[][] {
  // One page per batch for reliable processing
  return images.map(img => [img]);
}

/**
 * Convert PDF to JPEG images using ImageMagick with dimension tracking
 */
function convertPdfToImages(pdfPath: string): IImageData[] {
  const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
  const outputPattern = path.join(tempDir, 'page-%d.jpg');

  try {
    execSync(
      `convert -density 150 -quality 90 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
      { stdio: 'pipe' }
    );

    const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.jpg')).sort();
    const images: IImageData[] = [];

    for (let i = 0; i < files.length; i++) {
      const file = files[i];
      const imagePath = path.join(tempDir, file);
      const imageData = fs.readFileSync(imagePath);

      // Get image dimensions using identify command
      const dimensions = execSync(`identify -format "%w %h" "${imagePath}"`, { encoding: 'utf-8' }).trim();
      const [width, height] = dimensions.split(' ').map(Number);

      images.push({
        base64: imageData.toString('base64'),
        width,
        height,
        pageNum: i + 1,
      });
    }

    return images;
  } finally {
    fs.rmSync(tempDir, { recursive: true, force: true });
  }
}

/**
 * Convert a batch of pages to markdown using Nanonets-OCR2-3B
 */
async function convertBatchToMarkdown(batch: IImageData[]): Promise<string> {
  const startTime = Date.now();
  const pageNums = batch.map(img => img.pageNum).join(', ');

  // Build content array with all images first, then the prompt
  const content: Array<{ type: string; image_url?: { url: string }; text?: string }> = [];

  for (const img of batch) {
    content.push({
      type: 'image_url',
      image_url: { url: `data:image/jpeg;base64,${img.base64}` },
    });
  }

  // Add prompt with page separator instruction if multiple pages
  const promptText = batch.length > 1
    ? `${NANONETS_OCR_PROMPT}\n\nPlease clearly separate each page's content with "--- PAGE N ---" markers, where N is the page number starting from ${batch[0].pageNum}.`
    : NANONETS_OCR_PROMPT;

  content.push({ type: 'text', text: promptText });

  const response = await fetch(`${NANONETS_URL}/chat/completions`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': 'Bearer dummy',
    },
    body: JSON.stringify({
      model: NANONETS_MODEL,
      messages: [{
        role: 'user',
        content,
      }],
      max_tokens: 4096 * batch.length, // Scale output tokens with batch size
      temperature: 0.0,
    }),
    signal: AbortSignal.timeout(600000), // 10 minute timeout for OCR
  });

  const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);

  if (!response.ok) {
    const errorText = await response.text();
    throw new Error(`Nanonets API error: ${response.status} - ${errorText}`);
  }

  const data = await response.json();
  let responseContent = (data.choices?.[0]?.message?.content || '').trim();

  // For single-page batches, add page marker if not present
  if (batch.length === 1 && !responseContent.includes('--- PAGE')) {
    responseContent = `--- PAGE ${batch[0].pageNum} ---\n${responseContent}`;
  }

  console.log(`    Pages [${pageNums}]: ${responseContent.length} chars (${elapsed}s)`);
  return responseContent;
}

/**
 * Convert all pages of a document to markdown using smart batching
 */
async function convertDocumentToMarkdown(images: IImageData[], docName: string): Promise<string> {
  const batches = batchImages(images);
  console.log(`  [${docName}] Processing ${images.length} page(s) in ${batches.length} batch(es)...`);

  const markdownParts: string[] = [];

  for (let i = 0; i < batches.length; i++) {
    const batch = batches[i];
    const batchTokens = batch.reduce((sum, img) => sum + estimateVisualTokens(img.width, img.height), 0);
    console.log(`    Batch ${i + 1}: ${batch.length} page(s), ~${batchTokens} tokens`);
    const markdown = await convertBatchToMarkdown(batch);
    markdownParts.push(markdown);
  }

  const fullMarkdown = markdownParts.join('\n\n');
  console.log(`  [${docName}] Complete: ${fullMarkdown.length} chars total`);
  return fullMarkdown;
}

/**
 * Stop Nanonets container
 */
function stopNanonets(): void {
  console.log('  [Docker] Stopping Nanonets container...');
  try {
    execSync('docker stop nanonets-test 2>/dev/null || true', { stdio: 'pipe' });
    // Wait for GPU memory to be released
    execSync('sleep 5', { stdio: 'pipe' });
    console.log('  [Docker] Nanonets stopped');
  } catch {
    console.log('  [Docker] Nanonets was not running');
  }
}

/**
 * Ensure GPT-OSS 20B model is available and warmed up
 */
async function ensureExtractionModel(): Promise<boolean> {
  try {
    const response = await fetch(`${OLLAMA_URL}/api/tags`);
    if (response.ok) {
      const data = await response.json();
      const models = data.models || [];
      if (models.some((m: { name: string }) => m.name === EXTRACTION_MODEL)) {
        console.log(`  [Ollama] Model available: ${EXTRACTION_MODEL}`);
        return true;
      }
    }
  } catch {
    return false;
  }

  console.log(`  [Ollama] Pulling ${EXTRACTION_MODEL}...`);
  const pullResponse = await fetch(`${OLLAMA_URL}/api/pull`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ name: EXTRACTION_MODEL, stream: false }),
  });

  return pullResponse.ok;
}

/**
 * Try to extract valid JSON from a response string
 */
function tryExtractJson(response: string): unknown[] | null {
  // Remove thinking tags
  let clean = response.replace(/<think>[\s\S]*?<\/think>/g, '').trim();

  // Try task_complete tags first
  const completeMatch = clean.match(/<task_complete>([\s\S]*?)<\/task_complete>/);
  if (completeMatch) {
    clean = completeMatch[1].trim();
  }

  // Try code block
  const codeBlockMatch = clean.match(/```(?:json)?\s*([\s\S]*?)```/);
  const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : clean;

  try {
    const parsed = JSON.parse(jsonStr);
    if (Array.isArray(parsed)) return parsed;
  } catch {
    // Try to find JSON array
    const jsonMatch = jsonStr.match(/\[[\s\S]*\]/);
    if (jsonMatch) {
      try {
        const parsed = JSON.parse(sanitizeJson(jsonMatch[0]));
        if (Array.isArray(parsed)) return parsed;
      } catch {
        return null;
      }
    }
    return null;
  }
  return null;
}
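
// Example (hypothetical model output): all three of these shapes parse to the same array:
//   tryExtractJson('<task_complete>[{"date":"2021-06-01","counterparty":"ACME","amount":-25.99}]</task_complete>')
//   tryExtractJson('```json\n[{"date":"2021-06-01","counterparty":"ACME","amount":-25.99}]\n```')
//   tryExtractJson('Here you go: [{"date":"2021-06-01","counterparty":"ACME","amount":-25.99}]')
// The third only succeeds via the fallback array match, which also runs sanitizeJson first.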

/**
 * Extract transactions from markdown using smartagent DualAgentOrchestrator
 * Validates JSON and retries if invalid
 */
async function extractTransactionsFromMarkdown(markdown: string, queryId: string): Promise<ITransaction[]> {
  const startTime = Date.now();

  console.log(`  [${queryId}] Statement: ${markdown.length} chars`);

  // Build the extraction task with document context
  const taskPrompt = `Extract all transactions from this bank statement document and output ONLY the JSON array:

${markdown}

${JSON_EXTRACTION_PROMPT}

Before completing, validate your JSON using the json.validate tool:

<tool_call>
<tool>json</tool>
<action>validate</action>
<params>{"jsonString": "YOUR_JSON_ARRAY_HERE"}</params>
</tool_call>

Only complete after validation passes. Output the final JSON array in <task_complete></task_complete> tags.`;

  try {
    const result = await orchestrator.run(taskPrompt);
    const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
    console.log(`  [${queryId}] Status: ${result.status}, Iterations: ${result.iterations} (${elapsed}s)`);

    // Try to parse JSON from result
    let jsonData: unknown[] | null = null;
    let responseText = result.result || '';

    if (result.success && responseText) {
      jsonData = tryExtractJson(responseText);
    }

    // Fallback: try parsing from history
    if (!jsonData && result.history?.length > 0) {
      const lastMessage = result.history[result.history.length - 1];
      if (lastMessage?.content) {
        responseText = lastMessage.content;
        jsonData = tryExtractJson(responseText);
      }
    }

    if (!jsonData) {
      console.log(`  [${queryId}] Failed to parse JSON`);
      return [];
    }

    // Convert to transactions
    const txs = jsonData.map((tx: any) => ({
      date: String(tx.date || ''),
      counterparty: String(tx.counterparty || tx.description || ''),
      amount: parseAmount(tx.amount),
    }));
    console.log(`  [${queryId}] Parsed ${txs.length} transactions`);
    return txs;
  } catch (error) {
    const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
    console.log(`  [${queryId}] ERROR: ${error} (${elapsed}s)`);
    throw error;
  }
}

/**
 * Sanitize JSON string
 */
function sanitizeJson(jsonStr: string): string {
  let s = jsonStr;
  s = s.replace(/"amount"\s*:\s*\+/g, '"amount": ');
  s = s.replace(/:\s*\+(\d)/g, ': $1');
  s = s.replace(/"amount"\s*:\s*(-?)(\d{1,3})\.(\d{3})\.(\d{2})\b/g, '"amount": $1$2$3.$4');
  s = s.replace(/,\s*([}\]])/g, '$1');
  s = s.replace(/"([^"\\]*)\n([^"]*)"/g, '"$1 $2"');
  s = s.replace(/"([^"\\]*)\t([^"]*)"/g, '"$1 $2"');
  s = s.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F]/g, ' ');
  return s;
}
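
// Example of what these rules repair (hypothetical malformed model output):
//   {"date":"2021-06-01","counterparty":"ACME","amount": +25.99,}
// becomes
//   {"date":"2021-06-01","counterparty":"ACME","amount": 25.99}
// (leading "+" stripped, trailing comma removed), and a European-formatted
// "amount": -1.234.56 is collapsed to "amount": -1234.56 by the third rule.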

/**
 * Parse amount from various formats
 */
function parseAmount(value: unknown): number {
  if (typeof value === 'number') return value;
  if (typeof value !== 'string') return 0;

  let s = value.replace(/[€$£\s]/g, '').replace('−', '-').replace('–', '-');
  if (s.includes(',') && s.indexOf(',') > s.lastIndexOf('.')) {
    s = s.replace(/\./g, '').replace(',', '.');
  } else {
    s = s.replace(/,/g, '');
  }
  return parseFloat(s) || 0;
}
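
// A few illustrative inputs (formats taken from the extraction prompts above):
//   parseAmount('- 21,47 €')    === -21.47  // European decimal comma
//   parseAmount('+ 1.000,00 €') === 1000    // dot as thousands separator
//   parseAmount('1,234.56')     === 1234.56 // US format: commas stripped
//   parseAmount(null)           === 0       // non-strings fall back to 0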

/**
 * Parse JSON response into transactions
 */
function parseJsonResponse(response: string, queryId: string): ITransaction[] {
  // Remove thinking tags if present
  let cleanResponse = response.replace(/<think>[\s\S]*?<\/think>/g, '').trim();

  // Debug: show what we're working with
  console.log(`  [${queryId}] Response preview: ${cleanResponse.substring(0, 300)}...`);

  const codeBlockMatch = cleanResponse.match(/```(?:json)?\s*([\s\S]*?)```/);
  let jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : cleanResponse;
  jsonStr = sanitizeJson(jsonStr);

  try {
    const parsed = JSON.parse(jsonStr);
    if (Array.isArray(parsed)) {
      const txs = parsed.map(tx => ({
        date: String(tx.date || ''),
        counterparty: String(tx.counterparty || tx.description || ''),
        amount: parseAmount(tx.amount),
      }));
      console.log(`  [${queryId}] Parsed ${txs.length} transactions`);
      return txs;
    }
  } catch (e) {
    // Try to find a JSON array in the text
    const arrayMatch = jsonStr.match(/\[[\s\S]*\]/);
    if (arrayMatch) {
      console.log(`  [${queryId}] Array match found: ${arrayMatch[0].length} chars`);
      try {
        const parsed = JSON.parse(sanitizeJson(arrayMatch[0]));
        if (Array.isArray(parsed)) {
          const txs = parsed.map(tx => ({
            date: String(tx.date || ''),
            counterparty: String(tx.counterparty || tx.description || ''),
            amount: parseAmount(tx.amount),
          }));
          console.log(`  [${queryId}] Parsed ${txs.length} transactions (array match)`);
          return txs;
        }
      } catch (innerErr) {
        console.log(`  [${queryId}] Array parse error: ${(innerErr as Error).message}`);
      }
    } else {
      console.log(`  [${queryId}] No JSON array found in response`);
    }
  }

  console.log(`  [${queryId}] PARSE FAILED`);
  return [];
}

/**
 * Extract transactions (single pass)
 */
async function extractTransactions(markdown: string, docName: string): Promise<ITransaction[]> {
  console.log(`  [${docName}] Extracting...`);
  const txs = await extractTransactionsFromMarkdown(markdown, docName);
  console.log(`  [${docName}] Extracted ${txs.length} transactions`);
  return txs;
}

/**
 * Compare transactions
 */
function compareTransactions(
  extracted: ITransaction[],
  expected: ITransaction[]
): { matches: number; total: number; errors: string[] } {
  const errors: string[] = [];
  let matches = 0;

  for (let i = 0; i < expected.length; i++) {
    const exp = expected[i];
    const ext = extracted[i];

    if (!ext) {
      errors.push(`Missing tx ${i}: ${exp.date} ${exp.counterparty}`);
      continue;
    }

    const dateMatch = ext.date === exp.date;
    const amountMatch = Math.abs(ext.amount - exp.amount) < 0.01;

    if (dateMatch && amountMatch) {
      matches++;
    } else {
      errors.push(`Mismatch ${i}: exp ${exp.date}/${exp.amount}, got ${ext.date}/${ext.amount}`);
    }
  }

  if (extracted.length > expected.length) {
    errors.push(`Extra transactions: ${extracted.length - expected.length}`);
  }

  return { matches, total: expected.length, errors };
}
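
// Note: the comparison is positional (index i against index i) and deliberately
// ignores counterparty, since OCR name spellings vary. Hypothetical illustration:
//   expected[0]  = { date: '2021-06-01', counterparty: 'ACME GMBH', amount: -25.99 }
//   extracted[0] = { date: '2021-06-01', counterparty: 'ACME', amount: -25.99 }
// counts as a match; a single missing row, however, shifts every later index and
// shows up as a run of mismatches.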

/**
 * Find all test cases
 */
function findTestCases(): ITestCase[] {
  const testDir = path.join(process.cwd(), '.nogit');
  if (!fs.existsSync(testDir)) return [];

  const files = fs.readdirSync(testDir);
  const testCases: ITestCase[] = [];

  for (const pdf of files.filter((f: string) => f.endsWith('.pdf'))) {
    const baseName = pdf.replace('.pdf', '');
    const jsonFile = `${baseName}.json`;
    if (files.includes(jsonFile)) {
      testCases.push({
        name: baseName,
        pdfPath: path.join(testDir, pdf),
        jsonPath: path.join(testDir, jsonFile),
      });
    }
  }

  return testCases.sort((a, b) => a.name.localeCompare(b.name));
}

// ============ TESTS ============

const testCases = findTestCases();
console.log(`\nFound ${testCases.length} bank statement test cases\n`);

// Ensure temp directory exists
if (!fs.existsSync(TEMP_MD_DIR)) {
  fs.mkdirSync(TEMP_MD_DIR, { recursive: true });
}

// -------- STAGE 1: OCR with Nanonets --------

// Check if all markdown files already exist
function allMarkdownFilesExist(): boolean {
  for (const tc of testCases) {
    const mdPath = path.join(TEMP_MD_DIR, `${tc.name}.md`);
    if (!fs.existsSync(mdPath)) {
      return false;
    }
  }
  return true;
}

// Track whether we need to run Stage 1
let stage1Needed = !allMarkdownFilesExist();

tap.test('Stage 1: Setup Nanonets', async () => {
  console.log('\n========== STAGE 1: Nanonets OCR ==========\n');

  if (!stage1Needed) {
    console.log('  [SKIP] All markdown files already exist, skipping Nanonets setup');
    return;
  }

  const ok = await ensureNanonetsOcr();
  expect(ok).toBeTrue();
});

tap.test('Stage 1: Convert all documents to markdown', async () => {
  if (!stage1Needed) {
    console.log('  [SKIP] Using existing markdown files from previous run\n');
    // Load existing markdown paths
    for (const tc of testCases) {
      tc.markdownPath = path.join(TEMP_MD_DIR, `${tc.name}.md`);
      console.log(`    Loaded: ${tc.markdownPath}`);
    }
    return;
  }

  console.log('\n  Converting all PDFs to markdown with Nanonets-OCR2-3B...\n');

  for (const tc of testCases) {
    console.log(`\n  === ${tc.name} ===`);

    // Convert PDF to images
    const images = convertPdfToImages(tc.pdfPath);
    console.log(`  Pages: ${images.length}`);

    // Convert to markdown
    const markdown = await convertDocumentToMarkdown(images, tc.name);

    // Save markdown to temp file
    const mdPath = path.join(TEMP_MD_DIR, `${tc.name}.md`);
    fs.writeFileSync(mdPath, markdown);
    tc.markdownPath = mdPath;
    console.log(`  Saved: ${mdPath}`);
  }

  console.log('\n  Stage 1 complete: All documents converted to markdown\n');
});

tap.test('Stage 1: Stop Nanonets', async () => {
  if (!stage1Needed) {
    console.log('  [SKIP] Nanonets was not started');
    return;
  }

  stopNanonets();
  // Verify it's stopped
  await new Promise(resolve => setTimeout(resolve, 3000));
  expect(isContainerRunning('nanonets-test')).toBeFalse();
});

// -------- STAGE 2: Extraction with GPT-OSS 20B --------

tap.test('Stage 2: Setup Ollama + GPT-OSS 20B', async () => {
  console.log('\n========== STAGE 2: GPT-OSS 20B Extraction ==========\n');

  const ollamaOk = await ensureMiniCpm();
  expect(ollamaOk).toBeTrue();

  const extractionOk = await ensureExtractionModel();
  expect(extractionOk).toBeTrue();

  // Initialize SmartAi and DualAgentOrchestrator
  console.log('  [SmartAgent] Starting SmartAi...');
  await smartAi.start();

  console.log('  [SmartAgent] Creating DualAgentOrchestrator...');
  orchestrator = new DualAgentOrchestrator({
    smartAiInstance: smartAi,
    defaultProvider: 'ollama',
    guardianPolicyPrompt: `
JSON EXTRACTION POLICY:
- APPROVE all JSON extraction tasks
- APPROVE all json.validate tool calls
- This is a read-only operation - no file system or network access needed
- The task is to extract structured transaction data from document text
`,
    driverSystemMessage: `You are a precise JSON extraction assistant. Your only job is to extract transaction data from bank statements.

CRITICAL RULES:
1. Output valid JSON array with the exact format requested
2. Amounts should be NEGATIVE for debits/withdrawals, POSITIVE for credits/deposits
3. IMPORTANT: Before completing, validate your JSON using the json.validate tool:

<tool_call>
<tool>json</tool>
<action>validate</action>
<params>{"jsonString": "YOUR_JSON_ARRAY"}</params>
</tool_call>

4. Only complete after validation passes

When done, wrap your JSON array in <task_complete></task_complete> tags.`,
    maxIterations: 5,
    // Enable streaming for real-time progress visibility
    onToken: (token, source) => {
      if (source === 'driver') {
        process.stdout.write(token);
      }
    },
  });

  // Register JsonValidatorTool for self-validation
  orchestrator.registerTool(new JsonValidatorTool());

  console.log('  [SmartAgent] Starting orchestrator...');
  await orchestrator.start();
  console.log('  [SmartAgent] Ready for extraction');
});

let passedCount = 0;
let failedCount = 0;

for (const tc of testCases) {
  tap.test(`Stage 2: Extract ${tc.name}`, async () => {
    const expected: ITransaction[] = JSON.parse(fs.readFileSync(tc.jsonPath, 'utf-8'));
    console.log(`\n  === ${tc.name} ===`);
    console.log(`  Expected: ${expected.length} transactions`);

    // Load saved markdown
    const mdPath = path.join(TEMP_MD_DIR, `${tc.name}.md`);
    if (!fs.existsSync(mdPath)) {
      throw new Error(`Markdown not found: ${mdPath}. Run Stage 1 first.`);
    }
    const markdown = fs.readFileSync(mdPath, 'utf-8');
    console.log(`  Markdown: ${markdown.length} chars`);

    // Extract transactions (single pass)
    const extracted = await extractTransactions(markdown, tc.name);

    // Log results
    console.log(`  Extracted: ${extracted.length} transactions`);
    for (let i = 0; i < Math.min(extracted.length, 5); i++) {
      const tx = extracted[i];
      console.log(`    ${i + 1}. ${tx.date} | ${tx.counterparty.substring(0, 25).padEnd(25)} | ${tx.amount >= 0 ? '+' : ''}${tx.amount.toFixed(2)}`);
    }
    if (extracted.length > 5) {
      console.log(`    ... and ${extracted.length - 5} more`);
    }

    // Compare
    const result = compareTransactions(extracted, expected);
    const pass = result.matches === result.total && extracted.length === expected.length;

    if (pass) {
      passedCount++;
      console.log(`  Result: PASS (${result.matches}/${result.total})`);
    } else {
      failedCount++;
      console.log(`  Result: FAIL (${result.matches}/${result.total})`);
      result.errors.slice(0, 5).forEach(e => console.log(`    - ${e}`));
    }

    expect(result.matches).toEqual(result.total);
    expect(extracted.length).toEqual(expected.length);
  });
}

tap.test('Summary', async () => {
  // Cleanup orchestrator and SmartAi
  if (orchestrator) {
    console.log('\n  [SmartAgent] Stopping orchestrator...');
    await orchestrator.stop();
  }
  console.log('  [SmartAgent] Stopping SmartAi...');
  await smartAi.stop();

  console.log(`\n======================================================`);
  console.log(`  Bank Statement Summary (Nanonets + SmartAgent)`);
  console.log(`======================================================`);
  console.log(`  Stage 1: Nanonets-OCR2-3B (document -> markdown)`);
  console.log(`  Stage 2: GPT-OSS 20B + SmartAgent (markdown -> JSON)`);
  console.log(`  Passed: ${passedCount}/${testCases.length}`);
  console.log(`  Failed: ${failedCount}/${testCases.length}`);
  console.log(`======================================================\n`);

  // Only cleanup temp files if ALL tests passed
  if (failedCount === 0 && passedCount === testCases.length) {
    try {
      fs.rmSync(TEMP_MD_DIR, { recursive: true, force: true });
      console.log(`  Cleaned up temp directory: ${TEMP_MD_DIR}\n`);
    } catch {
      // Ignore
    }
  } else {
    console.log(`  Keeping temp directory for debugging: ${TEMP_MD_DIR}\n`);
  }
});

export default tap.start();
@@ -1,346 +0,0 @@
|
||||
/**
|
||||
* Bank statement extraction test using PaddleOCR-VL Full Pipeline
|
||||
*
|
||||
* This tests the complete PaddleOCR-VL pipeline for bank statements:
|
||||
* 1. PP-DocLayoutV2 for layout detection
|
||||
* 2. PaddleOCR-VL for recognition (tables with proper structure)
|
||||
* 3. Structured Markdown output with tables
|
||||
* 4. MiniCPM extracts transactions from structured tables
|
||||
*
|
||||
* The structured Markdown has properly formatted tables,
|
||||
* making it much easier for MiniCPM to extract transaction data.
|
||||
*/
|
||||
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||
import * as fs from 'fs';
|
||||
import * as path from 'path';
|
||||
import { execSync } from 'child_process';
|
||||
import * as os from 'os';
|
||||
import { ensurePaddleOcrVlFull, ensureMiniCpm } from './helpers/docker.js';
|
||||
|
||||
const PADDLEOCR_VL_URL = 'http://localhost:8000';
|
||||
const OLLAMA_URL = 'http://localhost:11434';
|
||||
const MINICPM_MODEL = 'minicpm-v:latest';
|
||||
|
||||
interface ITransaction {
|
||||
date: string;
|
||||
counterparty: string;
|
||||
amount: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert PDF to PNG images using ImageMagick
|
||||
*/
|
||||
function convertPdfToImages(pdfPath: string): string[] {
|
||||
const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
|
||||
const outputPattern = path.join(tempDir, 'page-%d.png');
|
||||
|
||||
try {
|
||||
execSync(
|
||||
`convert -density 300 -quality 100 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
|
||||
{ stdio: 'pipe' }
|
||||
);
|
||||
|
||||
const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.png')).sort();
|
||||
const images: string[] = [];
|
||||
|
||||
for (const file of files) {
|
||||
const imagePath = path.join(tempDir, file);
|
||||
const imageData = fs.readFileSync(imagePath);
|
||||
images.push(imageData.toString('base64'));
|
||||
}
|
||||
|
||||
return images;
|
||||
} finally {
|
||||
fs.rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse document using PaddleOCR-VL Full Pipeline (returns structured Markdown)
|
||||
*/
|
||||
async function parseDocument(imageBase64: string): Promise<string> {
|
||||
const response = await fetch(`${PADDLEOCR_VL_URL}/parse`, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
image: imageBase64,
|
||||
output_format: 'markdown',
|
||||
}),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const text = await response.text();
|
||||
throw new Error(`PaddleOCR-VL API error: ${response.status} - ${text}`);
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
|
||||
if (!data.success) {
|
||||
throw new Error(`PaddleOCR-VL error: ${data.error}`);
|
||||
}
|
||||
|
||||
return data.result?.markdown || '';
|
||||
}
|
||||

/**
 * Extract transactions from structured Markdown using MiniCPM
 */
async function extractTransactionsFromMarkdown(markdown: string): Promise<ITransaction[]> {
  console.log(` [Extract] Processing ${markdown.length} chars of Markdown`);

  const prompt = `/nothink
Convert this bank statement to a JSON array of transactions.

Read the Amount values carefully:
- "- 21,47 €" means DEBIT, output as: -21.47
- "+ 1.000,00 €" means CREDIT, output as: 1000.00
- European format: comma = decimal point, dot = thousands

For each transaction output: {"date":"YYYY-MM-DD","counterparty":"NAME","amount":-21.47}

Return ONLY the JSON array, no explanation.

Document:
${markdown}`;

  const payload = {
    model: MINICPM_MODEL,
    prompt,
    stream: true,
    options: {
      num_predict: 16384,
      temperature: 0.1,
    },
  };

  const response = await fetch(`${OLLAMA_URL}/api/generate`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload),
  });

  if (!response.ok) {
    throw new Error(`Ollama API error: ${response.status}`);
  }

  const reader = response.body?.getReader();
  if (!reader) {
    throw new Error('No response body');
  }

  const decoder = new TextDecoder();
  let fullText = '';

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;

    const chunk = decoder.decode(value, { stream: true });
    const lines = chunk.split('\n').filter((l) => l.trim());

    for (const line of lines) {
      try {
        const json = JSON.parse(line);
        if (json.response) {
          fullText += json.response;
        }
      } catch {
        // Skip invalid JSON lines
      }
    }
  }

  // Extract JSON array from response
  const startIdx = fullText.indexOf('[');
  const endIdx = fullText.lastIndexOf(']') + 1;

  if (startIdx < 0 || endIdx <= startIdx) {
    throw new Error(`No JSON array found in response: ${fullText.substring(0, 200)}`);
  }

  const jsonStr = fullText.substring(startIdx, endIdx);
  return JSON.parse(jsonStr);
}
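// Note: Ollama's /api/generate streams NDJSON - one JSON object per line, each carrying
// a partial completion in its `response` field (illustrative line: {"response":"[{","done":false}),
// which is why the reader above concatenates json.response chunks until the stream ends.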

/**
 * Extract transactions from all pages of a bank statement
 */
async function extractAllTransactions(images: string[]): Promise<ITransaction[]> {
  const allTransactions: ITransaction[] = [];

  for (let i = 0; i < images.length; i++) {
    console.log(` Processing page ${i + 1}/${images.length}...`);

    // Parse with full pipeline
    const markdown = await parseDocument(images[i]);
    console.log(` [Parse] Got ${markdown.split('\n').length} lines of Markdown`);

    // Extract transactions
    try {
      const transactions = await extractTransactionsFromMarkdown(markdown);
      console.log(` [Extracted] ${transactions.length} transactions`);
      allTransactions.push(...transactions);
    } catch (err) {
      console.log(` [Error] ${err}`);
    }
  }

  return allTransactions;
}

/**
 * Compare transactions - find a matching transaction in the expected list
 */
function findMatchingTransaction(
  tx: ITransaction,
  expectedList: ITransaction[]
): ITransaction | undefined {
  return expectedList.find((exp) => {
    const dateMatch = tx.date === exp.date;
    const amountMatch = Math.abs(tx.amount - exp.amount) < 0.02;
    // Guard against missing names so includes() always receives a string
    const txName = (tx.counterparty ?? '').toLowerCase();
    const expName = (exp.counterparty ?? '').toLowerCase();
    const counterpartyMatch =
      txName.includes(expName.slice(0, 10)) || expName.includes(txName.slice(0, 10));
    return dateMatch && amountMatch && counterpartyMatch;
  });
}

/**
 * Calculate extraction accuracy
 */
function calculateAccuracy(
  extracted: ITransaction[],
  expected: ITransaction[]
): { matched: number; total: number; accuracy: number } {
  let matched = 0;
  const usedExpected = new Set<number>();

  for (const tx of extracted) {
    for (let i = 0; i < expected.length; i++) {
      if (usedExpected.has(i)) continue;

      const exp = expected[i];
      const dateMatch = tx.date === exp.date;
      const amountMatch = Math.abs(tx.amount - exp.amount) < 0.02;

      if (dateMatch && amountMatch) {
        matched++;
        usedExpected.add(i);
        break;
      }
    }
  }

  return {
    matched,
    total: expected.length,
    accuracy: expected.length > 0 ? (matched / expected.length) * 100 : 0,
  };
}
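// Worked example of the matching rule: each expected row can be consumed at most once
// (usedExpected), so if the model emits the same transaction twice, only one copy counts.
// E.g. extracted = [A, A], expected = [A] -> matched 1/1, not 2/1.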

/**
 * Find all test cases (PDF + JSON pairs) in .nogit/bankstatements/
 */
function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: string }> {
  const testDir = path.join(process.cwd(), '.nogit/bankstatements');
  if (!fs.existsSync(testDir)) {
    return [];
  }

  const files = fs.readdirSync(testDir);
  const pdfFiles = files.filter((f) => f.endsWith('.pdf'));
  const testCases: Array<{ name: string; pdfPath: string; jsonPath: string }> = [];

  for (const pdf of pdfFiles) {
    const baseName = pdf.replace('.pdf', '');
    const jsonFile = `${baseName}.json`;
    if (files.includes(jsonFile)) {
      testCases.push({
        name: baseName,
        pdfPath: path.join(testDir, pdf),
        jsonPath: path.join(testDir, jsonFile),
      });
    }
  }

  testCases.sort((a, b) => a.name.localeCompare(b.name));
  return testCases;
}

// Tests

tap.test('setup: ensure Docker containers are running', async () => {
  console.log('\n[Setup] Checking Docker containers...\n');

  // Ensure PaddleOCR-VL Full Pipeline is running
  const paddleOk = await ensurePaddleOcrVlFull();
  expect(paddleOk).toBeTrue();

  // Ensure MiniCPM is running (for field extraction from Markdown)
  const minicpmOk = await ensureMiniCpm();
  expect(minicpmOk).toBeTrue();

  console.log('\n[Setup] All containers ready!\n');
});

// Dynamic test for each PDF/JSON pair
const testCases = findTestCases();
console.log(`\nFound ${testCases.length} bank statement test cases (PaddleOCR-VL Full Pipeline)\n`);

const results: Array<{ name: string; accuracy: number; matched: number; total: number }> = [];

for (const testCase of testCases) {
  tap.test(`should extract bank statement: ${testCase.name}`, async () => {
    // Load expected data
    const expected: ITransaction[] = JSON.parse(fs.readFileSync(testCase.jsonPath, 'utf-8'));
    console.log(`\n=== ${testCase.name} ===`);
    console.log(`Expected: ${expected.length} transactions`);

    const startTime = Date.now();

    // Convert PDF to images
    const images = convertPdfToImages(testCase.pdfPath);
    console.log(` Pages: ${images.length}`);

    // Extract all transactions
    const extracted = await extractAllTransactions(images);

    const endTime = Date.now();
    const elapsedMs = endTime - startTime;

    // Calculate accuracy
    const accuracy = calculateAccuracy(extracted, expected);
    results.push({
      name: testCase.name,
      accuracy: accuracy.accuracy,
      matched: accuracy.matched,
      total: accuracy.total,
    });

    console.log(` Extracted: ${extracted.length} transactions`);
    console.log(` Matched: ${accuracy.matched}/${accuracy.total} (${accuracy.accuracy.toFixed(1)}%)`);
    console.log(` Time: ${(elapsedMs / 1000).toFixed(1)}s`);

    // We expect at least 50% accuracy
    expect(accuracy.accuracy).toBeGreaterThan(50);
  });
}

tap.test('summary', async () => {
  const totalStatements = results.length;
  const avgAccuracy =
    results.length > 0 ? results.reduce((a, b) => a + b.accuracy, 0) / results.length : 0;
  const totalMatched = results.reduce((a, b) => a + b.matched, 0);
  const totalExpected = results.reduce((a, b) => a + b.total, 0);

  console.log(`\n======================================================`);
  console.log(` Bank Statement Extraction Summary (PaddleOCR-VL Full)`);
  console.log(`======================================================`);
  console.log(` Method: PaddleOCR-VL Full Pipeline -> MiniCPM`);
  console.log(` Statements: ${totalStatements}`);
  console.log(` Transactions: ${totalMatched}/${totalExpected} matched`);
  console.log(` Avg accuracy: ${avgAccuracy.toFixed(1)}%`);
  console.log(`======================================================\n`);
});

export default tap.start();
345
test/test.bankstatements.qwen3vl.ts
Normal file
@@ -0,0 +1,345 @@
/**
 * Bank statement extraction using Qwen3-VL 8B Vision (Direct)
 *
 * Multi-query approach:
 * 1. First ask how many transactions on each page
 * 2. Then query each transaction individually
 * Single pass, no consensus voting.
 */
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensureMiniCpm } from './helpers/docker.js';

const OLLAMA_URL = 'http://localhost:11434';
const VISION_MODEL = 'qwen3-vl:8b';

interface ITransaction {
  date: string;
  counterparty: string;
  amount: number;
}

/**
 * Convert PDF to PNG images
 */
function convertPdfToImages(pdfPath: string): string[] {
  const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
  const outputPattern = path.join(tempDir, 'page-%d.png');

  try {
    execSync(
      `convert -density 150 -quality 90 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
      { stdio: 'pipe' }
    );

    const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.png')).sort();
    const images: string[] = [];

    for (const file of files) {
      const imagePath = path.join(tempDir, file);
      const imageData = fs.readFileSync(imagePath);
      images.push(imageData.toString('base64'));
    }

    return images;
  } finally {
    fs.rmSync(tempDir, { recursive: true, force: true });
  }
}

/**
 * Query Qwen3-VL with a simple prompt
 */
async function queryVision(image: string, prompt: string): Promise<string> {
  const response = await fetch(`${OLLAMA_URL}/api/chat`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: VISION_MODEL,
      messages: [{
        role: 'user',
        content: prompt,
        images: [image],
      }],
      stream: false,
      options: {
        num_predict: 500,
        temperature: 0.1,
      },
    }),
  });

  if (!response.ok) {
    throw new Error(`Ollama API error: ${response.status}`);
  }

  const data = await response.json();
  return (data.message?.content || '').trim();
}

/**
 * Count transactions on a page
 */
async function countTransactions(image: string, pageNum: number): Promise<number> {
  const response = await queryVision(image,
    `How many transaction rows are in this bank statement table?
Count only the data rows (with dates like "01.01.2024" and amounts like "- 50,00 €").
Do NOT count the header row or summary/total rows.
Answer with just the number, for example: 7`
  );

  console.log(` [Page ${pageNum}] Count query response: "${response}"`);
  const match = response.match(/(\d+)/);
  const count = match ? parseInt(match[1], 10) : 0;
  console.log(` [Page ${pageNum}] Parsed count: ${count}`);
  return count;
}
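// Parsing sketch: response.match(/(\d+)/) takes the FIRST number in the reply, so a clean
// answer like "There are 7 transactions." yields 7, but a preamble such as "Page 2 of 3
// lists 7 rows" would yield 2 - hence the prompt's "Answer with just the number".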

/**
 * Get a single transaction by index (logs immediately when complete)
 */
async function getTransaction(image: string, index: number, pageNum: number): Promise<ITransaction | null> {
  const response = await queryVision(image,
    `This is a bank statement. Look at transaction row #${index} in the table (counting from top, excluding headers).

Extract this transaction's details:
- Date in YYYY-MM-DD format
- Counterparty/description name
- Amount as number (negative for debits like "- 21,47 €" = -21.47, positive for credits like "+ 100,00 €" = 100.00)

Answer in format: DATE|COUNTERPARTY|AMOUNT
Example: 2024-01-15|Amazon|−25.99`
  );

  // Parse the response
  const lines = response.split('\n').filter(l => l.includes('|'));
  const line = lines[lines.length - 1] || response;
  const parts = line.split('|').map(p => p.trim());

  if (parts.length >= 3) {
    // Parse amount - handle various formats
    let amountStr = parts[2].replace(/[€$£\s]/g, '').replace('−', '-').replace('–', '-');
    // European format: comma is decimal
    if (amountStr.includes(',')) {
      amountStr = amountStr.replace(/\./g, '').replace(',', '.');
    }
    const amount = parseFloat(amountStr) || 0;

    const tx = {
      date: parts[0],
      counterparty: parts[1],
      amount: amount,
    };
    // Log immediately as this transaction completes
    console.log(` [P${pageNum} Tx${index.toString().padStart(2, ' ')}] ${tx.date} | ${tx.counterparty.substring(0, 25).padEnd(25)} | ${tx.amount >= 0 ? '+' : ''}${tx.amount.toFixed(2)}`);
    return tx;
  }

  // Log raw response on parse failure
  console.log(` [P${pageNum} Tx${index.toString().padStart(2, ' ')}] PARSE FAILED: "${response.replace(/\n/g, ' ').substring(0, 60)}..."`);
  return null;
}
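// Amount normalization, step by step (illustrative): "- 1.234,56 €"
//   -> strip currency symbols/whitespace and unify Unicode minus signs: "-1.234,56"
//   -> European comma-decimal detected: drop dots, comma becomes dot: "-1234.56"
//   -> parseFloat: -1234.56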

/**
 * Extract transactions from a single page using the multi-query approach
 */
async function extractTransactionsFromPage(image: string, pageNum: number): Promise<ITransaction[]> {
  // Step 1: Count transactions
  const count = await countTransactions(image, pageNum);

  if (count === 0) {
    return [];
  }

  // Step 2: Query each transaction (in batches, to avoid overwhelming the model server)
  // Each transaction logs itself as it completes
  const transactions: ITransaction[] = [];
  const batchSize = 5;

  for (let start = 1; start <= count; start += batchSize) {
    const end = Math.min(start + batchSize - 1, count);
    const indices = Array.from({ length: end - start + 1 }, (_, i) => start + i);

    // Query batch in parallel - each logs as it completes
    const results = await Promise.all(
      indices.map(i => getTransaction(image, i, pageNum))
    );

    for (const tx of results) {
      if (tx) {
        transactions.push(tx);
      }
    }
  }

  console.log(` [Page ${pageNum}] Complete: ${transactions.length}/${count} extracted`);
  return transactions;
}
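// Batching sketch: with count = 12 and batchSize = 5, the loop issues the index batches
// [1..5], [6..10], [11..12], running each batch's getTransaction calls in parallel.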

/**
 * Extract all transactions from bank statement
 */
async function extractTransactions(images: string[]): Promise<ITransaction[]> {
  console.log(` [Vision] Processing ${images.length} page(s) with Qwen3-VL (multi-query)`);

  const allTransactions: ITransaction[] = [];

  for (let i = 0; i < images.length; i++) {
    const pageTransactions = await extractTransactionsFromPage(images[i], i + 1);
    allTransactions.push(...pageTransactions);
  }

  console.log(` [Vision] Total: ${allTransactions.length} transactions`);
  return allTransactions;
}

/**
 * Compare transactions
 */
function compareTransactions(
  extracted: ITransaction[],
  expected: ITransaction[]
): { matches: number; total: number; errors: string[] } {
  const errors: string[] = [];
  let matches = 0;

  for (let i = 0; i < expected.length; i++) {
    const exp = expected[i];
    const ext = extracted[i];

    if (!ext) {
      errors.push(`Missing transaction ${i}: ${exp.date} ${exp.counterparty}`);
      continue;
    }

    const dateMatch = ext.date === exp.date;
    const amountMatch = Math.abs(ext.amount - exp.amount) < 0.01;

    if (dateMatch && amountMatch) {
      matches++;
    } else {
      errors.push(`Mismatch at ${i}: expected ${exp.date}/${exp.amount}, got ${ext.date}/${ext.amount}`);
    }
  }

  if (extracted.length > expected.length) {
    errors.push(`Extra transactions: ${extracted.length - expected.length}`);
  }

  return { matches, total: expected.length, errors };
}

/**
 * Find test cases in .nogit/
 */
function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: string }> {
  const testDir = path.join(process.cwd(), '.nogit');
  if (!fs.existsSync(testDir)) return [];

  const files = fs.readdirSync(testDir);
  const testCases: Array<{ name: string; pdfPath: string; jsonPath: string }> = [];

  for (const pdf of files.filter((f: string) => f.endsWith('.pdf'))) {
    const baseName = pdf.replace('.pdf', '');
    const jsonFile = `${baseName}.json`;
    if (files.includes(jsonFile)) {
      testCases.push({
        name: baseName,
        pdfPath: path.join(testDir, pdf),
        jsonPath: path.join(testDir, jsonFile),
      });
    }
  }

  return testCases.sort((a, b) => a.name.localeCompare(b.name));
}

/**
 * Ensure Qwen3-VL model is available
 */
async function ensureQwen3Vl(): Promise<boolean> {
  try {
    const response = await fetch(`${OLLAMA_URL}/api/tags`);
    if (response.ok) {
      const data = await response.json();
      const models = data.models || [];
      if (models.some((m: { name: string }) => m.name === VISION_MODEL)) {
        console.log(`[Ollama] Model available: ${VISION_MODEL}`);
        return true;
      }
    }
  } catch {
    return false;
  }

  console.log(`[Ollama] Pulling ${VISION_MODEL}...`);
  const pullResponse = await fetch(`${OLLAMA_URL}/api/pull`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ name: VISION_MODEL, stream: false }),
  });

  return pullResponse.ok;
}

// Tests

tap.test('setup: ensure Qwen3-VL is running', async () => {
  console.log('\n[Setup] Checking Qwen3-VL 8B...\n');
  const ollamaOk = await ensureMiniCpm();
  expect(ollamaOk).toBeTrue();
  const visionOk = await ensureQwen3Vl();
  expect(visionOk).toBeTrue();
  console.log('\n[Setup] Ready!\n');
});

const testCases = findTestCases();
console.log(`\nFound ${testCases.length} bank statement test cases (Qwen3-VL)\n`);

let passedCount = 0;
let failedCount = 0;

for (const testCase of testCases) {
  tap.test(`should extract: ${testCase.name}`, async () => {
    const expected: ITransaction[] = JSON.parse(fs.readFileSync(testCase.jsonPath, 'utf-8'));
    console.log(`\n=== ${testCase.name} ===`);
    console.log(`Expected: ${expected.length} transactions`);

    const images = convertPdfToImages(testCase.pdfPath);
    console.log(` Pages: ${images.length}`);

    const extracted = await extractTransactions(images);
    console.log(` Extracted: ${extracted.length} transactions`);

    const result = compareTransactions(extracted, expected);
    const accuracy = result.total > 0 ? result.matches / result.total : 0;

    if (accuracy >= 0.95 && extracted.length === expected.length) {
      passedCount++;
      console.log(` Result: PASS (${result.matches}/${result.total})`);
    } else {
      failedCount++;
      console.log(` Result: FAIL (${result.matches}/${result.total})`);
      result.errors.slice(0, 5).forEach((e) => console.log(`   - ${e}`));
    }

    expect(accuracy).toBeGreaterThan(0.95);
    expect(extracted.length).toEqual(expected.length);
  });
}

tap.test('summary', async () => {
  const total = testCases.length;
  console.log(`\n======================================================`);
  console.log(` Bank Statement Summary (Qwen3-VL Vision)`);
  console.log(`======================================================`);
  console.log(` Method: Multi-query (count then extract each)`);
  console.log(` Passed: ${passedCount}/${total}`);
  console.log(` Failed: ${failedCount}/${total}`);
  console.log(`======================================================\n`);
});

export default tap.start();
@@ -1,455 +0,0 @@
/**
 * Invoice extraction test using MiniCPM-V (visual) + PaddleOCR-VL (OCR augmentation)
 *
 * This is the combined approach that uses both models for best accuracy:
 * - MiniCPM-V for visual understanding
 * - PaddleOCR-VL for OCR text to augment prompts
 */
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensurePaddleOcrVl, ensureMiniCpm } from './helpers/docker.js';

const OLLAMA_URL = 'http://localhost:11434';
const MODEL = 'minicpm-v:latest';
const PADDLEOCR_VL_URL = 'http://localhost:8000';

interface IInvoice {
  invoice_number: string;
  invoice_date: string;
  vendor_name: string;
  currency: string;
  net_amount: number;
  vat_amount: number;
  total_amount: number;
}

/**
 * Extract OCR text from an image using PaddleOCR-VL (OpenAI-compatible API)
 */
async function extractOcrText(imageBase64: string): Promise<string> {
  try {
    const response = await fetch(`${PADDLEOCR_VL_URL}/v1/chat/completions`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        model: 'paddleocr-vl',
        messages: [{
          role: 'user',
          content: [
            { type: 'image_url', image_url: { url: `data:image/png;base64,${imageBase64}` } },
            { type: 'text', text: 'OCR:' }
          ]
        }],
        temperature: 0.0,
        max_tokens: 4096
      }),
    });

    if (!response.ok) return '';

    const data = await response.json();
    return data.choices?.[0]?.message?.content || '';
  } catch {
    // PaddleOCR-VL unavailable
  }
  return '';
}

/**
 * Build prompt with optional OCR text
 */
function buildPrompt(ocrText: string): string {
  const base = `/nothink
You are an invoice parser. Extract the following fields from this invoice:

1. invoice_number: The invoice/receipt number
2. invoice_date: Date in YYYY-MM-DD format
3. vendor_name: Company that issued the invoice
4. currency: EUR, USD, etc.
5. net_amount: Amount before tax (if shown)
6. vat_amount: Tax/VAT amount (if shown, 0 if reverse charge or no tax)
7. total_amount: Final amount due

Return ONLY valid JSON in this exact format:
{"invoice_number":"XXX","invoice_date":"YYYY-MM-DD","vendor_name":"Company Name","currency":"EUR","net_amount":100.00,"vat_amount":19.00,"total_amount":119.00}

If a field is not visible, use null for strings or 0 for numbers.
No explanation, just the JSON object.`;

  if (ocrText) {
    // Limit OCR text to prevent context overflow
    const maxOcrLength = 4000;
    const truncatedOcr = ocrText.length > maxOcrLength
      ? ocrText.substring(0, maxOcrLength) + '\n... (truncated)'
      : ocrText;

    return `${base}

OCR text extracted from the invoice (use for reference):
---
${truncatedOcr}
---

Cross-reference the image with the OCR text above for accuracy.`;
  }
  return base;
}

/**
 * Convert PDF to PNG images using ImageMagick
 */
function convertPdfToImages(pdfPath: string): string[] {
  const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
  const outputPattern = path.join(tempDir, 'page-%d.png');

  try {
    execSync(
      `convert -density 200 -quality 90 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
      { stdio: 'pipe' }
    );

    const files = fs.readdirSync(tempDir).filter((f) => f.endsWith('.png')).sort();
    const images: string[] = [];

    for (const file of files) {
      const imagePath = path.join(tempDir, file);
      const imageData = fs.readFileSync(imagePath);
      images.push(imageData.toString('base64'));
    }

    return images;
  } finally {
    fs.rmSync(tempDir, { recursive: true, force: true });
  }
}

/**
 * Single extraction pass
 */
async function extractOnce(images: string[], passNum: number, ocrText: string = ''): Promise<IInvoice> {
  const payload = {
    model: MODEL,
    prompt: buildPrompt(ocrText),
    images,
    stream: true,
    options: {
      num_predict: 2048,
      temperature: 0.1,
    },
  };

  const response = await fetch(`${OLLAMA_URL}/api/generate`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload),
  });

  if (!response.ok) {
    throw new Error(`Ollama API error: ${response.status}`);
  }

  const reader = response.body?.getReader();
  if (!reader) {
    throw new Error('No response body');
  }

  const decoder = new TextDecoder();
  let fullText = '';

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;

    const chunk = decoder.decode(value, { stream: true });
    const lines = chunk.split('\n').filter((l) => l.trim());

    for (const line of lines) {
      try {
        const json = JSON.parse(line);
        if (json.response) {
          fullText += json.response;
        }
      } catch {
        // Skip invalid JSON lines
      }
    }
  }

  // Extract JSON from response
  const startIdx = fullText.indexOf('{');
  const endIdx = fullText.lastIndexOf('}') + 1;

  if (startIdx < 0 || endIdx <= startIdx) {
    throw new Error(`No JSON object found in response: ${fullText.substring(0, 200)}`);
  }

  const jsonStr = fullText.substring(startIdx, endIdx);
  return JSON.parse(jsonStr);
}

/**
 * Create a hash of invoice for comparison (using key fields)
 */
function hashInvoice(invoice: IInvoice): string {
  return `${invoice.invoice_number}|${invoice.invoice_date}|${invoice.total_amount.toFixed(2)}`;
}
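// Example hash (hypothetical values): hashInvoice({ invoice_number: 'R0015632540',
// invoice_date: '2022-04-14', total_amount: 119, ... }) === 'R0015632540|2022-04-14|119.00'.
// Two passes "agree" for consensus purposes exactly when these three key fields agree.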

/**
 * Extract with majority voting - run until 2 passes match
 * Optimization: Run Pass 1, OCR, and Pass 2 (after OCR) in parallel
 */
async function extractWithConsensus(images: string[], invoiceName: string, maxPasses: number = 5): Promise<IInvoice> {
  const results: Array<{ invoice: IInvoice; hash: string }> = [];
  const hashCounts: Map<string, number> = new Map();

  const addResult = (invoice: IInvoice, passLabel: string): number => {
    const hash = hashInvoice(invoice);
    results.push({ invoice, hash });
    hashCounts.set(hash, (hashCounts.get(hash) || 0) + 1);
    console.log(` [${passLabel}] ${invoice.invoice_number} | ${invoice.invoice_date} | ${invoice.total_amount} ${invoice.currency}`);
    return hashCounts.get(hash)!;
  };

  // OPTIMIZATION: Run Pass 1 (no OCR) in parallel with OCR -> Pass 2 (with OCR)
  let ocrText = '';
  const pass1Promise = extractOnce(images, 1, '').catch((err) => ({ error: err }));

  // OCR, then immediately Pass 2
  const ocrThenPass2Promise = (async () => {
    ocrText = await extractOcrText(images[0]);
    if (ocrText) {
      console.log(` [OCR] Extracted ${ocrText.split('\n').length} text lines`);
    }
    return extractOnce(images, 2, ocrText).catch((err) => ({ error: err }));
  })();

  // Wait for both to complete
  const [pass1Result, pass2Result] = await Promise.all([pass1Promise, ocrThenPass2Promise]);

  // Process Pass 1 result
  if ('error' in pass1Result) {
    console.log(` [Pass 1] Error: ${(pass1Result as { error: unknown }).error}`);
  } else {
    const count = addResult(pass1Result as IInvoice, 'Pass 1');
    if (count >= 2) {
      console.log(` [Consensus] Reached after parallel passes`);
      return pass1Result as IInvoice;
    }
  }

  // Process Pass 2 result
  if ('error' in pass2Result) {
    console.log(` [Pass 2+OCR] Error: ${(pass2Result as { error: unknown }).error}`);
  } else {
    const count = addResult(pass2Result as IInvoice, 'Pass 2+OCR');
    if (count >= 2) {
      console.log(` [Consensus] Reached after parallel passes`);
      return pass2Result as IInvoice;
    }
  }

  // Continue with passes 3+ using OCR text if no consensus yet
  for (let pass = 3; pass <= maxPasses; pass++) {
    try {
      const invoice = await extractOnce(images, pass, ocrText);
      const count = addResult(invoice, `Pass ${pass}+OCR`);

      if (count >= 2) {
        console.log(` [Consensus] Reached after ${pass} passes`);
        return invoice;
      }
    } catch (err) {
      console.log(` [Pass ${pass}] Error: ${err}`);
    }
  }

  // No consensus reached - return the most common result
  let bestHash = '';
  let bestCount = 0;
  for (const [hash, count] of hashCounts) {
    if (count > bestCount) {
      bestCount = count;
      bestHash = hash;
    }
  }

  if (!bestHash) {
    throw new Error(`No valid results for ${invoiceName}`);
  }

  const best = results.find((r) => r.hash === bestHash)!;
  console.log(` [No consensus] Using most common result (${bestCount}/${maxPasses} passes)`);
  return best.invoice;
}
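// Timing sketch (illustrative): run sequentially, the first two passes would cost
// t(pass1) + t(ocr) + t(pass2); run as above they cost max(t(pass1), t(ocr) + t(pass2)),
// so a consensus hit on the first two passes saves roughly a full pass of latency.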

/**
 * Compare extracted invoice against expected
 */
function compareInvoice(
  extracted: IInvoice,
  expected: IInvoice
): { match: boolean; errors: string[] } {
  const errors: string[] = [];

  // Compare invoice number (normalize by removing spaces and case)
  const extNum = extracted.invoice_number?.replace(/\s/g, '').toLowerCase() || '';
  const expNum = expected.invoice_number?.replace(/\s/g, '').toLowerCase() || '';
  if (extNum !== expNum) {
    errors.push(`invoice_number: expected "${expected.invoice_number}", got "${extracted.invoice_number}"`);
  }

  // Compare date
  if (extracted.invoice_date !== expected.invoice_date) {
    errors.push(`invoice_date: expected "${expected.invoice_date}", got "${extracted.invoice_date}"`);
  }

  // Compare total amount (with tolerance)
  if (Math.abs(extracted.total_amount - expected.total_amount) > 0.02) {
    errors.push(`total_amount: expected ${expected.total_amount}, got ${extracted.total_amount}`);
  }

  // Compare currency
  if (extracted.currency?.toUpperCase() !== expected.currency?.toUpperCase()) {
    errors.push(`currency: expected "${expected.currency}", got "${extracted.currency}"`);
  }

  return { match: errors.length === 0, errors };
}

/**
 * Find all test cases (PDF + JSON pairs) in .nogit/invoices/
 * Priority invoices (like vodafone) run first for quick feedback
 */
function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: string }> {
  const testDir = path.join(process.cwd(), '.nogit/invoices');
  if (!fs.existsSync(testDir)) {
    return [];
  }

  const files = fs.readdirSync(testDir);
  const pdfFiles = files.filter((f) => f.endsWith('.pdf'));
  const testCases: Array<{ name: string; pdfPath: string; jsonPath: string }> = [];

  for (const pdf of pdfFiles) {
    const baseName = pdf.replace('.pdf', '');
    const jsonFile = `${baseName}.json`;
    if (files.includes(jsonFile)) {
      testCases.push({
        name: baseName,
        pdfPath: path.join(testDir, pdf),
        jsonPath: path.join(testDir, jsonFile),
      });
    }
  }

  // Sort with priority invoices first, then alphabetically
  const priorityPrefixes = ['vodafone'];
  testCases.sort((a, b) => {
    const aPriority = priorityPrefixes.findIndex((p) => a.name.startsWith(p));
    const bPriority = priorityPrefixes.findIndex((p) => b.name.startsWith(p));

    // Both have priority - sort by priority order
    if (aPriority >= 0 && bPriority >= 0) return aPriority - bPriority;
    // Only a has priority - a comes first
    if (aPriority >= 0) return -1;
    // Only b has priority - b comes first
    if (bPriority >= 0) return 1;
    // Neither has priority - alphabetical
    return a.name.localeCompare(b.name);
  });

  return testCases;
}

// Tests

tap.test('setup: ensure Docker containers are running', async () => {
  console.log('\n[Setup] Checking Docker containers...\n');

  // Ensure PaddleOCR-VL is running (auto-detects GPU/CPU)
  const paddleOk = await ensurePaddleOcrVl();
  expect(paddleOk).toBeTrue();

  // Ensure MiniCPM is running
  const minicpmOk = await ensureMiniCpm();
  expect(minicpmOk).toBeTrue();

  console.log('\n[Setup] All containers ready!\n');
});

tap.test('should have MiniCPM-V 4.5 model loaded', async () => {
  const response = await fetch(`${OLLAMA_URL}/api/tags`);
  const data = await response.json();
  const modelNames = data.models.map((m: { name: string }) => m.name);
  expect(modelNames.some((name: string) => name.includes('minicpm-v4.5'))).toBeTrue();
});

// Dynamic test for each PDF/JSON pair
const testCases = findTestCases();
console.log(`\nFound ${testCases.length} invoice test cases\n`);

let passedCount = 0;
let failedCount = 0;
const processingTimes: number[] = [];

for (const testCase of testCases) {
  tap.test(`should extract invoice: ${testCase.name}`, async () => {
    // Load expected data
    const expected: IInvoice = JSON.parse(fs.readFileSync(testCase.jsonPath, 'utf-8'));
    console.log(`\n=== ${testCase.name} ===`);
    console.log(`Expected: ${expected.invoice_number} | ${expected.invoice_date} | ${expected.total_amount} ${expected.currency}`);

    const startTime = Date.now();

    // Convert PDF to images
    const images = convertPdfToImages(testCase.pdfPath);
    console.log(` Pages: ${images.length}`);

    // Extract with consensus voting
    const extracted = await extractWithConsensus(images, testCase.name);

    const endTime = Date.now();
    const elapsedMs = endTime - startTime;
    processingTimes.push(elapsedMs);

    // Compare results
    const result = compareInvoice(extracted, expected);

    if (result.match) {
      passedCount++;
      console.log(` Result: MATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
    } else {
      failedCount++;
      console.log(` Result: MISMATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
      result.errors.forEach((e) => console.log(`   - ${e}`));
    }

    // Assert match
    expect(result.match).toBeTrue();
  });
}

tap.test('summary', async () => {
  const totalInvoices = testCases.length;
  const accuracy = totalInvoices > 0 ? (passedCount / totalInvoices) * 100 : 0;
  const totalTimeMs = processingTimes.reduce((a, b) => a + b, 0);
  const avgTimeMs = processingTimes.length > 0 ? totalTimeMs / processingTimes.length : 0;
  const avgTimeSec = avgTimeMs / 1000;
  const totalTimeSec = totalTimeMs / 1000;

  console.log(`\n========================================`);
  console.log(` Invoice Extraction Summary`);
  console.log(`========================================`);
  console.log(` Passed: ${passedCount}/${totalInvoices}`);
  console.log(` Failed: ${failedCount}/${totalInvoices}`);
  console.log(` Accuracy: ${accuracy.toFixed(1)}%`);
  console.log(`----------------------------------------`);
  console.log(` Total time: ${totalTimeSec.toFixed(1)}s`);
  console.log(` Avg per inv: ${avgTimeSec.toFixed(1)}s`);
  console.log(`========================================\n`);
});

export default tap.start();
440
test/test.invoices.extraction.ts
Normal file
@@ -0,0 +1,440 @@
/**
 * Invoice extraction tuning - uses pre-generated markdown files
 *
 * Skips OCR stage, only runs GPT-OSS extraction on existing .debug.md files.
 * Use this to quickly iterate on extraction prompts and logic.
 *
 * Run with: tstest test/test.invoices.extraction.ts --verbose
 */
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { ensureMiniCpm } from './helpers/docker.js';

const OLLAMA_URL = 'http://localhost:11434';
const EXTRACTION_MODEL = 'gpt-oss:20b';

// Test these specific invoices (must have .debug.md files)
const TEST_INVOICES = [
  'consensus_2021-09',
  'hetzner_2022-04',
  'qonto_2021-08',
  'qonto_2021-09',
];

interface IInvoice {
  invoice_number: string;
  invoice_date: string;
  vendor_name: string;
  currency: string;
  net_amount: number;
  vat_amount: number;
  total_amount: number;
}

interface ITestCase {
  name: string;
  markdownPath: string;
  jsonPath: string;
}

// JSON extraction prompt for GPT-OSS 20B (sent AFTER the invoice text is provided)
const JSON_EXTRACTION_PROMPT = `Extract key fields from the invoice. Return ONLY valid JSON.

WHERE TO FIND DATA:
- invoice_number, invoice_date, vendor_name: Look in the HEADER section at the TOP of PAGE 1 (near "Invoice no.", "Invoice date:", "Rechnungsnummer")
- net_amount, vat_amount, total_amount: Look in the SUMMARY section at the BOTTOM (look for "Total", "Amount due", "Gesamtbetrag")

RULES:
1. invoice_number: Extract ONLY the value (e.g., "R0015632540"), NOT the label "Invoice no."
2. invoice_date: Convert to YYYY-MM-DD format (e.g., "14/04/2022" → "2022-04-14")
3. vendor_name: The company issuing the invoice
4. currency: EUR, USD, or GBP
5. net_amount: Total before tax
6. vat_amount: Tax amount
7. total_amount: Final total with tax

JSON only:
{"invoice_number":"X","invoice_date":"YYYY-MM-DD","vendor_name":"X","currency":"EUR","net_amount":0,"vat_amount":0,"total_amount":0}`;

/**
 * Ensure GPT-OSS 20B model is available
 */
async function ensureExtractionModel(): Promise<boolean> {
  try {
    const response = await fetch(`${OLLAMA_URL}/api/tags`);
    if (response.ok) {
      const data = await response.json();
      const models = data.models || [];
      if (models.some((m: { name: string }) => m.name === EXTRACTION_MODEL)) {
        console.log(` [Ollama] Model available: ${EXTRACTION_MODEL}`);
        return true;
      }
    }
  } catch {
    return false;
  }

  console.log(` [Ollama] Pulling ${EXTRACTION_MODEL}...`);
  const pullResponse = await fetch(`${OLLAMA_URL}/api/pull`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ name: EXTRACTION_MODEL, stream: false }),
  });

  return pullResponse.ok;
}

/**
 * Parse amount from string (handles European format)
 */
function parseAmount(s: string | number | undefined): number {
  if (s === undefined || s === null) return 0;
  if (typeof s === 'number') return s;
  const match = s.match(/([\d.,]+)/);
  if (!match) return 0;
  const numStr = match[1];
  const normalized = numStr.includes(',') && numStr.indexOf(',') > numStr.lastIndexOf('.')
    ? numStr.replace(/\./g, '').replace(',', '.')
    : numStr.replace(/,/g, '');
  return parseFloat(normalized) || 0;
}
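// Examples: parseAmount("1.234,56 €") -> 1234.56 (comma after the last dot = European decimal),
// parseAmount("1,234.56") -> 1234.56 (comma before the dot = thousands separator),
// parseAmount(42) -> 42; non-numeric input falls through to 0.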

/**
 * Extract invoice number - minimal normalization
 */
function extractInvoiceNumber(s: string | undefined): string {
  if (!s) return '';
  return s.replace(/\*\*/g, '').replace(/`/g, '').trim();
}

/**
 * Extract date (YYYY-MM-DD) from response
 */
function extractDate(s: string | undefined): string {
  if (!s) return '';
  const clean = s.replace(/\*\*/g, '').replace(/`/g, '').trim();
  const isoMatch = clean.match(/(\d{4}-\d{2}-\d{2})/);
  if (isoMatch) return isoMatch[1];
  const dmyMatch = clean.match(/(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})/);
  if (dmyMatch) {
    return `${dmyMatch[3]}-${dmyMatch[2].padStart(2, '0')}-${dmyMatch[1].padStart(2, '0')}`;
  }
  return clean.replace(/[^\d-]/g, '').trim();
}
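// Examples: extractDate("**2022-04-14**") -> "2022-04-14" (ISO match after stripping markdown),
// extractDate("14/04/2022") -> "2022-04-14" (day/month reorder); anything else falls through
// with non-digit/non-dash characters removed.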

/**
 * Extract currency
 */
function extractCurrency(s: string | undefined): string {
  if (!s) return 'EUR';
  const upper = s.toUpperCase();
  if (upper.includes('EUR') || upper.includes('€')) return 'EUR';
  if (upper.includes('USD') || upper.includes('$')) return 'USD';
  if (upper.includes('GBP') || upper.includes('£')) return 'GBP';
  return 'EUR';
}

/**
 * Extract JSON from response
 */
function extractJsonFromResponse(response: string): Record<string, unknown> | null {
  const cleanResponse = response.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
  const codeBlockMatch = cleanResponse.match(/```(?:json)?\s*([\s\S]*?)```/);
  const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : cleanResponse;

  try {
    return JSON.parse(jsonStr);
  } catch {
    const jsonMatch = jsonStr.match(/\{[\s\S]*\}/);
    if (jsonMatch) {
      try {
        return JSON.parse(jsonMatch[0]);
      } catch {
        return null;
      }
    }
    return null;
  }
}
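// Fallback order (as implemented above): strip <think> blocks, prefer a fenced ```json ... ```
// body, then try the whole string as JSON, and finally grab the outermost {...} span -
// e.g. a reply like 'Sure! {"a":1}' still parses via the last branch.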

/**
 * Parse JSON response into IInvoice
 */
function parseJsonToInvoice(response: string): IInvoice | null {
  const parsed = extractJsonFromResponse(response);
  if (!parsed) return null;

  return {
    invoice_number: extractInvoiceNumber(String(parsed.invoice_number || '')),
    invoice_date: extractDate(String(parsed.invoice_date || '')),
    vendor_name: String(parsed.vendor_name || '').replace(/\*\*/g, '').replace(/`/g, '').trim(),
    currency: extractCurrency(String(parsed.currency || '')),
    net_amount: parseAmount(parsed.net_amount as string | number),
    vat_amount: parseAmount(parsed.vat_amount as string | number),
    total_amount: parseAmount(parsed.total_amount as string | number),
  };
}

/**
 * Extract invoice from markdown using GPT-OSS 20B (streaming)
 */
async function extractInvoiceFromMarkdown(markdown: string, queryId: string): Promise<IInvoice | null> {
  const startTime = Date.now();

  console.log(` [${queryId}] Invoice: ${markdown.length} chars, Prompt: ${JSON_EXTRACTION_PROMPT.length} chars`);

  const response = await fetch(`${OLLAMA_URL}/api/chat`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: EXTRACTION_MODEL,
      messages: [
        { role: 'user', content: 'Hi there, how are you?' },
        { role: 'assistant', content: 'Good, how can I help you today?' },
        { role: 'user', content: `Here is an invoice document:\n\n${markdown}` },
        { role: 'assistant', content: 'I have read the invoice document you provided. I can see all the text content. What would you like me to do with it?' },
        { role: 'user', content: JSON_EXTRACTION_PROMPT },
      ],
      stream: true,
      options: {
        num_ctx: 32768, // Larger context for long invoices + thinking
        temperature: 0, // Deterministic for JSON extraction
      },
    }),
    signal: AbortSignal.timeout(120000), // 2 min timeout
  });

  if (!response.ok) {
    const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
    console.log(` [${queryId}] ERROR: ${response.status} (${elapsed}s)`);
    throw new Error(`Ollama API error: ${response.status}`);
  }

  // Stream the response
  let content = '';
  let thinkingContent = '';
  let thinkingStarted = false;
  let outputStarted = false;
  const reader = response.body!.getReader();
  const decoder = new TextDecoder();

  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;

      const chunk = decoder.decode(value, { stream: true });

      for (const line of chunk.split('\n').filter(l => l.trim())) {
        try {
          const json = JSON.parse(line);

          const thinking = json.message?.thinking || '';
          if (thinking) {
            if (!thinkingStarted) {
              process.stdout.write(` [${queryId}] THINKING: `);
              thinkingStarted = true;
            }
            process.stdout.write(thinking);
            thinkingContent += thinking;
          }

          const token = json.message?.content || '';
          if (token) {
            if (!outputStarted) {
              if (thinkingStarted) process.stdout.write('\n');
              process.stdout.write(` [${queryId}] OUTPUT: `);
              outputStarted = true;
            }
            process.stdout.write(token);
            content += token;
          }
        } catch {
          // Ignore parse errors for partial chunks
        }
      }
    }
  } finally {
    if (thinkingStarted || outputStarted) process.stdout.write('\n');
  }

  const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
  console.log(` [${queryId}] Done: ${thinkingContent.length} thinking, ${content.length} output (${elapsed}s)`);

  return parseJsonToInvoice(content);
}

/**
 * Normalize date to YYYY-MM-DD
 */
function normalizeDate(dateStr: string | null): string {
  if (!dateStr) return '';
  if (/^\d{4}-\d{2}-\d{2}$/.test(dateStr)) return dateStr;

  const monthMap: Record<string, string> = {
    JAN: '01', FEB: '02', MAR: '03', APR: '04', MAY: '05', JUN: '06',
    JUL: '07', AUG: '08', SEP: '09', OCT: '10', NOV: '11', DEC: '12',
  };

  let match = dateStr.match(/^(\d{1,2})-([A-Z]{3})-(\d{4})$/i);
  if (match) {
    return `${match[3]}-${monthMap[match[2].toUpperCase()] || '01'}-${match[1].padStart(2, '0')}`;
  }

  match = dateStr.match(/^(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})$/);
  if (match) {
    return `${match[3]}-${match[2].padStart(2, '0')}-${match[1].padStart(2, '0')}`;
  }

  return dateStr;
}
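// Examples: normalizeDate("14-APR-2022") -> "2022-04-14" (month-name form),
// normalizeDate("14.04.2022") -> "2022-04-14" (dotted form); already-ISO strings
// and unrecognized formats are returned unchanged.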

/**
 * Normalize invoice number for comparison (remove spaces, lowercase)
 */
function normalizeInvoiceNumber(s: string): string {
  return s.replace(/\s+/g, '').toLowerCase();
}

/**
 * Compare extracted invoice against expected
 */
function compareInvoice(
  extracted: IInvoice,
  expected: IInvoice
): { match: boolean; errors: string[] } {
  const errors: string[] = [];

  // Invoice number - normalize spaces for comparison
  const extNum = normalizeInvoiceNumber(extracted.invoice_number || '');
  const expNum = normalizeInvoiceNumber(expected.invoice_number || '');
  if (extNum !== expNum) {
    errors.push(`invoice_number: expected "${expected.invoice_number}", got "${extracted.invoice_number}"`);
  }

  if (normalizeDate(extracted.invoice_date) !== normalizeDate(expected.invoice_date)) {
    errors.push(`invoice_date: expected "${expected.invoice_date}", got "${extracted.invoice_date}"`);
  }

  if (Math.abs(extracted.total_amount - expected.total_amount) > 0.02) {
    errors.push(`total_amount: expected ${expected.total_amount}, got ${extracted.total_amount}`);
  }

  if (extracted.currency?.toUpperCase() !== expected.currency?.toUpperCase()) {
    errors.push(`currency: expected "${expected.currency}", got "${extracted.currency}"`);
  }

  return { match: errors.length === 0, errors };
}

/**
 * Find test cases with existing debug markdown
 */
function findTestCases(): ITestCase[] {
  const invoicesDir = path.join(process.cwd(), '.nogit/invoices');
  if (!fs.existsSync(invoicesDir)) return [];

  const testCases: ITestCase[] = [];

  for (const invoiceName of TEST_INVOICES) {
    const markdownPath = path.join(invoicesDir, `${invoiceName}.debug.md`);
    const jsonPath = path.join(invoicesDir, `${invoiceName}.json`);

    if (fs.existsSync(markdownPath) && fs.existsSync(jsonPath)) {
      testCases.push({
        name: invoiceName,
        markdownPath,
        jsonPath,
      });
    } else {
      if (!fs.existsSync(markdownPath)) {
        console.warn(`Warning: Missing markdown: ${markdownPath}`);
      }
      if (!fs.existsSync(jsonPath)) {
        console.warn(`Warning: Missing JSON: ${jsonPath}`);
      }
    }
  }

  return testCases;
}

// ============ TESTS ============

const testCases = findTestCases();
console.log(`\n========================================`);
console.log(` EXTRACTION TUNING TEST`);
console.log(` (Skips OCR, uses existing .debug.md)`);
console.log(`========================================`);
console.log(` Testing ${testCases.length} invoices:`);
for (const tc of testCases) {
  console.log(`   - ${tc.name}`);
}
console.log(`========================================\n`);

tap.test('Setup Ollama + GPT-OSS 20B', async () => {
  const ollamaOk = await ensureMiniCpm();
  expect(ollamaOk).toBeTrue();

  const extractionOk = await ensureExtractionModel();
  expect(extractionOk).toBeTrue();
});

let passedCount = 0;
let failedCount = 0;

for (const tc of testCases) {
  tap.test(`Extract ${tc.name}`, async () => {
    const expected: IInvoice = JSON.parse(fs.readFileSync(tc.jsonPath, 'utf-8'));
    const markdown = fs.readFileSync(tc.markdownPath, 'utf-8');

    console.log(`\n ========================================`);
    console.log(` === ${tc.name} ===`);
    console.log(` ========================================`);
    console.log(` EXPECTED: ${expected.invoice_number} | ${expected.invoice_date} | ${expected.total_amount} ${expected.currency}`);
    console.log(` Markdown: ${markdown.length} chars`);

    const startTime = Date.now();

    const extracted = await extractInvoiceFromMarkdown(markdown, tc.name);

    if (!extracted) {
      failedCount++;
      console.log(`\n Result: ✗ FAILED TO PARSE (${((Date.now() - startTime) / 1000).toFixed(1)}s)`);
      return;
    }

    const elapsedMs = Date.now() - startTime;

    console.log(` EXTRACTED: ${extracted.invoice_number} | ${extracted.invoice_date} | ${extracted.total_amount} ${extracted.currency}`);

    const result = compareInvoice(extracted, expected);

    if (result.match) {
      passedCount++;
      console.log(`\n Result: ✓ MATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
    } else {
      failedCount++;
      console.log(`\n Result: ✗ MISMATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
      console.log(` ERRORS:`);
      result.errors.forEach(e => console.log(`   - ${e}`));
    }
  });
}

tap.test('Summary', async () => {
  const totalInvoices = testCases.length;
  const accuracy = totalInvoices > 0 ? (passedCount / totalInvoices) * 100 : 0;

  console.log(`\n========================================`);
  console.log(` Extraction Tuning Summary`);
  console.log(`========================================`);
  console.log(` Model: ${EXTRACTION_MODEL}`);
  console.log(` Passed: ${passedCount}/${totalInvoices}`);
  console.log(` Failed: ${failedCount}/${totalInvoices}`);
  console.log(` Accuracy: ${accuracy.toFixed(1)}%`);
  console.log(`========================================\n`);
});

export default tap.start();
695
test/test.invoices.failed.ts
Normal file
@@ -0,0 +1,695 @@
/**
 * Focused test for failed invoice extractions
 *
 * Tests only the 4 invoices that failed in the main test:
 * - consensus_2021-09: invoice_number "2021/1384" → "20211384" (slash stripped)
 * - hetzner_2022-04: model hallucinated after 281s thinking
 * - qonto_2021-08: invoice_number "08-21-INVOICE-410870" → "4108705" (prefix stripped)
 * - qonto_2021-09: invoice_number "09-21-INVOICE-4303642" → "4303642" (prefix stripped)
 *
 * Run with: tstest test/test.invoices.failed.ts --verbose
 */
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensureNanonetsOcr, ensureMiniCpm, isContainerRunning } from './helpers/docker.js';

const NANONETS_URL = 'http://localhost:8000/v1';
const NANONETS_MODEL = 'nanonets/Nanonets-OCR2-3B';

const OLLAMA_URL = 'http://localhost:11434';
const EXTRACTION_MODEL = 'gpt-oss:20b';

// Temp directory for storing markdown between stages
const TEMP_MD_DIR = path.join(os.tmpdir(), 'nanonets-invoices-failed-debug');

// Only test these specific invoices that failed
const FAILED_INVOICES = [
  'consensus_2021-09',
  'hetzner_2022-04',
  'qonto_2021-08',
  'qonto_2021-09',
];

interface IInvoice {
  invoice_number: string;
  invoice_date: string;
  vendor_name: string;
  currency: string;
  net_amount: number;
  vat_amount: number;
  total_amount: number;
}

interface IImageData {
  base64: string;
  width: number;
  height: number;
  pageNum: number;
}

interface ITestCase {
  name: string;
  pdfPath: string;
  jsonPath: string;
  markdownPath?: string;
}

// Nanonets-specific prompt for document OCR to markdown
const NANONETS_OCR_PROMPT = `Extract the text from the above document as if you were reading it naturally.
Return the tables in html format.
Return the equations in LaTeX representation.
If there is an image in the document and image caption is not present, add a small description inside <img></img> tag.
Watermarks should be wrapped in brackets. Ex: <watermark>OFFICIAL COPY</watermark>.
Page numbers should be wrapped in brackets. Ex: <page_number>14</page_number>.`;

// JSON extraction prompt for GPT-OSS 20B
const JSON_EXTRACTION_PROMPT = `You are an invoice data extractor. Below is an invoice document converted to text/markdown. Extract the key invoice fields as JSON.

IMPORTANT RULES:
1. invoice_number: The unique invoice/document number (NOT VAT ID, NOT customer ID). PRESERVE ALL CHARACTERS including slashes, dashes, and prefixes.
2. invoice_date: Format as YYYY-MM-DD
3. vendor_name: The company that issued the invoice
4. currency: EUR, USD, or GBP
5. net_amount: Amount before tax
6. vat_amount: Tax/VAT amount
7. total_amount: Final total (gross amount)

Return ONLY this JSON format, no explanation:
{
  "invoice_number": "INV-2024-001",
  "invoice_date": "2024-01-15",
  "vendor_name": "Company Name",
  "currency": "EUR",
  "net_amount": 100.00,
  "vat_amount": 19.00,
  "total_amount": 119.00
}

INVOICE TEXT:
`;

const PATCH_SIZE = 14;

/**
 * Estimate visual tokens for an image based on dimensions
 */
function estimateVisualTokens(width: number, height: number): number {
  return Math.ceil((width * height) / (PATCH_SIZE * PATCH_SIZE));
}
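// Illustrative sketch: a rough worked example of the token estimate above,
// assuming a hypothetical A4 page rendered at 150 DPI (~1240x1754 px).
//
//   estimateVisualTokens(1240, 1754)
//     = Math.ceil(1240 * 1754 / (14 * 14))
//     = Math.ceil(2174960 / 196)
//     = 11097  // roughly 11k visual tokens per page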
/**
 * Process images one page at a time for reliability
 */
function batchImages(images: IImageData[]): IImageData[][] {
  return images.map(img => [img]);
}

/**
 * Convert PDF to JPEG images using ImageMagick with dimension tracking
 */
function convertPdfToImages(pdfPath: string): IImageData[] {
  const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
  const outputPattern = path.join(tempDir, 'page-%d.jpg');

  try {
    execSync(
      `convert -density 150 -quality 90 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
      { stdio: 'pipe' }
    );

    const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.jpg')).sort();
    const images: IImageData[] = [];

    for (let i = 0; i < files.length; i++) {
      const file = files[i];
      const imagePath = path.join(tempDir, file);
      const imageData = fs.readFileSync(imagePath);

      const dimensions = execSync(`identify -format "%w %h" "${imagePath}"`, { encoding: 'utf-8' }).trim();
      const [width, height] = dimensions.split(' ').map(Number);

      images.push({
        base64: imageData.toString('base64'),
        width,
        height,
        pageNum: i + 1,
      });
    }

    return images;
  } finally {
    fs.rmSync(tempDir, { recursive: true, force: true });
  }
}

/**
 * Convert a batch of pages to markdown using Nanonets-OCR2-3B
 */
async function convertBatchToMarkdown(batch: IImageData[]): Promise<string> {
  const startTime = Date.now();
  const pageNums = batch.map(img => img.pageNum).join(', ');

  const content: Array<{ type: string; image_url?: { url: string }; text?: string }> = [];

  for (const img of batch) {
    content.push({
      type: 'image_url',
      image_url: { url: `data:image/jpeg;base64,${img.base64}` },
    });
  }

  const promptText = batch.length > 1
    ? `${NANONETS_OCR_PROMPT}\n\nPlease clearly separate each page's content with "--- PAGE N ---" markers, where N is the page number starting from ${batch[0].pageNum}.`
    : NANONETS_OCR_PROMPT;

  content.push({ type: 'text', text: promptText });

  const response = await fetch(`${NANONETS_URL}/chat/completions`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': 'Bearer dummy',
    },
    body: JSON.stringify({
      model: NANONETS_MODEL,
      messages: [{
        role: 'user',
        content,
      }],
      max_tokens: 4096 * batch.length,
      temperature: 0.0,
    }),
    signal: AbortSignal.timeout(600000),
  });

  const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);

  if (!response.ok) {
    const errorText = await response.text();
    throw new Error(`Nanonets API error: ${response.status} - ${errorText}`);
  }

  const data = await response.json();
  let responseContent = (data.choices?.[0]?.message?.content || '').trim();

  if (batch.length === 1 && !responseContent.includes('--- PAGE')) {
    responseContent = `--- PAGE ${batch[0].pageNum} ---\n${responseContent}`;
  }

  console.log(`  Pages [${pageNums}]: ${responseContent.length} chars (${elapsed}s)`);
  return responseContent;
}

/**
 * Convert all pages of a document to markdown using smart batching
 */
async function convertDocumentToMarkdown(images: IImageData[], docName: string): Promise<string> {
  const batches = batchImages(images);
  console.log(`  [${docName}] Processing ${images.length} page(s) in ${batches.length} batch(es)...`);

  const markdownParts: string[] = [];

  for (let i = 0; i < batches.length; i++) {
    const batch = batches[i];
    const batchTokens = batch.reduce((sum, img) => sum + estimateVisualTokens(img.width, img.height), 0);
    console.log(`  Batch ${i + 1}: ${batch.length} page(s), ~${batchTokens} tokens`);
    const markdown = await convertBatchToMarkdown(batch);
    markdownParts.push(markdown);
  }

  const fullMarkdown = markdownParts.join('\n\n');
  console.log(`  [${docName}] Complete: ${fullMarkdown.length} chars total`);
  return fullMarkdown;
}
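// Illustrative sketch: with one page per batch, the joined markdown for a
// hypothetical two-page invoice would look roughly like this (page content
// invented for illustration only):
//
//   --- PAGE 1 ---
//   <table> ...line items... </table>
//
//   --- PAGE 2 ---
//   ...payment terms...
//   <page_number>2</page_number>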
/**
 * Stop Nanonets container
 */
function stopNanonets(): void {
  console.log('  [Docker] Stopping Nanonets container...');
  try {
    execSync('docker stop nanonets-test 2>/dev/null || true', { stdio: 'pipe' });
    execSync('sleep 5', { stdio: 'pipe' });
    console.log('  [Docker] Nanonets stopped');
  } catch {
    console.log('  [Docker] Nanonets was not running');
  }
}

/**
 * Ensure GPT-OSS 20B model is available
 */
async function ensureExtractionModel(): Promise<boolean> {
  try {
    const response = await fetch(`${OLLAMA_URL}/api/tags`);
    if (response.ok) {
      const data = await response.json();
      const models = data.models || [];
      if (models.some((m: { name: string }) => m.name === EXTRACTION_MODEL)) {
        console.log(`  [Ollama] Model available: ${EXTRACTION_MODEL}`);
        return true;
      }
    }
  } catch {
    return false;
  }

  console.log(`  [Ollama] Pulling ${EXTRACTION_MODEL}...`);
  const pullResponse = await fetch(`${OLLAMA_URL}/api/pull`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ name: EXTRACTION_MODEL, stream: false }),
  });

  return pullResponse.ok;
}

/**
 * Parse amount from string (handles European format)
 */
function parseAmount(s: string | number | undefined): number {
  if (s === undefined || s === null) return 0;
  if (typeof s === 'number') return s;
  const match = s.match(/([\d.,]+)/);
  if (!match) return 0;
  const numStr = match[1];
  const normalized = numStr.includes(',') && numStr.indexOf(',') > numStr.lastIndexOf('.')
    ? numStr.replace(/\./g, '').replace(',', '.')
    : numStr.replace(/,/g, '');
  return parseFloat(normalized) || 0;
}
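// Illustrative sketch: how the comma/dot heuristic above resolves the two
// common number formats (hypothetical inputs).
//
//   parseAmount('1.234,56')    // European: last ',' after '.' → 1234.56
//   parseAmount('1,234.56')    // US: commas are separators    → 1234.56
//   parseAmount('EUR 119,00')  // surrounding text is ignored  → 119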
/**
 * Extract invoice number - MINIMAL normalization for debugging
 */
function extractInvoiceNumber(s: string | undefined): string {
  if (!s) return '';
  // Only remove markdown formatting, preserve everything else
  return s.replace(/\*\*/g, '').replace(/`/g, '').trim();
}

/**
 * Extract date (YYYY-MM-DD) from response
 */
function extractDate(s: string | undefined): string {
  if (!s) return '';
  let clean = s.replace(/\*\*/g, '').replace(/`/g, '').trim();
  const isoMatch = clean.match(/(\d{4}-\d{2}-\d{2})/);
  if (isoMatch) return isoMatch[1];
  const dmyMatch = clean.match(/(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})/);
  if (dmyMatch) {
    return `${dmyMatch[3]}-${dmyMatch[2].padStart(2, '0')}-${dmyMatch[1].padStart(2, '0')}`;
  }
  return clean.replace(/[^\d-]/g, '').trim();
}

/**
 * Extract currency
 */
function extractCurrency(s: string | undefined): string {
  if (!s) return 'EUR';
  const upper = s.toUpperCase();
  if (upper.includes('EUR') || upper.includes('€')) return 'EUR';
  if (upper.includes('USD') || upper.includes('$')) return 'USD';
  if (upper.includes('GBP') || upper.includes('£')) return 'GBP';
  return 'EUR';
}

/**
 * Extract JSON from response
 */
function extractJsonFromResponse(response: string): Record<string, unknown> | null {
  let cleanResponse = response.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
  const codeBlockMatch = cleanResponse.match(/```(?:json)?\s*([\s\S]*?)```/);
  const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : cleanResponse;

  try {
    return JSON.parse(jsonStr);
  } catch {
    const jsonMatch = jsonStr.match(/\{[\s\S]*\}/);
    if (jsonMatch) {
      try {
        return JSON.parse(jsonMatch[0]);
      } catch {
        return null;
      }
    }
    return null;
  }
}
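// Illustrative sketch: the response shapes this parser is meant to survive
// (hypothetical model outputs).
//
//   extractJsonFromResponse('{"total_amount":119}')               // bare JSON
//   extractJsonFromResponse('```json\n{"total_amount":119}\n```') // fenced JSON
//   extractJsonFromResponse('<think>...</think> Here: {"total_amount":119}')
//   // thinking tags are stripped first, then the {...} span is parsed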
/**
 * Parse JSON response into IInvoice
 */
function parseJsonToInvoice(response: string): IInvoice | null {
  const parsed = extractJsonFromResponse(response);
  if (!parsed) return null;

  return {
    invoice_number: extractInvoiceNumber(String(parsed.invoice_number || '')),
    invoice_date: extractDate(String(parsed.invoice_date || '')),
    vendor_name: String(parsed.vendor_name || '').replace(/\*\*/g, '').replace(/`/g, '').trim(),
    currency: extractCurrency(String(parsed.currency || '')),
    net_amount: parseAmount(parsed.net_amount as string | number),
    vat_amount: parseAmount(parsed.vat_amount as string | number),
    total_amount: parseAmount(parsed.total_amount as string | number),
  };
}

/**
 * Extract invoice from markdown using GPT-OSS 20B (streaming)
 */
async function extractInvoiceFromMarkdown(markdown: string, queryId: string): Promise<IInvoice | null> {
  const startTime = Date.now();
  const fullPrompt = JSON_EXTRACTION_PROMPT + markdown;

  // Log exact prompt
  console.log(`\n  [${queryId}] ===== PROMPT =====`);
  console.log(fullPrompt);
  console.log(`  [${queryId}] ===== END PROMPT (${fullPrompt.length} chars) =====\n`);

  const response = await fetch(`${OLLAMA_URL}/api/chat`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: EXTRACTION_MODEL,
      messages: [
        { role: 'user', content: 'Hi there, how are you?' },
        { role: 'assistant', content: 'Good, how can I help you today?' },
        { role: 'user', content: fullPrompt },
      ],
      stream: true,
    }),
    signal: AbortSignal.timeout(600000),
  });

  if (!response.ok) {
    const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
    console.log(`  [${queryId}] ERROR: ${response.status} (${elapsed}s)`);
    throw new Error(`Ollama API error: ${response.status}`);
  }

  // Stream the response
  let content = '';
  let thinkingContent = '';
  let thinkingStarted = false;
  let outputStarted = false;
  const reader = response.body!.getReader();
  const decoder = new TextDecoder();

  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;

      const chunk = decoder.decode(value, { stream: true });

      for (const line of chunk.split('\n').filter(l => l.trim())) {
        try {
          const json = JSON.parse(line);

          const thinking = json.message?.thinking || '';
          if (thinking) {
            if (!thinkingStarted) {
              process.stdout.write(`  [${queryId}] THINKING: `);
              thinkingStarted = true;
            }
            process.stdout.write(thinking);
            thinkingContent += thinking;
          }

          const token = json.message?.content || '';
          if (token) {
            if (!outputStarted) {
              if (thinkingStarted) process.stdout.write('\n');
              process.stdout.write(`  [${queryId}] OUTPUT: `);
              outputStarted = true;
            }
            process.stdout.write(token);
            content += token;
          }
        } catch {
          // Ignore parse errors for partial chunks
        }
      }
    }
  } finally {
    if (thinkingStarted || outputStarted) process.stdout.write('\n');
  }

  const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
  console.log(`  [${queryId}] Done: ${thinkingContent.length} thinking chars, ${content.length} output chars (${elapsed}s)`);

  // Log raw response for debugging
  console.log(`  [${queryId}] RAW RESPONSE: ${content}`);

  return parseJsonToInvoice(content);
}

/**
 * Extract invoice (single pass)
 */
async function extractInvoice(markdown: string, docName: string): Promise<IInvoice> {
  console.log(`  [${docName}] Extracting...`);
  const invoice = await extractInvoiceFromMarkdown(markdown, docName);
  if (!invoice) {
    return {
      invoice_number: '',
      invoice_date: '',
      vendor_name: '',
      currency: 'EUR',
      net_amount: 0,
      vat_amount: 0,
      total_amount: 0,
    };
  }
  console.log(`  [${docName}] Extracted: ${JSON.stringify(invoice, null, 2)}`);
  return invoice;
}

/**
 * Normalize date to YYYY-MM-DD
 */
function normalizeDate(dateStr: string | null): string {
  if (!dateStr) return '';
  if (/^\d{4}-\d{2}-\d{2}$/.test(dateStr)) return dateStr;

  const monthMap: Record<string, string> = {
    JAN: '01', FEB: '02', MAR: '03', APR: '04', MAY: '05', JUN: '06',
    JUL: '07', AUG: '08', SEP: '09', OCT: '10', NOV: '11', DEC: '12',
  };

  let match = dateStr.match(/^(\d{1,2})-([A-Z]{3})-(\d{4})$/i);
  if (match) {
    return `${match[3]}-${monthMap[match[2].toUpperCase()] || '01'}-${match[1].padStart(2, '0')}`;
  }

  match = dateStr.match(/^(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})$/);
  if (match) {
    return `${match[3]}-${match[2].padStart(2, '0')}-${match[1].padStart(2, '0')}`;
  }

  return dateStr;
}
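// Illustrative sketch: the date shapes this normalizer accepts, assuming
// day-first input for the slash/dot form (hypothetical values).
//
//   normalizeDate('2022-04-14')   // already ISO → '2022-04-14'
//   normalizeDate('14-APR-2022')  // month name  → '2022-04-14'
//   normalizeDate('14.04.2022')   // DD.MM.YYYY  → '2022-04-14'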
/**
 * Compare extracted invoice against expected - detailed output
 */
function compareInvoice(
  extracted: IInvoice,
  expected: IInvoice
): { match: boolean; errors: string[] } {
  const errors: string[] = [];

  // Invoice number comparison - exact match after whitespace normalization
  const extNum = extracted.invoice_number?.trim() || '';
  const expNum = expected.invoice_number?.trim() || '';
  if (extNum.toLowerCase() !== expNum.toLowerCase()) {
    errors.push(`invoice_number: expected "${expected.invoice_number}", got "${extracted.invoice_number}"`);
  }

  if (normalizeDate(extracted.invoice_date) !== normalizeDate(expected.invoice_date)) {
    errors.push(`invoice_date: expected "${expected.invoice_date}", got "${extracted.invoice_date}"`);
  }

  if (Math.abs(extracted.total_amount - expected.total_amount) > 0.02) {
    errors.push(`total_amount: expected ${expected.total_amount}, got ${extracted.total_amount}`);
  }

  if (extracted.currency?.toUpperCase() !== expected.currency?.toUpperCase()) {
    errors.push(`currency: expected "${expected.currency}", got "${extracted.currency}"`);
  }

  return { match: errors.length === 0, errors };
}

/**
 * Find test cases for failed invoices only
 */
function findTestCases(): ITestCase[] {
  const testDir = path.join(process.cwd(), '.nogit/invoices');
  if (!fs.existsSync(testDir)) return [];

  const files = fs.readdirSync(testDir);
  const testCases: ITestCase[] = [];

  for (const invoiceName of FAILED_INVOICES) {
    const pdfFile = `${invoiceName}.pdf`;
    const jsonFile = `${invoiceName}.json`;

    if (files.includes(pdfFile) && files.includes(jsonFile)) {
      testCases.push({
        name: invoiceName,
        pdfPath: path.join(testDir, pdfFile),
        jsonPath: path.join(testDir, jsonFile),
      });
    } else {
      console.warn(`Warning: Missing files for ${invoiceName}`);
    }
  }

  return testCases;
}

// ============ TESTS ============

const testCases = findTestCases();
console.log(`\n========================================`);
console.log(`  FAILED INVOICES DEBUG TEST`);
console.log(`========================================`);
console.log(`  Testing ${testCases.length} failed invoices:`);
for (const tc of testCases) {
  console.log(`    - ${tc.name}`);
}
console.log(`========================================\n`);

// Ensure temp directory exists
if (!fs.existsSync(TEMP_MD_DIR)) {
  fs.mkdirSync(TEMP_MD_DIR, { recursive: true });
}

// -------- STAGE 1: OCR with Nanonets --------

tap.test('Stage 1: Setup Nanonets', async () => {
  console.log('\n========== STAGE 1: Nanonets OCR ==========\n');
  const ok = await ensureNanonetsOcr();
  expect(ok).toBeTrue();
});

tap.test('Stage 1: Convert failed invoices to markdown', async () => {
  console.log('\n  Converting failed invoice PDFs to markdown with Nanonets-OCR2-3B...\n');

  for (const tc of testCases) {
    console.log(`\n  === ${tc.name} ===`);

    const images = convertPdfToImages(tc.pdfPath);
    console.log(`  Pages: ${images.length}`);

    const markdown = await convertDocumentToMarkdown(images, tc.name);

    const mdPath = path.join(TEMP_MD_DIR, `${tc.name}.md`);
    fs.writeFileSync(mdPath, markdown);
    tc.markdownPath = mdPath;
    console.log(`  Saved: ${mdPath}`);

    // Also save to .nogit for inspection
    const debugMdPath = path.join(process.cwd(), '.nogit/invoices', `${tc.name}.debug.md`);
    fs.writeFileSync(debugMdPath, markdown);
    console.log(`  Debug copy: ${debugMdPath}`);
  }

  console.log('\n  Stage 1 complete: All failed invoices converted to markdown\n');
});

tap.test('Stage 1: Stop Nanonets', async () => {
  stopNanonets();
  await new Promise(resolve => setTimeout(resolve, 3000));
  expect(isContainerRunning('nanonets-test')).toBeFalse();
});

// -------- STAGE 2: Extraction with GPT-OSS 20B --------

tap.test('Stage 2: Setup Ollama + GPT-OSS 20B', async () => {
  console.log('\n========== STAGE 2: GPT-OSS 20B Extraction ==========\n');

  const ollamaOk = await ensureMiniCpm();
  expect(ollamaOk).toBeTrue();

  const extractionOk = await ensureExtractionModel();
  expect(extractionOk).toBeTrue();
});

let passedCount = 0;
let failedCount = 0;

for (const tc of testCases) {
  tap.test(`Stage 2: Extract ${tc.name}`, async () => {
    const expected: IInvoice = JSON.parse(fs.readFileSync(tc.jsonPath, 'utf-8'));
    console.log(`\n  ========================================`);
    console.log(`  === ${tc.name} ===`);
    console.log(`  ========================================`);
    console.log(`  EXPECTED:`);
    console.log(`    invoice_number: "${expected.invoice_number}"`);
    console.log(`    invoice_date: "${expected.invoice_date}"`);
    console.log(`    vendor_name: "${expected.vendor_name}"`);
    console.log(`    total_amount: ${expected.total_amount} ${expected.currency}`);

    const startTime = Date.now();

    const mdPath = path.join(TEMP_MD_DIR, `${tc.name}.md`);
    if (!fs.existsSync(mdPath)) {
      throw new Error(`Markdown not found: ${mdPath}. Run Stage 1 first.`);
    }
    const markdown = fs.readFileSync(mdPath, 'utf-8');
    console.log(`  Markdown: ${markdown.length} chars`);

    const extracted = await extractInvoice(markdown, tc.name);

    const elapsedMs = Date.now() - startTime;

    console.log(`\n  EXTRACTED:`);
    console.log(`    invoice_number: "${extracted.invoice_number}"`);
    console.log(`    invoice_date: "${extracted.invoice_date}"`);
    console.log(`    vendor_name: "${extracted.vendor_name}"`);
    console.log(`    total_amount: ${extracted.total_amount} ${extracted.currency}`);

    const result = compareInvoice(extracted, expected);

    if (result.match) {
      passedCount++;
      console.log(`\n  Result: ✓ MATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
    } else {
      failedCount++;
      console.log(`\n  Result: ✗ MISMATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
      console.log(`  ERRORS:`);
      result.errors.forEach(e => console.log(`    - ${e}`));
    }

    // Don't fail the test - we're debugging
    // expect(result.match).toBeTrue();
  });
}

tap.test('Summary', async () => {
  const totalInvoices = testCases.length;
  const accuracy = totalInvoices > 0 ? (passedCount / totalInvoices) * 100 : 0;

  console.log(`\n========================================`);
  console.log(`  Failed Invoices Debug Summary`);
  console.log(`========================================`);
  console.log(`  Passed: ${passedCount}/${totalInvoices}`);
  console.log(`  Failed: ${failedCount}/${totalInvoices}`);
  console.log(`  Accuracy: ${accuracy.toFixed(1)}%`);
  console.log(`========================================`);
  console.log(`  Markdown files saved to: ${TEMP_MD_DIR}`);
  console.log(`  Debug copies in: .nogit/invoices/*.debug.md`);
  console.log(`========================================\n`);

  // Don't cleanup temp files for debugging
  console.log(`  Keeping temp files for debugging.\n`);
});

export default tap.start();
@@ -1,8 +1,10 @@
/**
 * Invoice extraction test using MiniCPM-V only (visual extraction)
 * Invoice extraction test using MiniCPM-V via smartagent DualAgentOrchestrator
 *
 * This tests MiniCPM-V's ability to extract invoice data directly from images
 * without any OCR augmentation.
 * Uses vision-capable orchestrator with JsonValidatorTool for self-validation:
 * 1. Pass images to the orchestrator
 * 2. Driver extracts invoice data and validates JSON before completing
 * 3. If validation fails, driver retries within the same task
 */
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
@@ -10,9 +12,11 @@ import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensureMiniCpm } from './helpers/docker.js';
import { SmartAi } from '@push.rocks/smartai';
import { DualAgentOrchestrator, JsonValidatorTool } from '@push.rocks/smartagent';

const OLLAMA_URL = 'http://localhost:11434';
const MODEL = 'minicpm-v:latest';
const MODEL = 'openbmb/minicpm-v4.5:q8_0';

interface IInvoice {
  invoice_number: string;
@@ -24,27 +28,9 @@ interface IInvoice {
  total_amount: number;
}

/**
 * Build extraction prompt (MiniCPM-V only, no OCR augmentation)
 */
function buildPrompt(): string {
  return `/nothink
You are an invoice parser. Extract the following fields from this invoice:

1. invoice_number: The invoice/receipt number
2. invoice_date: Date in YYYY-MM-DD format
3. vendor_name: Company that issued the invoice
4. currency: EUR, USD, etc.
5. net_amount: Amount before tax (if shown)
6. vat_amount: Tax/VAT amount (if shown, 0 if reverse charge or no tax)
7. total_amount: Final amount due

Return ONLY valid JSON in this exact format:
{"invoice_number":"XXX","invoice_date":"YYYY-MM-DD","vendor_name":"Company Name","currency":"EUR","net_amount":100.00,"vat_amount":19.00,"total_amount":119.00}

If a field is not visible, use null for strings or 0 for numbers.
No explanation, just the JSON object.`;
}
// SmartAi instance and orchestrator (initialized in setup)
let smartAi: SmartAi;
let orchestrator: DualAgentOrchestrator;

/**
 * Convert PDF to PNG images using ImageMagick
@@ -55,7 +41,7 @@ function convertPdfToImages(pdfPath: string): string[] {

  try {
    execSync(
      `convert -density 200 -quality 90 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
      `convert -density 300 -quality 95 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
      { stdio: 'pipe' }
    );

@@ -74,123 +60,212 @@ function convertPdfToImages(pdfPath: string): string[] {
  }
}

const EXTRACTION_PROMPT = `Extract invoice data from the provided image(s).

IMPORTANT: You must output a valid JSON object with these exact fields:
{
  "invoice_number": "the invoice number (not VAT ID, not customer ID)",
  "invoice_date": "YYYY-MM-DD format",
  "vendor_name": "company that issued the invoice",
  "currency": "EUR, USD, or GBP",
  "net_amount": 0.00,
  "vat_amount": 0.00,
  "total_amount": 0.00
}

Before completing, use the json.validate tool to verify your output is valid JSON with all required fields.

<tool_call>
<tool>json</tool>
<action>validate</action>
<params>{"jsonString": "YOUR_JSON_HERE", "requiredFields": ["invoice_number", "invoice_date", "vendor_name", "currency", "net_amount", "vat_amount", "total_amount"]}</params>
</tool_call>

Only complete the task after validation passes. Output the final JSON in <task_complete> tags.`;

/**
 * Single extraction pass with MiniCPM-V
 * Parse amount from string (handles European format)
 */
async function extractOnce(images: string[], passNum: number): Promise<IInvoice> {
  const payload = {
    model: MODEL,
    prompt: buildPrompt(),
    images,
    stream: true,
    options: {
      num_predict: 2048,
      temperature: 0.1,
    },
  };
function parseAmount(s: string | number | undefined): number {
  if (s === undefined || s === null) return 0;
  if (typeof s === 'number') return s;
  const match = s.match(/([\d.,]+)/);
  if (!match) return 0;
  const numStr = match[1];
  // Handle European format: 1.234,56 → 1234.56
  const normalized = numStr.includes(',') && numStr.indexOf(',') > numStr.lastIndexOf('.')
    ? numStr.replace(/\./g, '').replace(',', '.')
    : numStr.replace(/,/g, '');
  return parseFloat(normalized) || 0;
}

  const response = await fetch(`${OLLAMA_URL}/api/generate`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload),
  });

  if (!response.ok) {
    throw new Error(`Ollama API error: ${response.status}`);
/**
 * Extract invoice number from potentially verbose response
 */
function extractInvoiceNumber(s: string | undefined): string {
  if (!s) return '';
  let clean = s.replace(/\*\*/g, '').replace(/`/g, '').trim();
  const patterns = [
    /\b([A-Z]{2,3}\d{10,})\b/i,       // IEE2022006460244
    /\b([A-Z]\d{8,})\b/i,             // R0014359508
    /\b(INV[-\s]?\d{4}[-\s]?\d+)\b/i, // INV-2024-001
    /\b(\d{7,})\b/,                   // 1579087430
  ];
  for (const pattern of patterns) {
    const match = clean.match(pattern);
    if (match) return match[1];
  }
  return clean.replace(/[^A-Z0-9-]/gi, '').trim() || clean;
}

  const reader = response.body?.getReader();
  if (!reader) {
    throw new Error('No response body');
/**
 * Extract date (YYYY-MM-DD) from response
 */
function extractDate(s: string | undefined): string {
  if (!s) return '';
  let clean = s.replace(/\*\*/g, '').replace(/`/g, '').trim();
  const isoMatch = clean.match(/(\d{4}-\d{2}-\d{2})/);
  if (isoMatch) return isoMatch[1];
  // Try DD/MM/YYYY or DD.MM.YYYY
  const dmyMatch = clean.match(/(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})/);
  if (dmyMatch) {
    return `${dmyMatch[3]}-${dmyMatch[2].padStart(2, '0')}-${dmyMatch[1].padStart(2, '0')}`;
  }
  return clean.replace(/[^\d-]/g, '').trim();
}

  const decoder = new TextDecoder();
  let fullText = '';
/**
 * Extract currency
 */
function extractCurrency(s: string | undefined): string {
  if (!s) return 'EUR';
  const upper = s.toUpperCase();
  if (upper.includes('EUR') || upper.includes('€')) return 'EUR';
  if (upper.includes('USD') || upper.includes('$')) return 'USD';
  if (upper.includes('GBP') || upper.includes('£')) return 'GBP';
  return 'EUR';
}

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;

    const chunk = decoder.decode(value, { stream: true });
    const lines = chunk.split('\n').filter((l) => l.trim());

    for (const line of lines) {
/**
 * Extract JSON from response (handles markdown code blocks and task_complete tags)
 */
function extractJsonFromResponse(response: string): Record<string, unknown> | null {
  // Try to find JSON in task_complete tags
  const completeMatch = response.match(/<task_complete>([\s\S]*?)<\/task_complete>/);
  if (completeMatch) {
    const content = completeMatch[1].trim();
    // Try to find JSON in the content
    const codeBlockMatch = content.match(/```(?:json)?\s*([\s\S]*?)```/);
    const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : content;
    try {
      const json = JSON.parse(line);
      if (json.response) {
        fullText += json.response;
      }
    } catch {
      // Skip invalid JSON lines
    }
  }
}

// Extract JSON from response
const startIdx = fullText.indexOf('{');
const endIdx = fullText.lastIndexOf('}') + 1;

if (startIdx < 0 || endIdx <= startIdx) {
  throw new Error(`No JSON object found in response: ${fullText.substring(0, 200)}`);
}

const jsonStr = fullText.substring(startIdx, endIdx);
return JSON.parse(jsonStr);
}

/**
 * Create a hash of invoice for comparison (using key fields)
 */
function hashInvoice(invoice: IInvoice): string {
  return `${invoice.invoice_number}|${invoice.invoice_date}|${invoice.total_amount.toFixed(2)}`;
}
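// Illustrative sketch: the hash keys the consensus vote in
// extractWithConsensus below — two passes agreeing on the three key fields
// produce the same string (hypothetical values).
//
//   hashInvoice({ invoice_number: 'R0014359508', invoice_date: '2022-04-14',
//                 total_amount: 119, /* ...other fields... */ } as IInvoice)
//   // → 'R0014359508|2022-04-14|119.00'
//   // A second matching pass bumps hashCounts to 2 → consensus reached.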
/**
 * Extract with consensus voting using MiniCPM-V only
 */
async function extractWithConsensus(images: string[], invoiceName: string, maxPasses: number = 5): Promise<IInvoice> {
  const results: Array<{ invoice: IInvoice; hash: string }> = [];
  const hashCounts: Map<string, number> = new Map();

  const addResult = (invoice: IInvoice, passLabel: string): number => {
    const hash = hashInvoice(invoice);
    results.push({ invoice, hash });
    hashCounts.set(hash, (hashCounts.get(hash) || 0) + 1);
    console.log(`  [${passLabel}] ${invoice.invoice_number} | ${invoice.invoice_date} | ${invoice.total_amount} ${invoice.currency}`);
    return hashCounts.get(hash)!;
  };

  for (let pass = 1; pass <= maxPasses; pass++) {
  } catch {
    // Try to find JSON object pattern
    const jsonMatch = jsonStr.match(/\{[\s\S]*\}/);
    if (jsonMatch) {
      try {
        const invoice = await extractOnce(images, pass);
        const count = addResult(invoice, `Pass ${pass}`);
        return JSON.parse(jsonMatch[0]);
      } catch {
        return null;
      }
    }
  }
}

      if (count >= 2) {
        console.log(`  [Consensus] Reached after ${pass} passes`);
  // Try to find JSON in markdown code block
  const codeBlockMatch = response.match(/```(?:json)?\s*([\s\S]*?)```/);
  const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : response.trim();

  try {
    return JSON.parse(jsonStr);
  } catch {
    // Try to find JSON object pattern
    const jsonMatch = jsonStr.match(/\{[\s\S]*\}/);
    if (jsonMatch) {
      try {
        return JSON.parse(jsonMatch[0]);
      } catch {
        return null;
      }
    }
    return null;
  }
}

/**
 * Parse JSON response into IInvoice
 */
function parseJsonToInvoice(response: string): IInvoice | null {
  const parsed = extractJsonFromResponse(response);
  if (!parsed) return null;

  return {
    invoice_number: extractInvoiceNumber(String(parsed.invoice_number || '')),
    invoice_date: extractDate(String(parsed.invoice_date || '')),
    vendor_name: String(parsed.vendor_name || '').replace(/\*\*/g, '').replace(/`/g, '').trim(),
    currency: extractCurrency(String(parsed.currency || '')),
    net_amount: parseAmount(parsed.net_amount as string | number),
    vat_amount: parseAmount(parsed.vat_amount as string | number),
    total_amount: parseAmount(parsed.total_amount as string | number),
  };
}

/**
 * Extract invoice data using smartagent orchestrator with vision
 */
async function extractInvoiceFromImages(images: string[]): Promise<IInvoice> {
  console.log(`  [Vision] Processing ${images.length} page(s) with smartagent DualAgentOrchestrator`);

  const startTime = Date.now();

  const result = await orchestrator.run(EXTRACTION_PROMPT, { images });

  const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
  console.log(`  [Vision] Completed in ${elapsed}s (${result.iterations} iterations, status: ${result.status})`);

  const invoice = parseJsonToInvoice(result.result);

  if (invoice) {
    console.log(`  [Result] ${invoice.invoice_number} | ${invoice.invoice_date} | ${invoice.total_amount} ${invoice.currency}`);
    return invoice;
  }
  } catch (err) {
    console.log(`  [Pass ${pass}] Error: ${err}`);
  }

  // Return empty invoice if parsing failed
  console.log(`  [Result] Parsing failed, returning empty invoice`);
  return {
    invoice_number: '',
    invoice_date: '',
    vendor_name: '',
    currency: 'EUR',
    net_amount: 0,
    vat_amount: 0,
    total_amount: 0,
  };
}

/**
 * Normalize date to YYYY-MM-DD
 */
function normalizeDate(dateStr: string | null): string {
  if (!dateStr) return '';
  if (/^\d{4}-\d{2}-\d{2}$/.test(dateStr)) return dateStr;

  const monthMap: Record<string, string> = {
    JAN: '01', FEB: '02', MAR: '03', APR: '04', MAY: '05', JUN: '06',
    JUL: '07', AUG: '08', SEP: '09', OCT: '10', NOV: '11', DEC: '12',
  };

  let match = dateStr.match(/^(\d{1,2})-([A-Z]{3})-(\d{4})$/i);
  if (match) {
    return `${match[3]}-${monthMap[match[2].toUpperCase()] || '01'}-${match[1].padStart(2, '0')}`;
  }

  // No consensus reached - return the most common result
  let bestHash = '';
  let bestCount = 0;
  for (const [hash, count] of hashCounts) {
    if (count > bestCount) {
      bestCount = count;
      bestHash = hash;
    }
  match = dateStr.match(/^(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})$/);
  if (match) {
    return `${match[3]}-${match[2].padStart(2, '0')}-${match[1].padStart(2, '0')}`;
  }

  if (!bestHash) {
    throw new Error(`No valid results for ${invoiceName}`);
  }

  const best = results.find((r) => r.hash === bestHash)!;
  console.log(`  [No consensus] Using most common result (${bestCount}/${maxPasses} passes)`);
  return best.invoice;
  return dateStr;
}

/**
@@ -210,7 +285,7 @@ function compareInvoice(
  }

  // Compare date
  if (extracted.invoice_date !== expected.invoice_date) {
  if (normalizeDate(extracted.invoice_date) !== normalizeDate(expected.invoice_date)) {
    errors.push(`invoice_date: expected "${expected.invoice_date}", got "${extracted.invoice_date}"`);
  }

@@ -252,9 +327,7 @@ function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: strin
    }
  }

  // Sort alphabetically
  testCases.sort((a, b) => a.name.localeCompare(b.name));

  return testCases;
}

@@ -262,24 +335,93 @@ function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: strin

tap.test('setup: ensure Docker containers are running', async () => {
  console.log('\n[Setup] Checking Docker containers...\n');

  // Ensure MiniCPM is running
  const minicpmOk = await ensureMiniCpm();
  expect(minicpmOk).toBeTrue();

  console.log('\n[Setup] All containers ready!\n');
});

tap.test('should have MiniCPM-V 4.5 model loaded', async () => {
tap.test('setup: initialize smartagent orchestrator', async () => {
  console.log('[Setup] Initializing SmartAi and DualAgentOrchestrator...');

  smartAi = new SmartAi({
    ollama: {
      baseUrl: OLLAMA_URL,
      model: MODEL,
      defaultOptions: {
        num_ctx: 32768,
        temperature: 0.1,
      },
      defaultTimeout: 300000, // 5 minutes for vision tasks
    },
  });

  await smartAi.start();

  orchestrator = new DualAgentOrchestrator({
    smartAiInstance: smartAi,
    defaultProvider: 'ollama',
    guardianPolicyPrompt: `You are a Guardian agent overseeing invoice extraction tasks.

APPROVE all tool calls that:
- Use the json.validate action to verify JSON output
- Are reasonable attempts to complete the extraction task

REJECT tool calls that:
- Attempt to access external resources
- Try to execute arbitrary code
- Are clearly unrelated to invoice extraction`,
    driverSystemMessage: `You are an AI assistant that extracts invoice data from images.

Your task is to analyze invoice images and extract structured data.
You have access to a json.validate tool to verify your JSON output.

IMPORTANT: Always validate your JSON before completing the task.

## Tool Usage Format
When you need to validate JSON, output:

<tool_call>
<tool>json</tool>
<action>validate</action>
<params>{"jsonString": "YOUR_JSON", "requiredFields": ["invoice_number", "invoice_date", "vendor_name", "currency", "net_amount", "vat_amount", "total_amount"]}</params>
</tool_call>

## Completion Format
After validation passes, complete the task:

<task_complete>
{"invoice_number": "...", "invoice_date": "YYYY-MM-DD", ...}
</task_complete>`,
    maxIterations: 5,
    maxConsecutiveRejections: 3,
    onToken: (token, source) => {
      if (source === 'driver') {
        process.stdout.write(token);
      }
    },
    onProgress: (event) => {
      if (event.logLevel === 'error') {
        console.error(event.logMessage);
      }
    },
  });

  // Register the JsonValidatorTool
  orchestrator.registerTool(new JsonValidatorTool());

  await orchestrator.start();
  console.log('[Setup] Orchestrator initialized!\n');
});

tap.test('should have MiniCPM-V model loaded', async () => {
  const response = await fetch(`${OLLAMA_URL}/api/tags`);
  const data = await response.json();
  const modelNames = data.models.map((m: { name: string }) => m.name);
  expect(modelNames.some((name: string) => name.includes('minicpm-v4.5'))).toBeTrue();
  expect(modelNames.some((name: string) => name.includes('minicpm'))).toBeTrue();
});

// Dynamic test for each PDF/JSON pair
const testCases = findTestCases();
console.log(`\nFound ${testCases.length} invoice test cases (MiniCPM-V only)\n`);
console.log(`\nFound ${testCases.length} invoice test cases (smartagent + MiniCPM-V)\n`);

let passedCount = 0;
let failedCount = 0;
@@ -287,25 +429,20 @@ const processingTimes: number[] = [];

for (const testCase of testCases) {
  tap.test(`should extract invoice: ${testCase.name}`, async () => {
    // Load expected data
    const expected: IInvoice = JSON.parse(fs.readFileSync(testCase.jsonPath, 'utf-8'));
    console.log(`\n=== ${testCase.name} ===`);
    console.log(`Expected: ${expected.invoice_number} | ${expected.invoice_date} | ${expected.total_amount} ${expected.currency}`);

    const startTime = Date.now();

    // Convert PDF to images
    const images = convertPdfToImages(testCase.pdfPath);
    console.log(`  Pages: ${images.length}`);

    // Extract with consensus voting (MiniCPM-V only)
    const extracted = await extractWithConsensus(images, testCase.name);
    const extracted = await extractInvoiceFromImages(images);
    console.log(`  Extracted: ${extracted.invoice_number} | ${extracted.invoice_date} | ${extracted.total_amount} ${extracted.currency}`);

    const endTime = Date.now();
    const elapsedMs = endTime - startTime;
    const elapsedMs = Date.now() - startTime;
    processingTimes.push(elapsedMs);

    // Compare results
    const result = compareInvoice(extracted, expected);

    if (result.match) {
@@ -317,27 +454,33 @@ for (const testCase of testCases) {
      result.errors.forEach((e) => console.log(`    - ${e}`));
    }

    // Assert match
    expect(result.match).toBeTrue();
  });
}

tap.test('cleanup: stop orchestrator', async () => {
  if (orchestrator) {
    await orchestrator.stop();
  }
  console.log('[Cleanup] Orchestrator stopped');
});

tap.test('summary', async () => {
  const totalInvoices = testCases.length;
  const accuracy = totalInvoices > 0 ? (passedCount / totalInvoices) * 100 : 0;
  const totalTimeMs = processingTimes.reduce((a, b) => a + b, 0);
  const avgTimeMs = processingTimes.length > 0 ? totalTimeMs / processingTimes.length : 0;
  const avgTimeSec = avgTimeMs / 1000;
  const totalTimeSec = totalTimeMs / 1000;
  const avgTimeSec = processingTimes.length > 0 ? totalTimeMs / processingTimes.length / 1000 : 0;

  console.log(`\n========================================`);
  console.log(`  Invoice Extraction Summary (MiniCPM)`);
  console.log(`  Invoice Extraction Summary`);
  console.log(`  (smartagent + ${MODEL})`);
  console.log(`========================================`);
  console.log(`  Method: DualAgentOrchestrator with vision`);
  console.log(`  Passed: ${passedCount}/${totalInvoices}`);
  console.log(`  Failed: ${failedCount}/${totalInvoices}`);
  console.log(`  Accuracy: ${accuracy.toFixed(1)}%`);
  console.log(`----------------------------------------`);
  console.log(`  Total time: ${totalTimeSec.toFixed(1)}s`);
  console.log(`  Total time: ${(totalTimeMs / 1000).toFixed(1)}s`);
  console.log(`  Avg per inv: ${avgTimeSec.toFixed(1)}s`);
  console.log(`========================================\n`);
});
753
test/test.invoices.nanonets.ts
Normal file
@@ -0,0 +1,753 @@
|
||||
/**
|
||||
* Invoice extraction using Nanonets-OCR2-3B + GPT-OSS 20B (sequential two-stage pipeline)
|
||||
*
|
||||
* Stage 1: Nanonets-OCR2-3B converts ALL document pages to markdown (stop after completion)
|
||||
* Stage 2: GPT-OSS 20B extracts structured JSON from saved markdown (after Nanonets stops)
|
||||
*
|
||||
* This approach avoids GPU contention by running services sequentially.
|
||||
*/
|
||||
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||
import * as fs from 'fs';
|
||||
import * as path from 'path';
|
||||
import { execSync } from 'child_process';
|
||||
import * as os from 'os';
|
||||
import { ensureNanonetsOcr, ensureMiniCpm, isContainerRunning } from './helpers/docker.js';
|
||||
import { SmartAi } from '@push.rocks/smartai';
|
||||
import { DualAgentOrchestrator, JsonValidatorTool } from '@push.rocks/smartagent';
|
||||
|
||||
const NANONETS_URL = 'http://localhost:8000/v1';
|
||||
const NANONETS_MODEL = 'nanonets/Nanonets-OCR2-3B';
|
||||
|
||||
const OLLAMA_URL = 'http://localhost:11434';
|
||||
const EXTRACTION_MODEL = 'gpt-oss:20b';
|
||||
|
||||
// Persistent cache directory for storing markdown between runs
|
||||
const MD_CACHE_DIR = path.join(process.cwd(), '.nogit/invoices-md');
|
||||
|
||||
// SmartAi instance for Ollama with optimized settings
|
||||
const smartAi = new SmartAi({
|
||||
ollama: {
|
||||
baseUrl: OLLAMA_URL,
|
||||
model: EXTRACTION_MODEL,
|
||||
defaultOptions: {
|
||||
num_ctx: 32768, // Larger context for long invoices + thinking
|
||||
temperature: 0, // Deterministic for JSON extraction
|
||||
},
|
||||
defaultTimeout: 600000, // 10 minute timeout for large documents
|
||||
},
|
||||
});
|
||||
|
||||
// DualAgentOrchestrator for structured task execution
|
||||
let orchestrator: DualAgentOrchestrator;
|
||||
|
||||
interface IInvoice {
|
||||
invoice_number: string;
|
||||
invoice_date: string;
|
||||
vendor_name: string;
|
||||
currency: string;
|
||||
net_amount: number;
|
||||
vat_amount: number;
|
||||
total_amount: number;
|
||||
}
|
||||
|
||||
interface IImageData {
|
||||
base64: string;
|
||||
width: number;
|
||||
height: number;
|
||||
pageNum: number;
|
||||
}
|
||||
|
||||
interface ITestCase {
|
||||
name: string;
|
||||
pdfPath: string;
|
||||
jsonPath: string;
|
||||
markdownPath?: string;
|
||||
}
|
||||
|
||||
// Nanonets-specific prompt for document OCR to markdown
|
||||
const NANONETS_OCR_PROMPT = `Extract the text from the above document as if you were reading it naturally.
|
||||
Return the tables in html format.
|
||||
Return the equations in LaTeX representation.
|
||||
If there is an image in the document and image caption is not present, add a small description inside <img></img> tag.
|
||||
Watermarks should be wrapped in brackets. Ex: <watermark>OFFICIAL COPY</watermark>.
|
||||
Page numbers should be wrapped in brackets. Ex: <page_number>14</page_number>.`;
|
||||
|
||||
// JSON extraction prompt for GPT-OSS 20B (sent AFTER the invoice text is provided)
|
||||
const JSON_EXTRACTION_PROMPT = `Extract key fields from the invoice. Return ONLY valid JSON.
|
||||
|
||||
WHERE TO FIND DATA:
|
||||
- invoice_number, invoice_date, vendor_name: Look in the HEADER section at the TOP of PAGE 1 (near "Invoice no.", "Invoice date:", "Rechnungsnummer"). Use common sense. Btw. an invoice number might start on INV* .
|
||||
- net_amount, vat_amount, total_amount: Look in the SUMMARY section at the BOTTOM (look for "Total", "Amount due", "Gesamtbetrag")
|
||||
|
||||
RULES:
|
||||
1. Use common sense.
|
||||
2. invoice_date: Convert to YYYY-MM-DD format (e.g., "14/04/2022" → "2022-04-14")
|
||||
3. vendor_name: The company issuing the invoice
|
||||
4. currency: EUR, USD, or GBP
|
||||
5. net_amount: Total before tax
|
||||
6. vat_amount: Tax amount
|
||||
7. total_amount: Final total with tax
|
||||
|
||||
JSON only:
|
||||
{"invoice_number":"X","invoice_date":"YYYY-MM-DD","vendor_name":"X","currency":"EUR","net_amount":0,"vat_amount":0,"total_amount":0}
|
||||
|
||||
Double check for valid JSON syntax.
|
||||
|
||||
`;
|
||||
|
||||
// Constants for smart batching
|
||||
const PATCH_SIZE = 14; // Qwen2.5-VL uses 14x14 patches
|
||||
|
||||
/**
|
||||
* Estimate visual tokens for an image based on dimensions
|
||||
*/
|
||||
function estimateVisualTokens(width: number, height: number): number {
|
||||
return Math.ceil((width * height) / (PATCH_SIZE * PATCH_SIZE));
|
||||
}
|
||||
|
||||
/**
|
||||
* Process images one page at a time for reliability
|
||||
*/
|
||||
function batchImages(images: IImageData[]): IImageData[][] {
|
||||
// One page per batch for reliable processing
|
||||
return images.map(img => [img]);
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert PDF to JPEG images using ImageMagick with dimension tracking
|
||||
*/
|
||||
function convertPdfToImages(pdfPath: string): IImageData[] {
|
||||
const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
|
||||
const outputPattern = path.join(tempDir, 'page-%d.jpg');
|
||||
|
||||
try {
|
||||
execSync(
|
||||
`convert -density 150 -quality 90 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
|
||||
{ stdio: 'pipe' }
|
||||
);
|
||||
|
||||
const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.jpg')).sort();
|
||||
const images: IImageData[] = [];
|
||||
|
||||
for (let i = 0; i < files.length; i++) {
|
||||
const file = files[i];
|
||||
const imagePath = path.join(tempDir, file);
|
||||
const imageData = fs.readFileSync(imagePath);
|
||||
|
||||
// Get image dimensions using identify command
|
||||
const dimensions = execSync(`identify -format "%w %h" "${imagePath}"`, { encoding: 'utf-8' }).trim();
|
||||
const [width, height] = dimensions.split(' ').map(Number);
|
||||
|
||||
images.push({
|
||||
base64: imageData.toString('base64'),
|
||||
width,
|
||||
height,
|
||||
pageNum: i + 1,
|
||||
});
|
||||
}
|
||||
|
||||
return images;
|
||||
} finally {
|
||||
fs.rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert a batch of pages to markdown using Nanonets-OCR-s
|
||||
*/
|
||||
async function convertBatchToMarkdown(batch: IImageData[]): Promise<string> {
|
||||
const startTime = Date.now();
|
||||
const pageNums = batch.map(img => img.pageNum).join(', ');
|
||||
|
||||
// Build content array with all images first, then the prompt
|
||||
const content: Array<{ type: string; image_url?: { url: string }; text?: string }> = [];
|
||||
|
||||
for (const img of batch) {
|
||||
content.push({
|
||||
type: 'image_url',
|
||||
image_url: { url: `data:image/jpeg;base64,${img.base64}` },
|
||||
});
|
||||
}
|
||||
|
||||
// Add prompt with page separator instruction if multiple pages
|
||||
const promptText = batch.length > 1
|
||||
? `${NANONETS_OCR_PROMPT}\n\nPlease clearly separate each page's content with "--- PAGE N ---" markers, where N is the page number starting from ${batch[0].pageNum}.`
|
||||
: NANONETS_OCR_PROMPT;
|
||||
|
||||
content.push({ type: 'text', text: promptText });
|
||||
|
||||
const response = await fetch(`${NANONETS_URL}/chat/completions`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'Authorization': 'Bearer dummy',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
model: NANONETS_MODEL,
|
||||
messages: [{
|
||||
role: 'user',
|
||||
content,
|
||||
}],
|
||||
max_tokens: 4096 * batch.length, // Scale output tokens with batch size
|
||||
temperature: 0.0,
|
||||
}),
|
||||
signal: AbortSignal.timeout(600000), // 10 minute timeout for OCR
|
||||
});
|
||||
|
||||
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
|
||||
|
||||
if (!response.ok) {
|
||||
const errorText = await response.text();
|
||||
throw new Error(`Nanonets API error: ${response.status} - ${errorText}`);
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
let responseContent = (data.choices?.[0]?.message?.content || '').trim();
|
||||
|
||||
// For single-page batches, add page marker if not present
|
||||
if (batch.length === 1 && !responseContent.includes('--- PAGE')) {
|
||||
responseContent = `--- PAGE ${batch[0].pageNum} ---\n${responseContent}`;
|
||||
}
|
||||
|
||||
console.log(` Pages [${pageNums}]: ${responseContent.length} chars (${elapsed}s)`);
|
||||
return responseContent;
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert all pages of a document to markdown using smart batching
|
||||
*/
|
||||
async function convertDocumentToMarkdown(images: IImageData[], docName: string): Promise<string> {
|
||||
const batches = batchImages(images);
|
||||
console.log(` [${docName}] Processing ${images.length} page(s) in ${batches.length} batch(es)...`);
|
||||
|
||||
const markdownParts: string[] = [];
|
||||
|
||||
for (let i = 0; i < batches.length; i++) {
|
||||
const batch = batches[i];
|
||||
const batchTokens = batch.reduce((sum, img) => sum + estimateVisualTokens(img.width, img.height), 0);
|
||||
console.log(` Batch ${i + 1}: ${batch.length} page(s), ~${batchTokens} tokens`);
|
||||
const markdown = await convertBatchToMarkdown(batch);
|
||||
markdownParts.push(markdown);
|
||||
}
|
||||
|
||||
const fullMarkdown = markdownParts.join('\n\n');
|
||||
console.log(` [${docName}] Complete: ${fullMarkdown.length} chars total`);
|
||||
return fullMarkdown;
|
||||
}
|
||||
|
||||
/**
|
||||
* Stop Nanonets container
|
||||
*/
|
||||
function stopNanonets(): void {
|
||||
console.log(' [Docker] Stopping Nanonets container...');
|
||||
try {
|
||||
execSync('docker stop nanonets-test 2>/dev/null || true', { stdio: 'pipe' });
|
||||
execSync('sleep 5', { stdio: 'pipe' });
|
||||
console.log(' [Docker] Nanonets stopped');
|
||||
} catch {
|
||||
console.log(' [Docker] Nanonets was not running');
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Ensure GPT-OSS 20B model is available
|
||||
*/
|
||||
async function ensureExtractionModel(): Promise<boolean> {
|
||||
try {
|
||||
const response = await fetch(`${OLLAMA_URL}/api/tags`);
|
||||
if (response.ok) {
|
||||
const data = await response.json();
|
||||
const models = data.models || [];
|
||||
if (models.some((m: { name: string }) => m.name === EXTRACTION_MODEL)) {
|
||||
console.log(` [Ollama] Model available: ${EXTRACTION_MODEL}`);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
|
||||
console.log(` [Ollama] Pulling ${EXTRACTION_MODEL}...`);
|
||||
const pullResponse = await fetch(`${OLLAMA_URL}/api/pull`, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ name: EXTRACTION_MODEL, stream: false }),
|
||||
});
|
||||
|
||||
return pullResponse.ok;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse amount from string (handles European format)
|
||||
*/
|
||||
function parseAmount(s: string | number | undefined): number {
|
||||
if (s === undefined || s === null) return 0;
|
||||
if (typeof s === 'number') return s;
|
||||
const match = s.match(/([\d.,]+)/);
|
||||
if (!match) return 0;
|
||||
const numStr = match[1];
|
||||
const normalized = numStr.includes(',') && numStr.indexOf(',') > numStr.lastIndexOf('.')
|
||||
? numStr.replace(/\./g, '').replace(',', '.')
|
||||
: numStr.replace(/,/g, '');
|
||||
return parseFloat(normalized) || 0;
|
||||
}

/**
 * Extract invoice number from potentially verbose response
 */
function extractInvoiceNumber(s: string | undefined): string {
  if (!s) return '';
  const clean = s.replace(/\*\*/g, '').replace(/`/g, '').trim();
  const patterns = [
    /\b([A-Z]{2,3}\d{10,})\b/i,
    /\b([A-Z]\d{8,})\b/i,
    /\b(INV[-\s]?\d{4}[-\s]?\d+)\b/i,
    /\b(\d{7,})\b/,
  ];
  for (const pattern of patterns) {
    const match = clean.match(pattern);
    if (match) return match[1];
  }
  return clean.replace(/[^A-Z0-9-]/gi, '').trim() || clean;
}
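
// Illustrative examples (hypothetical inputs): '**IEE2022006460244**' matches the
// first pattern and yields 'IEE2022006460244'; 'Invoice No. R0014359508' yields
// 'R0014359508' via the second pattern.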

/**
 * Extract date (YYYY-MM-DD) from response
 */
function extractDate(s: string | undefined): string {
  if (!s) return '';
  const clean = s.replace(/\*\*/g, '').replace(/`/g, '').trim();
  const isoMatch = clean.match(/(\d{4}-\d{2}-\d{2})/);
  if (isoMatch) return isoMatch[1];
  const dmyMatch = clean.match(/(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})/);
  if (dmyMatch) {
    return `${dmyMatch[3]}-${dmyMatch[2].padStart(2, '0')}-${dmyMatch[1].padStart(2, '0')}`;
  }
  return clean.replace(/[^\d-]/g, '').trim();
}
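
// Illustrative examples (hypothetical inputs): 'The date is **2022-01-28**' yields
// '2022-01-28'; '28.01.2022' is treated as day-first and also yields '2022-01-28'.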

/**
 * Extract currency
 */
function extractCurrency(s: string | undefined): string {
  if (!s) return 'EUR';
  const upper = s.toUpperCase();
  if (upper.includes('EUR') || upper.includes('€')) return 'EUR';
  if (upper.includes('USD') || upper.includes('$')) return 'USD';
  if (upper.includes('GBP') || upper.includes('£')) return 'GBP';
  return 'EUR';
}

/**
 * Try to extract valid JSON from a response string
 */
function tryExtractJson(response: string): Record<string, unknown> | null {
  // Remove thinking tags
  const clean = response.replace(/<think>[\s\S]*?<\/think>/g, '').trim();

  // Try code block
  const codeBlockMatch = clean.match(/```(?:json)?\s*([\s\S]*?)```/);
  const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : clean;

  try {
    return JSON.parse(jsonStr);
  } catch {
    // Try to find JSON object
    const jsonMatch = jsonStr.match(/\{[\s\S]*\}/);
    if (jsonMatch) {
      try {
        return JSON.parse(jsonMatch[0]);
      } catch {
        return null;
      }
    }
    return null;
  }
}
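
// Illustrative example (hypothetical input): a response such as
// '<think>reasoning</think>```json\n{"total_amount":24.99}\n```' first has the
// <think> block stripped, then the fenced body parsed, yielding { total_amount: 24.99 }.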

/**
 * Extract invoice from markdown using smartagent DualAgentOrchestrator
 * Validates JSON and retries if invalid
 */
async function extractInvoiceFromMarkdown(markdown: string, queryId: string): Promise<IInvoice | null> {
  const startTime = Date.now();
  const maxRetries = 2;

  console.log(`  [${queryId}] Invoice: ${markdown.length} chars`);

  // Build the extraction task with document context
  const taskPrompt = `Extract the invoice data from this document and output ONLY the JSON:

${markdown}

${JSON_EXTRACTION_PROMPT}`;

  try {
    let result = await orchestrator.run(taskPrompt);
    let elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
    console.log(`  [${queryId}] Status: ${result.status}, Iterations: ${result.iterations} (${elapsed}s)`);

    // Try to parse JSON from result
    let jsonData: Record<string, unknown> | null = null;
    let responseText = result.result || '';

    if (result.success && responseText) {
      jsonData = tryExtractJson(responseText);
    }

    // Fallback: try parsing from history
    if (!jsonData && result.history?.length > 0) {
      const lastMessage = result.history[result.history.length - 1];
      if (lastMessage?.content) {
        responseText = lastMessage.content;
        jsonData = tryExtractJson(responseText);
      }
    }

    // If JSON is invalid, retry with correction request
    let retries = 0;
    while (!jsonData && retries < maxRetries) {
      retries++;
      console.log(`  [${queryId}] Invalid JSON, requesting correction (retry ${retries}/${maxRetries})...`);

      result = await orchestrator.continueTask(
        `Your response was not valid JSON. Please output ONLY the JSON object with no markdown, no explanation, no thinking tags. Just the raw JSON starting with { and ending with }. Format:
{"invoice_number":"X","invoice_date":"YYYY-MM-DD","vendor_name":"X","currency":"EUR","net_amount":0,"vat_amount":0,"total_amount":0}`
      );

      elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
      console.log(`  [${queryId}] Retry ${retries}: ${result.status} (${elapsed}s)`);

      responseText = result.result || '';
      if (responseText) {
        jsonData = tryExtractJson(responseText);
      }

      if (!jsonData && result.history?.length > 0) {
        const lastMessage = result.history[result.history.length - 1];
        if (lastMessage?.content) {
          responseText = lastMessage.content;
          jsonData = tryExtractJson(responseText);
        }
      }
    }

    if (!jsonData) {
      console.log(`  [${queryId}] Failed to get valid JSON after ${retries} retries`);
      return null;
    }

    console.log(`  [${queryId}] Valid JSON extracted`);
    return {
      invoice_number: extractInvoiceNumber(String(jsonData.invoice_number || '')),
      invoice_date: extractDate(String(jsonData.invoice_date || '')),
      vendor_name: String(jsonData.vendor_name || '').replace(/\*\*/g, '').replace(/`/g, '').trim(),
      currency: extractCurrency(String(jsonData.currency || '')),
      net_amount: parseAmount(jsonData.net_amount as string | number),
      vat_amount: parseAmount(jsonData.vat_amount as string | number),
      total_amount: parseAmount(jsonData.total_amount as string | number),
    };
  } catch (error) {
    const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
    console.log(`  [${queryId}] ERROR: ${error} (${elapsed}s)`);
    throw error;
  }
}

/**
 * Extract invoice (single pass - GPT-OSS is more reliable)
 */
async function extractInvoice(markdown: string, docName: string): Promise<IInvoice> {
  console.log(`  [${docName}] Extracting...`);
  const invoice = await extractInvoiceFromMarkdown(markdown, docName);
  if (!invoice) {
    return {
      invoice_number: '',
      invoice_date: '',
      vendor_name: '',
      currency: 'EUR',
      net_amount: 0,
      vat_amount: 0,
      total_amount: 0,
    };
  }
  console.log(`  [${docName}] Extracted: ${invoice.invoice_number}`);
  return invoice;
}

/**
 * Normalize date to YYYY-MM-DD
 */
function normalizeDate(dateStr: string | null): string {
  if (!dateStr) return '';
  if (/^\d{4}-\d{2}-\d{2}$/.test(dateStr)) return dateStr;

  const monthMap: Record<string, string> = {
    JAN: '01', FEB: '02', MAR: '03', APR: '04', MAY: '05', JUN: '06',
    JUL: '07', AUG: '08', SEP: '09', OCT: '10', NOV: '11', DEC: '12',
  };

  let match = dateStr.match(/^(\d{1,2})-([A-Z]{3})-(\d{4})$/i);
  if (match) {
    return `${match[3]}-${monthMap[match[2].toUpperCase()] || '01'}-${match[1].padStart(2, '0')}`;
  }

  match = dateStr.match(/^(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})$/);
  if (match) {
    return `${match[3]}-${match[2].padStart(2, '0')}-${match[1].padStart(2, '0')}`;
  }

  return dateStr;
}
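
// Illustrative examples (hypothetical inputs): '28-JAN-2022' and '28.01.2022'
// both normalize to '2022-01-28'; an already-normalized '2022-01-28' is returned as-is.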

/**
 * Compare extracted invoice against expected
 */
function compareInvoice(
  extracted: IInvoice,
  expected: IInvoice
): { match: boolean; errors: string[] } {
  const errors: string[] = [];

  const extNum = extracted.invoice_number?.replace(/\s/g, '').toLowerCase() || '';
  const expNum = expected.invoice_number?.replace(/\s/g, '').toLowerCase() || '';
  if (extNum !== expNum) {
    errors.push(`invoice_number: exp "${expected.invoice_number}", got "${extracted.invoice_number}"`);
  }

  if (normalizeDate(extracted.invoice_date) !== normalizeDate(expected.invoice_date)) {
    errors.push(`invoice_date: exp "${expected.invoice_date}", got "${extracted.invoice_date}"`);
  }

  if (Math.abs(extracted.total_amount - expected.total_amount) > 0.02) {
    errors.push(`total_amount: exp ${expected.total_amount}, got ${extracted.total_amount}`);
  }

  if (extracted.currency?.toUpperCase() !== expected.currency?.toUpperCase()) {
    errors.push(`currency: exp "${expected.currency}", got "${extracted.currency}"`);
  }

  return { match: errors.length === 0, errors };
}
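
// Note: the 0.02 tolerance absorbs rounding differences, so e.g. an extracted
// total of 24.98 still matches an expected 24.99 (illustrative values).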

/**
 * Find all test cases
 */
function findTestCases(): ITestCase[] {
  const testDir = path.join(process.cwd(), '.nogit/invoices');
  if (!fs.existsSync(testDir)) return [];

  const files = fs.readdirSync(testDir);
  const testCases: ITestCase[] = [];

  for (const pdf of files.filter((f) => f.endsWith('.pdf'))) {
    const baseName = pdf.replace('.pdf', '');
    const jsonFile = `${baseName}.json`;
    if (files.includes(jsonFile)) {
      testCases.push({
        name: baseName,
        pdfPath: path.join(testDir, pdf),
        jsonPath: path.join(testDir, jsonFile),
      });
    }
  }

  return testCases.sort((a, b) => a.name.localeCompare(b.name));
}
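
// Expected layout (hypothetical names): .nogit/invoices/acme-2022-001.pdf paired
// with .nogit/invoices/acme-2022-001.json holding the ground-truth IInvoice fields.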

// ============ TESTS ============

const testCases = findTestCases();
console.log(`\nFound ${testCases.length} invoice test cases\n`);

// Ensure cache directory exists
if (!fs.existsSync(MD_CACHE_DIR)) {
  fs.mkdirSync(MD_CACHE_DIR, { recursive: true });
}

// -------- STAGE 1: OCR with Nanonets --------

tap.test('Stage 1: Convert invoices to markdown (with caching)', async () => {
  console.log('\n========== STAGE 1: Nanonets OCR ==========\n');

  // Check which invoices need OCR conversion
  const needsConversion: ITestCase[] = [];
  let cachedCount = 0;

  for (const tc of testCases) {
    const mdPath = path.join(MD_CACHE_DIR, `${tc.name}.md`);
    if (fs.existsSync(mdPath)) {
      cachedCount++;
      tc.markdownPath = mdPath;
      console.log(`  [CACHED] ${tc.name} - using cached markdown`);
    } else {
      needsConversion.push(tc);
    }
  }

  console.log(`\n  Summary: ${cachedCount} cached, ${needsConversion.length} need conversion\n`);

  if (needsConversion.length === 0) {
    console.log('  All invoices already cached, skipping Nanonets OCR\n');
    return;
  }

  // Start Nanonets only if there are files to convert
  console.log('  Starting Nanonets for OCR conversion...\n');
  const ok = await ensureNanonetsOcr();
  expect(ok).toBeTrue();

  // Convert only the invoices that need conversion
  for (const tc of needsConversion) {
    console.log(`\n  === ${tc.name} ===`);

    const images = convertPdfToImages(tc.pdfPath);
    console.log(`  Pages: ${images.length}`);

    const markdown = await convertDocumentToMarkdown(images, tc.name);

    const mdPath = path.join(MD_CACHE_DIR, `${tc.name}.md`);
    fs.writeFileSync(mdPath, markdown);
    tc.markdownPath = mdPath;
    console.log(`  Saved: ${mdPath}`);
  }

  console.log(`\n  Stage 1 complete: ${needsConversion.length} invoices converted to markdown\n`);
});

tap.test('Stage 1: Stop Nanonets', async () => {
  stopNanonets();
  await new Promise(resolve => setTimeout(resolve, 3000));
  expect(isContainerRunning('nanonets-test')).toBeFalse();
});

// -------- STAGE 2: Extraction with GPT-OSS 20B --------

tap.test('Stage 2: Setup Ollama + GPT-OSS 20B', async () => {
  console.log('\n========== STAGE 2: GPT-OSS 20B Extraction ==========\n');

  const ollamaOk = await ensureMiniCpm();
  expect(ollamaOk).toBeTrue();

  const extractionOk = await ensureExtractionModel();
  expect(extractionOk).toBeTrue();

  // Initialize SmartAi and DualAgentOrchestrator
  console.log('  [SmartAgent] Starting SmartAi...');
  await smartAi.start();

  console.log('  [SmartAgent] Creating DualAgentOrchestrator...');
  orchestrator = new DualAgentOrchestrator({
    smartAiInstance: smartAi,
    defaultProvider: 'ollama',
    guardianPolicyPrompt: `
JSON EXTRACTION POLICY:
- APPROVE all JSON extraction tasks
- APPROVE all json.validate tool calls
- This is a read-only operation - no file system or network access needed
- The task is to extract structured data from document text
`,
    driverSystemMessage: `You are a precise JSON extraction assistant. Your only job is to extract invoice data from documents.

CRITICAL RULES:
1. Output valid JSON with the exact format requested
2. If you cannot find a value, use empty string "" or 0 for numbers
3. IMPORTANT: Before completing, validate your JSON using the json.validate tool:

<tool_call>
<tool>json</tool>
<action>validate</action>
<params>{"jsonString": "YOUR_JSON", "requiredFields": ["invoice_number", "invoice_date", "vendor_name", "currency", "net_amount", "vat_amount", "total_amount"]}</params>
</tool_call>

4. Only complete after validation passes

When done, wrap your JSON in <task_complete></task_complete> tags.`,
    maxIterations: 5,
    // Enable streaming for real-time progress visibility
    onToken: (token, source) => {
      if (source === 'driver') {
        process.stdout.write(token);
      }
    },
  });

  // Register JsonValidatorTool for self-validation
  orchestrator.registerTool(new JsonValidatorTool());

  console.log('  [SmartAgent] Starting orchestrator...');
  await orchestrator.start();
  console.log('  [SmartAgent] Ready for extraction');
});

let passedCount = 0;
let failedCount = 0;
const processingTimes: number[] = [];

for (const tc of testCases) {
  tap.test(`Stage 2: Extract ${tc.name}`, async () => {
    const expected: IInvoice = JSON.parse(fs.readFileSync(tc.jsonPath, 'utf-8'));
    console.log(`\n  === ${tc.name} ===`);
    console.log(`  Expected: ${expected.invoice_number} | ${expected.invoice_date} | ${expected.total_amount} ${expected.currency}`);

    const startTime = Date.now();

    const mdPath = path.join(MD_CACHE_DIR, `${tc.name}.md`);
    if (!fs.existsSync(mdPath)) {
      throw new Error(`Markdown not found: ${mdPath}. Run Stage 1 first.`);
    }
    const markdown = fs.readFileSync(mdPath, 'utf-8');
    console.log(`  Markdown: ${markdown.length} chars`);

    const extracted = await extractInvoice(markdown, tc.name);

    const elapsedMs = Date.now() - startTime;
    processingTimes.push(elapsedMs);

    console.log(`  Extracted: ${extracted.invoice_number} | ${extracted.invoice_date} | ${extracted.total_amount} ${extracted.currency}`);

    const result = compareInvoice(extracted, expected);

    if (result.match) {
      passedCount++;
      console.log(`  Result: MATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
    } else {
      failedCount++;
      console.log(`  Result: MISMATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
      result.errors.forEach(e => console.log(`    - ${e}`));
    }

    expect(result.match).toBeTrue();
  });
}

tap.test('Summary', async () => {
  // Cleanup orchestrator and SmartAi
  if (orchestrator) {
    console.log('\n  [SmartAgent] Stopping orchestrator...');
    await orchestrator.stop();
  }
  console.log('  [SmartAgent] Stopping SmartAi...');
  await smartAi.stop();

  const totalInvoices = testCases.length;
  const accuracy = totalInvoices > 0 ? (passedCount / totalInvoices) * 100 : 0;
  const totalTimeMs = processingTimes.reduce((a, b) => a + b, 0);
  const avgTimeSec = processingTimes.length > 0 ? totalTimeMs / processingTimes.length / 1000 : 0;

  console.log(`\n========================================`);
  console.log(`  Invoice Summary (Nanonets + GPT-OSS 20B)`);
  console.log(`========================================`);
  console.log(`  Stage 1: Nanonets-OCR-s (doc -> md)`);
  console.log(`  Stage 2: GPT-OSS 20B + SmartAgent (md -> JSON)`);
  console.log(`  Passed: ${passedCount}/${totalInvoices}`);
  console.log(`  Failed: ${failedCount}/${totalInvoices}`);
  console.log(`  Accuracy: ${accuracy.toFixed(1)}%`);
  console.log(`----------------------------------------`);
  console.log(`  Total time: ${(totalTimeMs / 1000).toFixed(1)}s`);
  console.log(`  Avg per inv: ${avgTimeSec.toFixed(1)}s`);
  console.log(`========================================\n`);
  console.log(`  Cache location: ${MD_CACHE_DIR}\n`);
});

export default tap.start();
@@ -1,461 +0,0 @@
/**
 * Invoice extraction test using PaddleOCR-VL Full Pipeline
 *
 * This tests the complete PaddleOCR-VL pipeline:
 * 1. PP-DocLayoutV2 for layout detection
 * 2. PaddleOCR-VL for recognition
 * 3. Structured HTML output (semantic tags with proper tables)
 * 4. Qwen2.5 extracts invoice fields from structured HTML
 *
 * HTML output is used instead of Markdown because:
 * - <table> tags are unambiguous (no parser variations)
 * - LLMs are heavily trained on web/HTML data
 * - Semantic tags (header, footer, section) provide clear structure
 */
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensurePaddleOcrVlFull, ensureQwen25 } from './helpers/docker.js';

const PADDLEOCR_VL_URL = 'http://localhost:8000';
const OLLAMA_URL = 'http://localhost:11434';
// Use Qwen2.5 for text-only JSON extraction (not MiniCPM which is vision-focused)
const TEXT_MODEL = 'qwen2.5:7b';

interface IInvoice {
  invoice_number: string;
  invoice_date: string;
  vendor_name: string;
  currency: string;
  net_amount: number;
  vat_amount: number;
  total_amount: number;
}

/**
 * Convert PDF to PNG images using ImageMagick
 */
function convertPdfToImages(pdfPath: string): string[] {
  const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
  const outputPattern = path.join(tempDir, 'page-%d.png');

  try {
    execSync(
      `convert -density 200 -quality 90 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
      { stdio: 'pipe' }
    );

    const files = fs.readdirSync(tempDir).filter((f) => f.endsWith('.png')).sort();
    const images: string[] = [];

    for (const file of files) {
      const imagePath = path.join(tempDir, file);
      const imageData = fs.readFileSync(imagePath);
      images.push(imageData.toString('base64'));
    }

    return images;
  } finally {
    fs.rmSync(tempDir, { recursive: true, force: true });
  }
}

/**
 * Parse document using PaddleOCR-VL Full Pipeline (returns structured HTML)
 */
async function parseDocument(imageBase64: string): Promise<string> {
  const response = await fetch(`${PADDLEOCR_VL_URL}/parse`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      image: imageBase64,
      output_format: 'html',
    }),
  });

  if (!response.ok) {
    const text = await response.text();
    throw new Error(`PaddleOCR-VL API error: ${response.status} - ${text}`);
  }

  const data = await response.json();

  if (!data.success) {
    throw new Error(`PaddleOCR-VL error: ${data.error}`);
  }

  return data.result?.html || '';
}

/**
 * Extract invoice fields from structured HTML using Qwen2.5 (text-only model)
 */
async function extractInvoiceFromHtml(html: string): Promise<IInvoice> {
  // Truncate if too long (HTML is more valuable per byte, allow more)
  const truncated = html.length > 16000 ? html.slice(0, 16000) : html;
  console.log(`  [Extract] Processing ${truncated.length} chars of HTML`);

  const prompt = `You are an invoice data extractor. Extract the following fields from this HTML document (OCR output with semantic structure) and return ONLY a valid JSON object.

The HTML uses semantic tags:
- <table> with <thead>/<tbody> for structured tables (invoice line items, totals)
- <header> for document header (company info, invoice number)
- <footer> for document footer (payment terms, legal text)
- <section class="table-region"> for table regions
- data-type and data-y attributes indicate block type and vertical position

Required fields:
- invoice_number: The invoice/receipt/document number
- invoice_date: Date in YYYY-MM-DD format (convert from any format)
- vendor_name: Company that issued the invoice
- currency: EUR, USD, GBP, etc.
- net_amount: Amount before tax (number)
- vat_amount: Tax/VAT amount (number, use 0 if reverse charge or not shown)
- total_amount: Final total amount (number)

Example output format:
{"invoice_number":"INV-123","invoice_date":"2022-01-28","vendor_name":"Adobe","currency":"EUR","net_amount":24.99,"vat_amount":0,"total_amount":24.99}

Rules:
- Return ONLY the JSON object, no explanation or markdown
- Use null for missing string fields
- Use 0 for missing numeric fields
- Convert dates to YYYY-MM-DD format (e.g., "28-JAN-2022" becomes "2022-01-28")
- Extract numbers without currency symbols
- Look for totals in <table> sections, especially rows with "Total", "Amount Due", "Grand Total"

HTML Document:
${truncated}

JSON:`;

  const payload = {
    model: TEXT_MODEL,
    prompt,
    stream: true,
    options: {
      num_predict: 512,
      temperature: 0.1,
    },
  };

  const response = await fetch(`${OLLAMA_URL}/api/generate`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload),
  });

  if (!response.ok) {
    throw new Error(`Ollama API error: ${response.status}`);
  }

  const reader = response.body?.getReader();
  if (!reader) {
    throw new Error('No response body');
  }

  const decoder = new TextDecoder();
  let fullText = '';

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;

    const chunk = decoder.decode(value, { stream: true });
    const lines = chunk.split('\n').filter((l) => l.trim());

    for (const line of lines) {
      try {
        const json = JSON.parse(line);
        if (json.response) {
          fullText += json.response;
        }
      } catch {
        // Skip invalid JSON lines
      }
    }
  }

  // Extract JSON from response
  const startIdx = fullText.indexOf('{');
  const endIdx = fullText.lastIndexOf('}') + 1;

  if (startIdx < 0 || endIdx <= startIdx) {
    throw new Error(`No JSON object found in response: ${fullText.substring(0, 200)}`);
  }

  const jsonStr = fullText.substring(startIdx, endIdx);
  const parsed = JSON.parse(jsonStr);

  // Ensure numeric fields are actually numbers
  return {
    invoice_number: parsed.invoice_number || null,
    invoice_date: parsed.invoice_date || null,
    vendor_name: parsed.vendor_name || null,
    currency: parsed.currency || 'EUR',
    net_amount: parseFloat(parsed.net_amount) || 0,
    vat_amount: parseFloat(parsed.vat_amount) || 0,
    total_amount: parseFloat(parsed.total_amount) || 0,
  };
}

/**
 * Single extraction pass: Parse with PaddleOCR-VL Full, extract with Qwen2.5 (text-only)
 */
async function extractOnce(images: string[], passNum: number): Promise<IInvoice> {
  // Parse document with full pipeline (PaddleOCR-VL) -> returns HTML
  const html = await parseDocument(images[0]);
  console.log(`  [Parse] Got ${html.split('\n').length} lines of HTML`);

  // Extract invoice fields from HTML using text-only model (no images)
  return extractInvoiceFromHtml(html);
}

/**
 * Create a hash of invoice for comparison (using key fields)
 */
function hashInvoice(invoice: IInvoice): string {
  // Ensure total_amount is a number
  const amount = typeof invoice.total_amount === 'number'
    ? invoice.total_amount.toFixed(2)
    : String(invoice.total_amount || 0);
  return `${invoice.invoice_number}|${invoice.invoice_date}|${amount}`;
}
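
// Illustrative example (hypothetical values): an invoice with number 'INV-123',
// date '2022-01-28' and total 24.99 hashes to 'INV-123|2022-01-28|24.99'.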

/**
 * Extract with consensus voting
 */
async function extractWithConsensus(images: string[], invoiceName: string, maxPasses: number = 5): Promise<IInvoice> {
  const results: Array<{ invoice: IInvoice; hash: string }> = [];
  const hashCounts: Map<string, number> = new Map();

  const addResult = (invoice: IInvoice, passLabel: string): number => {
    const hash = hashInvoice(invoice);
    results.push({ invoice, hash });
    hashCounts.set(hash, (hashCounts.get(hash) || 0) + 1);
    console.log(`  [${passLabel}] ${invoice.invoice_number} | ${invoice.invoice_date} | ${invoice.total_amount} ${invoice.currency}`);
    return hashCounts.get(hash)!;
  };

  for (let pass = 1; pass <= maxPasses; pass++) {
    try {
      const invoice = await extractOnce(images, pass);
      const count = addResult(invoice, `Pass ${pass}`);

      if (count >= 2) {
        console.log(`  [Consensus] Reached after ${pass} passes`);
        return invoice;
      }
    } catch (err) {
      console.log(`  [Pass ${pass}] Error: ${err}`);
    }
  }

  // No consensus reached - return the most common result
  let bestHash = '';
  let bestCount = 0;
  for (const [hash, count] of hashCounts) {
    if (count > bestCount) {
      bestCount = count;
      bestHash = hash;
    }
  }

  if (!bestHash) {
    throw new Error(`No valid results for ${invoiceName}`);
  }

  const best = results.find((r) => r.hash === bestHash)!;
  console.log(`  [No consensus] Using most common result (${bestCount}/${maxPasses} passes)`);
  return best.invoice;
}
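
// Illustrative walk-through (hypothetical hashes): if pass 1 yields hash A and
// pass 2 yields A again, the loop returns early after two passes. The fallback
// below only runs when every successful pass produced a distinct hash (or errored),
// in which case the most frequent result seen so far is returned.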

/**
 * Normalize date to YYYY-MM-DD format
 */
function normalizeDate(dateStr: string | null): string {
  if (!dateStr) return '';

  // Already in correct format
  if (/^\d{4}-\d{2}-\d{2}$/.test(dateStr)) {
    return dateStr;
  }

  // Handle DD-MMM-YYYY format (e.g., "28-JUN-2022")
  const monthMap: Record<string, string> = {
    JAN: '01', FEB: '02', MAR: '03', APR: '04', MAY: '05', JUN: '06',
    JUL: '07', AUG: '08', SEP: '09', OCT: '10', NOV: '11', DEC: '12',
  };

  const match = dateStr.match(/^(\d{1,2})-([A-Z]{3})-(\d{4})$/i);
  if (match) {
    const day = match[1].padStart(2, '0');
    const month = monthMap[match[2].toUpperCase()] || '01';
    const year = match[3];
    return `${year}-${month}-${day}`;
  }

  // Handle DD/MM/YYYY or DD.MM.YYYY
  const match2 = dateStr.match(/^(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})$/);
  if (match2) {
    const day = match2[1].padStart(2, '0');
    const month = match2[2].padStart(2, '0');
    const year = match2[3];
    return `${year}-${month}-${day}`;
  }

  return dateStr;
}

/**
 * Compare extracted invoice against expected
 */
function compareInvoice(
  extracted: IInvoice,
  expected: IInvoice
): { match: boolean; errors: string[] } {
  const errors: string[] = [];

  // Compare invoice number (normalize by removing spaces and case)
  const extNum = extracted.invoice_number?.replace(/\s/g, '').toLowerCase() || '';
  const expNum = expected.invoice_number?.replace(/\s/g, '').toLowerCase() || '';
  if (extNum !== expNum) {
    errors.push(`invoice_number: expected "${expected.invoice_number}", got "${extracted.invoice_number}"`);
  }

  // Compare date (normalize format first)
  const extDate = normalizeDate(extracted.invoice_date);
  const expDate = normalizeDate(expected.invoice_date);
  if (extDate !== expDate) {
    errors.push(`invoice_date: expected "${expected.invoice_date}", got "${extracted.invoice_date}"`);
  }

  // Compare total amount (with tolerance)
  if (Math.abs(extracted.total_amount - expected.total_amount) > 0.02) {
    errors.push(`total_amount: expected ${expected.total_amount}, got ${extracted.total_amount}`);
  }

  // Compare currency
  if (extracted.currency?.toUpperCase() !== expected.currency?.toUpperCase()) {
    errors.push(`currency: expected "${expected.currency}", got "${extracted.currency}"`);
  }

  return { match: errors.length === 0, errors };
}

/**
 * Find all test cases (PDF + JSON pairs) in .nogit/invoices/
 */
function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: string }> {
  const testDir = path.join(process.cwd(), '.nogit/invoices');
  if (!fs.existsSync(testDir)) {
    return [];
  }

  const files = fs.readdirSync(testDir);
  const pdfFiles = files.filter((f) => f.endsWith('.pdf'));
  const testCases: Array<{ name: string; pdfPath: string; jsonPath: string }> = [];

  for (const pdf of pdfFiles) {
    const baseName = pdf.replace('.pdf', '');
    const jsonFile = `${baseName}.json`;
    if (files.includes(jsonFile)) {
      testCases.push({
        name: baseName,
        pdfPath: path.join(testDir, pdf),
        jsonPath: path.join(testDir, jsonFile),
      });
    }
  }

  // Sort alphabetically
  testCases.sort((a, b) => a.name.localeCompare(b.name));

  return testCases;
}

// Tests

tap.test('setup: ensure Docker containers are running', async () => {
  console.log('\n[Setup] Checking Docker containers...\n');

  // Ensure PaddleOCR-VL Full Pipeline is running
  const paddleOk = await ensurePaddleOcrVlFull();
  expect(paddleOk).toBeTrue();

  // Ensure Qwen2.5 is available (for text-only JSON extraction)
  const qwenOk = await ensureQwen25();
  expect(qwenOk).toBeTrue();

  console.log('\n[Setup] All containers ready!\n');
});

// Dynamic test for each PDF/JSON pair
const testCases = findTestCases();
console.log(`\nFound ${testCases.length} invoice test cases (PaddleOCR-VL Full Pipeline)\n`);

let passedCount = 0;
let failedCount = 0;
const processingTimes: number[] = [];

for (const testCase of testCases) {
  tap.test(`should extract invoice: ${testCase.name}`, async () => {
    // Load expected data
    const expected: IInvoice = JSON.parse(fs.readFileSync(testCase.jsonPath, 'utf-8'));
    console.log(`\n=== ${testCase.name} ===`);
    console.log(`Expected: ${expected.invoice_number} | ${expected.invoice_date} | ${expected.total_amount} ${expected.currency}`);

    const startTime = Date.now();

    // Convert PDF to images
    const images = convertPdfToImages(testCase.pdfPath);
    console.log(`  Pages: ${images.length}`);

    // Extract with consensus voting (PaddleOCR-VL Full -> Qwen2.5)
    const extracted = await extractWithConsensus(images, testCase.name);

    const endTime = Date.now();
    const elapsedMs = endTime - startTime;
    processingTimes.push(elapsedMs);

    // Compare results
    const result = compareInvoice(extracted, expected);

    if (result.match) {
      passedCount++;
      console.log(`  Result: MATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
    } else {
      failedCount++;
      console.log(`  Result: MISMATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
      result.errors.forEach((e) => console.log(`    - ${e}`));
    }

    // Assert match
    expect(result.match).toBeTrue();
  });
}

tap.test('summary', async () => {
  const totalInvoices = testCases.length;
  const accuracy = totalInvoices > 0 ? (passedCount / totalInvoices) * 100 : 0;
  const totalTimeMs = processingTimes.reduce((a, b) => a + b, 0);
  const avgTimeMs = processingTimes.length > 0 ? totalTimeMs / processingTimes.length : 0;
  const avgTimeSec = avgTimeMs / 1000;
  const totalTimeSec = totalTimeMs / 1000;

  console.log(`\n======================================================`);
  console.log(`  Invoice Extraction Summary (PaddleOCR-VL Full)`);
  console.log(`======================================================`);
  console.log(`  Method: PaddleOCR-VL Full Pipeline (HTML) -> Qwen2.5 (text-only)`);
  console.log(`  Passed: ${passedCount}/${totalInvoices}`);
  console.log(`  Failed: ${failedCount}/${totalInvoices}`);
  console.log(`  Accuracy: ${accuracy.toFixed(1)}%`);
  console.log(`------------------------------------------------------`);
  console.log(`  Total time: ${totalTimeSec.toFixed(1)}s`);
  console.log(`  Avg per inv: ${avgTimeSec.toFixed(1)}s`);
  console.log(`======================================================\n`);
});

export default tap.start();
351
test/test.invoices.qwen3vl.ts
Normal file
@@ -0,0 +1,351 @@
/**
 * Invoice extraction using Qwen3-VL 8B Vision (Direct)
 *
 * Multi-query approach: seven parallel simple queries (one per field) to avoid
 * token exhaustion. Single pass, no consensus voting.
 */
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensureMiniCpm } from './helpers/docker.js';

const OLLAMA_URL = 'http://localhost:11434';
const VISION_MODEL = 'qwen3-vl:8b';

interface IInvoice {
  invoice_number: string;
  invoice_date: string;
  vendor_name: string;
  currency: string;
  net_amount: number;
  vat_amount: number;
  total_amount: number;
}

/**
 * Convert PDF to PNG images using ImageMagick
 */
function convertPdfToImages(pdfPath: string): string[] {
  const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
  const outputPattern = path.join(tempDir, 'page-%d.png');

  try {
    // 150 DPI is sufficient for invoice extraction, reduces context size
    execSync(
      `convert -density 150 -quality 90 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
      { stdio: 'pipe' }
    );

    // Numeric-aware sort so page-10 comes after page-9 (a plain .sort() is lexicographic)
    const files = fs.readdirSync(tempDir)
      .filter((f) => f.endsWith('.png'))
      .sort((a, b) => a.localeCompare(b, undefined, { numeric: true }));
    const images: string[] = [];

    for (const file of files) {
      const imagePath = path.join(tempDir, file);
      const imageData = fs.readFileSync(imagePath);
      images.push(imageData.toString('base64'));
    }

    return images;
  } finally {
    fs.rmSync(tempDir, { recursive: true, force: true });
  }
}

/**
 * Query Qwen3-VL for a single field
 * Uses simple prompts to minimize thinking tokens
 */
async function queryField(images: string[], question: string): Promise<string> {
  const response = await fetch(`${OLLAMA_URL}/api/chat`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: VISION_MODEL,
      messages: [{
        role: 'user',
        content: `${question} Reply with just the value, nothing else.`,
        images: images,
      }],
      stream: false,
      options: {
        num_predict: 500,
        temperature: 0.1,
      },
    }),
  });

  if (!response.ok) {
    throw new Error(`Ollama API error: ${response.status}`);
  }

  const data = await response.json();
  return (data.message?.content || '').trim();
}
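
// Illustrative usage (hypothetical output): await queryField(images,
// 'What CURRENCY is used? Answer with 3-letter code: EUR, USD, or GBP')
// might return '**EUR**', which the extractCurrency helper below normalizes to 'EUR'.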

/**
 * Extract invoice data using multiple simple queries
 * Each query asks for 1-2 fields to minimize thinking tokens
 * (Qwen3's thinking mode uses all tokens on complex prompts)
 */
async function extractInvoiceFromImages(images: string[]): Promise<IInvoice> {
  console.log(`  [Vision] Processing ${images.length} page(s) with Qwen3-VL (multi-query)`);

  // Query each field separately to avoid excessive thinking tokens
  // Use explicit questions to avoid confusion between similar fields
  // Log each result as it comes in (not waiting for all to complete)
  const queryAndLog = async (name: string, question: string): Promise<string> => {
    const result = await queryField(images, question);
    console.log(`  [Query] ${name}: "${result}"`);
    return result;
  };

  const [invoiceNum, invoiceDate, vendor, currency, totalAmount, netAmount, vatAmount] = await Promise.all([
    queryAndLog('Invoice Number', 'What is the INVOICE NUMBER (not VAT number, not customer ID)? Look for "Invoice No", "Invoice #", "Rechnung Nr", "Facture". Just the number/code.'),
    queryAndLog('Invoice Date  ', 'What is the INVOICE DATE (not due date, not delivery date)? The date the invoice was issued. Format: YYYY-MM-DD'),
    queryAndLog('Vendor        ', 'What company ISSUED this invoice (the seller/vendor, not the buyer)? Look at the letterhead or "From" section.'),
    queryAndLog('Currency      ', 'What CURRENCY is used? Look for € (EUR), $ (USD), or £ (GBP). Answer with 3-letter code: EUR, USD, or GBP'),
    queryAndLog('Total Amount  ', 'What is the TOTAL AMOUNT INCLUDING TAX (the final amount to pay, with VAT/tax included)? Just the number, e.g. 24.99'),
    queryAndLog('Net Amount    ', 'What is the NET AMOUNT (subtotal before VAT/tax)? Just the number, e.g. 20.99'),
    queryAndLog('VAT Amount    ', 'What is the VAT/TAX AMOUNT? Just the number, e.g. 4.00'),
  ]);

  // Parse amount from string (handles European format)
  const parseAmount = (s: string): number => {
    if (!s) return 0;
    // Extract number from the response
    const match = s.match(/([\d.,]+)/);
    if (!match) return 0;
    const numStr = match[1];
    // Handle European format: 1.234,56 → 1234.56
    const normalized = numStr.includes(',') && numStr.indexOf(',') > numStr.lastIndexOf('.')
      ? numStr.replace(/\./g, '').replace(',', '.')
      : numStr.replace(/,/g, '');
    return parseFloat(normalized) || 0;
  };

  // Extract invoice number from potentially verbose response
  const extractInvoiceNumber = (s: string): string => {
    const clean = s.replace(/\*\*/g, '').replace(/`/g, '').trim();
    // Look for common invoice number patterns
    const patterns = [
      /\b([A-Z]{2,3}\d{10,})\b/i,       // IEE2022006460244
      /\b([A-Z]\d{8,})\b/i,             // R0014359508
      /\b(INV[-\s]?\d{4}[-\s]?\d+)\b/i, // INV-2024-001
      /\b(\d{7,})\b/,                   // 1579087430
    ];
    for (const pattern of patterns) {
      const match = clean.match(pattern);
      if (match) return match[1];
    }
    return clean.replace(/[^A-Z0-9-]/gi, '').trim() || clean;
  };

  // Extract date (YYYY-MM-DD) from response
  const extractDate = (s: string): string => {
    const clean = s.replace(/\*\*/g, '').replace(/`/g, '').trim();
    const isoMatch = clean.match(/(\d{4}-\d{2}-\d{2})/);
    if (isoMatch) return isoMatch[1];
    return clean.replace(/[^\d-]/g, '').trim();
  };

  // Extract currency
  const extractCurrency = (s: string): string => {
    const upper = s.toUpperCase();
    if (upper.includes('EUR') || upper.includes('€')) return 'EUR';
    if (upper.includes('USD') || upper.includes('$')) return 'USD';
    if (upper.includes('GBP') || upper.includes('£')) return 'GBP';
    return 'EUR';
  };

  return {
    invoice_number: extractInvoiceNumber(invoiceNum),
    invoice_date: extractDate(invoiceDate),
    vendor_name: vendor.replace(/\*\*/g, '').replace(/`/g, '').trim() || '',
    currency: extractCurrency(currency),
    net_amount: parseAmount(netAmount),
    vat_amount: parseAmount(vatAmount),
    total_amount: parseAmount(totalAmount),
  };
}

/**
 * Normalize date to YYYY-MM-DD
 */
function normalizeDate(dateStr: string | null): string {
  if (!dateStr) return '';
  if (/^\d{4}-\d{2}-\d{2}$/.test(dateStr)) return dateStr;

  const monthMap: Record<string, string> = {
    JAN: '01', FEB: '02', MAR: '03', APR: '04', MAY: '05', JUN: '06',
    JUL: '07', AUG: '08', SEP: '09', OCT: '10', NOV: '11', DEC: '12',
  };

  let match = dateStr.match(/^(\d{1,2})-([A-Z]{3})-(\d{4})$/i);
  if (match) {
    return `${match[3]}-${monthMap[match[2].toUpperCase()] || '01'}-${match[1].padStart(2, '0')}`;
  }

  match = dateStr.match(/^(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})$/);
  if (match) {
    return `${match[3]}-${match[2].padStart(2, '0')}-${match[1].padStart(2, '0')}`;
  }

  return dateStr;
}

/**
 * Compare extracted vs expected
 */
function compareInvoice(extracted: IInvoice, expected: IInvoice): { match: boolean; errors: string[] } {
  const errors: string[] = [];

  const extNum = extracted.invoice_number?.replace(/\s/g, '').toLowerCase() || '';
  const expNum = expected.invoice_number?.replace(/\s/g, '').toLowerCase() || '';
  if (extNum !== expNum) {
    errors.push(`invoice_number: expected "${expected.invoice_number}", got "${extracted.invoice_number}"`);
  }

  if (normalizeDate(extracted.invoice_date) !== normalizeDate(expected.invoice_date)) {
    errors.push(`invoice_date: expected "${expected.invoice_date}", got "${extracted.invoice_date}"`);
  }

  if (Math.abs(extracted.total_amount - expected.total_amount) > 0.02) {
    errors.push(`total_amount: expected ${expected.total_amount}, got ${extracted.total_amount}`);
  }

  if (extracted.currency?.toUpperCase() !== expected.currency?.toUpperCase()) {
    errors.push(`currency: expected "${expected.currency}", got "${extracted.currency}"`);
  }

  return { match: errors.length === 0, errors };
}

/**
 * Find test cases
 */
function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: string }> {
  const testDir = path.join(process.cwd(), '.nogit/invoices');
  if (!fs.existsSync(testDir)) return [];

  const files = fs.readdirSync(testDir);
  const testCases: Array<{ name: string; pdfPath: string; jsonPath: string }> = [];

  for (const pdf of files.filter((f) => f.endsWith('.pdf'))) {
    const baseName = pdf.replace('.pdf', '');
    const jsonFile = `${baseName}.json`;
    if (files.includes(jsonFile)) {
      testCases.push({
        name: baseName,
        pdfPath: path.join(testDir, pdf),
        jsonPath: path.join(testDir, jsonFile),
      });
    }
  }

  return testCases.sort((a, b) => a.name.localeCompare(b.name));
}

/**
 * Ensure Qwen3-VL 8B model is available
 */
async function ensureQwen3Vl(): Promise<boolean> {
  try {
    const response = await fetch(`${OLLAMA_URL}/api/tags`);
    if (response.ok) {
      const data = await response.json();
      const models = data.models || [];
      if (models.some((m: { name: string }) => m.name === VISION_MODEL)) {
        console.log(`[Ollama] Model already available: ${VISION_MODEL}`);
        return true;
      }
    }
  } catch {
    console.log('[Ollama] Cannot check models');
    return false;
  }

  console.log(`[Ollama] Pulling model: ${VISION_MODEL}...`);
  const pullResponse = await fetch(`${OLLAMA_URL}/api/pull`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ name: VISION_MODEL, stream: false }),
  });

  return pullResponse.ok;
}

// Tests

tap.test('setup: ensure Qwen3-VL is running', async () => {
  console.log('\n[Setup] Checking Qwen3-VL 8B...\n');

  // Ensure Ollama service is running
  const ollamaOk = await ensureMiniCpm();
  expect(ollamaOk).toBeTrue();

  // Ensure Qwen3-VL 8B model
  const visionOk = await ensureQwen3Vl();
  expect(visionOk).toBeTrue();

  console.log('\n[Setup] Ready!\n');
});

const testCases = findTestCases();
console.log(`\nFound ${testCases.length} invoice test cases (Qwen3-VL Vision)\n`);

let passedCount = 0;
let failedCount = 0;
const times: number[] = [];

for (const testCase of testCases) {
  tap.test(`should extract invoice: ${testCase.name}`, async () => {
    const expected: IInvoice = JSON.parse(fs.readFileSync(testCase.jsonPath, 'utf-8'));
    console.log(`\n=== ${testCase.name} ===`);
    console.log(`Expected: ${expected.invoice_number} | ${expected.invoice_date} | ${expected.total_amount} ${expected.currency}`);

    const start = Date.now();
    const images = convertPdfToImages(testCase.pdfPath);
    console.log(`  Pages: ${images.length}`);

    const extracted = await extractInvoiceFromImages(images);
    console.log(`  Extracted: ${extracted.invoice_number} | ${extracted.invoice_date} | ${extracted.total_amount} ${extracted.currency}`);
    const elapsed = Date.now() - start;
    times.push(elapsed);

    const result = compareInvoice(extracted, expected);

    if (result.match) {
      passedCount++;
      console.log(`  Result: MATCH (${(elapsed / 1000).toFixed(1)}s)`);
    } else {
      failedCount++;
      console.log(`  Result: MISMATCH (${(elapsed / 1000).toFixed(1)}s)`);
      result.errors.forEach((e) => console.log(`    - ${e}`));
    }

    expect(result.match).toBeTrue();
  });
}

tap.test('summary', async () => {
  const total = testCases.length;
  const accuracy = total > 0 ? (passedCount / total) * 100 : 0;
  const totalTime = times.reduce((a, b) => a + b, 0) / 1000;
  const avgTime = times.length > 0 ? totalTime / times.length : 0;

  console.log(`\n======================================================`);
  console.log(`  Invoice Extraction Summary (Qwen3-VL Vision)`);
  console.log(`======================================================`);
  console.log(`  Method: Multi-query (single pass)`);
  console.log(`  Passed: ${passedCount}/${total}`);
  console.log(`  Failed: ${failedCount}/${total}`);
  console.log(`  Accuracy: ${accuracy.toFixed(1)}%`);
  console.log(`------------------------------------------------------`);
  console.log(`  Total time: ${totalTime.toFixed(1)}s`);
  console.log(`  Avg per inv: ${avgTime.toFixed(1)}s`);
  console.log(`======================================================\n`);
});

export default tap.start();