28 Commits

Author SHA1 Message Date
bd5bb5d874 v1.13.0
Some checks failed
Docker (tags) / security (push) Successful in 29s
Docker (tags) / test (push) Failing after 40s
Docker (tags) / release (push) Has been skipped
Docker (tags) / metadata (push) Has been skipped
2026-01-18 13:56:46 +00:00
d91df70fff feat(tests): revamp tests and remove legacy Dockerfiles: adopt JSON/consensus workflows, switch MiniCPM model, and delete deprecated Docker/test variants 2026-01-18 13:56:46 +00:00
d6c97a9625 v1.12.0
Some checks failed
Docker (tags) / security (push) Successful in 31s
Docker (tags) / test (push) Failing after 57s
Docker (tags) / release (push) Has been skipped
Docker (tags) / metadata (push) Has been skipped
2026-01-18 11:26:38 +00:00
76b21f1f7b feat(tests): switch vision tests to multi-query extraction (count then per-row/field queries) and add logging/summaries 2026-01-18 11:26:38 +00:00
4c368dfef9 v1.11.0
Some checks failed
Docker (tags) / security (push) Successful in 29s
Docker (tags) / test (push) Failing after 40s
Docker (tags) / release (push) Has been skipped
Docker (tags) / metadata (push) Has been skipped
2026-01-18 04:50:57 +00:00
e76768da55 feat(vision): process pages separately and make Qwen3-VL vision extraction more robust; add per-page parsing, safer JSON handling, reduced token usage, and multi-query invoice extraction 2026-01-18 04:50:57 +00:00
63d72a52c9 update 2026-01-18 04:28:57 +00:00
386122c8c7 v1.10.1
Some checks failed
Docker (tags) / security (push) Successful in 31s
Docker (tags) / test (push) Failing after 40s
Docker (tags) / release (push) Has been skipped
Docker (tags) / metadata (push) Has been skipped
2026-01-18 04:17:30 +00:00
7c8f10497e fix(tests): improve Qwen3-VL invoice extraction test by switching to non-stream API, adding model availability/pull checks, simplifying response parsing, and tightening model options 2026-01-18 04:17:30 +00:00
9f9ec0a671 v1.10.0
Some checks failed
Docker (tags) / security (push) Successful in 32s
Docker (tags) / test (push) Failing after 40s
Docker (tags) / release (push) Has been skipped
Docker (tags) / metadata (push) Has been skipped
2026-01-18 03:35:06 +00:00
3780105c6f feat(vision): add Qwen3-VL vision model support with Dockerfile and tests; improve invoice OCR conversion and prompts; simplify extraction flow by removing consensus voting 2026-01-18 03:35:05 +00:00
d237ad19f4 v1.9.0
Some checks failed
Docker (tags) / security (push) Successful in 33s
Docker (tags) / test (push) Failing after 39s
Docker (tags) / release (push) Has been skipped
Docker (tags) / metadata (push) Has been skipped
2026-01-18 02:53:24 +00:00
7652a2df52 feat(tests): add Ministral 3 vision tests and improve invoice extraction pipeline to use Ollama chat schema, sanitization, and multi-page support 2026-01-18 02:53:24 +00:00
b316d98f24 v1.8.0
Some checks failed
Docker (tags) / security (push) Successful in 31s
Docker (tags) / test (push) Failing after 41s
Docker (tags) / release (push) Has been skipped
Docker (tags) / metadata (push) Has been skipped
2026-01-18 00:11:17 +00:00
f0d88fcbe0 feat(paddleocr-vl): add structured HTML output and table parsing for PaddleOCR-VL, update API, tests, and README 2026-01-18 00:11:17 +00:00
0d8a1ebac2 v1.7.1
Some checks failed
Docker (tags) / security (push) Successful in 31s
Docker (tags) / test (push) Failing after 39s
Docker (tags) / release (push) Has been skipped
Docker (tags) / metadata (push) Has been skipped
2026-01-17 23:13:47 +00:00
5a311dca2d fix(docker): standardize Dockerfile and entrypoint filenames; add GPU-specific Dockerfiles and update build and test references 2026-01-17 23:13:47 +00:00
ab288380f1 v1.7.0
Some checks failed
Docker (tags) / security (push) Successful in 30s
Docker (tags) / test (push) Failing after 40s
Docker (tags) / release (push) Has been skipped
Docker (tags) / metadata (push) Has been skipped
2026-01-17 21:50:09 +00:00
30c73b24c1 feat(tests): use Qwen2.5 (Ollama) for invoice extraction tests and add helpers for model management; normalize dates and coerce numeric fields 2026-01-17 21:50:09 +00:00
311e7a8fd4 v1.6.0
Some checks failed
Docker (tags) / security (push) Successful in 32s
Docker (tags) / test (push) Failing after 40s
Docker (tags) / release (push) Has been skipped
Docker (tags) / metadata (push) Has been skipped
2026-01-17 20:22:23 +00:00
80e6866442 feat(paddleocr-vl): add PaddleOCR-VL full pipeline Docker image and API server, plus integration tests and docker helpers 2026-01-17 20:22:23 +00:00
addae20cbd v1.5.0
Some checks failed
Docker (tags) / security (push) Successful in 31s
Docker (tags) / test (push) Failing after 40s
Docker (tags) / release (push) Has been skipped
Docker (tags) / metadata (push) Has been skipped
2026-01-17 16:57:26 +00:00
0482c35b69 feat(paddleocr-vl): add PaddleOCR-VL GPU Dockerfile, pin vllm, update CPU image deps, and improve entrypoint and tests 2026-01-17 16:57:26 +00:00
15ac1fcf67 update 2026-01-16 16:21:44 +00:00
3c5cf578a5 v1.4.0
Some checks failed
Docker (tags) / security (push) Successful in 28s
Docker (tags) / test (push) Failing after 54s
Docker (tags) / release (push) Has been skipped
Docker (tags) / metadata (push) Has been skipped
2026-01-16 14:24:37 +00:00
82358b2d5d feat(invoices): add hybrid OCR + vision invoice/document parsing with PaddleOCR, consensus voting, and prompt/test refactors 2026-01-16 14:24:37 +00:00
acded2a165 v1.3.0
Some checks failed
Docker (tags) / security (push) Successful in 30s
Docker (tags) / test (push) Failing after 41s
Docker (tags) / release (push) Has been skipped
Docker (tags) / metadata (push) Has been skipped
2026-01-16 13:23:01 +00:00
bec379e9ca feat(paddleocr): add PaddleOCR OCR service (Docker images, server, tests, docs) and CI workflows 2026-01-16 13:23:01 +00:00
27 changed files with 4126 additions and 1262 deletions

View File

@@ -0,0 +1,67 @@
name: Docker (no tags)
on:
  push:
    tags-ignore:
      - '**'
env:
  IMAGE: code.foss.global/host.today/ht-docker-node:npmci
  NPMCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@gitea.lossless.digital/${{gitea.repository}}.git
  NPMCI_LOGIN_DOCKER_DOCKERREGISTRY: ${{ secrets.NPMCI_LOGIN_DOCKER_DOCKERREGISTRY }}
jobs:
  security:
    runs-on: ubuntu-latest
    container:
      image: ${{ env.IMAGE }}
    continue-on-error: true
    steps:
      - uses: actions/checkout@v3
      - name: Prepare
        run: |
          pnpm install -g pnpm
          pnpm install -g @ship.zone/npmci
          npmci npm prepare
      - name: Audit production dependencies
        run: |
          npmci command npm config set registry https://registry.npmjs.org
          npmci command pnpm audit --audit-level=high --prod
        continue-on-error: true
      - name: Audit development dependencies
        run: |
          npmci command npm config set registry https://registry.npmjs.org
          npmci command pnpm audit --audit-level=high --dev
        continue-on-error: true
  test:
    needs: security
    runs-on: ubuntu-latest
    container:
      image: ${{ env.IMAGE }}
    steps:
      - uses: actions/checkout@v3
      - name: Prepare
        run: |
          pnpm install -g pnpm
          pnpm install -g @ship.zone/npmci
          npmci npm prepare
      - name: Test stable
        run: |
          npmci node install stable
          npmci npm install
          npmci npm test
        continue-on-error: true
      - name: Test build
        run: |
          npmci node install stable
          npmci npm install
          npmci command npm run build

View File

@@ -0,0 +1,101 @@
name: Docker (tags)
on:
  push:
    tags:
      - '*'
env:
  IMAGE: code.foss.global/host.today/ht-docker-node:npmci
  NPMCI_COMPUTED_REPOURL: https://${{gitea.repository_owner}}:${{secrets.GITEA_TOKEN}}@gitea.lossless.digital/${{gitea.repository}}.git
  NPMCI_LOGIN_DOCKER_DOCKERREGISTRY: ${{ secrets.NPMCI_LOGIN_DOCKER_DOCKERREGISTRY }}
jobs:
  security:
    runs-on: ubuntu-latest
    container:
      image: ${{ env.IMAGE }}
    continue-on-error: true
    steps:
      - uses: actions/checkout@v3
      - name: Prepare
        run: |
          pnpm install -g pnpm
          pnpm install -g @ship.zone/npmci
          npmci npm prepare
      - name: Audit production dependencies
        run: |
          npmci command npm config set registry https://registry.npmjs.org
          npmci command pnpm audit --audit-level=high --prod
        continue-on-error: true
      - name: Audit development dependencies
        run: |
          npmci command npm config set registry https://registry.npmjs.org
          npmci command pnpm audit --audit-level=high --dev
        continue-on-error: true
  test:
    needs: security
    runs-on: ubuntu-latest
    container:
      image: ${{ env.IMAGE }}
    steps:
      - uses: actions/checkout@v3
      - name: Prepare
        run: |
          pnpm install -g pnpm
          pnpm install -g @ship.zone/npmci
          npmci npm prepare
      - name: Test stable
        run: |
          npmci node install stable
          npmci npm install
          npmci npm test
        continue-on-error: true
      - name: Test build
        run: |
          npmci node install stable
          npmci npm install
          npmci command npm run build
  release:
    needs: test
    if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/')
    runs-on: ubuntu-latest
    container:
      image: code.foss.global/host.today/ht-docker-dbase:npmci
    steps:
      - uses: actions/checkout@v3
      - name: Prepare
        run: |
          pnpm install -g pnpm
          pnpm install -g @ship.zone/npmci
      - name: Release
        run: |
          npmci docker login
          npmci docker build
          npmci docker push code.foss.global
  metadata:
    needs: test
    if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/')
    runs-on: ubuntu-latest
    container:
      image: ${{ env.IMAGE }}
    steps:
      - uses: actions/checkout@v3
      - name: Trigger
        run: npmci trigger

View File

@@ -1,27 +0,0 @@
# MiniCPM-V 4.5 CPU Variant
# Vision-Language Model optimized for CPU-only inference
FROM ollama/ollama:latest
LABEL maintainer="Task Venture Capital GmbH <hello@task.vc>"
LABEL description="MiniCPM-V 4.5 Vision-Language Model - CPU optimized (GGUF)"
LABEL org.opencontainers.image.source="https://code.foss.global/host.today/ht-docker-ai"
# Environment configuration for CPU-only mode
ENV MODEL_NAME="minicpm-v"
ENV OLLAMA_HOST="0.0.0.0"
ENV OLLAMA_ORIGINS="*"
# Disable GPU usage for CPU-only variant
ENV CUDA_VISIBLE_DEVICES=""
# Copy and setup entrypoint
COPY image_support_files/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
RUN chmod +x /usr/local/bin/docker-entrypoint.sh
# Expose Ollama API port
EXPOSE 11434
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=120s --retries=3 \
CMD curl -f http://localhost:11434/api/tags || exit 1
ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]

View File

@@ -12,7 +12,7 @@ ENV OLLAMA_HOST="0.0.0.0"
ENV OLLAMA_ORIGINS="*"
# Copy and setup entrypoint
-COPY image_support_files/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
+COPY image_support_files/minicpm45v_entrypoint.sh /usr/local/bin/docker-entrypoint.sh
RUN chmod +x /usr/local/bin/docker-entrypoint.sh
# Expose Ollama API port

View File

@@ -1,51 +0,0 @@
# PaddleOCR GPU Variant
# OCR processing with NVIDIA GPU support using PaddlePaddle
FROM paddlepaddle/paddle:3.0.0-gpu-cuda11.8-cudnn8.9-trt8.6
LABEL maintainer="Task Venture Capital GmbH <hello@task.vc>"
LABEL description="PaddleOCR PP-OCRv4 - GPU optimized"
LABEL org.opencontainers.image.source="https://code.foss.global/host.today/ht-docker-ai"
# Environment configuration
ENV OCR_LANGUAGE="en"
ENV SERVER_PORT="5000"
ENV SERVER_HOST="0.0.0.0"
ENV PYTHONUNBUFFERED=1
# Set working directory
WORKDIR /app
# Install system dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
libgl1-mesa-glx \
libglib2.0-0 \
curl \
&& rm -rf /var/lib/apt/lists/*
# Install Python dependencies
RUN pip install --no-cache-dir \
paddleocr \
fastapi \
uvicorn[standard] \
python-multipart \
opencv-python-headless \
pillow
# Copy server files
COPY image_support_files/paddleocr_server.py /app/paddleocr_server.py
COPY image_support_files/paddleocr-entrypoint.sh /usr/local/bin/paddleocr-entrypoint.sh
RUN chmod +x /usr/local/bin/paddleocr-entrypoint.sh
# Pre-download OCR models during build (PP-OCRv4)
RUN python -c "from paddleocr import PaddleOCR; \
ocr = PaddleOCR(use_angle_cls=True, lang='en', use_gpu=False, show_log=True); \
print('English model downloaded')"
# Expose API port
EXPOSE 5000
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
CMD curl -f http://localhost:5000/health || exit 1
ENTRYPOINT ["/usr/local/bin/paddleocr-entrypoint.sh"]

View File

@@ -1,54 +0,0 @@
# PaddleOCR CPU Variant
# OCR processing optimized for CPU-only inference
FROM python:3.10-slim
LABEL maintainer="Task Venture Capital GmbH <hello@task.vc>"
LABEL description="PaddleOCR PP-OCRv4 - CPU optimized"
LABEL org.opencontainers.image.source="https://code.foss.global/host.today/ht-docker-ai"
# Environment configuration for CPU-only mode
ENV OCR_LANGUAGE="en"
ENV SERVER_PORT="5000"
ENV SERVER_HOST="0.0.0.0"
ENV PYTHONUNBUFFERED=1
# Disable GPU usage for CPU-only variant
ENV CUDA_VISIBLE_DEVICES="-1"
# Set working directory
WORKDIR /app
# Install system dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
libgl1-mesa-glx \
libglib2.0-0 \
curl \
&& rm -rf /var/lib/apt/lists/*
# Install Python dependencies (CPU version of PaddlePaddle)
RUN pip install --no-cache-dir \
paddlepaddle \
paddleocr \
fastapi \
uvicorn[standard] \
python-multipart \
opencv-python-headless \
pillow
# Copy server files
COPY image_support_files/paddleocr_server.py /app/paddleocr_server.py
COPY image_support_files/paddleocr-entrypoint.sh /usr/local/bin/paddleocr-entrypoint.sh
RUN chmod +x /usr/local/bin/paddleocr-entrypoint.sh
# Pre-download OCR models during build (PP-OCRv4)
RUN python -c "from paddleocr import PaddleOCR; \
ocr = PaddleOCR(use_angle_cls=True, lang='en', use_gpu=False, show_log=True); \
print('English model downloaded')"
# Expose API port
EXPOSE 5000
# Health check (longer start-period for CPU variant)
HEALTHCHECK --interval=30s --timeout=10s --start-period=120s --retries=3 \
CMD curl -f http://localhost:5000/health || exit 1
ENTRYPOINT ["/usr/local/bin/paddleocr-entrypoint.sh"]

Dockerfile_qwen3vl (new file, 26 lines)
View File

@@ -0,0 +1,26 @@
# Qwen3-VL-30B-A3B Vision Language Model
# Q4_K_M quantization (~20GB model)
#
# Most powerful Qwen vision model:
# - 256K context (expandable to 1M)
# - Visual agent capabilities
# - Code generation from images
#
# Build: docker build -f Dockerfile_qwen3vl -t qwen3vl .
# Run: docker run --gpus all -p 11434:11434 -v ht-ollama-models:/root/.ollama qwen3vl
FROM ollama/ollama:latest
# Pre-pull the model during build (optional - can also pull at runtime)
# This makes the image larger but faster to start
# RUN ollama serve & sleep 5 && ollama pull qwen3-vl:30b-a3b && pkill ollama
# Expose Ollama API port
EXPOSE 11434
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
CMD curl -f http://localhost:11434/api/tags || exit 1
# Start Ollama server
CMD ["serve"]

View File

@@ -16,7 +16,7 @@ echo -e "${BLUE}Building ht-docker-ai images...${NC}"
# Build GPU variant
echo -e "${GREEN}Building MiniCPM-V 4.5 GPU variant...${NC}"
docker build \
-  -f Dockerfile_minicpm45v \
+  -f Dockerfile_minicpm45v_gpu \
  -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v \
  -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v-gpu \
  -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:latest \
@@ -29,19 +29,19 @@ docker build \
  -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v-cpu \
  .
-# Build PaddleOCR GPU variant
-echo -e "${GREEN}Building PaddleOCR GPU variant...${NC}"
+# Build PaddleOCR-VL GPU variant
+echo -e "${GREEN}Building PaddleOCR-VL GPU variant...${NC}"
docker build \
-  -f Dockerfile_paddleocr \
-  -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr \
-  -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-gpu \
+  -f Dockerfile_paddleocr_vl_gpu \
+  -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl \
+  -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl-gpu \
  .
-# Build PaddleOCR CPU variant
-echo -e "${GREEN}Building PaddleOCR CPU variant...${NC}"
+# Build PaddleOCR-VL CPU variant
+echo -e "${GREEN}Building PaddleOCR-VL CPU variant...${NC}"
docker build \
-  -f Dockerfile_paddleocr_cpu \
-  -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-cpu \
+  -f Dockerfile_paddleocr_vl_cpu \
+  -t ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl-cpu \
  .
echo -e "${GREEN}All images built successfully!${NC}"
@@ -52,7 +52,7 @@ echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v (GPU)"
echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:minicpm45v-cpu (CPU)"
echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:latest (GPU)"
echo ""
-echo " PaddleOCR:"
-echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr (GPU)"
-echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-gpu (GPU)"
-echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-cpu (CPU)"
+echo " PaddleOCR-VL (Vision-Language Model):"
+echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl (GPU/vLLM)"
+echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl-gpu (GPU/vLLM)"
+echo " - ${REGISTRY}/${NAMESPACE}/${IMAGE_NAME}:paddleocr-vl-cpu (CPU)"

View File

@@ -1,5 +1,128 @@
# Changelog
## 2026-01-18 - 1.13.0 - feat(tests)
revamp tests and remove legacy Dockerfiles: adopt JSON/consensus workflows, switch MiniCPM model, and delete deprecated Docker/test variants
- Removed multiple Dockerfiles and related entrypoints for MiniCPM and PaddleOCR-VL (cpu/gpu/full), cleaning up legacy image recipes.
- Pruned many older test files (combined, ministral3, paddleocr-vl, and several invoice/test variants) to consolidate the test suite.
- Updated bank statement MiniCPM test: now uses MODEL='openbmb/minicpm-v4.5:q8_0', JSON per-page extraction prompt, consensus retry logic, expanded logging, and stricter result matching.
- Updated invoice MiniCPM test: switched to a consensus flow (fast JSON pass + thinking pass), increased PDF conversion quality, endpoints migrated to chat-style API calls with image-in-message payloads, and improved finalization logic.
- API usage changed from /api/generate to /api/chat with message-based payloads and embedded images — CI and local test runners will need model availability and possible pipeline adjustments.
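
For reference, a chat-style Ollama call with an image embedded in the message looks roughly like the sketch below. The endpoint and payload shape follow Ollama's /api/chat API; the model tag and prompt are placeholders rather than the repository's exact test code.

```typescript
import { readFile } from 'node:fs/promises';

// Minimal sketch of an Ollama /api/chat request with an embedded image.
async function chatWithImage(imagePath: string): Promise<string> {
  const imageBase64 = (await readFile(imagePath)).toString('base64');
  const response = await fetch('http://localhost:11434/api/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: 'minicpm-v:latest',          // illustrative model tag
      stream: false,
      messages: [
        {
          role: 'user',
          content: 'Extract the invoice number as plain text.',
          images: [imageBase64],          // images ride along inside the message
        },
      ],
      options: { temperature: 0.0 },
    }),
  });
  if (!response.ok) {
    throw new Error(`Ollama request failed: ${response.status}`);
  }
  const data = await response.json();
  return data.message.content as string;
}
```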
## 2026-01-18 - 1.12.0 - feat(tests)
switch vision tests to multi-query extraction (count then per-row/field queries) and add logging/summaries
- Replace streaming + consensus pipeline with multi-query approach: count rows per page, then query each transaction/field individually (batched parallel queries).
- Introduce unified helpers (queryVision / queryField / getTransaction / countTransactions) and simplify Ollama requests (stream:false, reduced num_predict, /no_think prompts).
- Improve parsing and normalization for amounts (European formats), invoice numbers, dates and currency extraction.
- Adjust model checks to look for generic 'minicpm' and update test names/messages; add pass/fail counters and a summary test output.
- Remove previous consensus voting and streaming JSON accumulation logic, and add immediate per-transaction logging and batching.
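
A condensed sketch of the count-then-per-row/field flow described above; the helper names mirror the changelog but are illustrative, not the actual test code.

```typescript
// One small vision query: one image, one short prompt, one short answer.
async function queryVision(imageBase64: string, prompt: string): Promise<string> {
  const res = await fetch('http://localhost:11434/api/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: 'minicpm-v:latest',
      stream: false,
      messages: [{ role: 'user', content: prompt, images: [imageBase64] }],
      options: { temperature: 0.0, num_predict: 128 },
    }),
  });
  const data = await res.json();
  return (data.message?.content ?? '').trim();
}

// Step 1: count the rows on the page.
async function countTransactions(pageImage: string): Promise<number> {
  const answer = await queryVision(
    pageImage,
    '/no_think How many transaction rows are on this page? Answer with a number only.',
  );
  const n = parseInt(answer.replace(/[^0-9]/g, ''), 10);
  return Number.isFinite(n) ? n : 0;
}

// Step 2: ask one narrow question per field, per row (batched in parallel).
async function getTransaction(pageImage: string, index: number) {
  const [date, description, amount] = await Promise.all([
    queryVision(pageImage, `/no_think Date of transaction ${index + 1}?`),
    queryVision(pageImage, `/no_think Description of transaction ${index + 1}?`),
    queryVision(pageImage, `/no_think Amount of transaction ${index + 1}?`),
  ]);
  return { date, description, amount };
}
```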
## 2026-01-18 - 1.11.0 - feat(vision)
process pages separately and make Qwen3-VL vision extraction more robust; add per-page parsing, safer JSON handling, reduced token usage, and multi-query invoice extraction
- Bank statements: split extraction into extractTransactionsFromPage and sequentially process pages to avoid thinking-token exhaustion
- Bank statements: reduced num_predict from 8000 to 4000, send single image per request, added per-page logging and non-throwing handling for empty or non-JSON responses
- Bank statements: catch JSON.parse errors and return empty array instead of throwing
- Invoices: introduced queryField to request single values and perform multiple simple queries (reduces model thinking usage)
- Invoices: reduced num_predict for invoice queries from 4000 to 500 and parse amounts robustly (handles European formats like 1.234,56)
- Invoices: normalize currency to uppercase 3-letter code, return safe defaults (empty strings / 0) instead of nulls, and parse net/vat/total with fallbacks
- General: simplified Ollama API error messages to avoid including response body content in thrown errors
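
The amount normalization mentioned above (handling European formats such as 1.234,56) can be approximated as follows; this heuristic is a simplified stand-in for the test's actual parser.

```typescript
// Parse a monetary string that may use European ("1.234,56") or
// US ("1,234.56") conventions into a number. Simplified heuristic.
function parseAmount(raw: string): number {
  let s = raw.replace(/[^\d,.\-]/g, '').trim();
  if (!s) return 0;
  const lastComma = s.lastIndexOf(',');
  const lastDot = s.lastIndexOf('.');
  if (lastComma > lastDot) {
    // Comma is the decimal separator: drop dots (thousands), swap comma for dot.
    s = s.replace(/\./g, '').replace(',', '.');
  } else {
    // Dot is the decimal separator: drop commas (thousands).
    s = s.replace(/,/g, '');
  }
  const value = parseFloat(s);
  return Number.isFinite(value) ? value : 0;
}

// parseAmount('1.234,56 EUR') === 1234.56
// parseAmount('1,234.56')     === 1234.56
```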
## 2026-01-18 - 1.10.1 - fix(tests)
improve Qwen3-VL invoice extraction test by switching to non-stream API, adding model availability/pull checks, simplifying response parsing, and tightening model options
- Replaced streaming reader logic with direct JSON parsing of the /api/chat response
- Added ensureQwen3Vl() to check and pull the Qwen3-VL:8b model from Ollama
- Switched to ensureMiniCpm() to verify Ollama service is running before model checks
- Use /no_think prompt for direct JSON output and set temperature to 0.0 and num_predict to 512
- Removed retry loop and streaming parsing; improved error messages to include response body
- Updated logging and test setup messages for clarity
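
The availability/pull check amounts to asking Ollama which models are installed and pulling the missing one. A rough sketch, using the standard Ollama /api/tags and /api/pull endpoints; everything else is illustrative:

```typescript
// Check that a model is installed locally, pulling it if necessary.
async function ensureModel(name: string): Promise<void> {
  const tags = await fetch('http://localhost:11434/api/tags');
  if (!tags.ok) {
    throw new Error('Ollama is not reachable on localhost:11434');
  }
  const { models = [] } = await tags.json();
  const installed = models.some((m: { name: string }) => m.name.startsWith(name));
  if (installed) return;

  console.log(`Pulling ${name} (this can take a while)...`);
  const pull = await fetch('http://localhost:11434/api/pull', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ name, stream: false }),
  });
  if (!pull.ok) {
    throw new Error(`Failed to pull model ${name}: ${pull.status}`);
  }
}

// e.g. await ensureModel('qwen3-vl:8b');
```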
## 2026-01-18 - 1.10.0 - feat(vision)
add Qwen3-VL vision model support with Dockerfile and tests; improve invoice OCR conversion and prompts; simplify extraction flow by removing consensus voting
- Add Dockerfile_qwen3vl to provide an Ollama-based image for Qwen3-VL and expose the Ollama API on port 11434
- Introduce test/test.invoices.qwen3vl.ts and ensureQwen3Vl() helper to pull and test qwen3-vl:8b
- Improve PDF->PNG conversion and prompt in ministral3 tests (higher DPI, max quality, sharpen) and increase num_predict from 512 to 1024
- Simplify extraction pipeline: remove consensus voting, log single-pass results, and simplify OCR HTML sanitization/truncation logic
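
The improved PDF-to-PNG step boils down to an ImageMagick invocation with a high density, maximum quality, and a sharpen pass. A rough sketch; the exact command and flag values are assumptions, not the test's literal invocation:

```typescript
import { execFile } from 'node:child_process';
import { promisify } from 'node:util';

const execFileAsync = promisify(execFile);

// Rasterize one PDF page to a high-quality PNG for vision-model input.
// -density sets the DPI, -quality the PNG compression level, and
// -sharpen applies a mild sharpening pass (all values illustrative).
async function pdfPageToPng(pdfPath: string, page: number, outPath: string): Promise<string> {
  await execFileAsync('convert', [
    '-density', '300',
    `${pdfPath}[${page}]`, // ImageMagick page selector, 0-based
    '-quality', '100',
    '-sharpen', '0x1.0',
    outPath,
  ]);
  return outPath;
}
```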
## 2026-01-18 - 1.9.0 - feat(tests)
add Ministral 3 vision tests and improve invoice extraction pipeline to use Ollama chat schema, sanitization, and multi-page support
- Add new vision-based test suites for Ministral 3: test/test.invoices.ministral3.ts and test/test.bankstatements.ministral3.ts (model ministral-3:8b).
- Introduce ensureMinistral3() helper to start/check Ollama/MiniCPM model in test/helpers/docker.ts.
- Switch invoice extraction to use Ollama /api/chat with a JSON schema (format) and streaming support (reads message.content).
- Improve HTML handling: sanitizeHtml() to remove OCR artifacts, concatenate multi-page HTML with page markers, and increase truncation limits.
- Enhance response parsing: strip Markdown code fences, robustly locate JSON object boundaries, and provide clearer JSON parse errors.
- Add PDF->PNG conversion (ImageMagick) and direct image-based extraction flow for vision model tests.
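
Passing a JSON schema via the chat API's format field looks roughly like this; the schema, model tag, and non-streaming call are illustrative simplifications of the test code.

```typescript
// Sketch: constrain the model's reply to a JSON schema via Ollama's `format` field.
const invoiceSchema = {
  type: 'object',
  properties: {
    invoice_number: { type: 'string' },
    invoice_date: { type: 'string' },
    total: { type: 'number' },
  },
  required: ['invoice_number', 'invoice_date', 'total'],
};

async function extractInvoice(html: string): Promise<unknown> {
  const res = await fetch('http://localhost:11434/api/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: 'ministral-3:8b',
      stream: false,
      format: invoiceSchema,
      messages: [
        { role: 'user', content: `Extract the invoice fields from this document:\n${html}` },
      ],
      options: { temperature: 0.0 },
    }),
  });
  const data = await res.json();
  return JSON.parse(data.message.content); // message.content holds the JSON text
}
```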
## 2026-01-18 - 1.8.0 - feat(paddleocr-vl)
add structured HTML output and table parsing for PaddleOCR-VL, update API, tests, and README
- Add result_to_html(), parse_markdown_table(), and parse_paddleocr_table() to emit semantic HTML and convert OCR/markdown tables to proper <table> elements
- Enhance result_to_markdown() with positional/type hints (header/footer/title/table/figure) to improve downstream LLM processing
- Expose 'html' in supported formats and handle output_format='html' in parse endpoints and CLI flow
- Update tests to request HTML output and extract invoice fields from structured HTML (test/test.invoices.paddleocr-vl.ts)
- Refresh README with usage, new images/tags, architecture notes, and troubleshooting for the updated pipeline
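
Requesting the new HTML output is a single POST against the parse endpoint; a minimal client sketch, with the port and file path as placeholders (the endpoint itself appears in the server code further down):

```typescript
import { readFile } from 'node:fs/promises';

// Ask the PaddleOCR-VL server for semantic HTML output.
// /parse accepts a base64 image and output_format of "json", "markdown" or "html".
async function parseToHtml(imagePath: string): Promise<string> {
  const imageBase64 = (await readFile(imagePath)).toString('base64');
  const res = await fetch('http://localhost:8000/parse', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ image: imageBase64, output_format: 'html' }),
  });
  const data = await res.json();
  if (!data.success) {
    throw new Error(`Parse failed: ${data.error}`);
  }
  return data.result.html as string;
}
```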
## 2026-01-17 - 1.7.1 - fix(docker)
standardize Dockerfile and entrypoint filenames; add GPU-specific Dockerfiles and update build and test references
- Added Dockerfile_minicpm45v_gpu and image_support_files/minicpm45v_entrypoint.sh; removed the old Dockerfile_minicpm45v and docker-entrypoint.sh
- Renamed and simplified PaddleOCR entrypoint to image_support_files/paddleocr_vl_entrypoint.sh and updated CPU/GPU Dockerfile references
- Updated build-images.sh to use *_gpu Dockerfiles and clarified PaddleOCR GPU build log
- Updated test/helpers/docker.ts to point to Dockerfile_minicpm45v_gpu so tests build the GPU variant
## 2026-01-17 - 1.7.0 - feat(tests)
use Qwen2.5 (Ollama) for invoice extraction tests and add helpers for model management; normalize dates and coerce numeric fields
- Added ensureOllamaModel and ensureQwen25 test helpers to pull/check Ollama models via localhost:11434
- Updated invoices test to use qwen2.5:7b instead of MiniCPM and removed image payload from the text-only extraction step
- Increased Markdown truncate limit from 8000 to 12000 and reduced model num_predict from 2048 to 512
- Rewrote extraction prompt to require strict JSON output and added post-processing to parse/convert numeric fields
- Added normalizeDate and improved compareInvoice to normalize dates and handle numeric formatting/tolerance
- Updated test setup to ensure Qwen2.5 is available and adjusted logging/messages to reflect the Qwen2.5-based workflow
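
The date normalization and numeric coercion described above can be approximated like this; both helpers are simplified stand-ins for the test utilities of the same purpose.

```typescript
// Normalize common date spellings (DD.MM.YYYY, DD/MM/YYYY, YYYY-MM-DD) to ISO.
function normalizeDate(raw: string): string {
  const s = raw.trim();
  if (/^\d{4}-\d{2}-\d{2}$/.test(s)) return s;
  const eu = s.match(/^(\d{1,2})[./](\d{1,2})[./](\d{4})$/);
  if (eu) {
    const [, d, m, y] = eu;
    return `${y}-${m.padStart(2, '0')}-${d.padStart(2, '0')}`;
  }
  return s; // leave anything unrecognized untouched
}

// Coerce a model-provided field to a number, tolerating strings like "1,234.56".
function coerceNumber(value: unknown): number {
  if (typeof value === 'number') return value;
  if (typeof value === 'string') {
    const n = parseFloat(value.replace(/[^\d.\-]/g, ''));
    return Number.isFinite(n) ? n : 0;
  }
  return 0;
}

// normalizeDate('17.01.2026') === '2026-01-17'
// coerceNumber('1,234.56')    === 1234.56
```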
## 2026-01-17 - 1.6.0 - feat(paddleocr-vl)
add PaddleOCR-VL full pipeline Docker image and API server, plus integration tests and docker helpers
- Add Dockerfile_paddleocr_vl_full and entrypoint script to build a GPU-enabled image with PP-DocLayoutV2 + PaddleOCR-VL and a FastAPI server
- Introduce image_support_files/paddleocr_vl_full_server.py implementing the full pipeline API (/parse, OpenAI-compatible /v1/chat/completions) and a /formats endpoint
- Improve image handling: decode_image supports data URLs, HTTP(S), raw base64 and file paths; add optimize_image_resolution to auto-scale images into the recommended 1080-2048px range
- Add test helpers (test/helpers/docker.ts) to build/start/health-check Docker images and new ensurePaddleOcrVlFull workflow
- Add comprehensive integration tests for bank statements and invoices (MiniCPM and PaddleOCR-VL variants) and update tests to ensure required containers are running before tests
- Switch MiniCPM model references to 'minicpm-v:latest' and increase health/timeout expectations for the full pipeline
## 2026-01-17 - 1.5.0 - feat(paddleocr-vl)
add PaddleOCR-VL GPU Dockerfile, pin vllm, update CPU image deps, and improve entrypoint and tests
- Add a new GPU Dockerfile for PaddleOCR-VL (transformers-based) with CUDA support, healthcheck, and entrypoint.
- Pin vllm to 0.11.1 in Dockerfile_paddleocr_vl to use the first stable release with PaddleOCR-VL support.
- Update CPU image: add torchvision==0.20.1 and extra Python deps (protobuf, sentencepiece, einops) required by the transformers-based server.
- Rewrite paddleocr-vl-entrypoint.sh to build vllm args array, add MAX_MODEL_LEN and ENFORCE_EAGER env vars, include --limit-mm-per-prompt and optional --enforce-eager, and switch to exec vllm with constructed args.
- Update tests to use the OpenAI-compatible PaddleOCR-VL chat completions API (/v1/chat/completions) with image+text message payload and model 'paddleocr-vl'.
- Add @types/node to package.json dependencies and tidy devDependencies ordering.
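
The OpenAI-compatible call the tests switched to looks roughly like this, mirroring the request/response shapes in the server code shown further down; the file path and prompt are placeholders.

```typescript
import { readFile } from 'node:fs/promises';

// Call the PaddleOCR-VL server's /v1/chat/completions endpoint with an
// image + text message payload and model "paddleocr-vl".
async function ocrChat(imagePath: string, prompt = 'OCR:'): Promise<string> {
  const imageBase64 = (await readFile(imagePath)).toString('base64');
  const res = await fetch('http://localhost:8000/v1/chat/completions', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: 'paddleocr-vl',
      messages: [
        {
          role: 'user',
          content: [
            { type: 'image_url', image_url: { url: `data:image/png;base64,${imageBase64}` } },
            { type: 'text', text: prompt },
          ],
        },
      ],
      max_tokens: 4096,
    }),
  });
  const data = await res.json();
  return data.choices[0].message.content as string;
}
```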
## 2026-01-16 - 1.4.0 - feat(invoices)
add hybrid OCR + vision invoice/document parsing with PaddleOCR, consensus voting, and prompt/test refactors
- Add hybrid pipeline documentation and examples (PaddleOCR + MiniCPM-V) and architecture diagram in recipes/document.md
- Integrate PaddleOCR: new OCR extraction functions and OCR-only prompt flow in test/test.node.ts
- Add consensus voting and parallel-pass optimization to improve reliability (multiple passes, hashing, and majority voting)
- Refactor prompts and tests: introduce /nothink token, OCR truncation limits, separate visual and OCR-only prompts, and improved prompt building in test/test.invoices.ts
- Update image conversion defaults (200 DPI, filename change) and add TypeScript helper functions for extraction and consensus handling
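
Consensus voting here means running several extraction passes and keeping the answer the majority agrees on. A minimal sketch, assuming `extract` stands for one full extraction pass; the hashing and parallelization in the actual tests are more involved:

```typescript
import { createHash } from 'node:crypto';

// Run several extraction passes in parallel and majority-vote on the result.
async function consensusExtract<T>(extract: () => Promise<T>, passes = 3): Promise<T> {
  const results = await Promise.all(Array.from({ length: passes }, () => extract()));

  // Hash the serialized JSON of each result so identical answers bucket together.
  const buckets = new Map<string, { value: T; votes: number }>();
  for (const result of results) {
    const key = createHash('sha256').update(JSON.stringify(result)).digest('hex');
    const bucket = buckets.get(key);
    if (bucket) bucket.votes += 1;
    else buckets.set(key, { value: result, votes: 1 });
  }

  // Return the most frequent answer (ties fall back to the first one seen).
  let best: { value: T; votes: number } | undefined;
  for (const bucket of buckets.values()) {
    if (!best || bucket.votes > best.votes) best = bucket;
  }
  return best!.value;
}
```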
## 2026-01-16 - 1.3.0 - feat(paddleocr)
add PaddleOCR OCR service (Docker images, server, tests, docs) and CI workflows
- Add GPU and CPU PaddleOCR Dockerfiles; pin paddlepaddle/paddle and paddleocr to stable 2.x and install libgomp1 for CPU builds
- Avoid pre-downloading OCR models at build-time to prevent build-time segfaults; models are downloaded on first run
- Refactor PaddleOCR FastAPI server: respect CUDA_VISIBLE_DEVICES, support per-request language, cache default language instance and create temporary instances for other languages
- Add comprehensive tests (test.paddleocr.ts) and improve invoice extraction tests (parallelize passes, JSON OCR API usage, prioritize certain test cases)
- Add Gitea CI workflows for tag and non-tag Docker runs and release pipeline (docker build/push, metadata trigger)
- Update documentation (readme.hints.md) with PaddleOCR usage and add docker registry entry to npmextra.json
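
Using the OCR service from the tests amounts to posting a base64 image to /ocr (the server implementing this endpoint appears further down in this diff); a minimal client sketch:

```typescript
import { readFile } from 'node:fs/promises';

interface OcrLine {
  text: string;
  confidence: number;
  box: number[][];
}

// Send a base64-encoded page image to the PaddleOCR service's /ocr endpoint
// (default port 5000) and collect the recognized text lines.
async function ocrImage(imagePath: string, language = 'en'): Promise<OcrLine[]> {
  const image = (await readFile(imagePath)).toString('base64');
  const res = await fetch('http://localhost:5000/ocr', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ image, language }),
  });
  const data = await res.json();
  if (!data.success) {
    throw new Error(`OCR failed: ${data.error}`);
  }
  return data.results as OcrLine[];
}
```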
## 2026-01-16 - 1.2.0 - feat(paddleocr)
add PaddleOCR support: Docker images, FastAPI server, entrypoint and tests

View File

@@ -1,25 +0,0 @@
#!/bin/bash
set -e
# Configuration from environment
OCR_LANGUAGE="${OCR_LANGUAGE:-en}"
SERVER_PORT="${SERVER_PORT:-5000}"
SERVER_HOST="${SERVER_HOST:-0.0.0.0}"
echo "Starting PaddleOCR Server..."
echo " Language: ${OCR_LANGUAGE}"
echo " Host: ${SERVER_HOST}"
echo " Port: ${SERVER_PORT}"
# Check GPU availability
if [ "${CUDA_VISIBLE_DEVICES}" = "-1" ]; then
echo " GPU: Disabled (CPU mode)"
else
echo " GPU: Enabled"
fi
# Start the FastAPI server with uvicorn
exec python -m uvicorn paddleocr_server:app \
--host "${SERVER_HOST}" \
--port "${SERVER_PORT}" \
--workers 1

View File

@@ -1,258 +0,0 @@
#!/usr/bin/env python3
"""
PaddleOCR FastAPI Server
Provides REST API for OCR operations using PaddleOCR
"""
import os
import io
import base64
import logging
from typing import Optional, List, Any
from fastapi import FastAPI, File, UploadFile, Form, HTTPException
from fastapi.responses import JSONResponse
from pydantic import BaseModel
import numpy as np
from PIL import Image
from paddleocr import PaddleOCR
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
# Environment configuration
OCR_LANGUAGE = os.environ.get('OCR_LANGUAGE', 'en')
USE_GPU = os.environ.get('CUDA_VISIBLE_DEVICES', '') != '-1'
# Initialize FastAPI app
app = FastAPI(
title="PaddleOCR Server",
description="REST API for OCR operations using PaddleOCR PP-OCRv4",
version="1.0.0"
)
# Global OCR instance
ocr_instance: Optional[PaddleOCR] = None
class OCRRequest(BaseModel):
"""Request model for base64 image OCR"""
image: str
language: Optional[str] = None
class BoundingBox(BaseModel):
"""Bounding box for detected text"""
points: List[List[float]]
class OCRResult(BaseModel):
"""Single OCR detection result"""
text: str
confidence: float
box: List[List[float]]
class OCRResponse(BaseModel):
"""OCR response model"""
success: bool
results: List[OCRResult]
error: Optional[str] = None
class HealthResponse(BaseModel):
"""Health check response"""
status: str
model: str
language: str
gpu_enabled: bool
def get_ocr() -> PaddleOCR:
"""Get or initialize the OCR instance"""
global ocr_instance
if ocr_instance is None:
logger.info(f"Initializing PaddleOCR with language={OCR_LANGUAGE}, use_gpu={USE_GPU}")
ocr_instance = PaddleOCR(
use_angle_cls=True,
lang=OCR_LANGUAGE,
use_gpu=USE_GPU,
show_log=False
)
logger.info("PaddleOCR initialized successfully")
return ocr_instance
def decode_base64_image(base64_string: str) -> np.ndarray:
"""Decode base64 string to numpy array"""
# Remove data URL prefix if present
if ',' in base64_string:
base64_string = base64_string.split(',')[1]
image_data = base64.b64decode(base64_string)
image = Image.open(io.BytesIO(image_data))
# Convert to RGB if necessary
if image.mode != 'RGB':
image = image.convert('RGB')
return np.array(image)
def process_ocr_result(result: Any) -> List[OCRResult]:
"""Process PaddleOCR result into structured format"""
results = []
if result is None or len(result) == 0:
return results
# PaddleOCR returns list of results per image
# Each result is a list of [box, (text, confidence)]
for line in result[0] if result[0] else []:
if line is None:
continue
box = line[0] # [[x1,y1], [x2,y2], [x3,y3], [x4,y4]]
text_info = line[1] # (text, confidence)
results.append(OCRResult(
text=text_info[0],
confidence=float(text_info[1]),
box=[[float(p[0]), float(p[1])] for p in box]
))
return results
@app.on_event("startup")
async def startup_event():
"""Pre-warm the OCR model on startup"""
logger.info("Pre-warming OCR model...")
try:
ocr = get_ocr()
# Create a small test image to warm up the model
test_image = np.zeros((100, 100, 3), dtype=np.uint8)
test_image.fill(255) # White image
ocr.ocr(test_image, cls=True)
logger.info("OCR model pre-warmed successfully")
except Exception as e:
logger.error(f"Failed to pre-warm OCR model: {e}")
@app.get("/health", response_model=HealthResponse)
async def health_check():
"""Health check endpoint"""
try:
# Ensure OCR is initialized
get_ocr()
return HealthResponse(
status="healthy",
model="PP-OCRv4",
language=OCR_LANGUAGE,
gpu_enabled=USE_GPU
)
except Exception as e:
logger.error(f"Health check failed: {e}")
raise HTTPException(status_code=503, detail=str(e))
@app.post("/ocr", response_model=OCRResponse)
async def ocr_base64(request: OCRRequest):
"""
Perform OCR on a base64-encoded image
Args:
request: OCRRequest with base64 image and optional language
Returns:
OCRResponse with detected text, confidence scores, and bounding boxes
"""
try:
# Decode image
image = decode_base64_image(request.image)
# Get OCR instance (use request language if provided)
ocr = get_ocr()
# If a different language is requested, create a new instance
if request.language and request.language != OCR_LANGUAGE:
logger.info(f"Creating OCR instance for language: {request.language}")
temp_ocr = PaddleOCR(
use_angle_cls=True,
lang=request.language,
use_gpu=USE_GPU,
show_log=False
)
result = temp_ocr.ocr(image, cls=True)
else:
result = ocr.ocr(image, cls=True)
# Process results
results = process_ocr_result(result)
return OCRResponse(success=True, results=results)
except Exception as e:
logger.error(f"OCR processing failed: {e}")
return OCRResponse(success=False, results=[], error=str(e))
@app.post("/ocr/upload", response_model=OCRResponse)
async def ocr_upload(
img: UploadFile = File(...),
language: Optional[str] = Form(None)
):
"""
Perform OCR on an uploaded image file
Args:
img: Uploaded image file
language: Optional language code (default: env OCR_LANGUAGE)
Returns:
OCRResponse with detected text, confidence scores, and bounding boxes
"""
try:
# Read image
contents = await img.read()
image = Image.open(io.BytesIO(contents))
# Convert to RGB if necessary
if image.mode != 'RGB':
image = image.convert('RGB')
image_array = np.array(image)
# Get OCR instance
ocr = get_ocr()
# If a different language is requested, create a new instance
if language and language != OCR_LANGUAGE:
logger.info(f"Creating OCR instance for language: {language}")
temp_ocr = PaddleOCR(
use_angle_cls=True,
lang=language,
use_gpu=USE_GPU,
show_log=False
)
result = temp_ocr.ocr(image_array, cls=True)
else:
result = ocr.ocr(image_array, cls=True)
# Process results
results = process_ocr_result(result)
return OCRResponse(success=True, results=results)
except Exception as e:
logger.error(f"OCR processing failed: {e}")
return OCRResponse(success=False, results=[], error=str(e))
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=5000)

View File

@@ -0,0 +1,636 @@
#!/usr/bin/env python3
"""
PaddleOCR-VL Full Pipeline API Server (Transformers backend)
Provides REST API for document parsing using:
- PP-DocLayoutV2 for layout detection
- PaddleOCR-VL (transformers) for recognition
- Structured JSON/Markdown output
"""
import os
import io
import re
import base64
import logging
import tempfile
import time
import json
from typing import Optional, List, Union
from pathlib import Path
from fastapi import FastAPI, HTTPException, UploadFile, File, Form
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from PIL import Image
import torch
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
# Environment configuration
SERVER_HOST = os.environ.get('SERVER_HOST', '0.0.0.0')
SERVER_PORT = int(os.environ.get('SERVER_PORT', '8000'))
MODEL_NAME = "PaddlePaddle/PaddleOCR-VL"
# Device configuration
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
logger.info(f"Using device: {DEVICE}")
# Task prompts
TASK_PROMPTS = {
"ocr": "OCR:",
"table": "Table Recognition:",
"formula": "Formula Recognition:",
"chart": "Chart Recognition:",
}
# Initialize FastAPI app
app = FastAPI(
title="PaddleOCR-VL Full Pipeline Server",
description="Document parsing with PP-DocLayoutV2 + PaddleOCR-VL (transformers)",
version="1.0.0"
)
# Global model instances
vl_model = None
vl_processor = None
layout_model = None
def load_vl_model():
"""Load the PaddleOCR-VL model for element recognition"""
global vl_model, vl_processor
if vl_model is not None:
return
logger.info(f"Loading PaddleOCR-VL model: {MODEL_NAME}")
from transformers import AutoModelForCausalLM, AutoProcessor
vl_processor = AutoProcessor.from_pretrained(MODEL_NAME, trust_remote_code=True)
if DEVICE == "cuda":
vl_model = AutoModelForCausalLM.from_pretrained(
MODEL_NAME,
trust_remote_code=True,
torch_dtype=torch.bfloat16,
).to(DEVICE).eval()
else:
vl_model = AutoModelForCausalLM.from_pretrained(
MODEL_NAME,
trust_remote_code=True,
torch_dtype=torch.float32,
low_cpu_mem_usage=True,
).eval()
logger.info("PaddleOCR-VL model loaded successfully")
def load_layout_model():
"""Load the LayoutDetection model for layout detection"""
global layout_model
if layout_model is not None:
return
try:
logger.info("Loading LayoutDetection model (PP-DocLayout_plus-L)...")
from paddleocr import LayoutDetection
layout_model = LayoutDetection()
logger.info("LayoutDetection model loaded successfully")
except Exception as e:
logger.warning(f"Could not load LayoutDetection: {e}")
logger.info("Falling back to VL-only mode (no layout detection)")
def recognize_element(image: Image.Image, task: str = "ocr") -> str:
"""Recognize a single element using PaddleOCR-VL"""
load_vl_model()
prompt = TASK_PROMPTS.get(task, TASK_PROMPTS["ocr"])
messages = [
{
"role": "user",
"content": [
{"type": "image", "image": image},
{"type": "text", "text": prompt},
]
}
]
inputs = vl_processor.apply_chat_template(
messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt"
)
if DEVICE == "cuda":
inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
with torch.inference_mode():
outputs = vl_model.generate(
**inputs,
max_new_tokens=4096,
do_sample=False,
use_cache=True
)
response = vl_processor.batch_decode(outputs, skip_special_tokens=True)[0]
# Extract only the assistant's response content
# The response format is: "User: <prompt>\nAssistant: <content>"
# We want to extract just the content after "Assistant:"
if "Assistant:" in response:
parts = response.split("Assistant:")
if len(parts) > 1:
response = parts[-1].strip()
elif "assistant:" in response.lower():
# Case-insensitive fallback
import re
match = re.split(r'[Aa]ssistant:', response)
if len(match) > 1:
response = match[-1].strip()
return response
def detect_layout(image: Image.Image) -> List[dict]:
"""Detect layout regions in the image"""
load_layout_model()
if layout_model is None:
# No layout model - return a single region covering the whole image
return [{
"type": "text",
"bbox": [0, 0, image.width, image.height],
"score": 1.0
}]
# Save image to temp file
with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
image.save(tmp.name, "PNG")
tmp_path = tmp.name
try:
results = layout_model.predict(tmp_path)
regions = []
for res in results:
# LayoutDetection returns boxes in 'boxes' key
for box in res.get("boxes", []):
coord = box.get("coordinate", [0, 0, image.width, image.height])
# Convert numpy floats to regular floats
bbox = [float(c) for c in coord]
regions.append({
"type": box.get("label", "text"),
"bbox": bbox,
"score": float(box.get("score", 1.0))
})
# Sort regions by vertical position (top to bottom)
regions.sort(key=lambda r: r["bbox"][1])
return regions if regions else [{
"type": "text",
"bbox": [0, 0, image.width, image.height],
"score": 1.0
}]
finally:
os.unlink(tmp_path)
def process_document(image: Image.Image) -> dict:
"""Process a document through the full pipeline"""
logger.info(f"Processing document: {image.size}")
# Step 1: Detect layout
regions = detect_layout(image)
logger.info(f"Detected {len(regions)} layout regions")
# Step 2: Recognize each region
blocks = []
for i, region in enumerate(regions):
region_type = region["type"].lower()
bbox = region["bbox"]
# Crop region from image
x1, y1, x2, y2 = [int(c) for c in bbox]
region_image = image.crop((x1, y1, x2, y2))
# Determine task based on region type
if "table" in region_type:
task = "table"
elif "formula" in region_type or "math" in region_type:
task = "formula"
elif "chart" in region_type or "figure" in region_type:
task = "chart"
else:
task = "ocr"
# Recognize the region
try:
content = recognize_element(region_image, task)
blocks.append({
"index": i,
"type": region_type,
"bbox": bbox,
"content": content,
"task": task
})
logger.info(f" Region {i} ({region_type}): {len(content)} chars")
except Exception as e:
logger.error(f" Region {i} error: {e}")
blocks.append({
"index": i,
"type": region_type,
"bbox": bbox,
"content": "",
"error": str(e)
})
return {"blocks": blocks, "image_size": list(image.size)}
def result_to_markdown(result: dict) -> str:
"""Convert result to Markdown format with structural hints for LLM processing.
Adds positional and type-based formatting to help downstream LLMs
understand document structure:
- Tables are marked with **[TABLE]** prefix
- Header zone content (top 15%) is bolded
- Footer zone content (bottom 15%) is separated with horizontal rule
- Titles are formatted as # headers
- Figures/charts are marked with *[Figure: ...]*
"""
lines = []
image_height = result.get("image_size", [0, 1000])[1]
for block in result.get("blocks", []):
block_type = block.get("type", "text").lower()
content = block.get("content", "").strip()
bbox = block.get("bbox", [])
if not content:
continue
# Determine position zone (top 15%, middle, bottom 15%)
y_pos = bbox[1] if bbox and len(bbox) > 1 else 0
y_end = bbox[3] if bbox and len(bbox) > 3 else y_pos
is_header_zone = y_pos < image_height * 0.15
is_footer_zone = y_end > image_height * 0.85
# Format based on type and position
if "table" in block_type:
lines.append(f"\n**[TABLE]**\n{content}\n")
elif "title" in block_type:
lines.append(f"# {content}")
elif "formula" in block_type or "math" in block_type:
lines.append(f"\n$$\n{content}\n$$\n")
elif "figure" in block_type or "chart" in block_type:
lines.append(f"*[Figure: {content}]*")
elif is_header_zone:
lines.append(f"**{content}**")
elif is_footer_zone:
lines.append(f"---\n{content}")
else:
lines.append(content)
return "\n\n".join(lines)
def parse_markdown_table(content: str) -> str:
"""Convert table content to HTML table.
Handles:
- PaddleOCR-VL format: <fcel>cell<lcel>cell<nl> (detected by <fcel> tags)
- Pipe-delimited tables: | Header | Header |
- Separator rows: |---|---|
- Returns HTML <table> structure
"""
content_stripped = content.strip()
# Check for PaddleOCR-VL table format (<fcel>, <lcel>, <ecel>, <nl>)
if '<fcel>' in content_stripped or '<nl>' in content_stripped:
return parse_paddleocr_table(content_stripped)
lines = content_stripped.split('\n')
if not lines:
return f'<pre>{content}</pre>'
# Check if it looks like a markdown table
if not any('|' in line for line in lines):
return f'<pre>{content}</pre>'
html_rows = []
is_header = True
for line in lines:
line = line.strip()
if not line or line.startswith('|') == False and '|' not in line:
continue
# Skip separator rows (|---|---|)
if re.match(r'^[\|\s\-:]+$', line):
is_header = False
continue
# Parse cells
cells = [c.strip() for c in line.split('|')]
cells = [c for c in cells if c] # Remove empty from edges
if is_header:
row = '<tr>' + ''.join(f'<th>{c}</th>' for c in cells) + '</tr>'
html_rows.append(f'<thead>{row}</thead>')
is_header = False
else:
row = '<tr>' + ''.join(f'<td>{c}</td>' for c in cells) + '</tr>'
html_rows.append(row)
if html_rows:
# Wrap body rows in tbody
header = html_rows[0] if '<thead>' in html_rows[0] else ''
body_rows = [r for r in html_rows if '<thead>' not in r]
body = f'<tbody>{"".join(body_rows)}</tbody>' if body_rows else ''
return f'<table>{header}{body}</table>'
return f'<pre>{content}</pre>'
def parse_paddleocr_table(content: str) -> str:
"""Convert PaddleOCR-VL table format to HTML table.
PaddleOCR-VL uses:
- <fcel> = first cell in a row
- <lcel> = subsequent cells
- <ecel> = empty cell
- <nl> = row separator (newline)
Example input:
<fcel>Header1<lcel>Header2<nl><fcel>Value1<lcel>Value2<nl>
"""
# Split into rows by <nl>
rows_raw = re.split(r'<nl>', content)
html_rows = []
is_first_row = True
for row_content in rows_raw:
row_content = row_content.strip()
if not row_content:
continue
# Extract cells: split by <fcel>, <lcel>, or <ecel>
# Each cell is the text between these markers
cells = []
# Pattern to match cell markers and capture content
# Content is everything between markers
parts = re.split(r'<fcel>|<lcel>|<ecel>', row_content)
for part in parts:
part = part.strip()
if part:
cells.append(part)
if not cells:
continue
# First row is header
if is_first_row:
row_html = '<tr>' + ''.join(f'<th>{c}</th>' for c in cells) + '</tr>'
html_rows.append(f'<thead>{row_html}</thead>')
is_first_row = False
else:
row_html = '<tr>' + ''.join(f'<td>{c}</td>' for c in cells) + '</tr>'
html_rows.append(row_html)
if html_rows:
header = html_rows[0] if '<thead>' in html_rows[0] else ''
body_rows = [r for r in html_rows if '<thead>' not in r]
body = f'<tbody>{"".join(body_rows)}</tbody>' if body_rows else ''
return f'<table>{header}{body}</table>'
return f'<pre>{content}</pre>'
def result_to_html(result: dict) -> str:
"""Convert result to semantic HTML for optimal LLM processing.
Uses semantic HTML5 tags with position metadata as data-* attributes.
Markdown tables are converted to proper HTML <table> tags for
unambiguous parsing by downstream LLMs.
"""
parts = []
image_height = result.get("image_size", [0, 1000])[1]
parts.append('<!DOCTYPE html><html><body>')
for block in result.get("blocks", []):
block_type = block.get("type", "text").lower()
content = block.get("content", "").strip()
bbox = block.get("bbox", [])
if not content:
continue
# Position metadata
y_pos = bbox[1] / image_height if bbox and len(bbox) > 1 else 0
data_attrs = f'data-type="{block_type}" data-y="{y_pos:.2f}"'
# Format based on type
if "table" in block_type:
table_html = parse_markdown_table(content)
parts.append(f'<section {data_attrs} class="table-region">{table_html}</section>')
elif "title" in block_type:
parts.append(f'<h1 {data_attrs}>{content}</h1>')
elif "formula" in block_type or "math" in block_type:
parts.append(f'<div {data_attrs} class="formula"><code>{content}</code></div>')
elif "figure" in block_type or "chart" in block_type:
parts.append(f'<figure {data_attrs}><figcaption>{content}</figcaption></figure>')
elif y_pos < 0.15:
parts.append(f'<header {data_attrs}><strong>{content}</strong></header>')
elif y_pos > 0.85:
parts.append(f'<footer {data_attrs}>{content}</footer>')
else:
parts.append(f'<p {data_attrs}>{content}</p>')
parts.append('</body></html>')
return '\n'.join(parts)
# Request/Response models
class ParseRequest(BaseModel):
image: str # base64 encoded image
output_format: Optional[str] = "json"
class ParseResponse(BaseModel):
success: bool
format: str
result: Union[dict, str]
processing_time: float
error: Optional[str] = None
def decode_image(image_source: str) -> Image.Image:
"""Decode image from base64 or data URL"""
if image_source.startswith("data:"):
header, data = image_source.split(",", 1)
image_data = base64.b64decode(data)
else:
image_data = base64.b64decode(image_source)
return Image.open(io.BytesIO(image_data)).convert("RGB")
@app.on_event("startup")
async def startup_event():
"""Pre-load models on startup"""
logger.info("Starting PaddleOCR-VL Full Pipeline Server...")
try:
load_vl_model()
load_layout_model()
logger.info("Models loaded successfully")
except Exception as e:
logger.error(f"Failed to pre-load models: {e}")
@app.get("/health")
async def health_check():
"""Health check endpoint"""
return {
"status": "healthy" if vl_model is not None else "loading",
"service": "PaddleOCR-VL Full Pipeline (Transformers)",
"device": DEVICE,
"vl_model_loaded": vl_model is not None,
"layout_model_loaded": layout_model is not None
}
@app.get("/formats")
async def supported_formats():
"""List supported output formats"""
return {
"output_formats": ["json", "markdown", "html"],
"image_formats": ["PNG", "JPEG", "WebP", "BMP", "GIF", "TIFF"],
"capabilities": [
"Layout detection (PP-DocLayoutV2)",
"Text recognition (OCR)",
"Table recognition",
"Formula recognition (LaTeX)",
"Chart recognition",
"Multi-language support (109 languages)"
]
}
@app.post("/parse", response_model=ParseResponse)
async def parse_document_endpoint(request: ParseRequest):
"""Parse a document image and return structured output"""
try:
start_time = time.time()
image = decode_image(request.image)
result = process_document(image)
if request.output_format == "markdown":
markdown = result_to_markdown(result)
output = {"markdown": markdown}
elif request.output_format == "html":
html = result_to_html(result)
output = {"html": html}
else:
output = result
elapsed = time.time() - start_time
logger.info(f"Processing complete in {elapsed:.2f}s")
return ParseResponse(
success=True,
format=request.output_format,
result=output,
processing_time=elapsed
)
except Exception as e:
logger.error(f"Error processing document: {e}", exc_info=True)
return ParseResponse(
success=False,
format=request.output_format,
result={},
processing_time=0,
error=str(e)
)
@app.post("/v1/chat/completions")
async def chat_completions(request: dict):
"""OpenAI-compatible chat completions endpoint"""
try:
messages = request.get("messages", [])
output_format = request.get("output_format", "json")
# Find user message with image
image = None
for msg in reversed(messages):
if msg.get("role") == "user":
content = msg.get("content", [])
if isinstance(content, list):
for item in content:
if item.get("type") == "image_url":
url = item.get("image_url", {}).get("url", "")
image = decode_image(url)
break
break
if image is None:
raise HTTPException(status_code=400, detail="No image provided")
start_time = time.time()
result = process_document(image)
if output_format == "markdown":
content = result_to_markdown(result)
elif output_format == "html":
content = result_to_html(result)
else:
content = json.dumps(result, ensure_ascii=False, indent=2)
elapsed = time.time() - start_time
return {
"id": f"chatcmpl-{int(time.time()*1000)}",
"object": "chat.completion",
"created": int(time.time()),
"model": "paddleocr-vl-full",
"choices": [{
"index": 0,
"message": {"role": "assistant", "content": content},
"finish_reason": "stop"
}],
"usage": {
"prompt_tokens": 100,
"completion_tokens": len(content) // 4,
"total_tokens": 100 + len(content) // 4
},
"processing_time": elapsed
}
except HTTPException:
raise
except Exception as e:
logger.error(f"Error in chat completions: {e}", exc_info=True)
raise HTTPException(status_code=500, detail=str(e))
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host=SERVER_HOST, port=SERVER_PORT)

View File

@@ -0,0 +1,465 @@
#!/usr/bin/env python3
"""
PaddleOCR-VL FastAPI Server (CPU variant)
Provides OpenAI-compatible REST API for document parsing using PaddleOCR-VL
"""
import os
import io
import base64
import logging
import time
from typing import Optional, List, Any, Dict, Union
from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse
from pydantic import BaseModel
import torch
from PIL import Image
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
# Environment configuration
SERVER_HOST = os.environ.get('SERVER_HOST', '0.0.0.0')
SERVER_PORT = int(os.environ.get('SERVER_PORT', '8000'))
MODEL_NAME = os.environ.get('MODEL_NAME', 'PaddlePaddle/PaddleOCR-VL')
# Device configuration
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
logger.info(f"Using device: {DEVICE}")
# Task prompts for PaddleOCR-VL
TASK_PROMPTS = {
"ocr": "OCR:",
"table": "Table Recognition:",
"formula": "Formula Recognition:",
"chart": "Chart Recognition:",
}
# Initialize FastAPI app
app = FastAPI(
title="PaddleOCR-VL Server",
description="OpenAI-compatible REST API for document parsing using PaddleOCR-VL",
version="1.0.0"
)
# Global model instances
model = None
processor = None
# Request/Response models (OpenAI-compatible)
class ImageUrl(BaseModel):
url: str
class ContentItem(BaseModel):
type: str
text: Optional[str] = None
image_url: Optional[ImageUrl] = None
class Message(BaseModel):
role: str
content: Union[str, List[ContentItem]]
class ChatCompletionRequest(BaseModel):
model: str = "paddleocr-vl"
messages: List[Message]
temperature: Optional[float] = 0.0
max_tokens: Optional[int] = 4096
class Choice(BaseModel):
index: int
message: Message
finish_reason: str
class Usage(BaseModel):
prompt_tokens: int
completion_tokens: int
total_tokens: int
class ChatCompletionResponse(BaseModel):
id: str
object: str = "chat.completion"
created: int
model: str
choices: List[Choice]
usage: Usage
class HealthResponse(BaseModel):
status: str
model: str
device: str
def load_model():
"""Load the PaddleOCR-VL model and processor"""
global model, processor
if model is not None:
return
logger.info(f"Loading PaddleOCR-VL model: {MODEL_NAME}")
from transformers import AutoModelForCausalLM, AutoProcessor
# Load processor
processor = AutoProcessor.from_pretrained(MODEL_NAME, trust_remote_code=True)
# Load model with appropriate settings for CPU/GPU
if DEVICE == "cuda":
model = AutoModelForCausalLM.from_pretrained(
MODEL_NAME,
trust_remote_code=True,
torch_dtype=torch.bfloat16,
).to(DEVICE).eval()
else:
# CPU mode - use float32 for compatibility
model = AutoModelForCausalLM.from_pretrained(
MODEL_NAME,
trust_remote_code=True,
torch_dtype=torch.float32,
low_cpu_mem_usage=True,
).eval()
logger.info("PaddleOCR-VL model loaded successfully")
def optimize_image_resolution(image: Image.Image, max_size: int = 2048, min_size: int = 1080) -> Image.Image:
"""
Optimize image resolution for PaddleOCR-VL.
Best results are achieved with images in the 1080p-2K range.
- Images larger than max_size are scaled down
- Very small images are scaled up to min_size
"""
width, height = image.size
max_dim = max(width, height)
min_dim = min(width, height)
# Scale down if too large (4K+ images often miss text)
if max_dim > max_size:
scale = max_size / max_dim
new_width = int(width * scale)
new_height = int(height * scale)
logger.info(f"Scaling down image from {width}x{height} to {new_width}x{new_height}")
image = image.resize((new_width, new_height), Image.Resampling.LANCZOS)
# Scale up if too small
elif max_dim < min_size and min_dim < min_size:
scale = min_size / max_dim
new_width = int(width * scale)
new_height = int(height * scale)
logger.info(f"Scaling up image from {width}x{height} to {new_width}x{new_height}")
image = image.resize((new_width, new_height), Image.Resampling.LANCZOS)
else:
logger.info(f"Image size {width}x{height} is optimal, no scaling needed")
return image
def decode_image(image_source: str, optimize: bool = True) -> Image.Image:
"""
Decode image from various sources.
Supported formats:
- Base64 data URL: data:image/png;base64,... or data:image/jpeg;base64,...
- HTTP/HTTPS URL: https://example.com/image.png
- Raw base64 string
- Local file path
Supported image types: PNG, JPEG, WebP, BMP, GIF, TIFF
"""
image = None
if image_source.startswith("data:"):
# Base64 encoded image with MIME type header
# Supports: data:image/png;base64,... data:image/jpeg;base64,... etc.
header, data = image_source.split(",", 1)
image_data = base64.b64decode(data)
image = Image.open(io.BytesIO(image_data)).convert("RGB")
logger.debug(f"Decoded base64 image with header: {header}")
elif image_source.startswith("http://") or image_source.startswith("https://"):
# URL - fetch image
import httpx
response = httpx.get(image_source, timeout=30.0)
response.raise_for_status()
image = Image.open(io.BytesIO(response.content)).convert("RGB")
logger.debug(f"Fetched image from URL: {image_source[:50]}...")
else:
# Assume it's a file path or raw base64
try:
image_data = base64.b64decode(image_source)
image = Image.open(io.BytesIO(image_data)).convert("RGB")
logger.debug("Decoded raw base64 image")
except Exception:
# Try as file path
image = Image.open(image_source).convert("RGB")
logger.debug(f"Loaded image from file: {image_source}")
# Optimize resolution for best OCR results
if optimize:
image = optimize_image_resolution(image)
return image
def extract_image_and_text(content: Union[str, List[ContentItem]]) -> tuple:
"""Extract image and text prompt from message content"""
if isinstance(content, str):
return None, content
image = None
text = ""
for item in content:
if item.type == "image_url" and item.image_url:
image = decode_image(item.image_url.url)
elif item.type == "text" and item.text:
text = item.text
return image, text
def generate_response(image: Image.Image, prompt: str, max_tokens: int = 4096) -> str:
"""Generate response using PaddleOCR-VL"""
load_model()
messages = [
{
"role": "user",
"content": [
{"type": "image", "image": image},
{"type": "text", "text": prompt},
]
}
]
inputs = processor.apply_chat_template(
messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt"
)
if DEVICE == "cuda":
inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
with torch.inference_mode():
outputs = model.generate(
**inputs,
max_new_tokens=max_tokens,
do_sample=False,
use_cache=True
)
response = processor.batch_decode(outputs, skip_special_tokens=True)[0]
# Extract the assistant's response (after the prompt)
if "assistant" in response.lower():
parts = response.split("assistant")
if len(parts) > 1:
response = parts[-1].strip()
return response
@app.on_event("startup")
async def startup_event():
"""Pre-load the model on startup"""
logger.info("Pre-loading PaddleOCR-VL model...")
try:
load_model()
logger.info("Model pre-loaded successfully")
except Exception as e:
logger.error(f"Failed to pre-load model: {e}")
# Don't fail startup - model will be loaded on first request
@app.get("/health", response_model=HealthResponse)
async def health_check():
"""Health check endpoint"""
return HealthResponse(
status="healthy" if model is not None else "loading",
model=MODEL_NAME,
device=DEVICE
)
@app.get("/formats")
async def supported_formats():
"""List supported image formats and input methods"""
return {
"image_formats": {
"supported": ["PNG", "JPEG", "WebP", "BMP", "GIF", "TIFF"],
"recommended": ["PNG", "JPEG"],
"mime_types": [
"image/png",
"image/jpeg",
"image/webp",
"image/bmp",
"image/gif",
"image/tiff"
]
},
"input_methods": {
"base64_data_url": {
"description": "Base64 encoded image with MIME type header",
"example": "data:image/png;base64,iVBORw0KGgo..."
},
"http_url": {
"description": "Direct HTTP/HTTPS URL to image",
"example": "https://example.com/image.png"
},
"raw_base64": {
"description": "Raw base64 string without header",
"example": "iVBORw0KGgo..."
}
},
"resolution": {
"optimal_range": "1080p to 2K (1080-2048 pixels on longest side)",
"auto_scaling": True,
"note": "Images are automatically scaled to optimal range. 4K+ images are scaled down for better accuracy."
},
"task_prompts": TASK_PROMPTS
}
@app.get("/v1/models")
async def list_models():
"""List available models (OpenAI-compatible)"""
return {
"object": "list",
"data": [
{
"id": "paddleocr-vl",
"object": "model",
"created": int(time.time()),
"owned_by": "paddlepaddle"
}
]
}
@app.post("/v1/chat/completions", response_model=ChatCompletionResponse)
async def chat_completions(request: ChatCompletionRequest):
"""
OpenAI-compatible chat completions endpoint for PaddleOCR-VL
Supports tasks:
- "OCR:" - Text recognition
- "Table Recognition:" - Table extraction
- "Formula Recognition:" - Formula extraction
- "Chart Recognition:" - Chart extraction
"""
try:
# Get the last user message
user_message = None
for msg in reversed(request.messages):
if msg.role == "user":
user_message = msg
break
if not user_message:
raise HTTPException(status_code=400, detail="No user message found")
# Extract image and prompt
image, prompt = extract_image_and_text(user_message.content)
if image is None:
raise HTTPException(status_code=400, detail="No image provided in message")
# Default to OCR if no specific prompt
if not prompt or prompt.strip() == "":
prompt = "OCR:"
logger.info(f"Processing request with prompt: {prompt[:50]}...")
# Generate response
start_time = time.time()
response_text = generate_response(image, prompt, request.max_tokens or 4096)
elapsed = time.time() - start_time
logger.info(f"Generated response in {elapsed:.2f}s ({len(response_text)} chars)")
# Build OpenAI-compatible response
return ChatCompletionResponse(
id=f"chatcmpl-{int(time.time()*1000)}",
created=int(time.time()),
model=request.model,
choices=[
Choice(
index=0,
message=Message(role="assistant", content=response_text),
finish_reason="stop"
)
],
usage=Usage(
prompt_tokens=100, # Approximate
completion_tokens=len(response_text) // 4,
total_tokens=100 + len(response_text) // 4
)
)
except HTTPException:
raise
except Exception as e:
logger.error(f"Error processing request: {e}")
raise HTTPException(status_code=500, detail=str(e))
# Legacy endpoint for compatibility with old PaddleOCR API
class LegacyOCRRequest(BaseModel):
image: str
task: Optional[str] = "ocr"
class LegacyOCRResponse(BaseModel):
success: bool
result: str
task: str
error: Optional[str] = None
@app.post("/ocr", response_model=LegacyOCRResponse)
async def legacy_ocr(request: LegacyOCRRequest):
"""
Legacy OCR endpoint for backwards compatibility
Tasks: ocr, table, formula, chart
"""
try:
image = decode_image(request.image)
prompt = TASK_PROMPTS.get(request.task, TASK_PROMPTS["ocr"])
result = generate_response(image, prompt)
return LegacyOCRResponse(
success=True,
result=result,
task=request.task
)
except Exception as e:
logger.error(f"Legacy OCR error: {e}")
return LegacyOCRResponse(
success=False,
result="",
task=request.task,
error=str(e)
)
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host=SERVER_HOST, port=SERVER_PORT)


@@ -1,7 +1,10 @@
{ {
"npmci": { "npmci": {
"npmGlobalTools": [], "npmGlobalTools": [],
"npmAccessLevel": "public" "npmAccessLevel": "public",
"dockerRegistries": [
"code.foss.global"
]
}, },
"gitzone": { "gitzone": {
"projectType": "docker", "projectType": "docker",


@@ -1,6 +1,6 @@
{ {
"name": "@host.today/ht-docker-ai", "name": "@host.today/ht-docker-ai",
"version": "1.2.0", "version": "1.13.0",
"type": "module", "type": "module",
"private": false, "private": false,
"description": "Docker images for AI vision-language models including MiniCPM-V 4.5", "description": "Docker images for AI vision-language models including MiniCPM-V 4.5",
@@ -13,8 +13,8 @@
"test": "tstest test/ --verbose" "test": "tstest test/ --verbose"
}, },
"devDependencies": { "devDependencies": {
"@git.zone/tstest": "^1.0.90", "@git.zone/tsrun": "^1.3.3",
"@git.zone/tsrun": "^1.3.3" "@git.zone/tstest": "^1.0.90"
}, },
"repository": { "repository": {
"type": "git", "type": "git",
@@ -28,5 +28,8 @@
"minicpm", "minicpm",
"ollama", "ollama",
"multimodal" "multimodal"
] ],
"dependencies": {
"@types/node": "^25.0.9"
}
} }

pnpm-lock.yaml (generated)

@@ -7,6 +7,10 @@ settings:
importers: importers:
.: .:
dependencies:
'@types/node':
specifier: ^25.0.9
version: 25.0.9
devDependencies: devDependencies:
'@git.zone/tsrun': '@git.zone/tsrun':
specifier: ^1.3.3 specifier: ^1.3.3


@@ -77,6 +77,95 @@ HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
CPU variant has longer `start-period` (120s) due to slower startup. CPU variant has longer `start-period` (120s) due to slower startup.
## PaddleOCR-VL (Recommended)
### Overview
PaddleOCR-VL is a 0.9B parameter Vision-Language Model specifically optimized for document parsing. It replaces the older PP-Structure approach with native VLM understanding.
**Key advantages over PP-Structure:**
- Native table understanding (no HTML parsing needed)
- 109 language support
- Better handling of complex multi-row tables
- Structured Markdown/JSON output
### Docker Images
| Tag | Description |
|-----|-------------|
| `paddleocr-vl` | GPU variant using vLLM (recommended) |
| `paddleocr-vl-cpu` | CPU variant using transformers |
### API Endpoints (OpenAI-compatible)
| Endpoint | Method | Description |
|----------|--------|-------------|
| `/health` | GET | Health check with model info |
| `/v1/models` | GET | List available models |
| `/v1/chat/completions` | POST | OpenAI-compatible chat completions |
| `/ocr` | POST | Legacy OCR endpoint |
### Request/Response Format
**POST /v1/chat/completions (OpenAI-compatible)**
```json
{
"model": "paddleocr-vl",
"messages": [
{
"role": "user",
"content": [
{"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}},
{"type": "text", "text": "Table Recognition:"}
]
}
],
"temperature": 0.0,
"max_tokens": 8192
}
```
**Task Prompts:**
- `"OCR:"` - Text recognition
- `"Table Recognition:"` - Table extraction (returns markdown)
- `"Formula Recognition:"` - Formula extraction
- `"Chart Recognition:"` - Chart extraction
**Response**
```json
{
"id": "chatcmpl-...",
"object": "chat.completion",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "| Date | Description | Amount |\n|---|---|---|\n| 2021-06-01 | GITLAB INC | -119.96 |"
},
"finish_reason": "stop"
}
]
}
```
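For programmatic use, the same request can be sent with any HTTP client. A minimal TypeScript sketch (the `localhost:8000` URL and the helper name are illustrative, taken from the quick-start examples; this is not an official client):
```typescript
// Minimal sketch: send one image to the PaddleOCR-VL chat completions endpoint
// and return the assistant's text (markdown for "Table Recognition:").
async function recognizeTable(imageBase64: string): Promise<string> {
  const response = await fetch('http://localhost:8000/v1/chat/completions', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: 'paddleocr-vl',
      messages: [{
        role: 'user',
        content: [
          { type: 'image_url', image_url: { url: `data:image/png;base64,${imageBase64}` } },
          { type: 'text', text: 'Table Recognition:' },
        ],
      }],
      temperature: 0.0,
      max_tokens: 8192,
    }),
  });
  const data = await response.json();
  // The extracted content arrives in the first choice's assistant message.
  return data.choices[0].message.content;
}
```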
### Environment Variables
| Variable | Default | Description |
|----------|---------|-------------|
| `MODEL_NAME` | `PaddlePaddle/PaddleOCR-VL` | Model to load |
| `HOST` | `0.0.0.0` | Server host |
| `PORT` | `8000` | Server port |
| `MAX_BATCHED_TOKENS` | `16384` | vLLM max batch tokens |
| `GPU_MEMORY_UTILIZATION` | `0.9` | GPU memory usage (0-1) |
### Performance
- **GPU (vLLM)**: ~2-5 seconds per page
- **CPU**: ~30-60 seconds per page
---
## Adding New Models ## Adding New Models
To add a new model variant: To add a new model variant:
@@ -118,6 +207,43 @@ npmci docker build
npmci docker push code.foss.global npmci docker push code.foss.global
``` ```
## Multi-Pass Extraction Strategy
The bank statement extraction uses a dual-VLM consensus approach:
### Architecture: Dual-VLM Consensus
| VLM | Model | Purpose |
|-----|-------|---------|
| **MiniCPM-V 4.5** | 8B params | Primary visual extraction |
| **PaddleOCR-VL** | 0.9B params | Table-specialized extraction |
### Extraction Strategy
1. **Pass 1**: MiniCPM-V visual extraction (images → JSON)
2. **Pass 2**: PaddleOCR-VL table recognition (images → markdown → JSON)
3. **Consensus**: If Pass 1 == Pass 2 → Done (fast path)
4. **Pass 3+**: Additional MiniCPM-V visual passes if no consensus (see the sketch below)
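A minimal sketch of that loop, with the two extractors passed in as parameters; their implementations (calls to the MiniCPM-V and PaddleOCR-VL HTTP APIs) are assumed, not shown:
```typescript
// Sketch only - the extractor implementations and the retry budget are assumptions.
interface ITransaction { date: string; counterparty: string; amount: number; }
type Extractor = (images: string[]) => Promise<ITransaction[]>;

// Two passes agree when lengths, dates, and amounts (within 1 cent) all match.
function sameTransactions(a: ITransaction[], b: ITransaction[]): boolean {
  return a.length === b.length &&
    a.every((tx, i) => tx.date === b[i].date && Math.abs(tx.amount - b[i].amount) < 0.01);
}

async function extractWithConsensus(
  images: string[],
  extractWithMiniCpm: Extractor,     // Pass 1 and Pass 3+ (visual extraction)
  extractWithPaddleOcrVl: Extractor, // Pass 2 (table recognition)
  maxExtraPasses = 3,
): Promise<ITransaction[]> {
  const pass1 = await extractWithMiniCpm(images);
  const pass2 = await extractWithPaddleOcrVl(images);
  if (sameTransactions(pass1, pass2)) return pass1; // fast path: the two VLMs agree

  // Pass 3+: further visual passes until one matches a previous result
  let previous = pass1;
  for (let i = 0; i < maxExtraPasses; i++) {
    const next = await extractWithMiniCpm(images);
    if (sameTransactions(next, previous) || sameTransactions(next, pass2)) return next;
    previous = next;
  }
  return previous; // no consensus reached - caller decides how to handle this case
}
```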
### Why Dual-VLM Works
- **Different architectures**: Two independent models cross-check each other
- **Specialized strengths**: PaddleOCR-VL optimized for tables, MiniCPM-V for general vision
- **No structure loss**: Both VLMs see the original images directly
- **Fast consensus**: Most documents complete in 2 passes when VLMs agree
### Comparison vs Old PP-Structure Approach
| Approach | Bank Statement Result | Issue |
|----------|----------------------|-------|
| MiniCPM-V Visual | 28 transactions ✓ | - |
| PP-Structure HTML + Visual | 13 transactions ✗ | HTML merged rows incorrectly |
| PaddleOCR-VL Table | 28 transactions ✓ | Native table understanding |
**Key insight**: PP-Structure's HTML output loses structure for complex tables. PaddleOCR-VL's native VLM approach maintains table integrity.
---
## Related Resources ## Related Resources
- [Ollama Documentation](https://ollama.ai/docs) - [Ollama Documentation](https://ollama.ai/docs)

readme.md

@@ -1,23 +1,40 @@
# @host.today/ht-docker-ai # @host.today/ht-docker-ai 🚀
Docker images for AI vision-language models, starting with MiniCPM-V 4.5. Production-ready Docker images for state-of-the-art AI Vision-Language Models. Run powerful multimodal AI locally with GPU acceleration or CPU fallback—no cloud API keys required.
## Overview ## Issue Reporting and Security
This project provides ready-to-use Docker containers for running state-of-the-art AI vision-language models. Built on Ollama for simplified model management and a consistent REST API. For reporting bugs, issues, or security vulnerabilities, please visit [community.foss.global/](https://community.foss.global/). This is the central community hub for all issue reporting. Developers who sign and comply with our contribution agreement and go through identification can also get a [code.foss.global/](https://code.foss.global/) account to submit Pull Requests directly.
## Available Images ## 🎯 What's Included
| Tag | Description | Requirements | | Model | Parameters | Best For | API |
|-----|-------------|--------------| |-------|-----------|----------|-----|
| `minicpm45v` | MiniCPM-V 4.5 with GPU support | NVIDIA GPU, 9-18GB VRAM | | **MiniCPM-V 4.5** | 8B | General vision understanding, image analysis, multi-image | Ollama-compatible |
| `minicpm45v-cpu` | MiniCPM-V 4.5 CPU-only | 8GB+ RAM | | **PaddleOCR-VL** | 0.9B | Document parsing, table extraction, OCR | OpenAI-compatible |
| `latest` | Alias for `minicpm45v` | NVIDIA GPU |
## Quick Start ## 📦 Available Images
### GPU (Recommended) ```
code.foss.global/host.today/ht-docker-ai:<tag>
```
| Tag | Model | Hardware | Port |
|-----|-------|----------|------|
| `minicpm45v` / `latest` | MiniCPM-V 4.5 | NVIDIA GPU (9-18GB VRAM) | 11434 |
| `minicpm45v-cpu` | MiniCPM-V 4.5 | CPU only (8GB+ RAM) | 11434 |
| `paddleocr-vl` / `paddleocr-vl-gpu` | PaddleOCR-VL | NVIDIA GPU | 8000 |
| `paddleocr-vl-cpu` | PaddleOCR-VL | CPU only | 8000 |
---
## 🖼️ MiniCPM-V 4.5
A GPT-4o level multimodal LLM from OpenBMB—handles image understanding, OCR, multi-image analysis, and visual reasoning across 30+ languages.
### Quick Start
**GPU (Recommended):**
```bash ```bash
docker run -d \ docker run -d \
--name minicpm \ --name minicpm \
@@ -27,8 +44,7 @@ docker run -d \
code.foss.global/host.today/ht-docker-ai:minicpm45v code.foss.global/host.today/ht-docker-ai:minicpm45v
``` ```
### CPU Only **CPU Only:**
```bash ```bash
docker run -d \ docker run -d \
--name minicpm \ --name minicpm \
@@ -37,18 +53,16 @@ docker run -d \
code.foss.global/host.today/ht-docker-ai:minicpm45v-cpu code.foss.global/host.today/ht-docker-ai:minicpm45v-cpu
``` ```
## API Usage > 💡 **Pro tip:** Mount the volume to persist downloaded models (~5GB). Without it, models re-download on every container start.
The container exposes the Ollama API on port 11434. ### API Examples
### List Available Models
**List models:**
```bash ```bash
curl http://localhost:11434/api/tags curl http://localhost:11434/api/tags
``` ```
### Generate Text from Image **Analyze an image:**
```bash ```bash
curl http://localhost:11434/api/generate -d '{ curl http://localhost:11434/api/generate -d '{
"model": "minicpm-v", "model": "minicpm-v",
@@ -57,60 +71,128 @@ curl http://localhost:11434/api/generate -d '{
}' }'
``` ```
### Chat with Vision **Chat with vision:**
```bash ```bash
curl http://localhost:11434/api/chat -d '{ curl http://localhost:11434/api/chat -d '{
"model": "minicpm-v", "model": "minicpm-v",
"messages": [ "messages": [{
{
"role": "user", "role": "user",
"content": "Describe this image in detail", "content": "Describe this image in detail",
"images": ["<base64-encoded-image>"] "images": ["<base64-encoded-image>"]
} }]
]
}' }'
``` ```
## Environment Variables ### Hardware Requirements
| Variable | Default | Description | | Variant | VRAM/RAM | Notes |
|----------|---------|-------------| |---------|----------|-------|
| `MODEL_NAME` | `minicpm-v` | Model to pull on startup | | GPU (int4 quantized) | 9GB VRAM | Recommended for most use cases |
| `OLLAMA_HOST` | `0.0.0.0` | Host address for API | | GPU (full precision) | 18GB VRAM | Maximum quality |
| `OLLAMA_ORIGINS` | `*` | Allowed CORS origins | | CPU (GGUF) | 8GB+ RAM | Slower but accessible |
## Hardware Requirements ---
### GPU Variant (`minicpm45v`) ## 📄 PaddleOCR-VL
- NVIDIA GPU with CUDA support A specialized 0.9B Vision-Language Model optimized for document parsing. Native support for tables, formulas, charts, and text extraction in 109 languages.
- Minimum 9GB VRAM (int4 quantized)
- Recommended 18GB VRAM (full precision)
- NVIDIA Container Toolkit installed
### CPU Variant (`minicpm45v-cpu`) ### Quick Start
- Minimum 8GB RAM **GPU:**
- Recommended 16GB+ RAM for better performance ```bash
- No GPU required docker run -d \
--name paddleocr \
--gpus all \
-p 8000:8000 \
-v hf-cache:/root/.cache/huggingface \
code.foss.global/host.today/ht-docker-ai:paddleocr-vl
```
## Model Information **CPU:**
```bash
docker run -d \
--name paddleocr \
-p 8000:8000 \
-v hf-cache:/root/.cache/huggingface \
code.foss.global/host.today/ht-docker-ai:paddleocr-vl-cpu
```
**MiniCPM-V 4.5** is a GPT-4o level multimodal large language model developed by OpenBMB. ### OpenAI-Compatible API
- **Parameters**: 8B (Qwen3-8B + SigLIP2-400M) PaddleOCR-VL exposes a fully OpenAI-compatible `/v1/chat/completions` endpoint:
- **Capabilities**: Image understanding, OCR, multi-image analysis
- **Languages**: 30+ languages including English, Chinese, French, Spanish
## Docker Compose Example ```bash
curl http://localhost:8000/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{
"model": "paddleocr-vl",
"messages": [{
"role": "user",
"content": [
{"type": "image_url", "image_url": {"url": "data:image/png;base64,<base64>"}},
{"type": "text", "text": "Table Recognition:"}
]
}],
"max_tokens": 8192
}'
```
### Task Prompts
| Prompt | Output | Use Case |
|--------|--------|----------|
| `OCR:` | Plain text | General text extraction |
| `Table Recognition:` | Markdown table | Invoices, bank statements, spreadsheets |
| `Formula Recognition:` | LaTeX | Math equations, scientific notation |
| `Chart Recognition:` | Description | Graphs and visualizations |
### API Endpoints
| Endpoint | Method | Description |
|----------|--------|-------------|
| `/health` | GET | Health check with model/device info |
| `/formats` | GET | Supported image formats and input methods |
| `/v1/models` | GET | List available models |
| `/v1/chat/completions` | POST | OpenAI-compatible chat completions |
| `/ocr` | POST | Legacy OCR endpoint |
### Image Input Methods
PaddleOCR-VL accepts images in multiple formats:
```javascript
// Base64 data URL
"data:image/png;base64,iVBORw0KGgo..."
// HTTP URL
"https://example.com/document.png"
// Raw base64
"iVBORw0KGgo..."
```
**Supported formats:** PNG, JPEG, WebP, BMP, GIF, TIFF
**Optimal resolution:** 1080p2K. Images are automatically scaled for best results.
### Performance
| Mode | Speed per Page |
|------|----------------|
| GPU (CUDA) | 2-5 seconds |
| CPU | 30-60 seconds |
---
## 🐳 Docker Compose
```yaml ```yaml
version: '3.8' version: '3.8'
services: services:
# General vision tasks
minicpm: minicpm:
image: code.foss.global/host.today/ht-docker-ai:minicpm45v image: code.foss.global/host.today/ht-docker-ai:minicpm45v
container_name: minicpm
ports: ports:
- "11434:11434" - "11434:11434"
volumes: volumes:
@@ -124,11 +206,50 @@ services:
capabilities: [gpu] capabilities: [gpu]
restart: unless-stopped restart: unless-stopped
# Document parsing / OCR
paddleocr:
image: code.foss.global/host.today/ht-docker-ai:paddleocr-vl
ports:
- "8000:8000"
volumes:
- hf-cache:/root/.cache/huggingface
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities: [gpu]
restart: unless-stopped
volumes: volumes:
ollama-data: ollama-data:
hf-cache:
``` ```
## Building Locally ---
## ⚙️ Environment Variables
### MiniCPM-V 4.5
| Variable | Default | Description |
|----------|---------|-------------|
| `MODEL_NAME` | `minicpm-v` | Ollama model to pull on startup |
| `OLLAMA_HOST` | `0.0.0.0` | API bind address |
| `OLLAMA_ORIGINS` | `*` | Allowed CORS origins |
### PaddleOCR-VL
| Variable | Default | Description |
|----------|---------|-------------|
| `MODEL_NAME` | `PaddlePaddle/PaddleOCR-VL` | HuggingFace model ID |
| `SERVER_HOST` | `0.0.0.0` | API bind address |
| `SERVER_PORT` | `8000` | API port |
---
## 🔧 Building from Source
```bash ```bash
# Clone the repository # Clone the repository
@@ -142,6 +263,77 @@ cd ht-docker-ai
./test-images.sh ./test-images.sh
``` ```
## License ---
MIT - Task Venture Capital GmbH ## 🏗️ Architecture Notes
### Dual-VLM Consensus Strategy
For production document extraction, consider using both models together:
1. **Pass 1:** MiniCPM-V visual extraction (images → JSON)
2. **Pass 2:** PaddleOCR-VL table recognition (images → markdown → JSON)
3. **Consensus:** If results match → Done (fast path)
4. **Pass 3+:** Additional visual passes if needed
This dual-VLM approach catches extraction errors that single models miss.
### Why This Works
- **Different architectures:** Two independent models cross-validate each other
- **Specialized strengths:** PaddleOCR-VL excels at tables; MiniCPM-V handles general vision
- **Native processing:** Both VLMs see original images—no intermediate HTML/structure loss
---
## 🔍 Troubleshooting
### Model download hangs
```bash
docker logs -f <container-name>
```
Model downloads can take several minutes (~5GB for MiniCPM-V).
### Out of memory
- **GPU:** Use the CPU variant or upgrade VRAM
- **CPU:** Increase container memory: `--memory=16g`
### API not responding
1. Check container health: `docker ps`
2. Review logs: `docker logs <container>`
3. Verify port: `curl localhost:11434/api/tags` or `curl localhost:8000/health`
### Enable NVIDIA GPU support on host
```bash
# Install NVIDIA Container Toolkit
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg
curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \
sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit
sudo nvidia-ctk runtime configure --runtime=docker
sudo systemctl restart docker
```
---
## License and Legal Information
This repository contains open-source code licensed under the MIT License. A copy of the license can be found in the [LICENSE](./LICENSE) file.
**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.
### Trademarks
This project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH or third parties, and are not included within the scope of the MIT license granted herein.
Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines or the guidelines of the respective third-party owners, and any usage must be approved in writing. Third-party trademarks used herein are the property of their respective owners and used only in a descriptive manner, e.g. for an implementation of an API or similar.
### Company Information
Task Venture Capital GmbH
Registered at District Court Bremen HRB 35230 HB, Germany
For any legal inquiries or further information, please contact us via email at hello@task.vc.
By using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.


@@ -1,129 +1,250 @@
# Bank Statement Parsing with MiniCPM-V 4.5 # Document Recognition with Hybrid OCR + Vision AI
Recipe for extracting transactions from bank statement PDFs using vision-language AI. Recipe for extracting structured data from invoices and documents using a hybrid approach:
PaddleOCR for text extraction + MiniCPM-V 4.5 for intelligent parsing.
## Model ## Architecture
- **Model**: MiniCPM-V 4.5 (8B parameters) ```
- **Ollama Name**: `openbmb/minicpm-v4.5:q8_0` ┌──────────────┐ ┌──────────────┐ ┌──────────────┐
- **Quantization**: Q8_0 (9.8GB VRAM) │ PDF/Image │ ───> │ PaddleOCR │ ───> │ Raw Text │
- **Runtime**: Ollama on GPU └──────────────┘ └──────────────┘ └──────┬───────┘
┌──────────────┐ │
│ MiniCPM-V │ <───────────┘
│ 4.5 VLM │ <─── Image
└──────┬───────┘
┌──────▼───────┐
│ Structured │
│ JSON │
└──────────────┘
```
## Why Hybrid?
| Approach | Accuracy | Speed | Best For |
|----------|----------|-------|----------|
| VLM Only | 85-90% | Fast | Simple layouts |
| OCR Only | N/A | Fast | Just text extraction |
| **Hybrid** | **91%+** | Medium | Complex invoices |
The hybrid approach provides OCR text as context to the VLM, improving accuracy on:
- Small text and numbers
- Low contrast documents
- Dense tables
## Services
| Service | Port | Purpose |
|---------|------|---------|
| PaddleOCR | 5000 | Text extraction |
| Ollama (MiniCPM-V) | 11434 | Intelligent parsing |
## Running the Containers
**Start both services:**
```bash
# PaddleOCR (CPU is sufficient for OCR)
docker run -d --name paddleocr -p 5000:5000 \
code.foss.global/host.today/ht-docker-ai:paddleocr-cpu
# MiniCPM-V 4.5 (GPU recommended)
docker run -d --name minicpm --gpus all -p 11434:11434 \
-v ollama-data:/root/.ollama \
code.foss.global/host.today/ht-docker-ai:minicpm45v
```
## Image Conversion ## Image Conversion
Convert PDF to PNG at 300 DPI for optimal OCR accuracy. Convert PDF to PNG at 200 DPI:
```bash ```bash
convert -density 300 -quality 100 input.pdf \ convert -density 200 -quality 90 input.pdf \
-background white -alpha remove \ -background white -alpha remove \
output-%d.png page-%d.png
``` ```
**Parameters:** ## Step 1: Extract OCR Text
- `-density 300`: 300 DPI resolution (critical for accuracy)
- `-quality 100`: Maximum quality
- `-background white -alpha remove`: Remove transparency
- `output-%d.png`: Outputs page-0.png, page-1.png, etc.
**Dependencies:** ```typescript
```bash async function extractOcrText(imageBase64: string): Promise<string> {
apt-get install imagemagick const response = await fetch('http://localhost:5000/ocr', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ image: imageBase64 }),
});
const data = await response.json();
if (data.success && data.results) {
return data.results.map((r: { text: string }) => r.text).join('\n');
}
return '';
}
``` ```
## Prompt ## Step 2: Build Enhanced Prompt
``` ```typescript
You are a bank statement parser. Extract EVERY transaction from the table. function buildPrompt(ocrText: string): string {
const base = `You are an invoice parser. Extract the following fields:
Read the Amount column carefully: 1. invoice_number: The invoice/receipt number
- "- 21,47 €" means DEBIT, output as: -21.47 2. invoice_date: Date in YYYY-MM-DD format
- "+ 1.000,00 €" means CREDIT, output as: 1000.00 3. vendor_name: Company that issued the invoice
- European format: comma = decimal point 4. currency: EUR, USD, etc.
5. net_amount: Amount before tax (if shown)
6. vat_amount: Tax/VAT amount (0 if reverse charge)
7. total_amount: Final amount due
For each row output: {"date":"YYYY-MM-DD","counterparty":"NAME","amount":-21.47} Return ONLY valid JSON:
{"invoice_number":"XXX","invoice_date":"YYYY-MM-DD","vendor_name":"Company","currency":"EUR","net_amount":100.00,"vat_amount":19.00,"total_amount":119.00}`;
Do not skip any rows. Return complete JSON array: if (ocrText) {
return `${base}
OCR text extracted from the invoice:
---
${ocrText}
---
Cross-reference the image with the OCR text above for accuracy.`;
}
return base;
}
``` ```
## API Call ## Step 3: Call Vision-Language Model
```python ```typescript
import base64 async function extractInvoice(images: string[], ocrText: string): Promise<Invoice> {
import requests const payload = {
model: 'openbmb/minicpm-v4.5:q8_0',
prompt: buildPrompt(ocrText),
images, // Base64 encoded
stream: false,
options: {
num_predict: 2048,
temperature: 0.1,
},
};
# Load images const response = await fetch('http://localhost:11434/api/generate', {
with open('page-0.png', 'rb') as f: method: 'POST',
page0 = base64.b64encode(f.read()).decode('utf-8') headers: { 'Content-Type': 'application/json' },
with open('page-1.png', 'rb') as f: body: JSON.stringify(payload),
page1 = base64.b64encode(f.read()).decode('utf-8') });
payload = { const result = await response.json();
"model": "openbmb/minicpm-v4.5:q8_0", return JSON.parse(result.response);
"prompt": prompt, }
"images": [page0, page1], # Multiple pages supported ```
"stream": False,
"options": { ## Consensus Voting
"num_predict": 16384,
"temperature": 0.1 For production reliability, run multiple extraction passes and require consensus:
```typescript
async function extractWithConsensus(images: string[], maxPasses: number = 5): Promise<Invoice> {
const results: Map<string, { invoice: Invoice; count: number }> = new Map();
// Optimization: Run Pass 1 (no OCR) parallel with OCR + Pass 2
const [pass1Result, ocrText] = await Promise.all([
extractInvoice(images, ''),
extractOcrText(images[0]),
]);
// Add Pass 1 result
addResult(results, pass1Result);
// Pass 2 with OCR context
const pass2Result = await extractInvoice(images, ocrText);
addResult(results, pass2Result);
// Check for consensus (2 matching results)
for (const [hash, data] of results) {
if (data.count >= 2) {
return data.invoice; // Consensus reached!
} }
} }
response = requests.post( // Continue until consensus or max passes
'http://localhost:11434/api/generate', for (let pass = 3; pass <= maxPasses; pass++) {
json=payload, const result = await extractInvoice(images, ocrText);
timeout=600 addResult(results, result);
) // Check consensus...
}
result = response.json()['response'] // Return most common result
return getMostCommon(results);
}
function hashInvoice(inv: Invoice): string {
return `${inv.invoice_number}|${inv.invoice_date}|${inv.total_amount.toFixed(2)}`;
}
``` ```
## Output Format ## Output Format
```json ```json
[ {
{"date":"2022-04-01","counterparty":"DIGITALOCEAN.COM","amount":-21.47}, "invoice_number": "INV-2024-001234",
{"date":"2022-04-01","counterparty":"DIGITALOCEAN.COM","amount":-58.06}, "invoice_date": "2024-08-15",
{"date":"2022-04-12","counterparty":"LOSSLESS GMBH","amount":1000.00} "vendor_name": "Hetzner Online GmbH",
] "currency": "EUR",
"net_amount": 167.52,
"vat_amount": 31.83,
"total_amount": 199.35
}
``` ```
## Running the Container
**GPU (recommended):**
```bash
docker run -d --gpus all -p 11434:11434 \
-v ollama-data:/root/.ollama \
-e MODEL_NAME="openbmb/minicpm-v4.5:q8_0" \
ht-docker-ai:minicpm45v
```
**CPU (slower):**
```bash
docker run -d -p 11434:11434 \
-v ollama-data:/root/.ollama \
-e MODEL_NAME="openbmb/minicpm-v4.5:q4_0" \
ht-docker-ai:minicpm45v-cpu
```
## Hardware Requirements
| Quantization | VRAM/RAM | Speed |
|--------------|----------|-------|
| Q8_0 (GPU) | 10GB | Fast |
| Q4_0 (CPU) | 8GB | Slow |
## Test Results ## Test Results
| Statement | Pages | Transactions | Accuracy | Tested on 46 real invoices from various vendors:
|-----------|-------|--------------|----------|
| bunq-2022-04 | 2 | 26 | 100% | | Metric | Value |
| bunq-2021-06 | 3 | 28 | 100% | |--------|-------|
| **Accuracy** | 91.3% (42/46) |
| **Avg Time** | 42.7s per invoice |
| **Consensus Rate** | 85% in 2 passes |
### Per-Vendor Results
| Vendor | Invoices | Accuracy |
|--------|----------|----------|
| Hetzner | 3 | 100% |
| DigitalOcean | 4 | 100% |
| Adobe | 3 | 100% |
| Cloudflare | 1 | 100% |
| Wasabi | 4 | 100% |
| Figma | 3 | 100% |
| Google Cloud | 1 | 100% |
| MongoDB | 3 | 0% (date parsing) |
## Hardware Requirements
| Component | Minimum | Recommended |
|-----------|---------|-------------|
| PaddleOCR (CPU) | 4GB RAM | 8GB RAM |
| MiniCPM-V (GPU) | 10GB VRAM | 12GB VRAM |
| MiniCPM-V (CPU) | 16GB RAM | 32GB RAM |
## Tips ## Tips
1. **DPI matters**: 150 DPI causes missed rows; 300 DPI is optimal 1. **Use hybrid approach**: OCR text dramatically improves number/date accuracy
2. **PNG over JPEG**: PNG preserves text clarity better 2. **Consensus voting**: Run 2-5 passes to catch hallucinations
3. **Remove alpha**: Some models struggle with transparency 3. **200 DPI is optimal**: Higher doesn't help, lower loses detail
4. **Multi-page**: Pass all pages in single request for context 4. **PNG over JPEG**: Preserves text clarity
5. **Temperature 0.1**: Low temperature for consistent output 5. **Temperature 0.1**: Low temperature for consistent output
6. **European format**: Explicitly explain comma=decimal in prompt 6. **Multi-page support**: Pass all pages in single request for context
7. **Normalize for comparison**: Ignore case/whitespace when comparing invoice numbers
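A small sketch of the normalization from tip 7 (the exact set of characters to strip is an assumption; adjust per vendor):
```typescript
// Normalize invoice numbers before comparing consensus passes:
// ignore case, whitespace, and common separator characters.
function normalizeInvoiceNumber(value: string): string {
  return value.toLowerCase().replace(/[\s\-_./]/g, '');
}

// normalizeInvoiceNumber('INV-2024 001234') === normalizeInvoiceNumber('inv.2024-001234') // true
```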
## Common Issues
| Issue | Cause | Solution |
|-------|-------|----------|
| Wrong date | Multiple dates on invoice | Be specific in prompt about which date |
| Wrong currency | Symbol vs code mismatch | OCR helps disambiguate |
| Missing digits | Low resolution | Increase density to 300 DPI |
| Hallucinated data | VLM uncertainty | Use consensus voting |

test/helpers/docker.ts (new file)

@@ -0,0 +1,385 @@
import { execSync } from 'child_process';
// Project container names (only manage these)
const PROJECT_CONTAINERS = [
'paddleocr-vl-test',
'paddleocr-vl-gpu-test',
'paddleocr-vl-cpu-test',
'paddleocr-vl-full-test',
'minicpm-test',
];
// Image configurations
export interface IImageConfig {
name: string;
dockerfile: string;
buildContext: string;
containerName: string;
ports: string[];
volumes?: string[];
gpus?: boolean;
healthEndpoint?: string;
healthTimeout?: number;
}
export const IMAGES = {
paddleocrVlGpu: {
name: 'paddleocr-vl-gpu',
dockerfile: 'Dockerfile_paddleocr_vl_gpu',
buildContext: '.',
containerName: 'paddleocr-vl-test',
ports: ['8000:8000'],
volumes: ['ht-huggingface-cache:/root/.cache/huggingface'],
gpus: true,
healthEndpoint: 'http://localhost:8000/health',
healthTimeout: 300000, // 5 minutes for model loading
} as IImageConfig,
paddleocrVlCpu: {
name: 'paddleocr-vl-cpu',
dockerfile: 'Dockerfile_paddleocr_vl_cpu',
buildContext: '.',
containerName: 'paddleocr-vl-test',
ports: ['8000:8000'],
volumes: ['ht-huggingface-cache:/root/.cache/huggingface'],
gpus: false,
healthEndpoint: 'http://localhost:8000/health',
healthTimeout: 300000,
} as IImageConfig,
minicpm: {
name: 'minicpm45v',
dockerfile: 'Dockerfile_minicpm45v_gpu',
buildContext: '.',
containerName: 'minicpm-test',
ports: ['11434:11434'],
volumes: ['ht-ollama-models:/root/.ollama'],
gpus: true,
healthEndpoint: 'http://localhost:11434/api/tags',
healthTimeout: 120000,
} as IImageConfig,
// Full PaddleOCR-VL pipeline with PP-DocLayoutV2 + structured JSON output
paddleocrVlFull: {
name: 'paddleocr-vl-full',
dockerfile: 'Dockerfile_paddleocr_vl_full',
buildContext: '.',
containerName: 'paddleocr-vl-full-test',
ports: ['8000:8000'],
volumes: [
'ht-huggingface-cache:/root/.cache/huggingface',
'ht-paddleocr-cache:/root/.paddleocr',
],
gpus: true,
healthEndpoint: 'http://localhost:8000/health',
healthTimeout: 600000, // 10 minutes for model loading (vLLM + PP-DocLayoutV2)
} as IImageConfig,
};
/**
* Execute a shell command and return output
*/
function exec(command: string, silent = false): string {
try {
return execSync(command, {
encoding: 'utf-8',
stdio: silent ? 'pipe' : 'inherit',
});
} catch (err: unknown) {
if (silent) return '';
throw err;
}
}
/**
* Check if a Docker image exists locally
*/
export function imageExists(imageName: string): boolean {
const result = exec(`docker images -q ${imageName}`, true);
return result.trim().length > 0;
}
/**
* Check if a container is running
*/
export function isContainerRunning(containerName: string): boolean {
const result = exec(`docker ps --filter "name=^${containerName}$" --format "{{.Names}}"`, true);
return result.trim() === containerName;
}
/**
* Check if a container exists (running or stopped)
*/
export function containerExists(containerName: string): boolean {
const result = exec(`docker ps -a --filter "name=^${containerName}$" --format "{{.Names}}"`, true);
return result.trim() === containerName;
}
/**
* Stop and remove a container
*/
export function removeContainer(containerName: string): void {
if (containerExists(containerName)) {
console.log(`[Docker] Removing container: ${containerName}`);
exec(`docker rm -f ${containerName}`, true);
}
}
/**
* Stop all project containers that conflict with the required one
*/
export function stopConflictingContainers(requiredContainer: string, requiredPort: string): void {
// Stop project containers using the same port
for (const container of PROJECT_CONTAINERS) {
if (container === requiredContainer) continue;
if (isContainerRunning(container)) {
// Check if this container uses the same port
const ports = exec(`docker port ${container} 2>/dev/null || true`, true);
if (ports.includes(requiredPort.split(':')[0])) {
console.log(`[Docker] Stopping conflicting container: ${container}`);
exec(`docker stop ${container}`, true);
}
}
}
}
/**
* Build a Docker image
*/
export function buildImage(config: IImageConfig): void {
console.log(`[Docker] Building image: ${config.name}`);
const cmd = `docker build --load -f ${config.dockerfile} -t ${config.name} ${config.buildContext}`;
exec(cmd);
}
/**
* Start a container from an image
*/
export function startContainer(config: IImageConfig): void {
// Remove existing container if it exists
removeContainer(config.containerName);
console.log(`[Docker] Starting container: ${config.containerName}`);
const portArgs = config.ports.map((p) => `-p ${p}`).join(' ');
const volumeArgs = config.volumes?.map((v) => `-v ${v}`).join(' ') || '';
const gpuArgs = config.gpus ? '--gpus all' : '';
const cmd = `docker run -d --name ${config.containerName} ${gpuArgs} ${portArgs} ${volumeArgs} ${config.name}`;
exec(cmd);
}
/**
* Wait for a container to become healthy
*/
export async function waitForHealth(
endpoint: string,
timeoutMs: number = 120000,
intervalMs: number = 5000
): Promise<boolean> {
const startTime = Date.now();
console.log(`[Docker] Waiting for health: ${endpoint}`);
while (Date.now() - startTime < timeoutMs) {
try {
const response = await fetch(endpoint, {
method: 'GET',
signal: AbortSignal.timeout(5000),
});
if (response.ok) {
console.log(`[Docker] Service healthy!`);
return true;
}
} catch {
// Service not ready yet
}
const elapsed = Math.round((Date.now() - startTime) / 1000);
console.log(`[Docker] Waiting... (${elapsed}s)`);
await new Promise((resolve) => setTimeout(resolve, intervalMs));
}
console.log(`[Docker] Health check timeout after ${timeoutMs / 1000}s`);
return false;
}
/**
* Ensure a service is running and healthy
* - Builds image if missing
* - Stops conflicting project containers
* - Starts container if not running
* - Waits for health check
*/
export async function ensureService(config: IImageConfig): Promise<boolean> {
console.log(`\n[Docker] Ensuring service: ${config.name}`);
// Build image if it doesn't exist
if (!imageExists(config.name)) {
console.log(`[Docker] Image not found, building...`);
buildImage(config);
}
// Stop conflicting containers on the same port
const mainPort = config.ports[0];
stopConflictingContainers(config.containerName, mainPort);
// Start container if not running
if (!isContainerRunning(config.containerName)) {
startContainer(config);
} else {
console.log(`[Docker] Container already running: ${config.containerName}`);
}
// Wait for health
if (config.healthEndpoint) {
return waitForHealth(config.healthEndpoint, config.healthTimeout);
}
return true;
}
/**
* Ensure PaddleOCR-VL GPU service is running
*/
export async function ensurePaddleOcrVlGpu(): Promise<boolean> {
return ensureService(IMAGES.paddleocrVlGpu);
}
/**
* Ensure PaddleOCR-VL CPU service is running
*/
export async function ensurePaddleOcrVlCpu(): Promise<boolean> {
return ensureService(IMAGES.paddleocrVlCpu);
}
/**
* Ensure MiniCPM service is running
*/
export async function ensureMiniCpm(): Promise<boolean> {
return ensureService(IMAGES.minicpm);
}
/**
* Check if GPU is available
*/
export function isGpuAvailable(): boolean {
try {
const result = exec('nvidia-smi --query-gpu=name --format=csv,noheader 2>/dev/null', true);
return result.trim().length > 0;
} catch {
return false;
}
}
/**
* Ensure PaddleOCR-VL service (auto-detect GPU/CPU)
*/
export async function ensurePaddleOcrVl(): Promise<boolean> {
if (isGpuAvailable()) {
console.log('[Docker] GPU detected, using GPU image');
return ensurePaddleOcrVlGpu();
} else {
console.log('[Docker] No GPU detected, using CPU image');
return ensurePaddleOcrVlCpu();
}
}
/**
* Ensure PaddleOCR-VL Full Pipeline service (PP-DocLayoutV2 + structured output)
* This is the recommended service for production use - outputs structured JSON/Markdown
*/
export async function ensurePaddleOcrVlFull(): Promise<boolean> {
if (!isGpuAvailable()) {
console.log('[Docker] WARNING: Full pipeline requires GPU, but none detected');
}
return ensureService(IMAGES.paddleocrVlFull);
}
/**
* Ensure an Ollama model is pulled and available
* Uses the MiniCPM container (which runs Ollama) to pull the model
*/
export async function ensureOllamaModel(modelName: string): Promise<boolean> {
const OLLAMA_URL = 'http://localhost:11434';
console.log(`\n[Ollama] Ensuring model: ${modelName}`);
// Check if model exists
try {
const response = await fetch(`${OLLAMA_URL}/api/tags`);
if (response.ok) {
const data = await response.json();
const models = data.models || [];
// Exact match required - don't match on prefix
const exists = models.some((m: { name: string }) => m.name === modelName);
if (exists) {
console.log(`[Ollama] Model already available: ${modelName}`);
return true;
}
}
} catch {
console.log(`[Ollama] Cannot check models, Ollama may not be running`);
return false;
}
// Pull the model
console.log(`[Ollama] Pulling model: ${modelName} (this may take a while)...`);
try {
const response = await fetch(`${OLLAMA_URL}/api/pull`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ name: modelName, stream: false }),
});
if (response.ok) {
console.log(`[Ollama] Model pulled successfully: ${modelName}`);
return true;
} else {
console.log(`[Ollama] Failed to pull model: ${response.status}`);
return false;
}
} catch (err) {
console.log(`[Ollama] Error pulling model: ${err}`);
return false;
}
}
/**
* Ensure Qwen2.5 7B model is available (for text-only JSON extraction)
*/
export async function ensureQwen25(): Promise<boolean> {
// First ensure the Ollama service (MiniCPM container) is running
const ollamaOk = await ensureMiniCpm();
if (!ollamaOk) return false;
// Then ensure the Qwen2.5 model is pulled
return ensureOllamaModel('qwen2.5:7b');
}
/**
* Ensure Ministral 3 8B model is available (for structured JSON extraction)
* Ministral 3 has native JSON output support and OCR-style document extraction
*/
export async function ensureMinistral3(): Promise<boolean> {
// First ensure the Ollama service (MiniCPM container) is running
const ollamaOk = await ensureMiniCpm();
if (!ollamaOk) return false;
// Then ensure the Ministral 3 8B model is pulled
return ensureOllamaModel('ministral-3:8b');
}
/**
* Ensure Qwen3-VL 8B model is available (vision-language model)
* Q4_K_M quantization (~5GB) - fits in 15GB VRAM with room to spare
*/
export async function ensureQwen3Vl(): Promise<boolean> {
// First ensure the Ollama service is running
const ollamaOk = await ensureMiniCpm();
if (!ollamaOk) return false;
// Then ensure Qwen3-VL 8B is pulled
return ensureOllamaModel('qwen3-vl:8b');
}


@@ -0,0 +1,536 @@
/**
* Bank statement extraction using MiniCPM-V (visual extraction)
*
* JSON per-page approach:
* 1. Ask for structured JSON of all transactions per page
* 2. Consensus: extract twice, compare, retry if mismatch
*/
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensureMiniCpm } from './helpers/docker.js';
const OLLAMA_URL = 'http://localhost:11434';
const MODEL = 'openbmb/minicpm-v4.5:q8_0';
interface ITransaction {
date: string;
counterparty: string;
amount: number;
}
const JSON_PROMPT = `Extract ALL transactions from this bank statement page as a JSON array.
IMPORTANT RULES:
1. Each transaction has: date, description/counterparty, and an amount
2. Amount is NEGATIVE for money going OUT (debits, payments, withdrawals)
3. Amount is POSITIVE for money coming IN (credits, deposits, refunds)
4. Date format: YYYY-MM-DD
5. Do NOT include: opening balance, closing balance, subtotals, headers, or summary rows
6. Only include actual transactions with a specific date and amount
Return ONLY this JSON format, no explanation:
[
{"date": "2021-06-01", "counterparty": "COMPANY NAME", "amount": -25.99},
{"date": "2021-06-02", "counterparty": "DEPOSIT FROM", "amount": 100.00}
]`;
/**
* Convert PDF to PNG images using ImageMagick
*/
function convertPdfToImages(pdfPath: string): string[] {
const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
const outputPattern = path.join(tempDir, 'page-%d.png');
try {
execSync(
`convert -density 300 -quality 100 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
{ stdio: 'pipe' }
);
const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.png')).sort();
const images: string[] = [];
for (const file of files) {
const imagePath = path.join(tempDir, file);
const imageData = fs.readFileSync(imagePath);
images.push(imageData.toString('base64'));
}
return images;
} finally {
fs.rmSync(tempDir, { recursive: true, force: true });
}
}
/**
* Query for JSON extraction
*/
async function queryJson(image: string, queryId: string): Promise<string> {
console.log(` [${queryId}] Sending request to ${MODEL}...`);
const startTime = Date.now();
const response = await fetch(`${OLLAMA_URL}/api/chat`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model: MODEL,
messages: [{
role: 'user',
content: JSON_PROMPT,
images: [image],
}],
stream: false,
options: {
num_predict: 4000,
temperature: 0.1,
},
}),
});
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
if (!response.ok) {
console.log(` [${queryId}] ERROR: ${response.status} (${elapsed}s)`);
throw new Error(`Ollama API error: ${response.status}`);
}
const data = await response.json();
const content = (data.message?.content || '').trim();
console.log(` [${queryId}] Response received (${elapsed}s, ${content.length} chars)`);
return content;
}
/**
* Sanitize JSON string - fix common issues from vision model output
*/
function sanitizeJson(jsonStr: string): string {
let s = jsonStr;
// Fix +number (e.g., +93.80 -> 93.80) - JSON doesn't allow + prefix
// Handle various whitespace patterns
s = s.replace(/"amount"\s*:\s*\+/g, '"amount": ');
s = s.replace(/:\s*\+(\d)/g, ': $1');
// Fix European number format with thousands separator (e.g., 1.000.00 -> 1000.00)
// Pattern: "amount": X.XXX.XX where X.XXX is thousands and .XX is decimal
s = s.replace(/"amount"\s*:\s*(-?)(\d{1,3})\.(\d{3})\.(\d{2})\b/g, '"amount": $1$2$3.$4');
// Also handle larger numbers like 10.000.00
s = s.replace(/"amount"\s*:\s*(-?)(\d{1,3})\.(\d{3})\.(\d{3})\.(\d{2})\b/g, '"amount": $1$2$3$4.$5');
// Fix trailing commas before ] or }
s = s.replace(/,\s*([}\]])/g, '$1');
// Fix unescaped newlines inside strings (replace with space)
s = s.replace(/"([^"\\]*)\n([^"]*)"/g, '"$1 $2"');
// Fix unescaped tabs inside strings
s = s.replace(/"([^"\\]*)\t([^"]*)"/g, '"$1 $2"');
// Fix unescaped backslashes (but not already escaped ones)
s = s.replace(/\\(?!["\\/bfnrtu])/g, '\\\\');
// Fix common issues with counterparty names containing special chars
s = s.replace(/"counterparty":\s*"([^"]*)'([^"]*)"/g, '"counterparty": "$1$2"');
// Remove control characters except newlines (which we handle above)
s = s.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F]/g, ' ');
return s;
}
/**
* Parse JSON response into transactions
*/
function parseJsonResponse(response: string, queryId: string): ITransaction[] {
console.log(` [${queryId}] Parsing response...`);
// Try to find JSON in markdown code block
const codeBlockMatch = response.match(/```(?:json)?\s*([\s\S]*?)```/);
let jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : response.trim();
if (codeBlockMatch) {
console.log(` [${queryId}] Found JSON in code block`);
}
// Sanitize JSON (fix +number issue)
jsonStr = sanitizeJson(jsonStr);
try {
const parsed = JSON.parse(jsonStr);
if (Array.isArray(parsed)) {
const txs = parsed.map(tx => ({
date: String(tx.date || ''),
counterparty: String(tx.counterparty || tx.description || ''),
amount: parseAmount(tx.amount),
}));
console.log(` [${queryId}] Parsed ${txs.length} transactions (direct)`);
return txs;
}
console.log(` [${queryId}] Parsed JSON is not an array`);
} catch (e) {
const errMsg = (e as Error).message;
console.log(` [${queryId}] Direct parse failed: ${errMsg}`);
// Log problematic section with context
const posMatch = errMsg.match(/position (\d+)/);
if (posMatch) {
const pos = parseInt(posMatch[1]);
const start = Math.max(0, pos - 40);
const end = Math.min(jsonStr.length, pos + 40);
const context = jsonStr.substring(start, end);
const marker = ' '.repeat(pos - start) + '^';
console.log(` [${queryId}] Context around error position ${pos}:`);
console.log(` [${queryId}] ...${context}...`);
console.log(` [${queryId}] ${marker}`);
}
// Try to find JSON array pattern
const arrayMatch = jsonStr.match(/\[[\s\S]*\]/);
if (arrayMatch) {
console.log(` [${queryId}] Found array pattern, trying to parse...`);
const sanitizedArray = sanitizeJson(arrayMatch[0]);
try {
const parsed = JSON.parse(sanitizedArray);
if (Array.isArray(parsed)) {
const txs = parsed.map(tx => ({
date: String(tx.date || ''),
counterparty: String(tx.counterparty || tx.description || ''),
amount: parseAmount(tx.amount),
}));
console.log(` [${queryId}] Parsed ${txs.length} transactions (array match)`);
return txs;
}
} catch (e2) {
const errMsg2 = (e2 as Error).message;
console.log(` [${queryId}] Array parse failed: ${errMsg2}`);
const posMatch2 = errMsg2.match(/position (\d+)/);
if (posMatch2) {
const pos2 = parseInt(posMatch2[1]);
console.log(` [${queryId}] Context around error: ...${sanitizedArray.substring(Math.max(0, pos2 - 30), pos2 + 30)}...`);
}
// Try to extract individual objects from the malformed array
console.log(` [${queryId}] Attempting object-by-object extraction...`);
const extracted = extractTransactionsFromMalformedJson(sanitizedArray, queryId);
if (extracted.length > 0) {
console.log(` [${queryId}] Recovered ${extracted.length} transactions via object extraction`);
return extracted;
}
}
} else {
console.log(` [${queryId}] No array pattern found in response`);
console.log(` [${queryId}] Raw response preview: ${response.substring(0, 200)}...`);
}
}
console.log(` [${queryId}] PARSE FAILED - returning empty array`);
return [];
}
/**
* Extract transactions from malformed JSON by parsing objects individually
*/
function extractTransactionsFromMalformedJson(jsonStr: string, queryId: string): ITransaction[] {
const transactions: ITransaction[] = [];
// Match individual transaction objects
const objectPattern = /\{\s*"date"\s*:\s*"([^"]+)"\s*,\s*"counterparty"\s*:\s*"([^"]+)"\s*,\s*"amount"\s*:\s*([+-]?\d+\.?\d*)\s*\}/g;
let match;
while ((match = objectPattern.exec(jsonStr)) !== null) {
transactions.push({
date: match[1],
counterparty: match[2],
amount: parseFloat(match[3]),
});
}
// Also try with different field orders (amount before counterparty, etc.)
if (transactions.length === 0) {
const altPattern = /\{\s*"date"\s*:\s*"([^"]+)"[^}]*"amount"\s*:\s*([+-]?\d+\.?\d*)[^}]*\}/g;
while ((match = altPattern.exec(jsonStr)) !== null) {
// Try to extract counterparty from the match
const counterpartyMatch = match[0].match(/"counterparty"\s*:\s*"([^"]+)"/);
const descMatch = match[0].match(/"description"\s*:\s*"([^"]+)"/);
transactions.push({
date: match[1],
counterparty: counterpartyMatch?.[1] || descMatch?.[1] || 'UNKNOWN',
amount: parseFloat(match[2]),
});
}
}
return transactions;
}
/**
* Parse amount from various formats
*/
function parseAmount(value: unknown): number {
if (typeof value === 'number') return value;
if (typeof value !== 'string') return 0;
// Strip currency symbols/whitespace and normalize Unicode minus and en dash to ASCII hyphen
let s = value.replace(/[€$£\s]/g, '').replace('−', '-').replace('–', '-');
// European format: comma is decimal
if (s.includes(',') && s.indexOf(',') > s.lastIndexOf('.')) {
s = s.replace(/\./g, '').replace(',', '.');
} else {
s = s.replace(/,/g, '');
}
return parseFloat(s) || 0;
}
/**
* Compare two transaction arrays for consensus
*/
function transactionArraysMatch(a: ITransaction[], b: ITransaction[]): boolean {
if (a.length !== b.length) return false;
for (let i = 0; i < a.length; i++) {
const dateMatch = a[i].date === b[i].date;
const amountMatch = Math.abs(a[i].amount - b[i].amount) < 0.01;
if (!dateMatch || !amountMatch) return false;
}
return true;
}
/**
* Compare two transaction arrays and log differences
*/
function compareAndLogDifferences(txs1: ITransaction[], txs2: ITransaction[], pageNum: number): void {
if (txs1.length !== txs2.length) {
console.log(` [Page ${pageNum}] Length mismatch: Q1=${txs1.length}, Q2=${txs2.length}`);
return;
}
for (let i = 0; i < txs1.length; i++) {
const dateMatch = txs1[i].date === txs2[i].date;
const amountMatch = Math.abs(txs1[i].amount - txs2[i].amount) < 0.01;
if (!dateMatch || !amountMatch) {
console.log(` [Page ${pageNum}] Tx ${i + 1} differs:`);
console.log(` Q1: ${txs1[i].date} | ${txs1[i].amount}`);
console.log(` Q2: ${txs2[i].date} | ${txs2[i].amount}`);
}
}
}
/**
* Extract transactions from a single page with consensus
*/
async function extractTransactionsFromPage(image: string, pageNum: number): Promise<ITransaction[]> {
const MAX_ATTEMPTS = 5;
console.log(`\n ======== Page ${pageNum} ========`);
console.log(` [Page ${pageNum}] Starting JSON extraction...`);
for (let attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
console.log(`\n [Page ${pageNum}] --- Attempt ${attempt}/${MAX_ATTEMPTS} ---`);
// Extract twice in parallel
const q1Id = `P${pageNum}A${attempt}Q1`;
const q2Id = `P${pageNum}A${attempt}Q2`;
const [response1, response2] = await Promise.all([
queryJson(image, q1Id),
queryJson(image, q2Id),
]);
const txs1 = parseJsonResponse(response1, q1Id);
const txs2 = parseJsonResponse(response2, q2Id);
console.log(` [Page ${pageNum}] Results: Q1=${txs1.length} txs, Q2=${txs2.length} txs`);
if (txs1.length > 0 && transactionArraysMatch(txs1, txs2)) {
console.log(` [Page ${pageNum}] ✓ CONSENSUS REACHED: ${txs1.length} transactions`);
console.log(` [Page ${pageNum}] Transactions:`);
for (let i = 0; i < txs1.length; i++) {
const tx = txs1[i];
console.log(` ${(i + 1).toString().padStart(2)}. ${tx.date} | ${tx.counterparty.substring(0, 30).padEnd(30)} | ${tx.amount >= 0 ? '+' : ''}${tx.amount.toFixed(2)}`);
}
return txs1;
}
console.log(` [Page ${pageNum}] ✗ NO CONSENSUS`);
compareAndLogDifferences(txs1, txs2, pageNum);
if (attempt < MAX_ATTEMPTS) {
console.log(` [Page ${pageNum}] Retrying...`);
}
}
// Fallback: use last response
console.log(`\n [Page ${pageNum}] === FALLBACK (no consensus after ${MAX_ATTEMPTS} attempts) ===`);
const fallbackId = `P${pageNum}FALLBACK`;
const fallbackResponse = await queryJson(image, fallbackId);
const fallback = parseJsonResponse(fallbackResponse, fallbackId);
console.log(` [Page ${pageNum}] ~ FALLBACK RESULT: ${fallback.length} transactions`);
for (let i = 0; i < fallback.length; i++) {
const tx = fallback[i];
console.log(` ${(i + 1).toString().padStart(2)}. ${tx.date} | ${tx.counterparty.substring(0, 30).padEnd(30)} | ${tx.amount >= 0 ? '+' : ''}${tx.amount.toFixed(2)}`);
}
return fallback;
}
/**
* Extract all transactions from bank statement
*/
async function extractTransactions(images: string[]): Promise<ITransaction[]> {
console.log(` [Vision] Processing ${images.length} page(s) with ${MODEL} (JSON consensus)`);
const allTransactions: ITransaction[] = [];
for (let i = 0; i < images.length; i++) {
const pageTransactions = await extractTransactionsFromPage(images[i], i + 1);
allTransactions.push(...pageTransactions);
}
console.log(` [Vision] Total: ${allTransactions.length} transactions`);
return allTransactions;
}
/**
* Compare extracted transactions against expected
*/
function compareTransactions(
extracted: ITransaction[],
expected: ITransaction[]
): { matches: number; total: number; errors: string[]; variations: string[] } {
const errors: string[] = [];
const variations: string[] = [];
let matches = 0;
for (let i = 0; i < expected.length; i++) {
const exp = expected[i];
const ext = extracted[i];
if (!ext) {
errors.push(`Missing transaction ${i}: ${exp.date} ${exp.counterparty}`);
continue;
}
const dateMatch = ext.date === exp.date;
const amountMatch = Math.abs(ext.amount - exp.amount) < 0.01;
if (dateMatch && amountMatch) {
matches++;
// Track counterparty variations (date and amount match but name differs)
if (ext.counterparty !== exp.counterparty) {
variations.push(
`[${i}] "${exp.counterparty}" → "${ext.counterparty}"`
);
}
} else {
errors.push(
`Mismatch at ${i}: expected ${exp.date}/${exp.amount}, got ${ext.date}/${ext.amount}`
);
}
}
if (extracted.length > expected.length) {
errors.push(`Extra transactions: ${extracted.length - expected.length}`);
}
return { matches, total: expected.length, errors, variations };
}
/**
* Find all test cases (PDF + JSON pairs) in .nogit/
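* e.g. .nogit/statement-jan.pdf pairs with .nogit/statement-jan.json (illustrative file names).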
*/
function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: string }> {
const testDir = path.join(process.cwd(), '.nogit');
if (!fs.existsSync(testDir)) {
return [];
}
const files = fs.readdirSync(testDir);
const pdfFiles = files.filter((f: string) => f.endsWith('.pdf'));
const testCases: Array<{ name: string; pdfPath: string; jsonPath: string }> = [];
for (const pdf of pdfFiles) {
const baseName = pdf.replace('.pdf', '');
const jsonFile = `${baseName}.json`;
if (files.includes(jsonFile)) {
testCases.push({
name: baseName,
pdfPath: path.join(testDir, pdf),
jsonPath: path.join(testDir, jsonFile),
});
}
}
return testCases.sort((a, b) => a.name.localeCompare(b.name));
}
// Tests
tap.test('setup: ensure Docker containers are running', async () => {
console.log('\n[Setup] Checking Docker containers...\n');
const minicpmOk = await ensureMiniCpm();
expect(minicpmOk).toBeTrue();
console.log('\n[Setup] All containers ready!\n');
});
tap.test('should have MiniCPM-V model loaded', async () => {
const response = await fetch(`${OLLAMA_URL}/api/tags`);
const data = await response.json();
const modelNames = data.models.map((m: { name: string }) => m.name);
expect(modelNames.some((name: string) => name.includes('minicpm'))).toBeTrue();
});
const testCases = findTestCases();
console.log(`\nFound ${testCases.length} bank statement test cases (MiniCPM-V)\n`);
let passedCount = 0;
let failedCount = 0;
for (const testCase of testCases) {
tap.test(`should extract: ${testCase.name}`, async () => {
const expected: ITransaction[] = JSON.parse(fs.readFileSync(testCase.jsonPath, 'utf-8'));
console.log(`\n=== ${testCase.name} ===`);
console.log(`Expected: ${expected.length} transactions`);
const images = convertPdfToImages(testCase.pdfPath);
console.log(` Pages: ${images.length}`);
const extracted = await extractTransactions(images);
console.log(` Extracted: ${extracted.length} transactions`);
const result = compareTransactions(extracted, expected);
const perfectMatch = result.matches === result.total && extracted.length === expected.length;
if (perfectMatch) {
passedCount++;
console.log(` Result: PASS (${result.matches}/${result.total})`);
} else {
failedCount++;
console.log(` Result: FAIL (${result.matches}/${result.total})`);
result.errors.slice(0, 10).forEach((e) => console.log(` - ${e}`));
}
// Log counterparty variations (names that differ but date/amount matched)
if (result.variations.length > 0) {
console.log(` Counterparty variations (${result.variations.length}):`);
result.variations.forEach((v) => console.log(` ${v}`));
}
expect(result.matches).toEqual(result.total);
expect(extracted.length).toEqual(expected.length);
});
}
tap.test('summary', async () => {
const total = testCases.length;
console.log(`\n======================================================`);
console.log(` Bank Statement Summary (${MODEL})`);
console.log(`======================================================`);
console.log(` Method: JSON per-page + consensus`);
console.log(` Passed: ${passedCount}/${total}`);
console.log(` Failed: ${failedCount}/${total}`);
console.log(`======================================================\n`);
});
export default tap.start();

View File

@@ -0,0 +1,345 @@
/**
* Bank statement extraction using Qwen3-VL 8B Vision (Direct)
*
* Multi-query approach:
* 1. First ask how many transactions on each page
* 2. Then query each transaction individually
* Single pass, no consensus voting.
*/
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensureMiniCpm } from './helpers/docker.js';
const OLLAMA_URL = 'http://localhost:11434';
const VISION_MODEL = 'qwen3-vl:8b';
interface ITransaction {
date: string;
counterparty: string;
amount: number;
}
/**
* Convert PDF to PNG images
*/
function convertPdfToImages(pdfPath: string): string[] {
const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
const outputPattern = path.join(tempDir, 'page-%d.png');
try {
execSync(
`convert -density 150 -quality 90 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
{ stdio: 'pipe' }
);
const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.png')).sort();
const images: string[] = [];
for (const file of files) {
const imagePath = path.join(tempDir, file);
const imageData = fs.readFileSync(imagePath);
images.push(imageData.toString('base64'));
}
return images;
} finally {
fs.rmSync(tempDir, { recursive: true, force: true });
}
}
/**
* Query Qwen3-VL with a simple prompt
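* Sends a single-image request to the non-streaming Ollama /api/chat endpoint and returns data.message.content.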
*/
async function queryVision(image: string, prompt: string): Promise<string> {
const response = await fetch(`${OLLAMA_URL}/api/chat`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model: VISION_MODEL,
messages: [{
role: 'user',
content: prompt,
images: [image],
}],
stream: false,
options: {
num_predict: 500,
temperature: 0.1,
},
}),
});
if (!response.ok) {
throw new Error(`Ollama API error: ${response.status}`);
}
const data = await response.json();
return (data.message?.content || '').trim();
}
/**
* Count transactions on a page
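* Example (illustrative): a reply such as "There are 7 data rows." parses to 7 via the first number match.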
*/
async function countTransactions(image: string, pageNum: number): Promise<number> {
const response = await queryVision(image,
`How many transaction rows are in this bank statement table?
Count only the data rows (with dates like "01.01.2024" and amounts like "- 50,00 €").
Do NOT count the header row or summary/total rows.
Answer with just the number, for example: 7`
);
console.log(` [Page ${pageNum}] Count query response: "${response}"`);
const match = response.match(/(\d+)/);
const count = match ? parseInt(match[1], 10) : 0;
console.log(` [Page ${pageNum}] Parsed count: ${count}`);
return count;
}
/**
* Get a single transaction by index (logs immediately when complete)
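* The model is asked to answer as DATE|COUNTERPARTY|AMOUNT, e.g. "2024-01-15|Amazon|25.99"; the last line of the reply containing '|' is parsed.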
*/
async function getTransaction(image: string, index: number, pageNum: number): Promise<ITransaction | null> {
const response = await queryVision(image,
`This is a bank statement. Look at transaction row #${index} in the table (counting from top, excluding headers).
Extract this transaction's details:
- Date in YYYY-MM-DD format
- Counterparty/description name
- Amount as number (negative for debits like "- 21,47 €" = -21.47, positive for credits like "+ 100,00 €" = 100.00)
Answer in format: DATE|COUNTERPARTY|AMOUNT
Example: 2024-01-15|Amazon|25.99`
);
// Parse the response
const lines = response.split('\n').filter(l => l.includes('|'));
const line = lines[lines.length - 1] || response;
const parts = line.split('|').map(p => p.trim());
if (parts.length >= 3) {
// Parse amount - handle various formats
let amountStr = parts[2].replace(/[€$£\s]/g, '').replace(/[−–]/g, '-'); // normalize Unicode minus/en-dash to ASCII hyphen
// European format: comma is decimal
if (amountStr.includes(',')) {
amountStr = amountStr.replace(/\./g, '').replace(',', '.');
}
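// e.g. "- 1.234,56 €" -> "-1234.56" -> -1234.56 (illustrative)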
const amount = parseFloat(amountStr) || 0;
const tx = {
date: parts[0],
counterparty: parts[1],
amount: amount,
};
// Log immediately as this transaction completes
console.log(` [P${pageNum} Tx${index.toString().padStart(2, ' ')}] ${tx.date} | ${tx.counterparty.substring(0, 25).padEnd(25)} | ${tx.amount >= 0 ? '+' : ''}${tx.amount.toFixed(2)}`);
return tx;
}
// Log raw response on parse failure
console.log(` [P${pageNum} Tx${index.toString().padStart(2, ' ')}] PARSE FAILED: "${response.replace(/\n/g, ' ').substring(0, 60)}..."`);
return null;
}
/**
* Extract transactions from a single page using multi-query approach
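* Step 1 counts the rows; step 2 fetches rows 1..count in parallel batches of five, logging each transaction as it completes.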
*/
async function extractTransactionsFromPage(image: string, pageNum: number): Promise<ITransaction[]> {
// Step 1: Count transactions
const count = await countTransactions(image, pageNum);
if (count === 0) {
return [];
}
// Step 2: Query each transaction (in batches, to avoid overwhelming the Ollama server)
// Each transaction logs itself as it completes
const transactions: ITransaction[] = [];
const batchSize = 5;
for (let start = 1; start <= count; start += batchSize) {
const end = Math.min(start + batchSize - 1, count);
const indices = Array.from({ length: end - start + 1 }, (_, i) => start + i);
// Query batch in parallel - each logs as it completes
const results = await Promise.all(
indices.map(i => getTransaction(image, i, pageNum))
);
for (const tx of results) {
if (tx) {
transactions.push(tx);
}
}
}
console.log(` [Page ${pageNum}] Complete: ${transactions.length}/${count} extracted`);
return transactions;
}
/**
* Extract all transactions from bank statement
*/
async function extractTransactions(images: string[]): Promise<ITransaction[]> {
console.log(` [Vision] Processing ${images.length} page(s) with Qwen3-VL (multi-query)`);
const allTransactions: ITransaction[] = [];
for (let i = 0; i < images.length; i++) {
const pageTransactions = await extractTransactionsFromPage(images[i], i + 1);
allTransactions.push(...pageTransactions);
}
console.log(` [Vision] Total: ${allTransactions.length} transactions`);
return allTransactions;
}
/**
* Compare transactions
*/
function compareTransactions(
extracted: ITransaction[],
expected: ITransaction[]
): { matches: number; total: number; errors: string[] } {
const errors: string[] = [];
let matches = 0;
for (let i = 0; i < expected.length; i++) {
const exp = expected[i];
const ext = extracted[i];
if (!ext) {
errors.push(`Missing transaction ${i}: ${exp.date} ${exp.counterparty}`);
continue;
}
const dateMatch = ext.date === exp.date;
const amountMatch = Math.abs(ext.amount - exp.amount) < 0.01;
if (dateMatch && amountMatch) {
matches++;
} else {
errors.push(`Mismatch at ${i}: expected ${exp.date}/${exp.amount}, got ${ext.date}/${ext.amount}`);
}
}
if (extracted.length > expected.length) {
errors.push(`Extra transactions: ${extracted.length - expected.length}`);
}
return { matches, total: expected.length, errors };
}
/**
* Find test cases in .nogit/
*/
function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: string }> {
const testDir = path.join(process.cwd(), '.nogit');
if (!fs.existsSync(testDir)) return [];
const files = fs.readdirSync(testDir);
const testCases: Array<{ name: string; pdfPath: string; jsonPath: string }> = [];
for (const pdf of files.filter((f: string) => f.endsWith('.pdf'))) {
const baseName = pdf.replace('.pdf', '');
const jsonFile = `${baseName}.json`;
if (files.includes(jsonFile)) {
testCases.push({
name: baseName,
pdfPath: path.join(testDir, pdf),
jsonPath: path.join(testDir, jsonFile),
});
}
}
return testCases.sort((a, b) => a.name.localeCompare(b.name));
}
/**
* Ensure Qwen3-VL model is available
*/
async function ensureQwen3Vl(): Promise<boolean> {
try {
const response = await fetch(`${OLLAMA_URL}/api/tags`);
if (response.ok) {
const data = await response.json();
const models = data.models || [];
if (models.some((m: { name: string }) => m.name === VISION_MODEL)) {
console.log(`[Ollama] Model available: ${VISION_MODEL}`);
return true;
}
}
} catch {
return false;
}
console.log(`[Ollama] Pulling ${VISION_MODEL}...`);
const pullResponse = await fetch(`${OLLAMA_URL}/api/pull`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ name: VISION_MODEL, stream: false }),
});
return pullResponse.ok;
}
// Tests
tap.test('setup: ensure Qwen3-VL is running', async () => {
console.log('\n[Setup] Checking Qwen3-VL 8B...\n');
const ollamaOk = await ensureMiniCpm();
expect(ollamaOk).toBeTrue();
const visionOk = await ensureQwen3Vl();
expect(visionOk).toBeTrue();
console.log('\n[Setup] Ready!\n');
});
const testCases = findTestCases();
console.log(`\nFound ${testCases.length} bank statement test cases (Qwen3-VL)\n`);
let passedCount = 0;
let failedCount = 0;
for (const testCase of testCases) {
tap.test(`should extract: ${testCase.name}`, async () => {
const expected: ITransaction[] = JSON.parse(fs.readFileSync(testCase.jsonPath, 'utf-8'));
console.log(`\n=== ${testCase.name} ===`);
console.log(`Expected: ${expected.length} transactions`);
const images = convertPdfToImages(testCase.pdfPath);
console.log(` Pages: ${images.length}`);
const extracted = await extractTransactions(images);
console.log(` Extracted: ${extracted.length} transactions`);
const result = compareTransactions(extracted, expected);
const accuracy = result.total > 0 ? result.matches / result.total : 0;
if (accuracy >= 0.95 && extracted.length === expected.length) {
passedCount++;
console.log(` Result: PASS (${result.matches}/${result.total})`);
} else {
failedCount++;
console.log(` Result: FAIL (${result.matches}/${result.total})`);
result.errors.slice(0, 5).forEach((e) => console.log(` - ${e}`));
}
expect(accuracy).toBeGreaterThan(0.95);
expect(extracted.length).toEqual(expected.length);
});
}
tap.test('summary', async () => {
const total = testCases.length;
console.log(`\n======================================================`);
console.log(` Bank Statement Summary (Qwen3-VL Vision)`);
console.log(`======================================================`);
console.log(` Method: Multi-query (count then extract each)`);
console.log(` Passed: ${passedCount}/${total}`);
console.log(` Failed: ${failedCount}/${total}`);
console.log(`======================================================\n`);
});
export default tap.start();

View File

@@ -0,0 +1,477 @@
/**
* Invoice extraction test using MiniCPM-V (visual extraction)
*
* Consensus approach:
* 1. Pass 1: Fast JSON extraction
* 2. Pass 2: Confirm with thinking enabled
* 3. If mismatch: repeat until consensus or max attempts
*/
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensureMiniCpm } from './helpers/docker.js';
const OLLAMA_URL = 'http://localhost:11434';
const MODEL = 'openbmb/minicpm-v4.5:q8_0';
interface IInvoice {
invoice_number: string;
invoice_date: string;
vendor_name: string;
currency: string;
net_amount: number;
vat_amount: number;
total_amount: number;
}
/**
* Convert PDF to PNG images using ImageMagick
*/
function convertPdfToImages(pdfPath: string): string[] {
const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
const outputPattern = path.join(tempDir, 'page-%d.png');
try {
execSync(
`convert -density 300 -quality 95 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
{ stdio: 'pipe' }
);
const files = fs.readdirSync(tempDir).filter((f) => f.endsWith('.png')).sort();
const images: string[] = [];
for (const file of files) {
const imagePath = path.join(tempDir, file);
const imageData = fs.readFileSync(imagePath);
images.push(imageData.toString('base64'));
}
return images;
} finally {
fs.rmSync(tempDir, { recursive: true, force: true });
}
}
const JSON_PROMPT = `Extract invoice data from this image. Return ONLY a JSON object with these exact fields:
{
"invoice_number": "the invoice number (not VAT ID, not customer ID)",
"invoice_date": "YYYY-MM-DD format",
"vendor_name": "company that issued the invoice",
"currency": "EUR, USD, or GBP",
"net_amount": 0.00,
"vat_amount": 0.00,
"total_amount": 0.00
}
Return only the JSON, no explanation.`;
/**
* Query MiniCPM-V for JSON output (fast, no thinking)
*/
async function queryJsonFast(images: string[]): Promise<string> {
const response = await fetch(`${OLLAMA_URL}/api/chat`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model: MODEL,
messages: [{
role: 'user',
content: JSON_PROMPT,
images: images,
}],
stream: false,
options: {
num_predict: 1000,
temperature: 0.1,
},
}),
});
if (!response.ok) {
throw new Error(`Ollama API error: ${response.status}`);
}
const data = await response.json();
return (data.message?.content || '').trim();
}
/**
* Query MiniCPM-V for JSON output with thinking enabled (slower, more accurate)
*/
async function queryJsonWithThinking(images: string[]): Promise<string> {
const response = await fetch(`${OLLAMA_URL}/api/chat`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model: MODEL,
messages: [{
role: 'user',
content: `Think carefully about this invoice image, then ${JSON_PROMPT}`,
images: images,
}],
stream: false,
options: {
num_predict: 2000,
temperature: 0.1,
},
}),
});
if (!response.ok) {
throw new Error(`Ollama API error: ${response.status}`);
}
const data = await response.json();
return (data.message?.content || '').trim();
}
/**
* Parse amount from string (handles European format)
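* e.g. parseAmount("1.234,56 €") === 1234.56 and parseAmount("24.99") === 24.99 (illustrative).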
*/
function parseAmount(s: string | number | undefined): number {
if (s === undefined || s === null) return 0;
if (typeof s === 'number') return s;
const match = s.match(/([\d.,]+)/);
if (!match) return 0;
const numStr = match[1];
// Handle European format: 1.234,56 → 1234.56
const normalized = numStr.includes(',') && numStr.indexOf(',') > numStr.lastIndexOf('.')
? numStr.replace(/\./g, '').replace(',', '.')
: numStr.replace(/,/g, '');
return parseFloat(normalized) || 0;
}
/**
* Extract invoice number from potentially verbose response
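* e.g. a verbose reply like "The invoice number is **R0014359508**." reduces to "R0014359508" (illustrative).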
*/
function extractInvoiceNumber(s: string | undefined): string {
if (!s) return '';
let clean = s.replace(/\*\*/g, '').replace(/`/g, '').trim();
const patterns = [
/\b([A-Z]{2,3}\d{10,})\b/i, // IEE2022006460244
/\b([A-Z]\d{8,})\b/i, // R0014359508
/\b(INV[-\s]?\d{4}[-\s]?\d+)\b/i, // INV-2024-001
/\b(\d{7,})\b/, // 1579087430
];
for (const pattern of patterns) {
const match = clean.match(pattern);
if (match) return match[1];
}
return clean.replace(/[^A-Z0-9-]/gi, '').trim() || clean;
}
/**
* Extract date (YYYY-MM-DD) from response
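* e.g. "15.03.2024" and "15/03/2024" both normalize to "2024-03-15" (illustrative).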
*/
function extractDate(s: string | undefined): string {
if (!s) return '';
let clean = s.replace(/\*\*/g, '').replace(/`/g, '').trim();
const isoMatch = clean.match(/(\d{4}-\d{2}-\d{2})/);
if (isoMatch) return isoMatch[1];
// Try DD/MM/YYYY or DD.MM.YYYY
const dmyMatch = clean.match(/(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})/);
if (dmyMatch) {
return `${dmyMatch[3]}-${dmyMatch[2].padStart(2, '0')}-${dmyMatch[1].padStart(2, '0')}`;
}
return clean.replace(/[^\d-]/g, '').trim();
}
/**
* Extract currency
*/
function extractCurrency(s: string | undefined): string {
if (!s) return 'EUR';
const upper = s.toUpperCase();
if (upper.includes('EUR') || upper.includes('€')) return 'EUR';
if (upper.includes('USD') || upper.includes('$')) return 'USD';
if (upper.includes('GBP') || upper.includes('£')) return 'GBP';
return 'EUR';
}
/**
* Extract JSON from response (handles markdown code blocks)
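* Tries a ```json fenced block first, then direct JSON.parse, then the first {...} span found in the text.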
*/
function extractJsonFromResponse(response: string): Record<string, unknown> | null {
// Try to find JSON in markdown code block
const codeBlockMatch = response.match(/```(?:json)?\s*([\s\S]*?)```/);
const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : response.trim();
try {
return JSON.parse(jsonStr);
} catch {
// Try to find JSON object pattern
const jsonMatch = jsonStr.match(/\{[\s\S]*\}/);
if (jsonMatch) {
try {
return JSON.parse(jsonMatch[0]);
} catch {
return null;
}
}
return null;
}
}
/**
* Parse JSON response into IInvoice
*/
function parseJsonToInvoice(response: string): IInvoice | null {
const parsed = extractJsonFromResponse(response);
if (!parsed) return null;
return {
invoice_number: extractInvoiceNumber(String(parsed.invoice_number || '')),
invoice_date: extractDate(String(parsed.invoice_date || '')),
vendor_name: String(parsed.vendor_name || '').replace(/\*\*/g, '').replace(/`/g, '').trim(),
currency: extractCurrency(String(parsed.currency || '')),
net_amount: parseAmount(parsed.net_amount as string | number),
vat_amount: parseAmount(parsed.vat_amount as string | number),
total_amount: parseAmount(parsed.total_amount as string | number),
};
}
/**
* Compare two invoices for consensus (key fields must match)
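* Consensus requires an equal invoice number (case-insensitive), an identical date string, and totals within 0.02.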
*/
function invoicesMatch(a: IInvoice, b: IInvoice): boolean {
const numMatch = a.invoice_number.toLowerCase() === b.invoice_number.toLowerCase();
const dateMatch = a.invoice_date === b.invoice_date;
const totalMatch = Math.abs(a.total_amount - b.total_amount) < 0.02;
return numMatch && dateMatch && totalMatch;
}
/**
* Extract invoice data using consensus approach:
* 1. Pass 1: Fast JSON extraction
* 2. Pass 2: Confirm with thinking enabled
* 3. If mismatch: repeat until consensus or max 5 attempts
*/
async function extractInvoiceFromImages(images: string[]): Promise<IInvoice> {
console.log(` [Vision] Processing ${images.length} page(s) with ${MODEL} (consensus)`);
const MAX_ATTEMPTS = 5;
let attempt = 0;
while (attempt < MAX_ATTEMPTS) {
attempt++;
console.log(` [Attempt ${attempt}/${MAX_ATTEMPTS}]`);
// PASS 1: Fast JSON extraction
console.log(` [Pass 1] Fast extraction...`);
const fastResponse = await queryJsonFast(images);
const fastInvoice = parseJsonToInvoice(fastResponse);
if (!fastInvoice) {
console.log(` [Pass 1] JSON parsing failed, retrying...`);
continue;
}
console.log(` [Pass 1] Result: ${fastInvoice.invoice_number} | ${fastInvoice.invoice_date} | ${fastInvoice.total_amount} ${fastInvoice.currency}`);
// PASS 2: Confirm with thinking
console.log(` [Pass 2] Thinking confirmation...`);
const thinkResponse = await queryJsonWithThinking(images);
const thinkInvoice = parseJsonToInvoice(thinkResponse);
if (!thinkInvoice) {
console.log(` [Pass 2] JSON parsing failed, retrying...`);
continue;
}
console.log(` [Pass 2] Result: ${thinkInvoice.invoice_number} | ${thinkInvoice.invoice_date} | ${thinkInvoice.total_amount} ${thinkInvoice.currency}`);
// Check consensus
if (invoicesMatch(fastInvoice, thinkInvoice)) {
console.log(` [Consensus] MATCH - using result`);
return thinkInvoice; // Prefer thinking result
}
console.log(` [Consensus] MISMATCH - repeating...`);
console.log(` Fast: ${fastInvoice.invoice_number} | ${fastInvoice.invoice_date} | ${fastInvoice.total_amount}`);
console.log(` Think: ${thinkInvoice.invoice_number} | ${thinkInvoice.invoice_date} | ${thinkInvoice.total_amount}`);
}
// Max attempts reached - do one final thinking pass and use that
console.log(` [Final] Max attempts reached, using final thinking pass`);
const finalResponse = await queryJsonWithThinking(images);
const finalInvoice = parseJsonToInvoice(finalResponse);
if (finalInvoice) {
console.log(` [Final] Result: ${finalInvoice.invoice_number} | ${finalInvoice.invoice_date} | ${finalInvoice.total_amount} ${finalInvoice.currency}`);
return finalInvoice;
}
// Return empty invoice if all else fails
console.log(` [Final] All parsing failed, returning empty`);
return {
invoice_number: '',
invoice_date: '',
vendor_name: '',
currency: 'EUR',
net_amount: 0,
vat_amount: 0,
total_amount: 0,
};
}
/**
* Normalize date to YYYY-MM-DD
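* e.g. "12-MAR-2024" -> "2024-03-12" and "12.03.2024" -> "2024-03-12" (illustrative).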
*/
function normalizeDate(dateStr: string | null): string {
if (!dateStr) return '';
if (/^\d{4}-\d{2}-\d{2}$/.test(dateStr)) return dateStr;
const monthMap: Record<string, string> = {
JAN: '01', FEB: '02', MAR: '03', APR: '04', MAY: '05', JUN: '06',
JUL: '07', AUG: '08', SEP: '09', OCT: '10', NOV: '11', DEC: '12',
};
let match = dateStr.match(/^(\d{1,2})-([A-Z]{3})-(\d{4})$/i);
if (match) {
return `${match[3]}-${monthMap[match[2].toUpperCase()] || '01'}-${match[1].padStart(2, '0')}`;
}
match = dateStr.match(/^(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})$/);
if (match) {
return `${match[3]}-${match[2].padStart(2, '0')}-${match[1].padStart(2, '0')}`;
}
return dateStr;
}
/**
* Compare extracted invoice against expected
*/
function compareInvoice(
extracted: IInvoice,
expected: IInvoice
): { match: boolean; errors: string[] } {
const errors: string[] = [];
// Compare invoice number (normalize by removing spaces and case)
const extNum = extracted.invoice_number?.replace(/\s/g, '').toLowerCase() || '';
const expNum = expected.invoice_number?.replace(/\s/g, '').toLowerCase() || '';
if (extNum !== expNum) {
errors.push(`invoice_number: expected "${expected.invoice_number}", got "${extracted.invoice_number}"`);
}
// Compare date
if (normalizeDate(extracted.invoice_date) !== normalizeDate(expected.invoice_date)) {
errors.push(`invoice_date: expected "${expected.invoice_date}", got "${extracted.invoice_date}"`);
}
// Compare total amount (with tolerance)
if (Math.abs(extracted.total_amount - expected.total_amount) > 0.02) {
errors.push(`total_amount: expected ${expected.total_amount}, got ${extracted.total_amount}`);
}
// Compare currency
if (extracted.currency?.toUpperCase() !== expected.currency?.toUpperCase()) {
errors.push(`currency: expected "${expected.currency}", got "${extracted.currency}"`);
}
return { match: errors.length === 0, errors };
}
/**
* Find all test cases (PDF + JSON pairs) in .nogit/invoices/
*/
function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: string }> {
const testDir = path.join(process.cwd(), '.nogit/invoices');
if (!fs.existsSync(testDir)) {
return [];
}
const files = fs.readdirSync(testDir);
const pdfFiles = files.filter((f) => f.endsWith('.pdf'));
const testCases: Array<{ name: string; pdfPath: string; jsonPath: string }> = [];
for (const pdf of pdfFiles) {
const baseName = pdf.replace('.pdf', '');
const jsonFile = `${baseName}.json`;
if (files.includes(jsonFile)) {
testCases.push({
name: baseName,
pdfPath: path.join(testDir, pdf),
jsonPath: path.join(testDir, jsonFile),
});
}
}
testCases.sort((a, b) => a.name.localeCompare(b.name));
return testCases;
}
// Tests
tap.test('setup: ensure Docker containers are running', async () => {
console.log('\n[Setup] Checking Docker containers...\n');
const minicpmOk = await ensureMiniCpm();
expect(minicpmOk).toBeTrue();
console.log('\n[Setup] All containers ready!\n');
});
tap.test('should have MiniCPM-V model loaded', async () => {
const response = await fetch(`${OLLAMA_URL}/api/tags`);
const data = await response.json();
const modelNames = data.models.map((m: { name: string }) => m.name);
expect(modelNames.some((name: string) => name.includes('minicpm'))).toBeTrue();
});
const testCases = findTestCases();
console.log(`\nFound ${testCases.length} invoice test cases (MiniCPM-V)\n`);
let passedCount = 0;
let failedCount = 0;
const processingTimes: number[] = [];
for (const testCase of testCases) {
tap.test(`should extract invoice: ${testCase.name}`, async () => {
const expected: IInvoice = JSON.parse(fs.readFileSync(testCase.jsonPath, 'utf-8'));
console.log(`\n=== ${testCase.name} ===`);
console.log(`Expected: ${expected.invoice_number} | ${expected.invoice_date} | ${expected.total_amount} ${expected.currency}`);
const startTime = Date.now();
const images = convertPdfToImages(testCase.pdfPath);
console.log(` Pages: ${images.length}`);
const extracted = await extractInvoiceFromImages(images);
console.log(` Extracted: ${extracted.invoice_number} | ${extracted.invoice_date} | ${extracted.total_amount} ${extracted.currency}`);
const elapsedMs = Date.now() - startTime;
processingTimes.push(elapsedMs);
const result = compareInvoice(extracted, expected);
if (result.match) {
passedCount++;
console.log(` Result: MATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
} else {
failedCount++;
console.log(` Result: MISMATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
result.errors.forEach((e) => console.log(` - ${e}`));
}
expect(result.match).toBeTrue();
});
}
tap.test('summary', async () => {
const totalInvoices = testCases.length;
const accuracy = totalInvoices > 0 ? (passedCount / totalInvoices) * 100 : 0;
const totalTimeMs = processingTimes.reduce((a, b) => a + b, 0);
const avgTimeSec = processingTimes.length > 0 ? totalTimeMs / processingTimes.length / 1000 : 0;
console.log(`\n========================================`);
console.log(` Invoice Extraction Summary (${MODEL})`);
console.log(`========================================`);
console.log(` Method: Consensus (fast + thinking)`);
console.log(` Passed: ${passedCount}/${totalInvoices}`);
console.log(` Failed: ${failedCount}/${totalInvoices}`);
console.log(` Accuracy: ${accuracy.toFixed(1)}%`);
console.log(`----------------------------------------`);
console.log(` Total time: ${(totalTimeMs / 1000).toFixed(1)}s`);
console.log(` Avg per inv: ${avgTimeSec.toFixed(1)}s`);
console.log(`========================================\n`);
});
export default tap.start();

View File

@@ -0,0 +1,351 @@
/**
* Invoice extraction using Qwen3-VL 8B Vision (Direct)
*
* Multi-query approach: seven short parallel queries (one per field) to avoid token exhaustion.
* Single pass, no consensus voting.
*/
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensureMiniCpm } from './helpers/docker.js';
const OLLAMA_URL = 'http://localhost:11434';
const VISION_MODEL = 'qwen3-vl:8b';
interface IInvoice {
invoice_number: string;
invoice_date: string;
vendor_name: string;
currency: string;
net_amount: number;
vat_amount: number;
total_amount: number;
}
/**
* Convert PDF to PNG images using ImageMagick
*/
function convertPdfToImages(pdfPath: string): string[] {
const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
const outputPattern = path.join(tempDir, 'page-%d.png');
try {
// 150 DPI is sufficient for invoice extraction and reduces context size
execSync(
`convert -density 150 -quality 90 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
{ stdio: 'pipe' }
);
const files = fs.readdirSync(tempDir).filter((f) => f.endsWith('.png')).sort();
const images: string[] = [];
for (const file of files) {
const imagePath = path.join(tempDir, file);
const imageData = fs.readFileSync(imagePath);
images.push(imageData.toString('base64'));
}
return images;
} finally {
fs.rmSync(tempDir, { recursive: true, force: true });
}
}
/**
* Query Qwen3-VL for a single field
* Uses simple prompts to minimize thinking tokens
*/
async function queryField(images: string[], question: string): Promise<string> {
const response = await fetch(`${OLLAMA_URL}/api/chat`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model: VISION_MODEL,
messages: [{
role: 'user',
content: `${question} Reply with just the value, nothing else.`,
images: images,
}],
stream: false,
options: {
num_predict: 500,
temperature: 0.1,
},
}),
});
if (!response.ok) {
throw new Error(`Ollama API error: ${response.status}`);
}
const data = await response.json();
return (data.message?.content || '').trim();
}
/**
* Extract invoice data using multiple simple queries
* Each query asks for 1-2 fields to minimize thinking tokens
* (Qwen3's thinking mode can consume the entire output budget on complex prompts)
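* The seven field queries below run concurrently via Promise.all; each logs its raw answer as soon as it resolves.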
*/
async function extractInvoiceFromImages(images: string[]): Promise<IInvoice> {
console.log(` [Vision] Processing ${images.length} page(s) with Qwen3-VL (multi-query)`);
// Query each field separately to avoid excessive thinking tokens
// Use explicit questions to avoid confusion between similar fields
// Log each result as it comes in (not waiting for all to complete)
const queryAndLog = async (name: string, question: string): Promise<string> => {
const result = await queryField(images, question);
console.log(` [Query] ${name}: "${result}"`);
return result;
};
const [invoiceNum, invoiceDate, vendor, currency, totalAmount, netAmount, vatAmount] = await Promise.all([
queryAndLog('Invoice Number', 'What is the INVOICE NUMBER (not VAT number, not customer ID)? Look for "Invoice No", "Invoice #", "Rechnung Nr", "Facture". Just the number/code.'),
queryAndLog('Invoice Date ', 'What is the INVOICE DATE (not due date, not delivery date)? The date the invoice was issued. Format: YYYY-MM-DD'),
queryAndLog('Vendor ', 'What company ISSUED this invoice (the seller/vendor, not the buyer)? Look at the letterhead or "From" section.'),
queryAndLog('Currency ', 'What CURRENCY is used? Look for € (EUR), $ (USD), or £ (GBP). Answer with 3-letter code: EUR, USD, or GBP'),
queryAndLog('Total Amount ', 'What is the TOTAL AMOUNT INCLUDING TAX (the final amount to pay, with VAT/tax included)? Just the number, e.g. 24.99'),
queryAndLog('Net Amount ', 'What is the NET AMOUNT (subtotal before VAT/tax)? Just the number, e.g. 20.99'),
queryAndLog('VAT Amount ', 'What is the VAT/TAX AMOUNT? Just the number, e.g. 4.00'),
]);
// Parse amount from string (handles European format)
const parseAmount = (s: string): number => {
if (!s) return 0;
// Extract number from the response
const match = s.match(/([\d.,]+)/);
if (!match) return 0;
const numStr = match[1];
// Handle European format: 1.234,56 → 1234.56
const normalized = numStr.includes(',') && numStr.indexOf(',') > numStr.lastIndexOf('.')
? numStr.replace(/\./g, '').replace(',', '.')
: numStr.replace(/,/g, '');
return parseFloat(normalized) || 0;
};
// Extract invoice number from potentially verbose response
const extractInvoiceNumber = (s: string): string => {
let clean = s.replace(/\*\*/g, '').replace(/`/g, '').trim();
// Look for common invoice number patterns
const patterns = [
/\b([A-Z]{2,3}\d{10,})\b/i, // IEE2022006460244
/\b([A-Z]\d{8,})\b/i, // R0014359508
/\b(INV[-\s]?\d{4}[-\s]?\d+)\b/i, // INV-2024-001
/\b(\d{7,})\b/, // 1579087430
];
for (const pattern of patterns) {
const match = clean.match(pattern);
if (match) return match[1];
}
return clean.replace(/[^A-Z0-9-]/gi, '').trim() || clean;
};
// Extract date (YYYY-MM-DD) from response
const extractDate = (s: string): string => {
let clean = s.replace(/\*\*/g, '').replace(/`/g, '').trim();
const isoMatch = clean.match(/(\d{4}-\d{2}-\d{2})/);
if (isoMatch) return isoMatch[1];
return clean.replace(/[^\d-]/g, '').trim();
};
// Extract currency
const extractCurrency = (s: string): string => {
const upper = s.toUpperCase();
if (upper.includes('EUR') || upper.includes('€')) return 'EUR';
if (upper.includes('USD') || upper.includes('$')) return 'USD';
if (upper.includes('GBP') || upper.includes('£')) return 'GBP';
return 'EUR';
};
return {
invoice_number: extractInvoiceNumber(invoiceNum),
invoice_date: extractDate(invoiceDate),
vendor_name: vendor.replace(/\*\*/g, '').replace(/`/g, '').trim() || '',
currency: extractCurrency(currency),
net_amount: parseAmount(netAmount),
vat_amount: parseAmount(vatAmount),
total_amount: parseAmount(totalAmount),
};
}
/**
* Normalize date to YYYY-MM-DD
*/
function normalizeDate(dateStr: string | null): string {
if (!dateStr) return '';
if (/^\d{4}-\d{2}-\d{2}$/.test(dateStr)) return dateStr;
const monthMap: Record<string, string> = {
JAN: '01', FEB: '02', MAR: '03', APR: '04', MAY: '05', JUN: '06',
JUL: '07', AUG: '08', SEP: '09', OCT: '10', NOV: '11', DEC: '12',
};
let match = dateStr.match(/^(\d{1,2})-([A-Z]{3})-(\d{4})$/i);
if (match) {
return `${match[3]}-${monthMap[match[2].toUpperCase()] || '01'}-${match[1].padStart(2, '0')}`;
}
match = dateStr.match(/^(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})$/);
if (match) {
return `${match[3]}-${match[2].padStart(2, '0')}-${match[1].padStart(2, '0')}`;
}
return dateStr;
}
/**
* Compare extracted vs expected
*/
function compareInvoice(extracted: IInvoice, expected: IInvoice): { match: boolean; errors: string[] } {
const errors: string[] = [];
const extNum = extracted.invoice_number?.replace(/\s/g, '').toLowerCase() || '';
const expNum = expected.invoice_number?.replace(/\s/g, '').toLowerCase() || '';
if (extNum !== expNum) {
errors.push(`invoice_number: expected "${expected.invoice_number}", got "${extracted.invoice_number}"`);
}
if (normalizeDate(extracted.invoice_date) !== normalizeDate(expected.invoice_date)) {
errors.push(`invoice_date: expected "${expected.invoice_date}", got "${extracted.invoice_date}"`);
}
if (Math.abs(extracted.total_amount - expected.total_amount) > 0.02) {
errors.push(`total_amount: expected ${expected.total_amount}, got ${extracted.total_amount}`);
}
if (extracted.currency?.toUpperCase() !== expected.currency?.toUpperCase()) {
errors.push(`currency: expected "${expected.currency}", got "${extracted.currency}"`);
}
return { match: errors.length === 0, errors };
}
/**
* Find test cases
*/
function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: string }> {
const testDir = path.join(process.cwd(), '.nogit/invoices');
if (!fs.existsSync(testDir)) return [];
const files = fs.readdirSync(testDir);
const testCases: Array<{ name: string; pdfPath: string; jsonPath: string }> = [];
for (const pdf of files.filter((f) => f.endsWith('.pdf'))) {
const baseName = pdf.replace('.pdf', '');
const jsonFile = `${baseName}.json`;
if (files.includes(jsonFile)) {
testCases.push({
name: baseName,
pdfPath: path.join(testDir, pdf),
jsonPath: path.join(testDir, jsonFile),
});
}
}
return testCases.sort((a, b) => a.name.localeCompare(b.name));
}
/**
* Ensure Qwen3-VL 8B model is available
*/
async function ensureQwen3Vl(): Promise<boolean> {
try {
const response = await fetch(`${OLLAMA_URL}/api/tags`);
if (response.ok) {
const data = await response.json();
const models = data.models || [];
if (models.some((m: { name: string }) => m.name === VISION_MODEL)) {
console.log(`[Ollama] Model already available: ${VISION_MODEL}`);
return true;
}
}
} catch {
console.log('[Ollama] Cannot check models');
return false;
}
console.log(`[Ollama] Pulling model: ${VISION_MODEL}...`);
const pullResponse = await fetch(`${OLLAMA_URL}/api/pull`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ name: VISION_MODEL, stream: false }),
});
return pullResponse.ok;
}
// Tests
tap.test('setup: ensure Qwen3-VL is running', async () => {
console.log('\n[Setup] Checking Qwen3-VL 8B...\n');
// Ensure Ollama service is running
const ollamaOk = await ensureMiniCpm();
expect(ollamaOk).toBeTrue();
// Ensure Qwen3-VL 8B model
const visionOk = await ensureQwen3Vl();
expect(visionOk).toBeTrue();
console.log('\n[Setup] Ready!\n');
});
const testCases = findTestCases();
console.log(`\nFound ${testCases.length} invoice test cases (Qwen3-VL Vision)\n`);
let passedCount = 0;
let failedCount = 0;
const times: number[] = [];
for (const testCase of testCases) {
tap.test(`should extract invoice: ${testCase.name}`, async () => {
const expected: IInvoice = JSON.parse(fs.readFileSync(testCase.jsonPath, 'utf-8'));
console.log(`\n=== ${testCase.name} ===`);
console.log(`Expected: ${expected.invoice_number} | ${expected.invoice_date} | ${expected.total_amount} ${expected.currency}`);
const start = Date.now();
const images = convertPdfToImages(testCase.pdfPath);
console.log(` Pages: ${images.length}`);
const extracted = await extractInvoiceFromImages(images);
console.log(` Extracted: ${extracted.invoice_number} | ${extracted.invoice_date} | ${extracted.total_amount} ${extracted.currency}`);
const elapsed = Date.now() - start;
times.push(elapsed);
const result = compareInvoice(extracted, expected);
if (result.match) {
passedCount++;
console.log(` Result: MATCH (${(elapsed / 1000).toFixed(1)}s)`);
} else {
failedCount++;
console.log(` Result: MISMATCH (${(elapsed / 1000).toFixed(1)}s)`);
result.errors.forEach((e) => console.log(` - ${e}`));
}
expect(result.match).toBeTrue();
});
}
tap.test('summary', async () => {
const total = testCases.length;
const accuracy = total > 0 ? (passedCount / total) * 100 : 0;
const totalTime = times.reduce((a, b) => a + b, 0) / 1000;
const avgTime = times.length > 0 ? totalTime / times.length : 0;
console.log(`\n======================================================`);
console.log(` Invoice Extraction Summary (Qwen3-VL Vision)`);
console.log(`======================================================`);
console.log(` Method: Multi-query (single pass)`);
console.log(` Passed: ${passedCount}/${total}`);
console.log(` Failed: ${failedCount}/${total}`);
console.log(` Accuracy: ${accuracy.toFixed(1)}%`);
console.log(`------------------------------------------------------`);
console.log(` Total time: ${totalTime.toFixed(1)}s`);
console.log(` Avg per inv: ${avgTime.toFixed(1)}s`);
console.log(`======================================================\n`);
});
export default tap.start();

View File

@@ -1,377 +0,0 @@
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
const OLLAMA_URL = 'http://localhost:11434';
const MODEL = 'openbmb/minicpm-v4.5:q8_0';
const PADDLEOCR_URL = 'http://localhost:5000';
interface IInvoice {
invoice_number: string;
invoice_date: string;
vendor_name: string;
currency: string;
net_amount: number;
vat_amount: number;
total_amount: number;
}
/**
* Extract OCR text from an image using PaddleOCR
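* POSTs the PNG as multipart form data to the local PaddleOCR /ocr endpoint and joins the recognized text lines; returns '' if OCR is unavailable.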
*/
async function extractOcrText(imageBase64: string): Promise<string> {
const formData = new FormData();
const imageBuffer = Buffer.from(imageBase64, 'base64');
const blob = new Blob([imageBuffer], { type: 'image/png' });
formData.append('img', blob, 'image.png');
formData.append('outtype', 'json');
try {
const response = await fetch(`${PADDLEOCR_URL}/ocr`, {
method: 'POST',
body: formData,
});
if (!response.ok) return '';
const data = await response.json();
if (data.success && data.results) {
return data.results.map((r: { text: string }) => r.text).join('\n');
}
} catch {
// PaddleOCR unavailable
}
return '';
}
/**
* Build prompt with optional OCR text
*/
function buildPrompt(ocrText: string): string {
const base = `You are an invoice parser. Extract the following fields from this invoice:
1. invoice_number: The invoice/receipt number
2. invoice_date: Date in YYYY-MM-DD format
3. vendor_name: Company that issued the invoice
4. currency: EUR, USD, etc.
5. net_amount: Amount before tax (if shown)
6. vat_amount: Tax/VAT amount (if shown, 0 if reverse charge or no tax)
7. total_amount: Final amount due
Return ONLY valid JSON in this exact format:
{"invoice_number":"XXX","invoice_date":"YYYY-MM-DD","vendor_name":"Company Name","currency":"EUR","net_amount":100.00,"vat_amount":19.00,"total_amount":119.00}
If a field is not visible, use null for strings or 0 for numbers.
No explanation, just the JSON object.`;
if (ocrText) {
return `${base}
OCR text extracted from the invoice:
---
${ocrText}
---
Cross-reference the image with the OCR text above for accuracy.`;
}
return base;
}
/**
* Convert PDF to PNG images using ImageMagick
*/
function convertPdfToImages(pdfPath: string): string[] {
const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
const outputPattern = path.join(tempDir, 'page-%d.png');
try {
execSync(
`convert -density 200 -quality 90 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
{ stdio: 'pipe' }
);
const files = fs.readdirSync(tempDir).filter((f) => f.endsWith('.png')).sort();
const images: string[] = [];
for (const file of files) {
const imagePath = path.join(tempDir, file);
const imageData = fs.readFileSync(imagePath);
images.push(imageData.toString('base64'));
}
return images;
} finally {
fs.rmSync(tempDir, { recursive: true, force: true });
}
}
/**
* Single extraction pass
*/
async function extractOnce(images: string[], passNum: number, ocrText: string = ''): Promise<IInvoice> {
const payload = {
model: MODEL,
prompt: buildPrompt(ocrText),
images,
stream: true,
options: {
num_predict: 2048,
temperature: 0.1,
},
};
const response = await fetch(`${OLLAMA_URL}/api/generate`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(payload),
});
if (!response.ok) {
throw new Error(`Ollama API error: ${response.status}`);
}
const reader = response.body?.getReader();
if (!reader) {
throw new Error('No response body');
}
const decoder = new TextDecoder();
let fullText = '';
while (true) {
const { done, value } = await reader.read();
if (done) break;
const chunk = decoder.decode(value, { stream: true });
const lines = chunk.split('\n').filter((l) => l.trim());
for (const line of lines) {
try {
const json = JSON.parse(line);
if (json.response) {
fullText += json.response;
}
} catch {
// Skip invalid JSON lines
}
}
}
// Extract JSON from response
const startIdx = fullText.indexOf('{');
const endIdx = fullText.lastIndexOf('}') + 1;
if (startIdx < 0 || endIdx <= startIdx) {
throw new Error(`No JSON object found in response: ${fullText.substring(0, 200)}`);
}
const jsonStr = fullText.substring(startIdx, endIdx);
return JSON.parse(jsonStr);
}
/**
* Create a hash of invoice for comparison (using key fields)
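* e.g. { invoice_number: "INV-1", invoice_date: "2024-01-15", total_amount: 119 } hashes to "INV-1|2024-01-15|119.00" (illustrative).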
*/
function hashInvoice(invoice: IInvoice): string {
return `${invoice.invoice_number}|${invoice.invoice_date}|${invoice.total_amount.toFixed(2)}`;
}
/**
* Extract with majority voting - run until 2 passes match
*/
async function extractWithConsensus(images: string[], invoiceName: string, maxPasses: number = 5): Promise<IInvoice> {
const results: Array<{ invoice: IInvoice; hash: string }> = [];
const hashCounts: Map<string, number> = new Map();
// Extract OCR text from first page
const ocrText = await extractOcrText(images[0]);
if (ocrText) {
console.log(` [OCR] Extracted ${ocrText.split('\n').length} text lines`);
}
for (let pass = 1; pass <= maxPasses; pass++) {
try {
const invoice = await extractOnce(images, pass, ocrText);
const hash = hashInvoice(invoice);
results.push({ invoice, hash });
hashCounts.set(hash, (hashCounts.get(hash) || 0) + 1);
console.log(` [Pass ${pass}] ${invoice.invoice_number} | ${invoice.invoice_date} | ${invoice.total_amount} ${invoice.currency}`);
// Check if we have consensus (2+ matching)
const count = hashCounts.get(hash)!;
if (count >= 2) {
console.log(` [Consensus] Reached after ${pass} passes`);
return invoice;
}
} catch (err) {
console.log(` [Pass ${pass}] Error: ${err}`);
}
}
// No consensus reached - return the most common result
let bestHash = '';
let bestCount = 0;
for (const [hash, count] of hashCounts) {
if (count > bestCount) {
bestCount = count;
bestHash = hash;
}
}
if (!bestHash) {
throw new Error(`No valid results for ${invoiceName}`);
}
const best = results.find((r) => r.hash === bestHash)!;
console.log(` [No consensus] Using most common result (${bestCount}/${maxPasses} passes)`);
return best.invoice;
}
/**
* Compare extracted invoice against expected
*/
function compareInvoice(
extracted: IInvoice,
expected: IInvoice
): { match: boolean; errors: string[] } {
const errors: string[] = [];
// Compare invoice number (normalize by removing spaces and case)
const extNum = extracted.invoice_number?.replace(/\s/g, '').toLowerCase() || '';
const expNum = expected.invoice_number?.replace(/\s/g, '').toLowerCase() || '';
if (extNum !== expNum) {
errors.push(`invoice_number: expected "${expected.invoice_number}", got "${extracted.invoice_number}"`);
}
// Compare date
if (extracted.invoice_date !== expected.invoice_date) {
errors.push(`invoice_date: expected "${expected.invoice_date}", got "${extracted.invoice_date}"`);
}
// Compare total amount (with tolerance)
if (Math.abs(extracted.total_amount - expected.total_amount) > 0.02) {
errors.push(`total_amount: expected ${expected.total_amount}, got ${extracted.total_amount}`);
}
// Compare currency
if (extracted.currency?.toUpperCase() !== expected.currency?.toUpperCase()) {
errors.push(`currency: expected "${expected.currency}", got "${extracted.currency}"`);
}
return { match: errors.length === 0, errors };
}
/**
* Find all test cases (PDF + JSON pairs) in .nogit/invoices/
*/
function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: string }> {
const testDir = path.join(process.cwd(), '.nogit/invoices');
if (!fs.existsSync(testDir)) {
return [];
}
const files = fs.readdirSync(testDir);
const pdfFiles = files.filter((f) => f.endsWith('.pdf'));
const testCases: Array<{ name: string; pdfPath: string; jsonPath: string }> = [];
for (const pdf of pdfFiles) {
const baseName = pdf.replace('.pdf', '');
const jsonFile = `${baseName}.json`;
if (files.includes(jsonFile)) {
testCases.push({
name: baseName,
pdfPath: path.join(testDir, pdf),
jsonPath: path.join(testDir, jsonFile),
});
}
}
return testCases;
}
// Tests
tap.test('should connect to Ollama API', async () => {
const response = await fetch(`${OLLAMA_URL}/api/tags`);
expect(response.ok).toBeTrue();
const data = await response.json();
expect(data.models).toBeArray();
});
tap.test('should have MiniCPM-V 4.5 model loaded', async () => {
const response = await fetch(`${OLLAMA_URL}/api/tags`);
const data = await response.json();
const modelNames = data.models.map((m: { name: string }) => m.name);
expect(modelNames.some((name: string) => name.includes('minicpm-v4.5'))).toBeTrue();
});
// Dynamic test for each PDF/JSON pair
const testCases = findTestCases();
console.log(`\nFound ${testCases.length} invoice test cases\n`);
let passedCount = 0;
let failedCount = 0;
const processingTimes: number[] = [];
for (const testCase of testCases) {
tap.test(`should extract invoice: ${testCase.name}`, async () => {
// Load expected data
const expected: IInvoice = JSON.parse(fs.readFileSync(testCase.jsonPath, 'utf-8'));
console.log(`\n=== ${testCase.name} ===`);
console.log(`Expected: ${expected.invoice_number} | ${expected.invoice_date} | ${expected.total_amount} ${expected.currency}`);
const startTime = Date.now();
// Convert PDF to images
const images = convertPdfToImages(testCase.pdfPath);
console.log(` Pages: ${images.length}`);
// Extract with consensus voting
const extracted = await extractWithConsensus(images, testCase.name);
const endTime = Date.now();
const elapsedMs = endTime - startTime;
processingTimes.push(elapsedMs);
// Compare results
const result = compareInvoice(extracted, expected);
if (result.match) {
passedCount++;
console.log(` Result: MATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
} else {
failedCount++;
console.log(` Result: MISMATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
result.errors.forEach((e) => console.log(` - ${e}`));
}
// Assert match
expect(result.match).toBeTrue();
});
}
tap.test('summary', async () => {
const totalInvoices = testCases.length;
const accuracy = totalInvoices > 0 ? (passedCount / totalInvoices) * 100 : 0;
const totalTimeMs = processingTimes.reduce((a, b) => a + b, 0);
const avgTimeMs = processingTimes.length > 0 ? totalTimeMs / processingTimes.length : 0;
const avgTimeSec = avgTimeMs / 1000;
const totalTimeSec = totalTimeMs / 1000;
console.log(`\n========================================`);
console.log(` Invoice Extraction Summary`);
console.log(`========================================`);
console.log(` Passed: ${passedCount}/${totalInvoices}`);
console.log(` Failed: ${failedCount}/${totalInvoices}`);
console.log(` Accuracy: ${accuracy.toFixed(1)}%`);
console.log(`----------------------------------------`);
console.log(` Total time: ${totalTimeSec.toFixed(1)}s`);
console.log(` Avg per inv: ${avgTimeSec.toFixed(1)}s`);
console.log(`========================================\n`);
});
export default tap.start();

View File

@@ -1,305 +0,0 @@
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
const OLLAMA_URL = 'http://localhost:11434';
const MODEL = 'openbmb/minicpm-v4.5:q8_0';
const EXTRACT_PROMPT = `You are a bank statement parser. Extract EVERY transaction from the table.
Read the Amount column carefully:
- "- 21,47 €" means DEBIT, output as: -21.47
- "+ 1.000,00 €" means CREDIT, output as: 1000.00
- European format: comma = decimal point
For each row output: {"date":"YYYY-MM-DD","counterparty":"NAME","amount":-21.47}
Do not skip any rows. Return ONLY the JSON array, no explanation.`;
interface ITransaction {
date: string;
counterparty: string;
amount: number;
}
/**
* Convert PDF to PNG images using ImageMagick
*/
function convertPdfToImages(pdfPath: string): string[] {
const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
const outputPattern = path.join(tempDir, 'page-%d.png');
try {
execSync(
`convert -density 300 -quality 100 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
{ stdio: 'pipe' }
);
const files = fs.readdirSync(tempDir).filter((f) => f.endsWith('.png')).sort();
const images: string[] = [];
for (const file of files) {
const imagePath = path.join(tempDir, file);
const imageData = fs.readFileSync(imagePath);
images.push(imageData.toString('base64'));
}
return images;
} finally {
fs.rmSync(tempDir, { recursive: true, force: true });
}
}
/**
* Single extraction pass
*/
async function extractOnce(images: string[], passNum: number): Promise<ITransaction[]> {
const payload = {
model: MODEL,
prompt: EXTRACT_PROMPT,
images,
stream: true,
options: {
num_predict: 16384,
temperature: 0.1,
},
};
const response = await fetch(`${OLLAMA_URL}/api/generate`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(payload),
});
if (!response.ok) {
throw new Error(`Ollama API error: ${response.status}`);
}
const reader = response.body?.getReader();
if (!reader) {
throw new Error('No response body');
}
const decoder = new TextDecoder();
let fullText = '';
let lineBuffer = '';
console.log(`[Pass ${passNum}] Extracting...`);
while (true) {
const { done, value } = await reader.read();
if (done) break;
const chunk = decoder.decode(value, { stream: true });
const lines = chunk.split('\n').filter((l) => l.trim());
for (const line of lines) {
try {
const json = JSON.parse(line);
if (json.response) {
fullText += json.response;
lineBuffer += json.response;
// Print complete lines
if (lineBuffer.includes('\n')) {
const parts = lineBuffer.split('\n');
for (let i = 0; i < parts.length - 1; i++) {
console.log(parts[i]);
}
lineBuffer = parts[parts.length - 1];
}
}
} catch {
// Skip invalid JSON lines
}
}
}
if (lineBuffer) {
console.log(lineBuffer);
}
console.log('');
const startIdx = fullText.indexOf('[');
const endIdx = fullText.lastIndexOf(']') + 1;
if (startIdx < 0 || endIdx <= startIdx) {
throw new Error('No JSON array found in response');
}
return JSON.parse(fullText.substring(startIdx, endIdx));
}
/**
* Create a hash of transactions for comparison
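* Order-independent: the "date|amount" strings are sorted before joining, e.g. "2024-01-03|-21.47;2024-01-05|100.00" (illustrative).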
*/
function hashTransactions(transactions: ITransaction[]): string {
return transactions
.map((t) => `${t.date}|${t.amount.toFixed(2)}`)
.sort()
.join(';');
}
/**
* Extract with majority voting - run until 2 passes match
*/
async function extractWithConsensus(images: string[], maxPasses: number = 5): Promise<ITransaction[]> {
const results: Array<{ transactions: ITransaction[]; hash: string }> = [];
const hashCounts: Map<string, number> = new Map();
for (let pass = 1; pass <= maxPasses; pass++) {
const transactions = await extractOnce(images, pass);
const hash = hashTransactions(transactions);
results.push({ transactions, hash });
hashCounts.set(hash, (hashCounts.get(hash) || 0) + 1);
console.log(`[Pass ${pass}] Got ${transactions.length} transactions (hash: ${hash.substring(0, 20)}...)`);
// Check if we have consensus (2+ matching)
const count = hashCounts.get(hash)!;
if (count >= 2) {
console.log(`[Consensus] Reached after ${pass} passes (${count} matching results)`);
return transactions;
}
// After 2 passes, if no match yet, continue
if (pass >= 2) {
console.log(`[Pass ${pass}] No consensus yet, trying again...`);
}
}
// No consensus reached - return the most common result
let bestHash = '';
let bestCount = 0;
for (const [hash, count] of hashCounts) {
if (count > bestCount) {
bestCount = count;
bestHash = hash;
}
}
const best = results.find((r) => r.hash === bestHash)!;
console.log(`[No consensus] Using most common result (${bestCount}/${maxPasses} passes)`);
return best.transactions;
}
/**
* Compare extracted transactions against expected
*/
function compareTransactions(
extracted: ITransaction[],
expected: ITransaction[]
): { matches: number; total: number; errors: string[] } {
const errors: string[] = [];
let matches = 0;
for (let i = 0; i < expected.length; i++) {
const exp = expected[i];
const ext = extracted[i];
if (!ext) {
errors.push(`Missing transaction ${i}: ${exp.date} ${exp.counterparty}`);
continue;
}
const dateMatch = ext.date === exp.date;
const amountMatch = Math.abs(ext.amount - exp.amount) < 0.01;
if (dateMatch && amountMatch) {
matches++;
} else {
errors.push(
`Mismatch at ${i}: expected ${exp.date}/${exp.amount}, got ${ext.date}/${ext.amount}`
);
}
}
if (extracted.length > expected.length) {
errors.push(`Extra transactions: ${extracted.length - expected.length}`);
}
return { matches, total: expected.length, errors };
}
/**
* Find all test cases (PDF + JSON pairs) in .nogit/
*/
function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: string }> {
const testDir = path.join(process.cwd(), '.nogit');
if (!fs.existsSync(testDir)) {
return [];
}
const files = fs.readdirSync(testDir);
const pdfFiles = files.filter((f) => f.endsWith('.pdf'));
const testCases: Array<{ name: string; pdfPath: string; jsonPath: string }> = [];
for (const pdf of pdfFiles) {
const baseName = pdf.replace('.pdf', '');
const jsonFile = `${baseName}.json`;
if (files.includes(jsonFile)) {
testCases.push({
name: baseName,
pdfPath: path.join(testDir, pdf),
jsonPath: path.join(testDir, jsonFile),
});
}
}
return testCases;
}
// Tests
tap.test('should connect to Ollama API', async () => {
const response = await fetch(`${OLLAMA_URL}/api/tags`);
expect(response.ok).toBeTrue();
const data = await response.json();
expect(data.models).toBeArray();
});
tap.test('should have MiniCPM-V 4.5 model loaded', async () => {
const response = await fetch(`${OLLAMA_URL}/api/tags`);
const data = await response.json();
const modelNames = data.models.map((m: { name: string }) => m.name);
expect(modelNames.some((name: string) => name.includes('minicpm-v4.5'))).toBeTrue();
});
// Dynamic test for each PDF/JSON pair
const testCases = findTestCases();
for (const testCase of testCases) {
tap.test(`should extract transactions from ${testCase.name}`, async () => {
// Load expected transactions
const expected: ITransaction[] = JSON.parse(fs.readFileSync(testCase.jsonPath, 'utf-8'));
console.log(`\n=== ${testCase.name} ===`);
console.log(`Expected: ${expected.length} transactions`);
// Convert PDF to images
console.log('Converting PDF to images...');
const images = convertPdfToImages(testCase.pdfPath);
console.log(`Converted: ${images.length} pages\n`);
// Extract with consensus voting
const extracted = await extractWithConsensus(images);
console.log(`\nFinal: ${extracted.length} transactions`);
// Compare results
const result = compareTransactions(extracted, expected);
console.log(`Accuracy: ${result.matches}/${result.total}`);
if (result.errors.length > 0) {
console.log('Errors:');
result.errors.forEach((e) => console.log(` - ${e}`));
}
// Assert high accuracy
const accuracy = result.matches / result.total;
expect(accuracy).toBeGreaterThan(0.95);
expect(extracted.length).toEqual(expected.length);
});
}
export default tap.start();