5 Commits

4c368dfef9  v1.11.0  (2026-01-18 04:50:57 +00:00)
  Some checks failed:
    Docker (tags) / security (push): Successful in 29s
    Docker (tags) / test (push): Failing after 40s
    Docker (tags) / release (push): Skipped
    Docker (tags) / metadata (push): Skipped

e76768da55  feat(vision): process pages separately and make Qwen3-VL vision extraction more robust; add per-page parsing, safer JSON handling, reduced token usage, and multi-query invoice extraction  (2026-01-18 04:50:57 +00:00)

63d72a52c9  update  (2026-01-18 04:28:57 +00:00)

386122c8c7  v1.10.1  (2026-01-18 04:17:30 +00:00)
  Some checks failed:
    Docker (tags) / security (push): Successful in 31s
    Docker (tags) / test (push): Failing after 40s
    Docker (tags) / release (push): Skipped
    Docker (tags) / metadata (push): Skipped

7c8f10497e  fix(tests): improve Qwen3-VL invoice extraction test by switching to non-stream API, adding model availability/pull checks, simplifying response parsing, and tightening model options  (2026-01-18 04:17:30 +00:00)
4 changed files with 396 additions and 134 deletions

View File

@@ -1,5 +1,26 @@
 # Changelog
+## 2026-01-18 - 1.11.0 - feat(vision)
+process pages separately and make Qwen3-VL vision extraction more robust; add per-page parsing, safer JSON handling, reduced token usage, and multi-query invoice extraction
+- Bank statements: split extraction into extractTransactionsFromPage and sequentially process pages to avoid thinking-token exhaustion
+- Bank statements: reduced num_predict from 8000 to 4000, send single image per request, added per-page logging and non-throwing handling for empty or non-JSON responses
+- Bank statements: catch JSON.parse errors and return empty array instead of throwing
+- Invoices: introduced queryField to request single values and perform multiple simple queries (reduces model thinking usage)
+- Invoices: reduced num_predict for invoice queries from 4000 to 500 and parse amounts robustly (handles European formats like 1.234,56)
+- Invoices: normalize currency to uppercase 3-letter code, return safe defaults (empty strings / 0) instead of nulls, and parse net/vat/total with fallbacks
+- General: simplified Ollama API error messages to avoid including response body content in thrown errors
+## 2026-01-18 - 1.10.1 - fix(tests)
+improve Qwen3-VL invoice extraction test by switching to non-stream API, adding model availability/pull checks, simplifying response parsing, and tightening model options
+- Replaced streaming reader logic with direct JSON parsing of the /api/chat response
+- Added ensureQwen3Vl() to check and pull the Qwen3-VL:8b model from Ollama
+- Switched to ensureMiniCpm() to verify Ollama service is running before model checks
+- Use /no_think prompt for direct JSON output and set temperature to 0.0 and num_predict to 512
+- Removed retry loop and streaming parsing; improved error messages to include response body
+- Updated logging and test setup messages for clarity
 ## 2026-01-18 - 1.10.0 - feat(vision)
 add Qwen3-VL vision model support with Dockerfile and tests; improve invoice OCR conversion and prompts; simplify extraction flow by removing consensus voting
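The European amount handling noted in the 1.11.0 entry reduces to one rule: if a comma appears to the right of the last dot, the comma is the decimal separator. A minimal standalone sketch of that rule (it mirrors the parseAmount helper added in the invoice test diff below; the sample inputs are illustrative):

```typescript
// Sketch of the European-format amount rule used by the invoice changes below.
const parseAmount = (s: string): number => {
  if (!s) return 0;
  const normalized =
    s.includes(',') && s.indexOf(',') > s.lastIndexOf('.')
      ? s.replace(/\./g, '').replace(',', '.') // European: "1.234,56" -> "1234.56"
      : s.replace(/,/g, '');                   // US:       "1,234.56" -> "1234.56"
  return parseFloat(normalized) || 0;
};

parseAmount('1.234,56'); // 1234.56
parseAmount('1,234.56'); // 1234.56
parseAmount('500');      // 500
```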

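The non-stream switch described under 1.10.1 similarly collapses response handling to a single JSON parse. A hedged sketch of the request shape used by the tests in this diff (endpoint, stream/think flags, and option values are taken from the changelog and test code; the prompt is a placeholder):

```typescript
// Sketch: non-streaming Ollama /api/chat call; one JSON body, no NDJSON reader.
const res = await fetch('http://localhost:11434/api/chat', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    model: 'qwen3-vl:8b',
    messages: [{ role: 'user', content: '/no_think Reply with just the value: 2+2' }],
    stream: false, // whole reply arrives as one JSON object
    think: false,  // belt-and-suspenders with the /no_think prompt prefix
    options: { temperature: 0.0, num_predict: 512 },
  }),
});
if (!res.ok) throw new Error(`Ollama API error: ${res.status}`);
const data = await res.json();
const reply = (data.message?.content || '').trim();
```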
View File

@@ -1,6 +1,6 @@
 {
   "name": "@host.today/ht-docker-ai",
-  "version": "1.10.0",
+  "version": "1.11.0",
   "type": "module",
   "private": false,
   "description": "Docker images for AI vision-language models including MiniCPM-V 4.5",

View File

@@ -0,0 +1,284 @@
/**
* Bank statement extraction using Qwen3-VL 8B Vision (Direct)
*
* Single-step pipeline: PDF → Images → Qwen3-VL → JSON
*
* Key insights:
* - Use /no_think in prompt + think:false in API to disable reasoning
 * - Process pages one at a time so a num_predict of 4000 per page is enough
* - Single pass extraction, no consensus needed
*/
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensureMiniCpm } from './helpers/docker.js';

const OLLAMA_URL = 'http://localhost:11434';
const VISION_MODEL = 'qwen3-vl:8b';

interface ITransaction {
  date: string;
  counterparty: string;
  amount: number;
}

/**
* Convert PDF to PNG images
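 * Requires the ImageMagick "convert" CLI on PATH (invoked via execSync below).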
*/
function convertPdfToImages(pdfPath: string): string[] {
  const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
  const outputPattern = path.join(tempDir, 'page-%d.png');
  try {
    execSync(
      `convert -density 150 -quality 90 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
      { stdio: 'pipe' }
    );
    // Numeric-aware sort so page-10 comes after page-9, not after page-1
    const files = fs.readdirSync(tempDir)
      .filter((f: string) => f.endsWith('.png'))
      .sort((a, b) => a.localeCompare(b, undefined, { numeric: true }));
    const images: string[] = [];
    for (const file of files) {
      const imagePath = path.join(tempDir, file);
      const imageData = fs.readFileSync(imagePath);
      images.push(imageData.toString('base64'));
    }
    return images;
  } finally {
    fs.rmSync(tempDir, { recursive: true, force: true });
  }
}

/**
* Extract transactions from a single page
* Processes one page at a time to minimize thinking tokens
*/
async function extractTransactionsFromPage(image: string, pageNum: number): Promise<ITransaction[]> {
  const prompt = `/no_think
Extract transactions from this bank statement page.
Amount: "- 21,47 €" = -21.47, "+ 1.000,00 €" = 1000.00 (European format)
Return JSON array only: [{"date":"YYYY-MM-DD","counterparty":"NAME","amount":-21.47},...]`;
  const response = await fetch(`${OLLAMA_URL}/api/chat`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: VISION_MODEL,
      messages: [{
        role: 'user',
        content: prompt,
        images: [image],
      }],
      stream: false,
      think: false,
      options: {
        num_predict: 4000,
        temperature: 0.1,
      },
    }),
  });
  if (!response.ok) {
    throw new Error(`Ollama API error: ${response.status}`);
  }
  const data = await response.json();
  let content = (data.message?.content || '').trim();
  if (!content) {
    console.log(`  [Page ${pageNum}] Empty response`);
    return [];
  }
  // Parse JSON array: strip optional markdown fences, then take the outermost [...] span
  if (content.startsWith('```json')) content = content.slice(7);
  else if (content.startsWith('```')) content = content.slice(3);
  if (content.endsWith('```')) content = content.slice(0, -3);
  content = content.trim();
  const startIdx = content.indexOf('[');
  const endIdx = content.lastIndexOf(']') + 1;
  if (startIdx < 0 || endIdx <= startIdx) {
    console.log(`  [Page ${pageNum}] No JSON array found`);
    return [];
  }
  try {
    const transactions = JSON.parse(content.substring(startIdx, endIdx));
    console.log(`  [Page ${pageNum}] Found ${transactions.length} transactions`);
    return transactions;
  } catch {
    console.log(`  [Page ${pageNum}] JSON parse error`);
    return [];
  }
}

/**
* Extract transactions using Qwen3-VL vision
* Processes each page separately to avoid thinking token exhaustion
*/
async function extractTransactions(images: string[]): Promise<ITransaction[]> {
  console.log(`  [Vision] Processing ${images.length} page(s) with Qwen3-VL`);
  const allTransactions: ITransaction[] = [];
  // Process pages sequentially to avoid overwhelming the model
  for (let i = 0; i < images.length; i++) {
    const pageTransactions = await extractTransactionsFromPage(images[i], i + 1);
    allTransactions.push(...pageTransactions);
  }
  console.log(`  [Vision] Total: ${allTransactions.length} transactions`);
  return allTransactions;
}

/**
* Compare transactions
*/
function compareTransactions(
  extracted: ITransaction[],
  expected: ITransaction[]
): { matches: number; total: number; errors: string[] } {
  const errors: string[] = [];
  let matches = 0;
  for (let i = 0; i < expected.length; i++) {
    const exp = expected[i];
    const ext = extracted[i];
    if (!ext) {
      errors.push(`Missing transaction ${i}: ${exp.date} ${exp.counterparty}`);
      continue;
    }
    const dateMatch = ext.date === exp.date;
    const amountMatch = Math.abs(ext.amount - exp.amount) < 0.01;
    if (dateMatch && amountMatch) {
      matches++;
    } else {
      errors.push(`Mismatch at ${i}: expected ${exp.date}/${exp.amount}, got ${ext.date}/${ext.amount}`);
    }
  }
  if (extracted.length > expected.length) {
    errors.push(`Extra transactions: ${extracted.length - expected.length}`);
  }
  return { matches, total: expected.length, errors };
}

/**
* Find test cases in .nogit/
*/
function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: string }> {
  const testDir = path.join(process.cwd(), '.nogit');
  if (!fs.existsSync(testDir)) return [];
  const files = fs.readdirSync(testDir);
  const testCases: Array<{ name: string; pdfPath: string; jsonPath: string }> = [];
  for (const pdf of files.filter((f: string) => f.endsWith('.pdf'))) {
    const baseName = pdf.replace('.pdf', '');
    const jsonFile = `${baseName}.json`;
    if (files.includes(jsonFile)) {
      testCases.push({
        name: baseName,
        pdfPath: path.join(testDir, pdf),
        jsonPath: path.join(testDir, jsonFile),
      });
    }
  }
  return testCases.sort((a, b) => a.name.localeCompare(b.name));
}

/**
* Ensure Qwen3-VL model is available
*/
async function ensureQwen3Vl(): Promise<boolean> {
  try {
    const response = await fetch(`${OLLAMA_URL}/api/tags`);
    if (response.ok) {
      const data = await response.json();
      const models = data.models || [];
      if (models.some((m: { name: string }) => m.name === VISION_MODEL)) {
        console.log(`[Ollama] Model available: ${VISION_MODEL}`);
        return true;
      }
    }
  } catch {
    return false;
  }
  console.log(`[Ollama] Pulling ${VISION_MODEL}...`);
  const pullResponse = await fetch(`${OLLAMA_URL}/api/pull`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ name: VISION_MODEL, stream: false }),
  });
  return pullResponse.ok;
}

// Tests
tap.test('setup: ensure Qwen3-VL is running', async () => {
  console.log('\n[Setup] Checking Qwen3-VL 8B...\n');
  const ollamaOk = await ensureMiniCpm();
  expect(ollamaOk).toBeTrue();
  const visionOk = await ensureQwen3Vl();
  expect(visionOk).toBeTrue();
  console.log('\n[Setup] Ready!\n');
});

const testCases = findTestCases();
console.log(`\nFound ${testCases.length} bank statement test cases (Qwen3-VL)\n`);
let passedCount = 0;
let failedCount = 0;
for (const testCase of testCases) {
  tap.test(`should extract: ${testCase.name}`, async () => {
    const expected: ITransaction[] = JSON.parse(fs.readFileSync(testCase.jsonPath, 'utf-8'));
    console.log(`\n=== ${testCase.name} ===`);
    console.log(`Expected: ${expected.length} transactions`);
    const images = convertPdfToImages(testCase.pdfPath);
    console.log(`  Pages: ${images.length}`);
    const extracted = await extractTransactions(images);
    console.log(`  Extracted: ${extracted.length} transactions`);
    const result = compareTransactions(extracted, expected);
    const accuracy = result.total > 0 ? result.matches / result.total : 0;
    if (accuracy >= 0.95 && extracted.length === expected.length) {
      passedCount++;
      console.log(`  Result: PASS (${result.matches}/${result.total})`);
    } else {
      failedCount++;
      console.log(`  Result: FAIL (${result.matches}/${result.total})`);
      result.errors.slice(0, 5).forEach((e) => console.log(`    - ${e}`));
    }
    // Threshold matches the PASS condition above (>= 0.95 and equal counts)
    expect(accuracy).toBeGreaterThanOrEqual(0.95);
    expect(extracted.length).toEqual(expected.length);
  });
}

tap.test('summary', async () => {
  const total = testCases.length;
  console.log(`\n======================================================`);
  console.log(`  Bank Statement Summary (Qwen3-VL Vision)`);
  console.log(`======================================================`);
  console.log(`  Passed: ${passedCount}/${total}`);
  console.log(`  Failed: ${failedCount}/${total}`);
  console.log(`======================================================\n`);
});

export default tap.start();
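Outside the tap harness, the pipeline above is two calls. A hypothetical standalone driver (the statement.pdf path is invented for illustration):

```typescript
// Hypothetical reuse of the helpers above (not part of the test file).
async function main() {
  const images = convertPdfToImages('./statement.pdf'); // assumed sample path
  const transactions = await extractTransactions(images);
  console.log(JSON.stringify(transactions, null, 2));
}
```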

View File

@@ -1,18 +1,17 @@
 /**
- * Invoice extraction using Qwen3-VL-8B Vision (Direct)
+ * Invoice extraction using Qwen3-VL 8B Vision (Direct)
  *
- * Qwen3-VL 8B is a capable vision-language model that fits in 15GB VRAM:
- * - Q4_K_M quantization (~5GB)
- * - Good balance of speed and accuracy
+ * Single-step pipeline: PDF → Images → Qwen3-VL → JSON
+ * Uses /no_think to disable reasoning mode for fast, direct responses.
  *
- * Pipeline: PDF → Images → Qwen3-VL → JSON
+ * Qwen3-VL outperforms PaddleOCR-VL on certain invoice formats.
  */
 import { tap, expect } from '@git.zone/tstest/tapbundle';
 import * as fs from 'fs';
 import * as path from 'path';
 import { execSync } from 'child_process';
 import * as os from 'os';
-import { ensureQwen3Vl } from './helpers/docker.js';
+import { ensureMiniCpm } from './helpers/docker.js';
 const OLLAMA_URL = 'http://localhost:11434';
 const VISION_MODEL = 'qwen3-vl:8b';
@@ -57,25 +56,25 @@ function convertPdfToImages(pdfPath: string): string[] {
 }
 /**
- * Single extraction attempt
+ * Query Qwen3-VL for a single field
+ * Uses simple prompts to minimize thinking tokens
  */
-async function tryExtractOnce(images: string[], prompt: string): Promise<string> {
+async function queryField(images: string[], question: string): Promise<string> {
   const response = await fetch(`${OLLAMA_URL}/api/chat`, {
     method: 'POST',
     headers: { 'Content-Type': 'application/json' },
     body: JSON.stringify({
       model: VISION_MODEL,
-      messages: [
-        {
-          role: 'user',
-          content: prompt,
-          images: images,
-        },
-      ],
-      stream: true,
+      messages: [{
+        role: 'user',
+        content: `/no_think\n${question} Reply with just the value, nothing else.`,
+        images: images,
+      }],
+      stream: false,
+      think: false,
       options: {
-        num_predict: 1024,
-        temperature: 0.1, // Slight randomness helps avoid stuck states
+        num_predict: 500,
+        temperature: 0.1,
       },
     }),
   });
@@ -84,126 +83,48 @@ async function tryExtractOnce(images: string[], prompt: string): Promise<string>
     throw new Error(`Ollama API error: ${response.status}`);
   }
-  const reader = response.body?.getReader();
-  if (!reader) {
-    throw new Error('No response body');
-  }
-  const decoder = new TextDecoder();
-  let fullText = '';
-  while (true) {
-    const { done, value } = await reader.read();
-    if (done) break;
-    const chunk = decoder.decode(value, { stream: true });
-    const lines = chunk.split('\n').filter((l) => l.trim());
-    for (const line of lines) {
-      try {
-        const json = JSON.parse(line);
-        if (json.message?.content) {
-          fullText += json.message.content;
-        }
-      } catch {
-        // Skip invalid JSON lines
-      }
-    }
-  }
-  return fullText;
+  const data = await response.json();
+  return (data.message?.content || '').trim();
 }
 /**
- * Extract invoice data directly from images using Qwen3-VL Vision
- * Includes retry logic for empty responses
+ * Extract invoice data using multiple simple queries
+ * Each query asks for 1-2 fields to minimize thinking tokens
+ * (Qwen3's thinking mode uses all tokens on complex prompts)
  */
 async function extractInvoiceFromImages(images: string[]): Promise<IInvoice> {
-  console.log(`  [Vision] Processing ${images.length} page(s) with Qwen3-VL`);
-  // JSON schema for structured output - force the model to output valid JSON
-  const invoiceSchema = {
-    type: 'object',
-    properties: {
-      invoice_number: { type: 'string' },
-      invoice_date: { type: 'string' },
-      vendor_name: { type: 'string' },
-      currency: { type: 'string' },
-      net_amount: { type: 'number' },
-      vat_amount: { type: 'number' },
-      total_amount: { type: 'number' },
-    },
-    required: ['invoice_number', 'invoice_date', 'vendor_name', 'currency', 'net_amount', 'vat_amount', 'total_amount'],
+  console.log(`  [Vision] Processing ${images.length} page(s) with Qwen3-VL (multi-query)`);
+  // Query each field separately to avoid excessive thinking tokens
+  const [invoiceNum, invoiceDate, vendor, currency, amounts] = await Promise.all([
+    queryField(images, 'What is the invoice number on this document?'),
+    queryField(images, 'What is the invoice date? Format as YYYY-MM-DD.'),
+    queryField(images, 'What company issued this invoice?'),
+    queryField(images, 'What currency is used? Answer EUR, USD, or GBP.'),
+    queryField(images, 'What are the net amount, VAT amount, and total amount? Format: net,vat,total'),
+  ]);
+  console.log(`  [Vision] Got: ${invoiceNum} | ${invoiceDate} | ${vendor} | ${currency}`);
+  // Parse amounts (format: "net,vat,total" or similar)
+  const amountMatch = amounts.match(/([\d.,]+)/g) || [];
+  const parseAmount = (s: string): number => {
+    if (!s) return 0;
+    // Handle European format: 1.234,56 → 1234.56
+    const normalized = s.includes(',') && s.indexOf(',') > s.lastIndexOf('.')
+      ? s.replace(/\./g, '').replace(',', '.')
+      : s.replace(/,/g, '');
+    return parseFloat(normalized) || 0;
   };
-  // Simple, direct prompt - don't overthink, just read the labeled fields
-  const prompt = `Extract invoice data from this image. Return JSON only.
-Find these fields:
-- invoice_number: The invoice/document number
-- invoice_date: Date in YYYY-MM-DD format
-- vendor_name: Company issuing the invoice
-- currency: EUR, USD, or GBP
-- net_amount: Amount before tax
-- vat_amount: Tax/VAT amount
-- total_amount: Final total amount
-Return: {"invoice_number":"...", "invoice_date":"YYYY-MM-DD", "vendor_name":"...", "currency":"EUR", "net_amount":0.00, "vat_amount":0.00, "total_amount":0.00}`;
-  // Retry logic for empty responses (model sometimes returns nothing)
-  const MAX_RETRIES = 3;
-  let fullText = '';
-  for (let attempt = 1; attempt <= MAX_RETRIES; attempt++) {
-    fullText = await tryExtractOnce(images, prompt);
-    if (fullText.trim().length > 0) {
-      console.log(`  [Attempt ${attempt}] Got ${fullText.length} chars`);
-      break;
-    }
-    console.log(`  [Attempt ${attempt}] Empty response, retrying...`);
-    // Small delay before retry
-    await new Promise((r) => setTimeout(r, 1000));
-  }
-  if (fullText.trim().length === 0) {
-    throw new Error(`Model returned empty response after ${MAX_RETRIES} attempts`);
-  }
-  // Parse JSON response
-  let jsonStr = fullText.trim();
-  if (jsonStr.startsWith('```json')) jsonStr = jsonStr.slice(7);
-  else if (jsonStr.startsWith('```')) jsonStr = jsonStr.slice(3);
-  if (jsonStr.endsWith('```')) jsonStr = jsonStr.slice(0, -3);
-  jsonStr = jsonStr.trim();
-  const startIdx = jsonStr.indexOf('{');
-  const endIdx = jsonStr.lastIndexOf('}') + 1;
-  if (startIdx < 0 || endIdx <= startIdx) {
-    throw new Error(`No JSON found in: ${fullText.substring(0, 500)}`);
-  }
-  const extractedJson = jsonStr.substring(startIdx, endIdx);
-  console.log(`  [Debug] Extracted JSON: ${extractedJson.substring(0, 200)}...`);
-  let parsed;
-  try {
-    parsed = JSON.parse(extractedJson);
-  } catch (e) {
-    throw new Error(`Invalid JSON: ${extractedJson.substring(0, 500)}`);
-  }
   return {
-    invoice_number: parsed.invoice_number || null,
-    invoice_date: parsed.invoice_date || null,
-    vendor_name: parsed.vendor_name || null,
-    currency: parsed.currency || 'EUR',
-    net_amount: parseFloat(parsed.net_amount) || 0,
-    vat_amount: parseFloat(parsed.vat_amount) || 0,
-    total_amount: parseFloat(parsed.total_amount) || 0,
+    invoice_number: invoiceNum || '',
+    invoice_date: invoiceDate || '',
+    vendor_name: vendor || '',
+    currency: (currency || 'EUR').toUpperCase().replace(/[^A-Z]/g, '').slice(0, 3) || 'EUR',
+    net_amount: parseAmount(amountMatch[0] || ''),
+    vat_amount: parseAmount(amountMatch[1] || ''),
+    total_amount: parseAmount(amountMatch[2] || amountMatch[0] || ''),
   };
 }
@@ -284,12 +205,48 @@ function findTestCases(): Array<{ name: string; pdfPath: string; jsonPath: strin
   return testCases.sort((a, b) => a.name.localeCompare(b.name));
 }
+/**
+ * Ensure Qwen3-VL 8B model is available
+ */
+async function ensureQwen3Vl(): Promise<boolean> {
+  try {
+    const response = await fetch(`${OLLAMA_URL}/api/tags`);
+    if (response.ok) {
+      const data = await response.json();
+      const models = data.models || [];
+      if (models.some((m: { name: string }) => m.name === VISION_MODEL)) {
+        console.log(`[Ollama] Model already available: ${VISION_MODEL}`);
+        return true;
+      }
+    }
+  } catch {
+    console.log('[Ollama] Cannot check models');
+    return false;
+  }
+  console.log(`[Ollama] Pulling model: ${VISION_MODEL}...`);
+  const pullResponse = await fetch(`${OLLAMA_URL}/api/pull`, {
+    method: 'POST',
+    headers: { 'Content-Type': 'application/json' },
+    body: JSON.stringify({ name: VISION_MODEL, stream: false }),
+  });
+  return pullResponse.ok;
+}
 // Tests
 tap.test('setup: ensure Qwen3-VL is running', async () => {
-  console.log('\n[Setup] Checking Qwen3-VL 8B (~5GB)...\n');
-  const ok = await ensureQwen3Vl();
-  expect(ok).toBeTrue();
+  console.log('\n[Setup] Checking Qwen3-VL 8B...\n');
+  // Ensure Ollama service is running
+  const ollamaOk = await ensureMiniCpm();
+  expect(ollamaOk).toBeTrue();
+  // Ensure Qwen3-VL 8B model
+  const visionOk = await ensureQwen3Vl();
+  expect(visionOk).toBeTrue();
   console.log('\n[Setup] Ready!\n');
 });
@@ -339,7 +296,7 @@ tap.test('summary', async () => {
   console.log(`\n======================================================`);
   console.log(`  Invoice Extraction Summary (Qwen3-VL Vision)`);
   console.log(`======================================================`);
-  console.log(`  Method: Qwen3-VL 8B (Direct Vision)`);
+  console.log(`  Method: Qwen3-VL 8B Direct Vision (/no_think)`);
   console.log(`  Passed: ${passedCount}/${total}`);
   console.log(`  Failed: ${failedCount}/${total}`);
   console.log(`  Accuracy: ${accuracy.toFixed(1)}%`);
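For reference, the currency normalization added in this diff tolerates verbose model replies. A few worked examples of the expression (the inputs are hypothetical model answers):

```typescript
// Same expression as in extractInvoiceFromImages above.
const normalizeCurrency = (raw: string): string =>
  (raw || 'EUR').toUpperCase().replace(/[^A-Z]/g, '').slice(0, 3) || 'EUR';

normalizeCurrency('eur');        // 'EUR'
normalizeCurrency('Euro (EUR)'); // 'EUR'  ("EUROEUR" sliced to 3)
normalizeCurrency('US Dollar');  // 'USD'  ("USDOLLAR" sliced to 3)
normalizeCurrency('');           // 'EUR'  (default)
```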