Compare commits

6 Commits

| Author | SHA1 | Date |
|---|---|---|
| | cf282b2437 | |
| | 77d57e80bd | |
| | b202e024a4 | |
| | 2210611f70 | |
| | d8bdb18841 | |
| | d384c1d79b | |

changelog.md (+16 lines)
@@ -1,5 +1,21 @@
 # Changelog
 
+## 2026-01-20 - 1.15.0 - feat(tests)
+integrate SmartAi/DualAgentOrchestrator into extraction tests and add JSON self-validation
+
+- Integrate SmartAi and DualAgentOrchestrator into bankstatement and invoice tests to perform structured extraction with streaming
+- Register and use JsonValidatorTool to validate outputs (json.validate) and enforce validation before task completion
+- Add tryExtractJson parsing fallback, improved extraction prompts, retries and clearer parsing/logging
+- Initialize and tear down SmartAi and the orchestrator in test setup/summary, and enable onToken streaming handlers for real-time output
+- Bump devDependencies: @push.rocks/smartagent to ^1.3.0 and @push.rocks/smartai to ^0.12.0
+
+## 2026-01-20 - 1.14.3 - fix(repo)
+no changes detected in the diff; no files modified and no release required
+
+- Diff contained no changes
+- No files were added, removed, or modified
+- No code, dependency, or documentation updates to release
+
 ## 2026-01-19 - 1.14.2 - fix(readme)
 update README to document Nanonets-OCR2-3B (replaces Nanonets-OCR-s), adjust VRAM and context defaults, expand feature docs, and update examples/test command
 
package.json

@@ -1,6 +1,6 @@
 {
   "name": "@host.today/ht-docker-ai",
-  "version": "1.14.2",
+  "version": "1.15.0",
   "type": "module",
   "private": false,
   "description": "Docker images for AI vision-language models including MiniCPM-V 4.5",
@@ -14,7 +14,9 @@
   },
   "devDependencies": {
     "@git.zone/tsrun": "^2.0.1",
-    "@git.zone/tstest": "^3.1.5"
+    "@git.zone/tstest": "^3.1.5",
+    "@push.rocks/smartagent": "^1.3.0",
+    "@push.rocks/smartai": "^0.12.0"
   },
   "repository": {
     "type": "git",
pnpm-lock.yaml (generated, 1170 lines changed)

File diff suppressed because it is too large.
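The test diffs below all adopt the flow described in the 1.15.0 changelog entry: start a SmartAi instance backed by the local Ollama endpoint, wrap it in a DualAgentOrchestrator, register JsonValidatorTool, run an extraction prompt that must pass json.validate before the driver may complete, then stop both. The following is a condensed sketch of that flow, assembled from the API usage visible in the diffs that follow; the model name, option values, and prompt text are illustrative stand-ins rather than the tests' exact values.

```typescript
import { SmartAi } from '@push.rocks/smartai';
import { DualAgentOrchestrator, JsonValidatorTool } from '@push.rocks/smartagent';

// Back the orchestrator with a local Ollama model (values are placeholders).
const smartAi = new SmartAi({
  ollama: {
    baseUrl: 'http://localhost:11434',
    model: 'openbmb/minicpm-v4.5:q8_0',
    defaultOptions: { num_ctx: 32768, temperature: 0.1 },
    defaultTimeout: 300000,
  },
});
await smartAi.start();

// Guardian approves json.validate calls; driver is told to validate before completing.
const orchestrator = new DualAgentOrchestrator({
  smartAiInstance: smartAi,
  defaultProvider: 'ollama',
  guardianPolicyPrompt: 'APPROVE json.validate tool calls; REJECT anything unrelated to extraction.',
  driverSystemMessage:
    'Extract the requested data as JSON, validate it with the json.validate tool, then wrap the final JSON in <task_complete> tags.',
  maxIterations: 5,
  onToken: (token, source) => {
    if (source === 'driver') process.stdout.write(token); // stream driver output live
  },
});
orchestrator.registerTool(new JsonValidatorTool());
await orchestrator.start();

// Run one extraction task; images are base64-encoded page renders.
const result = await orchestrator.run('Extract ALL transactions as a JSON array.', {
  images: [/* base64 page image */],
});
console.log(result.status, result.iterations, result.result);

await orchestrator.stop();
await smartAi.stop();
```

In the tests, validation is enforced through the driver prompt ("Only complete after validation passes") combined with a guardian policy that approves only json.validate tool calls.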
@@ -1,9 +1,10 @@
 /**
- * Bank statement extraction using MiniCPM-V (visual extraction)
+ * Bank statement extraction using MiniCPM-V via smartagent DualAgentOrchestrator
  *
- * JSON per-page approach:
- * 1. Ask for structured JSON of all transactions per page
- * 2. Consensus: extract twice, compare, retry if mismatch
+ * Uses vision-capable orchestrator with JsonValidatorTool for self-validation:
+ * 1. Process each page with the orchestrator
+ * 2. Driver extracts transactions and validates JSON before completing
+ * 3. Streaming output during extraction
  */
 import { tap, expect } from '@git.zone/tstest/tapbundle';
 import * as fs from 'fs';
@@ -11,6 +12,8 @@ import * as path from 'path';
 import { execSync } from 'child_process';
 import * as os from 'os';
 import { ensureMiniCpm } from './helpers/docker.js';
+import { SmartAi } from '@push.rocks/smartai';
+import { DualAgentOrchestrator, JsonValidatorTool } from '@push.rocks/smartagent';
 
 const OLLAMA_URL = 'http://localhost:11434';
 const MODEL = 'openbmb/minicpm-v4.5:q8_0';
@@ -21,21 +24,9 @@ interface ITransaction {
   amount: number;
 }
 
-const JSON_PROMPT = `Extract ALL transactions from this bank statement page as a JSON array.
-
-IMPORTANT RULES:
-1. Each transaction has: date, description/counterparty, and an amount
-2. Amount is NEGATIVE for money going OUT (debits, payments, withdrawals)
-3. Amount is POSITIVE for money coming IN (credits, deposits, refunds)
-4. Date format: YYYY-MM-DD
-5. Do NOT include: opening balance, closing balance, subtotals, headers, or summary rows
-6. Only include actual transactions with a specific date and amount
-
-Return ONLY this JSON format, no explanation:
-[
-  {"date": "2021-06-01", "counterparty": "COMPANY NAME", "amount": -25.99},
-  {"date": "2021-06-02", "counterparty": "DEPOSIT FROM", "amount": 100.00}
-]`;
+// SmartAi instance and orchestrator (initialized in setup)
+let smartAi: SmartAi;
+let orchestrator: DualAgentOrchestrator;
 
 /**
  * Convert PDF to PNG images using ImageMagick
@@ -65,206 +56,31 @@ function convertPdfToImages(pdfPath: string): string[] {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
const EXTRACTION_PROMPT = `Extract ALL transactions from this bank statement page as a JSON array.
|
||||||
* Query for JSON extraction
|
|
||||||
*/
|
|
||||||
async function queryJson(image: string, queryId: string): Promise<string> {
|
|
||||||
console.log(` [${queryId}] Sending request to ${MODEL}...`);
|
|
||||||
const startTime = Date.now();
|
|
||||||
|
|
||||||
const response = await fetch(`${OLLAMA_URL}/api/chat`, {
|
IMPORTANT RULES:
|
||||||
method: 'POST',
|
1. Each transaction has: date, counterparty (description), and an amount
|
||||||
headers: { 'Content-Type': 'application/json' },
|
2. Amount is NEGATIVE for money going OUT (debits, payments, withdrawals)
|
||||||
body: JSON.stringify({
|
3. Amount is POSITIVE for money coming IN (credits, deposits, refunds)
|
||||||
model: MODEL,
|
4. Date format: YYYY-MM-DD
|
||||||
messages: [{
|
5. Do NOT include: opening balance, closing balance, subtotals, headers, or summary rows
|
||||||
role: 'user',
|
6. Only include actual transactions with a specific date and amount
|
||||||
content: JSON_PROMPT,
|
|
||||||
images: [image],
|
|
||||||
}],
|
|
||||||
stream: false,
|
|
||||||
options: {
|
|
||||||
num_predict: 4000,
|
|
||||||
temperature: 0.1,
|
|
||||||
},
|
|
||||||
}),
|
|
||||||
});
|
|
||||||
|
|
||||||
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
|
Before completing, validate your JSON output:
|
||||||
|
|
||||||
if (!response.ok) {
|
<tool_call>
|
||||||
console.log(` [${queryId}] ERROR: ${response.status} (${elapsed}s)`);
|
<tool>json</tool>
|
||||||
throw new Error(`Ollama API error: ${response.status}`);
|
<action>validate</action>
|
||||||
}
|
<params>{"jsonString": "YOUR_JSON_ARRAY_HERE"}</params>
|
||||||
|
</tool_call>
|
||||||
|
|
||||||
const data = await response.json();
|
Output format (must be a valid JSON array):
|
||||||
const content = (data.message?.content || '').trim();
|
[
|
||||||
console.log(` [${queryId}] Response received (${elapsed}s, ${content.length} chars)`);
|
{"date": "2021-06-01", "counterparty": "COMPANY NAME", "amount": -25.99},
|
||||||
return content;
|
{"date": "2021-06-02", "counterparty": "DEPOSIT FROM", "amount": 100.00}
|
||||||
}
|
]
|
||||||
|
|
||||||
/**
|
Only complete after validation passes. Output the final JSON array in <task_complete> tags.`;
|
||||||
* Sanitize JSON string - fix common issues from vision model output
|
|
||||||
*/
|
|
||||||
function sanitizeJson(jsonStr: string): string {
|
|
||||||
let s = jsonStr;
|
|
||||||
|
|
||||||
// Fix +number (e.g., +93.80 -> 93.80) - JSON doesn't allow + prefix
|
|
||||||
// Handle various whitespace patterns
|
|
||||||
s = s.replace(/"amount"\s*:\s*\+/g, '"amount": ');
|
|
||||||
s = s.replace(/:\s*\+(\d)/g, ': $1');
|
|
||||||
|
|
||||||
// Fix European number format with thousands separator (e.g., 1.000.00 -> 1000.00)
|
|
||||||
// Pattern: "amount": X.XXX.XX where X.XXX is thousands and .XX is decimal
|
|
||||||
s = s.replace(/"amount"\s*:\s*(-?)(\d{1,3})\.(\d{3})\.(\d{2})\b/g, '"amount": $1$2$3.$4');
|
|
||||||
// Also handle larger numbers like 10.000.00
|
|
||||||
s = s.replace(/"amount"\s*:\s*(-?)(\d{1,3})\.(\d{3})\.(\d{3})\.(\d{2})\b/g, '"amount": $1$2$3$4.$5');
|
|
||||||
|
|
||||||
// Fix trailing commas before ] or }
|
|
||||||
s = s.replace(/,\s*([}\]])/g, '$1');
|
|
||||||
|
|
||||||
// Fix unescaped newlines inside strings (replace with space)
|
|
||||||
s = s.replace(/"([^"\\]*)\n([^"]*)"/g, '"$1 $2"');
|
|
||||||
|
|
||||||
// Fix unescaped tabs inside strings
|
|
||||||
s = s.replace(/"([^"\\]*)\t([^"]*)"/g, '"$1 $2"');
|
|
||||||
|
|
||||||
// Fix unescaped backslashes (but not already escaped ones)
|
|
||||||
s = s.replace(/\\(?!["\\/bfnrtu])/g, '\\\\');
|
|
||||||
|
|
||||||
// Fix common issues with counterparty names containing special chars
|
|
||||||
s = s.replace(/"counterparty":\s*"([^"]*)'([^"]*)"/g, '"counterparty": "$1$2"');
|
|
||||||
|
|
||||||
// Remove control characters except newlines (which we handle above)
|
|
||||||
s = s.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F]/g, ' ');
|
|
||||||
|
|
||||||
return s;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Parse JSON response into transactions
|
|
||||||
*/
|
|
||||||
function parseJsonResponse(response: string, queryId: string): ITransaction[] {
|
|
||||||
console.log(` [${queryId}] Parsing response...`);
|
|
||||||
|
|
||||||
// Try to find JSON in markdown code block
|
|
||||||
const codeBlockMatch = response.match(/```(?:json)?\s*([\s\S]*?)```/);
|
|
||||||
let jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : response.trim();
|
|
||||||
|
|
||||||
if (codeBlockMatch) {
|
|
||||||
console.log(` [${queryId}] Found JSON in code block`);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sanitize JSON (fix +number issue)
|
|
||||||
jsonStr = sanitizeJson(jsonStr);
|
|
||||||
|
|
||||||
try {
|
|
||||||
const parsed = JSON.parse(jsonStr);
|
|
||||||
if (Array.isArray(parsed)) {
|
|
||||||
const txs = parsed.map(tx => ({
|
|
||||||
date: String(tx.date || ''),
|
|
||||||
counterparty: String(tx.counterparty || tx.description || ''),
|
|
||||||
amount: parseAmount(tx.amount),
|
|
||||||
}));
|
|
||||||
console.log(` [${queryId}] Parsed ${txs.length} transactions (direct)`);
|
|
||||||
return txs;
|
|
||||||
}
|
|
||||||
console.log(` [${queryId}] Parsed JSON is not an array`);
|
|
||||||
} catch (e) {
|
|
||||||
const errMsg = (e as Error).message;
|
|
||||||
console.log(` [${queryId}] Direct parse failed: ${errMsg}`);
|
|
||||||
|
|
||||||
// Log problematic section with context
|
|
||||||
const posMatch = errMsg.match(/position (\d+)/);
|
|
||||||
if (posMatch) {
|
|
||||||
const pos = parseInt(posMatch[1]);
|
|
||||||
const start = Math.max(0, pos - 40);
|
|
||||||
const end = Math.min(jsonStr.length, pos + 40);
|
|
||||||
const context = jsonStr.substring(start, end);
|
|
||||||
const marker = ' '.repeat(pos - start) + '^';
|
|
||||||
console.log(` [${queryId}] Context around error position ${pos}:`);
|
|
||||||
console.log(` [${queryId}] ...${context}...`);
|
|
||||||
console.log(` [${queryId}] ${marker}`);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try to find JSON array pattern
|
|
||||||
const arrayMatch = jsonStr.match(/\[[\s\S]*\]/);
|
|
||||||
if (arrayMatch) {
|
|
||||||
console.log(` [${queryId}] Found array pattern, trying to parse...`);
|
|
||||||
const sanitizedArray = sanitizeJson(arrayMatch[0]);
|
|
||||||
try {
|
|
||||||
const parsed = JSON.parse(sanitizedArray);
|
|
||||||
if (Array.isArray(parsed)) {
|
|
||||||
const txs = parsed.map(tx => ({
|
|
||||||
date: String(tx.date || ''),
|
|
||||||
counterparty: String(tx.counterparty || tx.description || ''),
|
|
||||||
amount: parseAmount(tx.amount),
|
|
||||||
}));
|
|
||||||
console.log(` [${queryId}] Parsed ${txs.length} transactions (array match)`);
|
|
||||||
return txs;
|
|
||||||
}
|
|
||||||
} catch (e2) {
|
|
||||||
const errMsg2 = (e2 as Error).message;
|
|
||||||
console.log(` [${queryId}] Array parse failed: ${errMsg2}`);
|
|
||||||
const posMatch2 = errMsg2.match(/position (\d+)/);
|
|
||||||
if (posMatch2) {
|
|
||||||
const pos2 = parseInt(posMatch2[1]);
|
|
||||||
console.log(` [${queryId}] Context around error: ...${sanitizedArray.substring(Math.max(0, pos2 - 30), pos2 + 30)}...`);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try to extract individual objects from the malformed array
|
|
||||||
console.log(` [${queryId}] Attempting object-by-object extraction...`);
|
|
||||||
const extracted = extractTransactionsFromMalformedJson(sanitizedArray, queryId);
|
|
||||||
if (extracted.length > 0) {
|
|
||||||
console.log(` [${queryId}] Recovered ${extracted.length} transactions via object extraction`);
|
|
||||||
return extracted;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
console.log(` [${queryId}] No array pattern found in response`);
|
|
||||||
console.log(` [${queryId}] Raw response preview: ${response.substring(0, 200)}...`);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
console.log(` [${queryId}] PARSE FAILED - returning empty array`);
|
|
||||||
return [];
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Extract transactions from malformed JSON by parsing objects individually
|
|
||||||
*/
|
|
||||||
function extractTransactionsFromMalformedJson(jsonStr: string, queryId: string): ITransaction[] {
|
|
||||||
const transactions: ITransaction[] = [];
|
|
||||||
|
|
||||||
// Match individual transaction objects
|
|
||||||
const objectPattern = /\{\s*"date"\s*:\s*"([^"]+)"\s*,\s*"counterparty"\s*:\s*"([^"]+)"\s*,\s*"amount"\s*:\s*([+-]?\d+\.?\d*)\s*\}/g;
|
|
||||||
let match;
|
|
||||||
|
|
||||||
while ((match = objectPattern.exec(jsonStr)) !== null) {
|
|
||||||
transactions.push({
|
|
||||||
date: match[1],
|
|
||||||
counterparty: match[2],
|
|
||||||
amount: parseFloat(match[3]),
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
// Also try with different field orders (amount before counterparty, etc.)
|
|
||||||
if (transactions.length === 0) {
|
|
||||||
const altPattern = /\{\s*"date"\s*:\s*"([^"]+)"[^}]*"amount"\s*:\s*([+-]?\d+\.?\d*)[^}]*\}/g;
|
|
||||||
while ((match = altPattern.exec(jsonStr)) !== null) {
|
|
||||||
// Try to extract counterparty from the match
|
|
||||||
const counterpartyMatch = match[0].match(/"counterparty"\s*:\s*"([^"]+)"/);
|
|
||||||
const descMatch = match[0].match(/"description"\s*:\s*"([^"]+)"/);
|
|
||||||
transactions.push({
|
|
||||||
date: match[1],
|
|
||||||
counterparty: counterpartyMatch?.[1] || descMatch?.[1] || 'UNKNOWN',
|
|
||||||
amount: parseFloat(match[2]),
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return transactions;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Parse amount from various formats
|
* Parse amount from various formats
|
||||||
@@ -284,102 +100,101 @@ function parseAmount(value: unknown): number {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Compare two transaction arrays for consensus
|
* Extract JSON from response (handles markdown code blocks and task_complete tags)
|
||||||
*/
|
*/
|
||||||
function transactionArraysMatch(a: ITransaction[], b: ITransaction[]): boolean {
|
function extractJsonFromResponse(response: string): unknown[] | null {
|
||||||
if (a.length !== b.length) return false;
|
// Try to find JSON in task_complete tags
|
||||||
|
const completeMatch = response.match(/<task_complete>([\s\S]*?)<\/task_complete>/);
|
||||||
for (let i = 0; i < a.length; i++) {
|
if (completeMatch) {
|
||||||
const dateMatch = a[i].date === b[i].date;
|
const content = completeMatch[1].trim();
|
||||||
const amountMatch = Math.abs(a[i].amount - b[i].amount) < 0.01;
|
// Try to find JSON in the content
|
||||||
if (!dateMatch || !amountMatch) return false;
|
const codeBlockMatch = content.match(/```(?:json)?\s*([\s\S]*?)```/);
|
||||||
|
const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : content;
|
||||||
|
try {
|
||||||
|
const parsed = JSON.parse(jsonStr);
|
||||||
|
if (Array.isArray(parsed)) return parsed;
|
||||||
|
} catch {
|
||||||
|
// Try to find JSON array pattern
|
||||||
|
const jsonMatch = jsonStr.match(/\[[\s\S]*\]/);
|
||||||
|
if (jsonMatch) {
|
||||||
|
try {
|
||||||
|
const parsed = JSON.parse(jsonMatch[0]);
|
||||||
|
if (Array.isArray(parsed)) return parsed;
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return true;
|
// Try to find JSON in markdown code block
|
||||||
|
const codeBlockMatch = response.match(/```(?:json)?\s*([\s\S]*?)```/);
|
||||||
|
const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : response.trim();
|
||||||
|
|
||||||
|
try {
|
||||||
|
const parsed = JSON.parse(jsonStr);
|
||||||
|
if (Array.isArray(parsed)) return parsed;
|
||||||
|
} catch {
|
||||||
|
// Try to find JSON array pattern
|
||||||
|
const jsonMatch = jsonStr.match(/\[[\s\S]*\]/);
|
||||||
|
if (jsonMatch) {
|
||||||
|
try {
|
||||||
|
const parsed = JSON.parse(jsonMatch[0]);
|
||||||
|
if (Array.isArray(parsed)) return parsed;
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Compare two transaction arrays and log differences
|
* Parse JSON response into transactions
|
||||||
*/
|
*/
|
||||||
function compareAndLogDifferences(txs1: ITransaction[], txs2: ITransaction[], pageNum: number): void {
|
function parseJsonToTransactions(response: string): ITransaction[] {
|
||||||
if (txs1.length !== txs2.length) {
|
const parsed = extractJsonFromResponse(response);
|
||||||
console.log(` [Page ${pageNum}] Length mismatch: Q1=${txs1.length}, Q2=${txs2.length}`);
|
if (!parsed || !Array.isArray(parsed)) return [];
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
for (let i = 0; i < txs1.length; i++) {
|
return parsed.map((tx: any) => ({
|
||||||
const dateMatch = txs1[i].date === txs2[i].date;
|
date: String(tx.date || ''),
|
||||||
const amountMatch = Math.abs(txs1[i].amount - txs2[i].amount) < 0.01;
|
counterparty: String(tx.counterparty || tx.description || ''),
|
||||||
|
amount: parseAmount(tx.amount),
|
||||||
if (!dateMatch || !amountMatch) {
|
}));
|
||||||
console.log(` [Page ${pageNum}] Tx ${i + 1} differs:`);
|
|
||||||
console.log(` Q1: ${txs1[i].date} | ${txs1[i].amount}`);
|
|
||||||
console.log(` Q2: ${txs2[i].date} | ${txs2[i].amount}`);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Extract transactions from a single page with consensus
|
* Extract transactions from a single page using smartagent orchestrator
|
||||||
*/
|
*/
|
||||||
async function extractTransactionsFromPage(image: string, pageNum: number): Promise<ITransaction[]> {
|
async function extractTransactionsFromPage(image: string, pageNum: number): Promise<ITransaction[]> {
|
||||||
const MAX_ATTEMPTS = 5;
|
|
||||||
console.log(`\n ======== Page ${pageNum} ========`);
|
console.log(`\n ======== Page ${pageNum} ========`);
|
||||||
console.log(` [Page ${pageNum}] Starting JSON extraction...`);
|
|
||||||
|
|
||||||
for (let attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
|
const startTime = Date.now();
|
||||||
console.log(`\n [Page ${pageNum}] --- Attempt ${attempt}/${MAX_ATTEMPTS} ---`);
|
|
||||||
|
|
||||||
// Extract twice in parallel
|
const result = await orchestrator.run(EXTRACTION_PROMPT, { images: [image] });
|
||||||
const q1Id = `P${pageNum}A${attempt}Q1`;
|
|
||||||
const q2Id = `P${pageNum}A${attempt}Q2`;
|
|
||||||
|
|
||||||
const [response1, response2] = await Promise.all([
|
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
|
||||||
queryJson(image, q1Id),
|
console.log(`\n [Page ${pageNum}] Completed in ${elapsed}s (${result.iterations} iterations, status: ${result.status})`);
|
||||||
queryJson(image, q2Id),
|
|
||||||
]);
|
|
||||||
|
|
||||||
const txs1 = parseJsonResponse(response1, q1Id);
|
const transactions = parseJsonToTransactions(result.result);
|
||||||
const txs2 = parseJsonResponse(response2, q2Id);
|
|
||||||
|
|
||||||
console.log(` [Page ${pageNum}] Results: Q1=${txs1.length} txs, Q2=${txs2.length} txs`);
|
console.log(` [Page ${pageNum}] Extracted ${transactions.length} transactions:`);
|
||||||
|
for (let i = 0; i < Math.min(transactions.length, 10); i++) {
|
||||||
if (txs1.length > 0 && transactionArraysMatch(txs1, txs2)) {
|
const tx = transactions[i];
|
||||||
console.log(` [Page ${pageNum}] ✓ CONSENSUS REACHED: ${txs1.length} transactions`);
|
|
||||||
console.log(` [Page ${pageNum}] Transactions:`);
|
|
||||||
for (let i = 0; i < txs1.length; i++) {
|
|
||||||
const tx = txs1[i];
|
|
||||||
console.log(` ${(i + 1).toString().padStart(2)}. ${tx.date} | ${tx.counterparty.substring(0, 30).padEnd(30)} | ${tx.amount >= 0 ? '+' : ''}${tx.amount.toFixed(2)}`);
|
console.log(` ${(i + 1).toString().padStart(2)}. ${tx.date} | ${tx.counterparty.substring(0, 30).padEnd(30)} | ${tx.amount >= 0 ? '+' : ''}${tx.amount.toFixed(2)}`);
|
||||||
}
|
}
|
||||||
return txs1;
|
if (transactions.length > 10) {
|
||||||
|
console.log(` ... and ${transactions.length - 10} more transactions`);
|
||||||
}
|
}
|
||||||
|
|
||||||
console.log(` [Page ${pageNum}] ✗ NO CONSENSUS`);
|
return transactions;
|
||||||
compareAndLogDifferences(txs1, txs2, pageNum);
|
|
||||||
|
|
||||||
if (attempt < MAX_ATTEMPTS) {
|
|
||||||
console.log(` [Page ${pageNum}] Retrying...`);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fallback: use last response
|
|
||||||
console.log(`\n [Page ${pageNum}] === FALLBACK (no consensus after ${MAX_ATTEMPTS} attempts) ===`);
|
|
||||||
const fallbackId = `P${pageNum}FALLBACK`;
|
|
||||||
const fallbackResponse = await queryJson(image, fallbackId);
|
|
||||||
const fallback = parseJsonResponse(fallbackResponse, fallbackId);
|
|
||||||
console.log(` [Page ${pageNum}] ~ FALLBACK RESULT: ${fallback.length} transactions`);
|
|
||||||
for (let i = 0; i < fallback.length; i++) {
|
|
||||||
const tx = fallback[i];
|
|
||||||
console.log(` ${(i + 1).toString().padStart(2)}. ${tx.date} | ${tx.counterparty.substring(0, 30).padEnd(30)} | ${tx.amount >= 0 ? '+' : ''}${tx.amount.toFixed(2)}`);
|
|
||||||
}
|
|
||||||
return fallback;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Extract all transactions from bank statement
|
* Extract all transactions from bank statement
|
||||||
*/
|
*/
|
||||||
async function extractTransactions(images: string[]): Promise<ITransaction[]> {
|
async function extractTransactions(images: string[]): Promise<ITransaction[]> {
|
||||||
console.log(` [Vision] Processing ${images.length} page(s) with ${MODEL} (JSON consensus)`);
|
console.log(` [Vision] Processing ${images.length} page(s) with smartagent DualAgentOrchestrator`);
|
||||||
|
|
||||||
const allTransactions: ITransaction[] = [];
|
const allTransactions: ITransaction[] = [];
|
||||||
|
|
||||||
@@ -474,6 +289,80 @@ tap.test('setup: ensure Docker containers are running', async () => {
|
|||||||
console.log('\n[Setup] All containers ready!\n');
|
console.log('\n[Setup] All containers ready!\n');
|
||||||
});
|
});
|
||||||
|
|
||||||
|
tap.test('setup: initialize smartagent orchestrator', async () => {
|
||||||
|
console.log('[Setup] Initializing SmartAi and DualAgentOrchestrator...');
|
||||||
|
|
||||||
|
smartAi = new SmartAi({
|
||||||
|
ollama: {
|
||||||
|
baseUrl: OLLAMA_URL,
|
||||||
|
model: MODEL,
|
||||||
|
defaultOptions: {
|
||||||
|
num_ctx: 32768,
|
||||||
|
num_predict: 4000,
|
||||||
|
temperature: 0.1,
|
||||||
|
},
|
||||||
|
defaultTimeout: 300000, // 5 minutes for vision tasks
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
await smartAi.start();
|
||||||
|
|
||||||
|
orchestrator = new DualAgentOrchestrator({
|
||||||
|
smartAiInstance: smartAi,
|
||||||
|
defaultProvider: 'ollama',
|
||||||
|
guardianPolicyPrompt: `You are a Guardian agent overseeing bank statement extraction tasks.
|
||||||
|
|
||||||
|
APPROVE all tool calls that:
|
||||||
|
- Use the json.validate action to verify JSON output
|
||||||
|
- Are reasonable attempts to complete the extraction task
|
||||||
|
|
||||||
|
REJECT tool calls that:
|
||||||
|
- Attempt to access external resources
|
||||||
|
- Try to execute arbitrary code
|
||||||
|
- Are clearly unrelated to bank statement extraction`,
|
||||||
|
driverSystemMessage: `You are an AI assistant that extracts bank transactions from statement images.
|
||||||
|
|
||||||
|
Your task is to analyze bank statement images and extract transaction data.
|
||||||
|
You have access to a json.validate tool to verify your JSON output.
|
||||||
|
|
||||||
|
IMPORTANT: Always validate your JSON before completing the task.
|
||||||
|
|
||||||
|
## Tool Usage Format
|
||||||
|
When you need to validate JSON, output:
|
||||||
|
|
||||||
|
<tool_call>
|
||||||
|
<tool>json</tool>
|
||||||
|
<action>validate</action>
|
||||||
|
<params>{"jsonString": "YOUR_JSON_ARRAY"}</params>
|
||||||
|
</tool_call>
|
||||||
|
|
||||||
|
## Completion Format
|
||||||
|
After validation passes, complete the task:
|
||||||
|
|
||||||
|
<task_complete>
|
||||||
|
[{"date": "YYYY-MM-DD", "counterparty": "...", "amount": -123.45}, ...]
|
||||||
|
</task_complete>`,
|
||||||
|
maxIterations: 5,
|
||||||
|
maxConsecutiveRejections: 3,
|
||||||
|
onToken: (token, source) => {
|
||||||
|
if (source === 'driver') {
|
||||||
|
process.stdout.write(token);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
onProgress: (event) => {
|
||||||
|
if (event.logLevel === 'error') {
|
||||||
|
console.error(event.logMessage);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
// Register the JsonValidatorTool
|
||||||
|
orchestrator.registerTool(new JsonValidatorTool());
|
||||||
|
|
||||||
|
await orchestrator.start();
|
||||||
|
console.log('[Setup] Orchestrator initialized!\n');
|
||||||
|
});
|
||||||
|
|
||||||
tap.test('should have MiniCPM-V model loaded', async () => {
|
tap.test('should have MiniCPM-V model loaded', async () => {
|
||||||
const response = await fetch(`${OLLAMA_URL}/api/tags`);
|
const response = await fetch(`${OLLAMA_URL}/api/tags`);
|
||||||
const data = await response.json();
|
const data = await response.json();
|
||||||
@@ -482,7 +371,7 @@ tap.test('should have MiniCPM-V model loaded', async () => {
 });
 
 const testCases = findTestCases();
-console.log(`\nFound ${testCases.length} bank statement test cases (MiniCPM-V)\n`);
+console.log(`\nFound ${testCases.length} bank statement test cases (smartagent + MiniCPM-V)\n`);
 
 let passedCount = 0;
 let failedCount = 0;
@@ -514,7 +403,10 @@ for (const testCase of testCases) {
     // Log counterparty variations (names that differ but date/amount matched)
     if (result.variations.length > 0) {
       console.log(`    Counterparty variations (${result.variations.length}):`);
-      result.variations.forEach((v) => console.log(`      ${v}`));
+      result.variations.slice(0, 5).forEach((v) => console.log(`      ${v}`));
+      if (result.variations.length > 5) {
+        console.log(`      ... and ${result.variations.length - 5} more variations`);
+      }
     }
 
     expect(result.matches).toEqual(result.total);
@@ -522,12 +414,20 @@ for (const testCase of testCases) {
   });
 }
 
+tap.test('cleanup: stop orchestrator', async () => {
+  if (orchestrator) {
+    await orchestrator.stop();
+  }
+  console.log('[Cleanup] Orchestrator stopped');
+});
+
 tap.test('summary', async () => {
   const total = testCases.length;
   console.log(`\n======================================================`);
-  console.log(`  Bank Statement Summary (${MODEL})`);
+  console.log(`  Bank Statement Summary`);
+  console.log(`  (smartagent + ${MODEL})`);
   console.log(`======================================================`);
-  console.log(`  Method: JSON per-page + consensus`);
+  console.log(`  Method: DualAgentOrchestrator with vision`);
   console.log(`  Passed: ${passedCount}/${total}`);
   console.log(`  Failed: ${failedCount}/${total}`);
   console.log(`======================================================\n`);
@@ -11,7 +11,9 @@ import * as fs from 'fs';
 import * as path from 'path';
 import { execSync } from 'child_process';
 import * as os from 'os';
-import { ensureNanonetsOcr, ensureMiniCpm, removeContainer, isContainerRunning } from './helpers/docker.js';
+import { ensureNanonetsOcr, ensureMiniCpm, isContainerRunning } from './helpers/docker.js';
+import { SmartAi } from '@push.rocks/smartai';
+import { DualAgentOrchestrator, JsonValidatorTool } from '@push.rocks/smartagent';
 
 const NANONETS_URL = 'http://localhost:8000/v1';
 const NANONETS_MODEL = 'nanonets/Nanonets-OCR2-3B';
@@ -22,6 +24,22 @@ const EXTRACTION_MODEL = 'gpt-oss:20b';
 // Temp directory for storing markdown between stages
 const TEMP_MD_DIR = path.join(os.tmpdir(), 'nanonets-markdown');
 
+// SmartAi instance for Ollama with optimized settings
+const smartAi = new SmartAi({
+  ollama: {
+    baseUrl: OLLAMA_URL,
+    model: EXTRACTION_MODEL,
+    defaultOptions: {
+      num_ctx: 32768, // Larger context for long statements + thinking
+      temperature: 0, // Deterministic for JSON extraction
+    },
+    defaultTimeout: 600000, // 10 minute timeout for large documents
+  },
+});
+
+// DualAgentOrchestrator for structured task execution
+let orchestrator: DualAgentOrchestrator;
+
 interface ITransaction {
   date: string;
   counterparty: string;
@@ -252,95 +270,107 @@ async function ensureExtractionModel(): Promise<boolean> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Extract transactions from markdown using GPT-OSS 20B (streaming)
|
* Try to extract valid JSON from a response string
|
||||||
|
*/
|
||||||
|
function tryExtractJson(response: string): unknown[] | null {
|
||||||
|
// Remove thinking tags
|
||||||
|
let clean = response.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
|
||||||
|
|
||||||
|
// Try task_complete tags first
|
||||||
|
const completeMatch = clean.match(/<task_complete>([\s\S]*?)<\/task_complete>/);
|
||||||
|
if (completeMatch) {
|
||||||
|
clean = completeMatch[1].trim();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try code block
|
||||||
|
const codeBlockMatch = clean.match(/```(?:json)?\s*([\s\S]*?)```/);
|
||||||
|
const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : clean;
|
||||||
|
|
||||||
|
try {
|
||||||
|
const parsed = JSON.parse(jsonStr);
|
||||||
|
if (Array.isArray(parsed)) return parsed;
|
||||||
|
} catch {
|
||||||
|
// Try to find JSON array
|
||||||
|
const jsonMatch = jsonStr.match(/\[[\s\S]*\]/);
|
||||||
|
if (jsonMatch) {
|
||||||
|
try {
|
||||||
|
const parsed = JSON.parse(sanitizeJson(jsonMatch[0]));
|
||||||
|
if (Array.isArray(parsed)) return parsed;
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract transactions from markdown using smartagent DualAgentOrchestrator
|
||||||
|
* Validates JSON and retries if invalid
|
||||||
*/
|
*/
|
||||||
async function extractTransactionsFromMarkdown(markdown: string, queryId: string): Promise<ITransaction[]> {
|
async function extractTransactionsFromMarkdown(markdown: string, queryId: string): Promise<ITransaction[]> {
|
||||||
const startTime = Date.now();
|
const startTime = Date.now();
|
||||||
|
|
||||||
console.log(` [${queryId}] Statement: ${markdown.length} chars, Prompt: ${JSON_EXTRACTION_PROMPT.length} chars`);
|
console.log(` [${queryId}] Statement: ${markdown.length} chars`);
|
||||||
|
|
||||||
const response = await fetch(`${OLLAMA_URL}/api/chat`, {
|
// Build the extraction task with document context
|
||||||
method: 'POST',
|
const taskPrompt = `Extract all transactions from this bank statement document and output ONLY the JSON array:
|
||||||
headers: { 'Content-Type': 'application/json' },
|
|
||||||
body: JSON.stringify({
|
|
||||||
model: EXTRACTION_MODEL,
|
|
||||||
messages: [
|
|
||||||
{ role: 'user', content: 'Hi there, how are you?' },
|
|
||||||
{ role: 'assistant', content: 'Good, how can I help you today?' },
|
|
||||||
{ role: 'user', content: `Here is a bank statement document:\n\n${markdown}` },
|
|
||||||
{ role: 'assistant', content: 'I have read the bank statement document you provided. I can see all the transaction data. What would you like me to do with it?' },
|
|
||||||
{ role: 'user', content: JSON_EXTRACTION_PROMPT },
|
|
||||||
],
|
|
||||||
stream: true,
|
|
||||||
options: {
|
|
||||||
num_ctx: 32768, // Larger context for long statements + thinking
|
|
||||||
temperature: 0, // Deterministic for JSON extraction
|
|
||||||
},
|
|
||||||
}),
|
|
||||||
signal: AbortSignal.timeout(600000), // 10 minute timeout
|
|
||||||
});
|
|
||||||
|
|
||||||
if (!response.ok) {
|
${markdown}
|
||||||
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
|
|
||||||
console.log(` [${queryId}] ERROR: ${response.status} (${elapsed}s)`);
|
|
||||||
throw new Error(`Ollama API error: ${response.status}`);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stream the response
|
${JSON_EXTRACTION_PROMPT}
|
||||||
let content = '';
|
|
||||||
let thinkingContent = '';
|
Before completing, validate your JSON using the json.validate tool:
|
||||||
let thinkingStarted = false;
|
|
||||||
let outputStarted = false;
|
<tool_call>
|
||||||
const reader = response.body!.getReader();
|
<tool>json</tool>
|
||||||
const decoder = new TextDecoder();
|
<action>validate</action>
|
||||||
|
<params>{"jsonString": "YOUR_JSON_ARRAY_HERE"}</params>
|
||||||
|
</tool_call>
|
||||||
|
|
||||||
|
Only complete after validation passes. Output the final JSON array in <task_complete></task_complete> tags.`;
|
||||||
|
|
||||||
try {
|
try {
|
||||||
while (true) {
|
const result = await orchestrator.run(taskPrompt);
|
||||||
const { done, value } = await reader.read();
|
|
||||||
if (done) break;
|
|
||||||
|
|
||||||
const chunk = decoder.decode(value, { stream: true });
|
|
||||||
|
|
||||||
// Each line is a JSON object
|
|
||||||
for (const line of chunk.split('\n').filter(l => l.trim())) {
|
|
||||||
try {
|
|
||||||
const json = JSON.parse(line);
|
|
||||||
|
|
||||||
// Stream thinking tokens
|
|
||||||
const thinking = json.message?.thinking || '';
|
|
||||||
if (thinking) {
|
|
||||||
if (!thinkingStarted) {
|
|
||||||
process.stdout.write(` [${queryId}] THINKING: `);
|
|
||||||
thinkingStarted = true;
|
|
||||||
}
|
|
||||||
process.stdout.write(thinking);
|
|
||||||
thinkingContent += thinking;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stream content tokens
|
|
||||||
const token = json.message?.content || '';
|
|
||||||
if (token) {
|
|
||||||
if (!outputStarted) {
|
|
||||||
if (thinkingStarted) process.stdout.write('\n');
|
|
||||||
process.stdout.write(` [${queryId}] OUTPUT: `);
|
|
||||||
outputStarted = true;
|
|
||||||
}
|
|
||||||
process.stdout.write(token);
|
|
||||||
content += token;
|
|
||||||
}
|
|
||||||
} catch {
|
|
||||||
// Ignore parse errors for partial chunks
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} finally {
|
|
||||||
if (thinkingStarted || outputStarted) process.stdout.write('\n');
|
|
||||||
}
|
|
||||||
|
|
||||||
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
|
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
|
||||||
console.log(` [${queryId}] Done: ${thinkingContent.length} thinking chars, ${content.length} output chars (${elapsed}s)`);
|
console.log(` [${queryId}] Status: ${result.status}, Iterations: ${result.iterations} (${elapsed}s)`);
|
||||||
|
|
||||||
return parseJsonResponse(content, queryId);
|
// Try to parse JSON from result
|
||||||
|
let jsonData: unknown[] | null = null;
|
||||||
|
let responseText = result.result || '';
|
||||||
|
|
||||||
|
if (result.success && responseText) {
|
||||||
|
jsonData = tryExtractJson(responseText);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fallback: try parsing from history
|
||||||
|
if (!jsonData && result.history?.length > 0) {
|
||||||
|
const lastMessage = result.history[result.history.length - 1];
|
||||||
|
if (lastMessage?.content) {
|
||||||
|
responseText = lastMessage.content;
|
||||||
|
jsonData = tryExtractJson(responseText);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!jsonData) {
|
||||||
|
console.log(` [${queryId}] Failed to parse JSON`);
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert to transactions
|
||||||
|
const txs = jsonData.map((tx: any) => ({
|
||||||
|
date: String(tx.date || ''),
|
||||||
|
counterparty: String(tx.counterparty || tx.description || ''),
|
||||||
|
amount: parseAmount(tx.amount),
|
||||||
|
}));
|
||||||
|
console.log(` [${queryId}] Parsed ${txs.length} transactions`);
|
||||||
|
return txs;
|
||||||
|
} catch (error) {
|
||||||
|
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
|
||||||
|
console.log(` [${queryId}] ERROR: ${error} (${elapsed}s)`);
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -591,6 +621,53 @@ tap.test('Stage 2: Setup Ollama + GPT-OSS 20B', async () => {
|
|||||||
|
|
||||||
const extractionOk = await ensureExtractionModel();
|
const extractionOk = await ensureExtractionModel();
|
||||||
expect(extractionOk).toBeTrue();
|
expect(extractionOk).toBeTrue();
|
||||||
|
|
||||||
|
// Initialize SmartAi and DualAgentOrchestrator
|
||||||
|
console.log(' [SmartAgent] Starting SmartAi...');
|
||||||
|
await smartAi.start();
|
||||||
|
|
||||||
|
console.log(' [SmartAgent] Creating DualAgentOrchestrator...');
|
||||||
|
orchestrator = new DualAgentOrchestrator({
|
||||||
|
smartAiInstance: smartAi,
|
||||||
|
defaultProvider: 'ollama',
|
||||||
|
guardianPolicyPrompt: `
|
||||||
|
JSON EXTRACTION POLICY:
|
||||||
|
- APPROVE all JSON extraction tasks
|
||||||
|
- APPROVE all json.validate tool calls
|
||||||
|
- This is a read-only operation - no file system or network access needed
|
||||||
|
- The task is to extract structured transaction data from document text
|
||||||
|
`,
|
||||||
|
driverSystemMessage: `You are a precise JSON extraction assistant. Your only job is to extract transaction data from bank statements.
|
||||||
|
|
||||||
|
CRITICAL RULES:
|
||||||
|
1. Output valid JSON array with the exact format requested
|
||||||
|
2. Amounts should be NEGATIVE for debits/withdrawals, POSITIVE for credits/deposits
|
||||||
|
3. IMPORTANT: Before completing, validate your JSON using the json.validate tool:
|
||||||
|
|
||||||
|
<tool_call>
|
||||||
|
<tool>json</tool>
|
||||||
|
<action>validate</action>
|
||||||
|
<params>{"jsonString": "YOUR_JSON_ARRAY"}</params>
|
||||||
|
</tool_call>
|
||||||
|
|
||||||
|
4. Only complete after validation passes
|
||||||
|
|
||||||
|
When done, wrap your JSON array in <task_complete></task_complete> tags.`,
|
||||||
|
maxIterations: 5,
|
||||||
|
// Enable streaming for real-time progress visibility
|
||||||
|
onToken: (token, source) => {
|
||||||
|
if (source === 'driver') {
|
||||||
|
process.stdout.write(token);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
// Register JsonValidatorTool for self-validation
|
||||||
|
orchestrator.registerTool(new JsonValidatorTool());
|
||||||
|
|
||||||
|
console.log(' [SmartAgent] Starting orchestrator...');
|
||||||
|
await orchestrator.start();
|
||||||
|
console.log(' [SmartAgent] Ready for extraction');
|
||||||
});
|
});
|
||||||
|
|
||||||
let passedCount = 0;
|
let passedCount = 0;
|
||||||
@@ -642,11 +719,19 @@ for (const tc of testCases) {
 }
 
 tap.test('Summary', async () => {
+  // Cleanup orchestrator and SmartAi
+  if (orchestrator) {
+    console.log('\n  [SmartAgent] Stopping orchestrator...');
+    await orchestrator.stop();
+  }
+  console.log('  [SmartAgent] Stopping SmartAi...');
+  await smartAi.stop();
+
   console.log(`\n======================================================`);
-  console.log(`  Bank Statement Summary (Nanonets + GPT-OSS 20B Sequential)`);
+  console.log(`  Bank Statement Summary (Nanonets + SmartAgent)`);
   console.log(`======================================================`);
   console.log(`  Stage 1: Nanonets-OCR-s (document -> markdown)`);
-  console.log(`  Stage 2: GPT-OSS 20B (markdown -> JSON)`);
+  console.log(`  Stage 2: GPT-OSS 20B + SmartAgent (markdown -> JSON)`);
   console.log(`  Passed: ${passedCount}/${testCases.length}`);
   console.log(`  Failed: ${failedCount}/${testCases.length}`);
   console.log(`======================================================\n`);
@@ -1,10 +1,10 @@
 /**
- * Invoice extraction test using MiniCPM-V (visual extraction)
+ * Invoice extraction test using MiniCPM-V via smartagent DualAgentOrchestrator
  *
- * Consensus approach:
- * 1. Pass 1: Fast JSON extraction
- * 2. Pass 2: Confirm with thinking enabled
- * 3. If mismatch: repeat until consensus or max attempts
+ * Uses vision-capable orchestrator with JsonValidatorTool for self-validation:
+ * 1. Pass images to the orchestrator
+ * 2. Driver extracts invoice data and validates JSON before completing
+ * 3. If validation fails, driver retries within the same task
  */
 import { tap, expect } from '@git.zone/tstest/tapbundle';
 import * as fs from 'fs';
@@ -12,6 +12,8 @@ import * as path from 'path';
 import { execSync } from 'child_process';
 import * as os from 'os';
 import { ensureMiniCpm } from './helpers/docker.js';
+import { SmartAi } from '@push.rocks/smartai';
+import { DualAgentOrchestrator, JsonValidatorTool } from '@push.rocks/smartagent';
 
 const OLLAMA_URL = 'http://localhost:11434';
 const MODEL = 'openbmb/minicpm-v4.5:q8_0';
@@ -26,6 +28,10 @@ interface IInvoice {
   total_amount: number;
 }
 
+// SmartAi instance and orchestrator (initialized in setup)
+let smartAi: SmartAi;
+let orchestrator: DualAgentOrchestrator;
+
 /**
  * Convert PDF to PNG images using ImageMagick
  */
@@ -54,7 +60,9 @@ function convertPdfToImages(pdfPath: string): string[] {
   }
 }
 
-const JSON_PROMPT = `Extract invoice data from this image. Return ONLY a JSON object with these exact fields:
+const EXTRACTION_PROMPT = `Extract invoice data from the provided image(s).
+
+IMPORTANT: You must output a valid JSON object with these exact fields:
 {
   "invoice_number": "the invoice number (not VAT ID, not customer ID)",
   "invoice_date": "YYYY-MM-DD format",
@@ -64,67 +72,16 @@ const JSON_PROMPT = `Extract invoice data from this image. Return ONLY a JSON ob
|
|||||||
"vat_amount": 0.00,
|
"vat_amount": 0.00,
|
||||||
"total_amount": 0.00
|
"total_amount": 0.00
|
||||||
}
|
}
|
||||||
Return only the JSON, no explanation.`;
|
|
||||||
|
|
||||||
/**
|
Before completing, use the json.validate tool to verify your output is valid JSON with all required fields.
|
||||||
* Query MiniCPM-V for JSON output (fast, no thinking)
|
|
||||||
*/
|
|
||||||
async function queryJsonFast(images: string[]): Promise<string> {
|
|
||||||
const response = await fetch(`${OLLAMA_URL}/api/chat`, {
|
|
||||||
method: 'POST',
|
|
||||||
headers: { 'Content-Type': 'application/json' },
|
|
||||||
body: JSON.stringify({
|
|
||||||
model: MODEL,
|
|
||||||
messages: [{
|
|
||||||
role: 'user',
|
|
||||||
content: JSON_PROMPT,
|
|
||||||
images: images,
|
|
||||||
}],
|
|
||||||
stream: false,
|
|
||||||
options: {
|
|
||||||
num_predict: 1000,
|
|
||||||
temperature: 0.1,
|
|
||||||
},
|
|
||||||
}),
|
|
||||||
});
|
|
||||||
|
|
||||||
if (!response.ok) {
|
<tool_call>
|
||||||
throw new Error(`Ollama API error: ${response.status}`);
|
<tool>json</tool>
|
||||||
}
|
<action>validate</action>
|
||||||
|
<params>{"jsonString": "YOUR_JSON_HERE", "requiredFields": ["invoice_number", "invoice_date", "vendor_name", "currency", "net_amount", "vat_amount", "total_amount"]}</params>
|
||||||
|
</tool_call>
|
||||||
|
|
||||||
const data = await response.json();
|
Only complete the task after validation passes. Output the final JSON in <task_complete> tags.`;
|
||||||
return (data.message?.content || '').trim();
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Query MiniCPM-V for JSON output with thinking enabled (slower, more accurate)
|
|
||||||
*/
|
|
||||||
async function queryJsonWithThinking(images: string[]): Promise<string> {
|
|
||||||
const response = await fetch(`${OLLAMA_URL}/api/chat`, {
|
|
||||||
method: 'POST',
|
|
||||||
headers: { 'Content-Type': 'application/json' },
|
|
||||||
body: JSON.stringify({
|
|
||||||
model: MODEL,
|
|
||||||
messages: [{
|
|
||||||
role: 'user',
|
|
||||||
content: `Think carefully about this invoice image, then ${JSON_PROMPT}`,
|
|
||||||
images: images,
|
|
||||||
}],
|
|
||||||
stream: false,
|
|
||||||
options: {
|
|
||||||
num_predict: 2000,
|
|
||||||
temperature: 0.1,
|
|
||||||
},
|
|
||||||
}),
|
|
||||||
});
|
|
||||||
|
|
||||||
if (!response.ok) {
|
|
||||||
throw new Error(`Ollama API error: ${response.status}`);
|
|
||||||
}
|
|
||||||
|
|
||||||
const data = await response.json();
|
|
||||||
return (data.message?.content || '').trim();
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Parse amount from string (handles European format)
|
* Parse amount from string (handles European format)
|
||||||
@@ -190,9 +147,31 @@ function extractCurrency(s: string | undefined): string {
 }
 
 /**
- * Extract JSON from response (handles markdown code blocks)
+ * Extract JSON from response (handles markdown code blocks and task_complete tags)
  */
 function extractJsonFromResponse(response: string): Record<string, unknown> | null {
+  // Try to find JSON in task_complete tags
+  const completeMatch = response.match(/<task_complete>([\s\S]*?)<\/task_complete>/);
+  if (completeMatch) {
+    const content = completeMatch[1].trim();
+    // Try to find JSON in the content
+    const codeBlockMatch = content.match(/```(?:json)?\s*([\s\S]*?)```/);
+    const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : content;
+    try {
+      return JSON.parse(jsonStr);
+    } catch {
+      // Try to find JSON object pattern
+      const jsonMatch = jsonStr.match(/\{[\s\S]*\}/);
+      if (jsonMatch) {
+        try {
+          return JSON.parse(jsonMatch[0]);
+        } catch {
+          return null;
+        }
+      }
+    }
+  }
+
   // Try to find JSON in markdown code block
   const codeBlockMatch = response.match(/```(?:json)?\s*([\s\S]*?)```/);
   const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : response.trim();
@@ -232,76 +211,27 @@ function parseJsonToInvoice(response: string): IInvoice | null {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Compare two invoices for consensus (key fields must match)
|
* Extract invoice data using smartagent orchestrator with vision
|
||||||
*/
|
|
||||||
function invoicesMatch(a: IInvoice, b: IInvoice): boolean {
|
|
||||||
const numMatch = a.invoice_number.toLowerCase() === b.invoice_number.toLowerCase();
|
|
||||||
const dateMatch = a.invoice_date === b.invoice_date;
|
|
||||||
const totalMatch = Math.abs(a.total_amount - b.total_amount) < 0.02;
|
|
||||||
return numMatch && dateMatch && totalMatch;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Extract invoice data using consensus approach:
|
|
||||||
* 1. Pass 1: Fast JSON extraction
|
|
||||||
* 2. Pass 2: Confirm with thinking enabled
|
|
||||||
* 3. If mismatch: repeat until consensus or max 5 attempts
|
|
||||||
*/
|
*/
|
||||||
async function extractInvoiceFromImages(images: string[]): Promise<IInvoice> {
|
async function extractInvoiceFromImages(images: string[]): Promise<IInvoice> {
|
||||||
console.log(` [Vision] Processing ${images.length} page(s) with ${MODEL} (consensus)`);
|
console.log(` [Vision] Processing ${images.length} page(s) with smartagent DualAgentOrchestrator`);
|
||||||
|
|
||||||
const MAX_ATTEMPTS = 5;
|
const startTime = Date.now();
|
||||||
let attempt = 0;
|
|
||||||
|
|
||||||
while (attempt < MAX_ATTEMPTS) {
|
const result = await orchestrator.run(EXTRACTION_PROMPT, { images });
|
||||||
attempt++;
|
|
||||||
console.log(` [Attempt ${attempt}/${MAX_ATTEMPTS}]`);
|
|
||||||
|
|
||||||
// PASS 1: Fast JSON extraction
|
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
|
||||||
console.log(` [Pass 1] Fast extraction...`);
|
console.log(` [Vision] Completed in ${elapsed}s (${result.iterations} iterations, status: ${result.status})`);
|
||||||
const fastResponse = await queryJsonFast(images);
|
|
||||||
const fastInvoice = parseJsonToInvoice(fastResponse);
|
|
||||||
|
|
||||||
if (!fastInvoice) {
|
const invoice = parseJsonToInvoice(result.result);
|
||||||
console.log(` [Pass 1] JSON parsing failed, retrying...`);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
console.log(` [Pass 1] Result: ${fastInvoice.invoice_number} | ${fastInvoice.invoice_date} | ${fastInvoice.total_amount} ${fastInvoice.currency}`);
|
|
||||||
|
|
||||||
// PASS 2: Confirm with thinking
|
if (invoice) {
|
||||||
console.log(` [Pass 2] Thinking confirmation...`);
|
console.log(` [Result] ${invoice.invoice_number} | ${invoice.invoice_date} | ${invoice.total_amount} ${invoice.currency}`);
|
||||||
const thinkResponse = await queryJsonWithThinking(images);
|
return invoice;
|
||||||
const thinkInvoice = parseJsonToInvoice(thinkResponse);
|
|
||||||
|
|
||||||
if (!thinkInvoice) {
|
|
||||||
console.log(` [Pass 2] JSON parsing failed, retrying...`);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
console.log(` [Pass 2] Result: ${thinkInvoice.invoice_number} | ${thinkInvoice.invoice_date} | ${thinkInvoice.total_amount} ${thinkInvoice.currency}`);
|
|
||||||
|
|
||||||
// Check consensus
|
|
||||||
if (invoicesMatch(fastInvoice, thinkInvoice)) {
|
|
||||||
console.log(` [Consensus] MATCH - using result`);
|
|
||||||
return thinkInvoice; // Prefer thinking result
|
|
||||||
}
|
}
|
||||||
|
|
||||||
console.log(` [Consensus] MISMATCH - repeating...`);
|
// Return empty invoice if parsing failed
|
||||||
console.log(` Fast: ${fastInvoice.invoice_number} | ${fastInvoice.invoice_date} | ${fastInvoice.total_amount}`);
|
console.log(` [Result] Parsing failed, returning empty invoice`);
|
||||||
console.log(` Think: ${thinkInvoice.invoice_number} | ${thinkInvoice.invoice_date} | ${thinkInvoice.total_amount}`);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Max attempts reached - do one final thinking pass and use that
|
|
||||||
console.log(` [Final] Max attempts reached, using final thinking pass`);
|
|
||||||
const finalResponse = await queryJsonWithThinking(images);
|
|
||||||
const finalInvoice = parseJsonToInvoice(finalResponse);
|
|
||||||
|
|
||||||
if (finalInvoice) {
|
|
||||||
console.log(` [Final] Result: ${finalInvoice.invoice_number} | ${finalInvoice.invoice_date} | ${finalInvoice.total_amount} ${finalInvoice.currency}`);
|
|
||||||
return finalInvoice;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return empty invoice if all else fails
|
|
||||||
console.log(` [Final] All parsing failed, returning empty`);
|
|
||||||
return {
|
return {
|
||||||
invoice_number: '',
|
invoice_number: '',
|
||||||
invoice_date: '',
|
invoice_date: '',
|
||||||
@@ -410,6 +340,79 @@ tap.test('setup: ensure Docker containers are running', async () => {
   console.log('\n[Setup] All containers ready!\n');
 });
 
+tap.test('setup: initialize smartagent orchestrator', async () => {
+  console.log('[Setup] Initializing SmartAi and DualAgentOrchestrator...');
+
+  smartAi = new SmartAi({
+    ollama: {
+      baseUrl: OLLAMA_URL,
+      model: MODEL,
+      defaultOptions: {
+        num_ctx: 32768,
+        temperature: 0.1,
+      },
+      defaultTimeout: 300000, // 5 minutes for vision tasks
+    },
+  });
+
+  await smartAi.start();
+
+  orchestrator = new DualAgentOrchestrator({
+    smartAiInstance: smartAi,
+    defaultProvider: 'ollama',
+    guardianPolicyPrompt: `You are a Guardian agent overseeing invoice extraction tasks.
+
+APPROVE all tool calls that:
+- Use the json.validate action to verify JSON output
+- Are reasonable attempts to complete the extraction task
+
+REJECT tool calls that:
+- Attempt to access external resources
+- Try to execute arbitrary code
+- Are clearly unrelated to invoice extraction`,
+    driverSystemMessage: `You are an AI assistant that extracts invoice data from images.
+
+Your task is to analyze invoice images and extract structured data.
+You have access to a json.validate tool to verify your JSON output.
+
+IMPORTANT: Always validate your JSON before completing the task.
+
+## Tool Usage Format
+When you need to validate JSON, output:
+
+<tool_call>
+<tool>json</tool>
+<action>validate</action>
+<params>{"jsonString": "YOUR_JSON", "requiredFields": ["invoice_number", "invoice_date", "vendor_name", "currency", "net_amount", "vat_amount", "total_amount"]}</params>
+</tool_call>
+
+## Completion Format
+After validation passes, complete the task:
+
+<task_complete>
+{"invoice_number": "...", "invoice_date": "YYYY-MM-DD", ...}
+</task_complete>`,
+    maxIterations: 5,
+    maxConsecutiveRejections: 3,
+    onToken: (token, source) => {
+      if (source === 'driver') {
+        process.stdout.write(token);
+      }
+    },
+    onProgress: (event) => {
+      if (event.logLevel === 'error') {
+        console.error(event.logMessage);
+      }
+    },
+  });
+
+  // Register the JsonValidatorTool
+  orchestrator.registerTool(new JsonValidatorTool());
+
+  await orchestrator.start();
+  console.log('[Setup] Orchestrator initialized!\n');
+});
+
 tap.test('should have MiniCPM-V model loaded', async () => {
   const response = await fetch(`${OLLAMA_URL}/api/tags`);
   const data = await response.json();
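For orientation, the seven `requiredFields` that the driver is told to validate correspond to a result shape roughly like the interface below. This is an illustrative sketch only (the name `IExtractedInvoice` is not in the diff); the test files define their own `IInvoice` interface.

```ts
// Illustrative only: the shape implied by the requiredFields list in the driver prompt.
interface IExtractedInvoice {
  invoice_number: string;
  invoice_date: string; // YYYY-MM-DD
  vendor_name: string;
  currency: string; // EUR, USD, or GBP
  net_amount: number;
  vat_amount: number;
  total_amount: number;
}
```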
@@ -418,7 +421,7 @@ tap.test('should have MiniCPM-V model loaded', async () => {
 });
 
 const testCases = findTestCases();
-console.log(`\nFound ${testCases.length} invoice test cases (MiniCPM-V)\n`);
+console.log(`\nFound ${testCases.length} invoice test cases (smartagent + MiniCPM-V)\n`);
 
 let passedCount = 0;
 let failedCount = 0;
@@ -455,6 +458,13 @@ for (const testCase of testCases) {
   });
 }
 
+tap.test('cleanup: stop orchestrator', async () => {
+  if (orchestrator) {
+    await orchestrator.stop();
+  }
+  console.log('[Cleanup] Orchestrator stopped');
+});
+
 tap.test('summary', async () => {
   const totalInvoices = testCases.length;
   const accuracy = totalInvoices > 0 ? (passedCount / totalInvoices) * 100 : 0;
@@ -462,9 +472,10 @@ tap.test('summary', async () => {
   const avgTimeSec = processingTimes.length > 0 ? totalTimeMs / processingTimes.length / 1000 : 0;
 
   console.log(`\n========================================`);
-  console.log(`  Invoice Extraction Summary (${MODEL})`);
+  console.log(`  Invoice Extraction Summary`);
+  console.log(`  (smartagent + ${MODEL})`);
   console.log(`========================================`);
-  console.log(`  Method: Consensus (fast + thinking)`);
+  console.log(`  Method: DualAgentOrchestrator with vision`);
   console.log(`  Passed: ${passedCount}/${totalInvoices}`);
   console.log(`  Failed: ${failedCount}/${totalInvoices}`);
   console.log(`  Accuracy: ${accuracy.toFixed(1)}%`);
@@ -12,6 +12,8 @@ import * as path from 'path';
 import { execSync } from 'child_process';
 import * as os from 'os';
 import { ensureNanonetsOcr, ensureMiniCpm, isContainerRunning } from './helpers/docker.js';
+import { SmartAi } from '@push.rocks/smartai';
+import { DualAgentOrchestrator, JsonValidatorTool } from '@push.rocks/smartagent';
 
 const NANONETS_URL = 'http://localhost:8000/v1';
 const NANONETS_MODEL = 'nanonets/Nanonets-OCR2-3B';
@@ -19,8 +21,24 @@ const NANONETS_MODEL = 'nanonets/Nanonets-OCR2-3B';
 const OLLAMA_URL = 'http://localhost:11434';
 const EXTRACTION_MODEL = 'gpt-oss:20b';
 
-// Temp directory for storing markdown between stages
-const TEMP_MD_DIR = path.join(os.tmpdir(), 'nanonets-invoices-markdown');
+// Persistent cache directory for storing markdown between runs
+const MD_CACHE_DIR = path.join(process.cwd(), '.nogit/invoices-md');
+
+// SmartAi instance for Ollama with optimized settings
+const smartAi = new SmartAi({
+  ollama: {
+    baseUrl: OLLAMA_URL,
+    model: EXTRACTION_MODEL,
+    defaultOptions: {
+      num_ctx: 32768, // Larger context for long invoices + thinking
+      temperature: 0, // Deterministic for JSON extraction
+    },
+    defaultTimeout: 600000, // 10 minute timeout for large documents
+  },
+});
+
+// DualAgentOrchestrator for structured task execution
+let orchestrator: DualAgentOrchestrator;
 
 interface IInvoice {
   invoice_number: string;
@@ -58,11 +76,11 @@ Page numbers should be wrapped in brackets. Ex: <page_number>14</page_number>.`;
 const JSON_EXTRACTION_PROMPT = `Extract key fields from the invoice. Return ONLY valid JSON.
 
 WHERE TO FIND DATA:
-- invoice_number, invoice_date, vendor_name: Look in the HEADER section at the TOP of PAGE 1 (near "Invoice no.", "Invoice date:", "Rechnungsnummer")
+- invoice_number, invoice_date, vendor_name: Look in the HEADER section at the TOP of PAGE 1 (near "Invoice no.", "Invoice date:", "Rechnungsnummer"). Use common sense. Btw. an invoice number might start on INV* .
 - net_amount, vat_amount, total_amount: Look in the SUMMARY section at the BOTTOM (look for "Total", "Amount due", "Gesamtbetrag")
 
 RULES:
-1. invoice_number: Extract ONLY the value (e.g., "R0015632540"), NOT the label "Invoice no."
+1. Use common sense.
 2. invoice_date: Convert to YYYY-MM-DD format (e.g., "14/04/2022" → "2022-04-14")
 3. vendor_name: The company issuing the invoice
 4. currency: EUR, USD, or GBP
@@ -71,10 +89,13 @@ RULES:
 7. total_amount: Final total with tax
 
 JSON only:
-{"invoice_number":"X","invoice_date":"YYYY-MM-DD","vendor_name":"X","currency":"EUR","net_amount":0,"vat_amount":0,"total_amount":0}`;
+{"invoice_number":"X","invoice_date":"YYYY-MM-DD","vendor_name":"X","currency":"EUR","net_amount":0,"vat_amount":0,"total_amount":0}
+
+Double check for valid JSON syntax.
+
+`;
 
 // Constants for smart batching
-const MAX_VISUAL_TOKENS = 28000; // ~32K context minus prompt/output headroom
 const PATCH_SIZE = 14; // Qwen2.5-VL uses 14x14 patches
 
 /**
@@ -318,16 +339,20 @@ function extractCurrency(s: string | undefined): string {
 }
 
 /**
- * Extract JSON from response
+ * Try to extract valid JSON from a response string
  */
-function extractJsonFromResponse(response: string): Record<string, unknown> | null {
-  let cleanResponse = response.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
-  const codeBlockMatch = cleanResponse.match(/```(?:json)?\s*([\s\S]*?)```/);
-  const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : cleanResponse;
+function tryExtractJson(response: string): Record<string, unknown> | null {
+  // Remove thinking tags
+  let clean = response.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
+
+  // Try code block
+  const codeBlockMatch = clean.match(/```(?:json)?\s*([\s\S]*?)```/);
+  const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : clean;
 
   try {
     return JSON.parse(jsonStr);
   } catch {
+    // Try to find JSON object
     const jsonMatch = jsonStr.match(/\{[\s\S]*\}/);
     if (jsonMatch) {
       try {
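As a quick illustration of the parsing fallback added above (not part of the diff): `tryExtractJson` first strips `<think>` tags, then prefers a fenced code block, and otherwise falls back to the first `{...}` span it can parse.

```ts
// Illustrative only: exercising the tryExtractJson helper introduced in this hunk.
const reply = '<think>checking the totals...</think> Here is the result: {"total_amount": 42, "currency": "EUR"}';
const parsed = tryExtractJson(reply);
// parsed → { total_amount: 42, currency: "EUR" }; returns null if no JSON object can be recovered
```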
@@ -341,113 +366,92 @@ function extractJsonFromResponse(response: string): Record<string, unknown> | nu
 }
 
 /**
- * Parse JSON response into IInvoice
- */
-function parseJsonToInvoice(response: string): IInvoice | null {
-  const parsed = extractJsonFromResponse(response);
-  if (!parsed) return null;
-
-  return {
-    invoice_number: extractInvoiceNumber(String(parsed.invoice_number || '')),
-    invoice_date: extractDate(String(parsed.invoice_date || '')),
-    vendor_name: String(parsed.vendor_name || '').replace(/\*\*/g, '').replace(/`/g, '').trim(),
-    currency: extractCurrency(String(parsed.currency || '')),
-    net_amount: parseAmount(parsed.net_amount as string | number),
-    vat_amount: parseAmount(parsed.vat_amount as string | number),
-    total_amount: parseAmount(parsed.total_amount as string | number),
-  };
-}
-
-/**
- * Extract invoice from markdown using GPT-OSS 20B (streaming)
+ * Extract invoice from markdown using smartagent DualAgentOrchestrator
+ * Validates JSON and retries if invalid
  */
 async function extractInvoiceFromMarkdown(markdown: string, queryId: string): Promise<IInvoice | null> {
   const startTime = Date.now();
+  const maxRetries = 2;
 
-  console.log(`  [${queryId}] Invoice: ${markdown.length} chars, Prompt: ${JSON_EXTRACTION_PROMPT.length} chars`);
+  console.log(`  [${queryId}] Invoice: ${markdown.length} chars`);
 
-  const response = await fetch(`${OLLAMA_URL}/api/chat`, {
-    method: 'POST',
-    headers: { 'Content-Type': 'application/json' },
-    body: JSON.stringify({
-      model: EXTRACTION_MODEL,
-      messages: [
-        { role: 'user', content: 'Hi there, how are you?' },
-        { role: 'assistant', content: 'Good, how can I help you today?' },
-        { role: 'user', content: `Here is an invoice document:\n\n${markdown}` },
-        { role: 'assistant', content: 'I have read the invoice document you provided. I can see all the text content. What would you like me to do with it?' },
-        { role: 'user', content: JSON_EXTRACTION_PROMPT },
-      ],
-      stream: true,
-      options: {
-        num_ctx: 32768, // Larger context for long invoices + thinking
-        temperature: 0, // Deterministic for JSON extraction
-      },
-    }),
-    signal: AbortSignal.timeout(600000), // 10 minute timeout for large documents
-  });
-
-  if (!response.ok) {
-    const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
-    console.log(`  [${queryId}] ERROR: ${response.status} (${elapsed}s)`);
-    throw new Error(`Ollama API error: ${response.status}`);
-  }
-
-  // Stream the response
-  let content = '';
-  let thinkingContent = '';
-  let thinkingStarted = false;
-  let outputStarted = false;
-  const reader = response.body!.getReader();
-  const decoder = new TextDecoder();
-
-  try {
-    while (true) {
-      const { done, value } = await reader.read();
-      if (done) break;
-
-      const chunk = decoder.decode(value, { stream: true });
-
-      // Each line is a JSON object
-      for (const line of chunk.split('\n').filter(l => l.trim())) {
-        try {
-          const json = JSON.parse(line);
-
-          // Stream thinking tokens
-          const thinking = json.message?.thinking || '';
-          if (thinking) {
-            if (!thinkingStarted) {
-              process.stdout.write(`  [${queryId}] THINKING: `);
-              thinkingStarted = true;
-            }
-            process.stdout.write(thinking);
-            thinkingContent += thinking;
-          }
-
-          // Stream content tokens
-          const token = json.message?.content || '';
-          if (token) {
-            if (!outputStarted) {
-              if (thinkingStarted) process.stdout.write('\n');
-              process.stdout.write(`  [${queryId}] OUTPUT: `);
-              outputStarted = true;
-            }
-            process.stdout.write(token);
-            content += token;
-          }
-        } catch {
-          // Ignore parse errors for partial chunks
-        }
-      }
-    }
-  } finally {
-    if (thinkingStarted || outputStarted) process.stdout.write('\n');
-  }
-
-  const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
-  console.log(`  [${queryId}] Done: ${thinkingContent.length} thinking chars, ${content.length} output chars (${elapsed}s)`);
-
-  return parseJsonToInvoice(content);
-}
+  // Build the extraction task with document context
+  const taskPrompt = `Extract the invoice data from this document and output ONLY the JSON:
+
+${markdown}
+
+${JSON_EXTRACTION_PROMPT}`;
+
+  try {
+    let result = await orchestrator.run(taskPrompt);
+    let elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
+    console.log(`  [${queryId}] Status: ${result.status}, Iterations: ${result.iterations} (${elapsed}s)`);
+
+    // Try to parse JSON from result
+    let jsonData: Record<string, unknown> | null = null;
+    let responseText = result.result || '';
+
+    if (result.success && responseText) {
+      jsonData = tryExtractJson(responseText);
+    }
+
+    // Fallback: try parsing from history
+    if (!jsonData && result.history?.length > 0) {
+      const lastMessage = result.history[result.history.length - 1];
+      if (lastMessage?.content) {
+        responseText = lastMessage.content;
+        jsonData = tryExtractJson(responseText);
+      }
+    }
+
+    // If JSON is invalid, retry with correction request
+    let retries = 0;
+    while (!jsonData && retries < maxRetries) {
+      retries++;
+      console.log(`  [${queryId}] Invalid JSON, requesting correction (retry ${retries}/${maxRetries})...`);
+
+      result = await orchestrator.continueTask(
+        `Your response was not valid JSON. Please output ONLY the JSON object with no markdown, no explanation, no thinking tags. Just the raw JSON starting with { and ending with }. Format:
+{"invoice_number":"X","invoice_date":"YYYY-MM-DD","vendor_name":"X","currency":"EUR","net_amount":0,"vat_amount":0,"total_amount":0}`
+      );
+
+      elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
+      console.log(`  [${queryId}] Retry ${retries}: ${result.status} (${elapsed}s)`);
+
+      responseText = result.result || '';
+      if (responseText) {
+        jsonData = tryExtractJson(responseText);
+      }
+
+      if (!jsonData && result.history?.length > 0) {
+        const lastMessage = result.history[result.history.length - 1];
+        if (lastMessage?.content) {
+          responseText = lastMessage.content;
+          jsonData = tryExtractJson(responseText);
+        }
+      }
+    }
+
+    if (!jsonData) {
+      console.log(`  [${queryId}] Failed to get valid JSON after ${retries} retries`);
+      return null;
+    }
+
+    console.log(`  [${queryId}] Valid JSON extracted`);
+    return {
+      invoice_number: extractInvoiceNumber(String(jsonData.invoice_number || '')),
+      invoice_date: extractDate(String(jsonData.invoice_date || '')),
+      vendor_name: String(jsonData.vendor_name || '').replace(/\*\*/g, '').replace(/`/g, '').trim(),
+      currency: extractCurrency(String(jsonData.currency || '')),
+      net_amount: parseAmount(jsonData.net_amount as string | number),
+      vat_amount: parseAmount(jsonData.vat_amount as string | number),
+      total_amount: parseAmount(jsonData.total_amount as string | number),
+    };
+  } catch (error) {
+    const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
+    console.log(`  [${queryId}] ERROR: ${error} (${elapsed}s)`);
+    throw error;
+  }
+}
 
 /**
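A minimal usage sketch of the rewritten helper (not part of the diff; it assumes a cached markdown file and a started orchestrator, and the file name is hypothetical):

```ts
// Illustrative only — mirrors how the Stage 2 test is expected to call the helper.
const mdPath = path.join(MD_CACHE_DIR, 'sample-invoice.md'); // hypothetical cache entry
const markdown = fs.readFileSync(mdPath, 'utf8');
const invoice = await extractInvoiceFromMarkdown(markdown, 'sample-invoice');
if (invoice) {
  console.log(invoice.invoice_number, invoice.total_amount, invoice.currency);
}
```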
@@ -556,23 +560,45 @@ function findTestCases(): ITestCase[] {
 const testCases = findTestCases();
 console.log(`\nFound ${testCases.length} invoice test cases\n`);
 
-// Ensure temp directory exists
-if (!fs.existsSync(TEMP_MD_DIR)) {
-  fs.mkdirSync(TEMP_MD_DIR, { recursive: true });
+// Ensure cache directory exists
+if (!fs.existsSync(MD_CACHE_DIR)) {
+  fs.mkdirSync(MD_CACHE_DIR, { recursive: true });
 }
 
 // -------- STAGE 1: OCR with Nanonets --------
 
-tap.test('Stage 1: Setup Nanonets', async () => {
+tap.test('Stage 1: Convert invoices to markdown (with caching)', async () => {
   console.log('\n========== STAGE 1: Nanonets OCR ==========\n');
-  const ok = await ensureNanonetsOcr();
-  expect(ok).toBeTrue();
-});
-
-tap.test('Stage 1: Convert all invoices to markdown', async () => {
-  console.log('\n  Converting all invoice PDFs to markdown with Nanonets-OCR-s...\n');
+
+  // Check which invoices need OCR conversion
+  const needsConversion: ITestCase[] = [];
+  let cachedCount = 0;
 
   for (const tc of testCases) {
+    const mdPath = path.join(MD_CACHE_DIR, `${tc.name}.md`);
+    if (fs.existsSync(mdPath)) {
+      cachedCount++;
+      tc.markdownPath = mdPath;
+      console.log(`  [CACHED] ${tc.name} - using cached markdown`);
+    } else {
+      needsConversion.push(tc);
+    }
+  }
+
+  console.log(`\n  Summary: ${cachedCount} cached, ${needsConversion.length} need conversion\n`);
+
+  if (needsConversion.length === 0) {
+    console.log('  All invoices already cached, skipping Nanonets OCR\n');
+    return;
+  }
+
+  // Start Nanonets only if there are files to convert
+  console.log('  Starting Nanonets for OCR conversion...\n');
+  const ok = await ensureNanonetsOcr();
+  expect(ok).toBeTrue();
+
+  // Convert only the invoices that need conversion
+  for (const tc of needsConversion) {
     console.log(`\n  === ${tc.name} ===`);
 
     const images = convertPdfToImages(tc.pdfPath);
@@ -580,13 +606,13 @@ tap.test('Stage 1: Convert all invoices to markdown', async () => {
 
     const markdown = await convertDocumentToMarkdown(images, tc.name);
 
-    const mdPath = path.join(TEMP_MD_DIR, `${tc.name}.md`);
+    const mdPath = path.join(MD_CACHE_DIR, `${tc.name}.md`);
     fs.writeFileSync(mdPath, markdown);
     tc.markdownPath = mdPath;
     console.log(`  Saved: ${mdPath}`);
   }
 
-  console.log('\n  Stage 1 complete: All invoices converted to markdown\n');
+  console.log(`\n  Stage 1 complete: ${needsConversion.length} invoices converted to markdown\n`);
 });
 
 tap.test('Stage 1: Stop Nanonets', async () => {
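Because Stage 1 now writes its markdown into a repo-local cache, re-running the suite skips OCR for anything already converted. If a fresh OCR pass is wanted, one simple option (not part of the diff) is to clear the cache directory before running:

```ts
// Illustrative only: force re-OCR on the next run by deleting the cache directory.
import * as fs from 'fs';
fs.rmSync('.nogit/invoices-md', { recursive: true, force: true });
```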
@@ -605,6 +631,53 @@ tap.test('Stage 2: Setup Ollama + GPT-OSS 20B', async () => {
   const extractionOk = await ensureExtractionModel();
   expect(extractionOk).toBeTrue();
 
+  // Initialize SmartAi and DualAgentOrchestrator
+  console.log('  [SmartAgent] Starting SmartAi...');
+  await smartAi.start();
+
+  console.log('  [SmartAgent] Creating DualAgentOrchestrator...');
+  orchestrator = new DualAgentOrchestrator({
+    smartAiInstance: smartAi,
+    defaultProvider: 'ollama',
+    guardianPolicyPrompt: `
+JSON EXTRACTION POLICY:
+- APPROVE all JSON extraction tasks
+- APPROVE all json.validate tool calls
+- This is a read-only operation - no file system or network access needed
+- The task is to extract structured data from document text
+`,
+    driverSystemMessage: `You are a precise JSON extraction assistant. Your only job is to extract invoice data from documents.
+
+CRITICAL RULES:
+1. Output valid JSON with the exact format requested
+2. If you cannot find a value, use empty string "" or 0 for numbers
+3. IMPORTANT: Before completing, validate your JSON using the json.validate tool:
+
+<tool_call>
+<tool>json</tool>
+<action>validate</action>
+<params>{"jsonString": "YOUR_JSON", "requiredFields": ["invoice_number", "invoice_date", "vendor_name", "currency", "net_amount", "vat_amount", "total_amount"]}</params>
+</tool_call>
+
+4. Only complete after validation passes
+
+When done, wrap your JSON in <task_complete></task_complete> tags.`,
+    maxIterations: 5,
+    // Enable streaming for real-time progress visibility
+    onToken: (token, source) => {
+      if (source === 'driver') {
+        process.stdout.write(token);
+      }
+    },
+  });
+
+  // Register JsonValidatorTool for self-validation
+  orchestrator.registerTool(new JsonValidatorTool());
+
+  console.log('  [SmartAgent] Starting orchestrator...');
+  await orchestrator.start();
+  console.log('  [SmartAgent] Ready for extraction');
 });
 
 let passedCount = 0;
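The `json.validate` action referenced in the driver prompt is supplied by the registered `JsonValidatorTool`. The sketch below is only an illustrative stand-in for the kind of check such a tool is expected to perform (parseable JSON plus presence of the required fields); it is not the `@push.rocks/smartagent` implementation.

```ts
// Illustrative stand-in, not the library implementation.
function validateJson(jsonString: string, requiredFields: string[]): { valid: boolean; missing: string[] } {
  try {
    const obj = JSON.parse(jsonString) as Record<string, unknown>;
    const missing = requiredFields.filter((field) => !(field in obj));
    return { valid: missing.length === 0, missing };
  } catch {
    // Unparseable JSON: report every required field as missing.
    return { valid: false, missing: requiredFields };
  }
}
```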
@@ -619,7 +692,7 @@ for (const tc of testCases) {
 
     const startTime = Date.now();
 
-    const mdPath = path.join(TEMP_MD_DIR, `${tc.name}.md`);
+    const mdPath = path.join(MD_CACHE_DIR, `${tc.name}.md`);
     if (!fs.existsSync(mdPath)) {
       throw new Error(`Markdown not found: ${mdPath}. Run Stage 1 first.`);
     }
@@ -649,6 +722,14 @@ for (const tc of testCases) {
 }
 
 tap.test('Summary', async () => {
+  // Cleanup orchestrator and SmartAi
+  if (orchestrator) {
+    console.log('\n  [SmartAgent] Stopping orchestrator...');
+    await orchestrator.stop();
+  }
+  console.log('  [SmartAgent] Stopping SmartAi...');
+  await smartAi.stop();
+
   const totalInvoices = testCases.length;
   const accuracy = totalInvoices > 0 ? (passedCount / totalInvoices) * 100 : 0;
   const totalTimeMs = processingTimes.reduce((a, b) => a + b, 0);
@@ -658,7 +739,7 @@ tap.test('Summary', async () => {
   console.log(`  Invoice Summary (Nanonets + GPT-OSS 20B)`);
   console.log(`========================================`);
   console.log(`  Stage 1: Nanonets-OCR-s (doc -> md)`);
-  console.log(`  Stage 2: GPT-OSS 20B (md -> JSON)`);
+  console.log(`  Stage 2: GPT-OSS 20B + SmartAgent (md -> JSON)`);
   console.log(`  Passed: ${passedCount}/${totalInvoices}`);
   console.log(`  Failed: ${failedCount}/${totalInvoices}`);
   console.log(`  Accuracy: ${accuracy.toFixed(1)}%`);
@@ -666,14 +747,7 @@ tap.test('Summary', async () => {
   console.log(`  Total time: ${(totalTimeMs / 1000).toFixed(1)}s`);
   console.log(`  Avg per inv: ${avgTimeSec.toFixed(1)}s`);
   console.log(`========================================\n`);
-
-  // Cleanup temp files
-  try {
-    fs.rmSync(TEMP_MD_DIR, { recursive: true, force: true });
-    console.log(`  Cleaned up temp directory: ${TEMP_MD_DIR}\n`);
-  } catch {
-    // Ignore
-  }
+  console.log(`  Cache location: ${MD_CACHE_DIR}\n`);
 });
 
 export default tap.start();