// test/test.invoices.failed.ts
/**
 * Focused test for failed invoice extractions
 *
 * Tests only the 4 invoices that failed in the main test:
 * - consensus_2021-09: invoice_number "2021/1384" → "20211384" (slash stripped)
 * - hetzner_2022-04: model hallucinated after 281s thinking
 * - qonto_2021-08: invoice_number "08-21-INVOICE-410870" → "4108705" (prefix stripped)
 * - qonto_2021-09: invoice_number "09-21-INVOICE-4303642" → "4303642" (prefix stripped)
 *
 * Run with: tstest test/test.invoices.failed.ts --verbose
 */
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as fs from 'fs';
import * as path from 'path';
import { execSync } from 'child_process';
import * as os from 'os';
import { ensureNanonetsOcr, ensureMiniCpm, isContainerRunning } from './helpers/docker.js';

// Stage 1 (OCR): Nanonets served behind an OpenAI-compatible /v1 endpoint.
const NANONETS_URL = 'http://localhost:8000/v1';
const NANONETS_MODEL = 'nanonets/Nanonets-OCR2-3B';

// Stage 2 (field extraction): local Ollama daemon running GPT-OSS 20B.
const OLLAMA_URL = 'http://localhost:11434';
const EXTRACTION_MODEL = 'gpt-oss:20b';

// Temp directory for storing markdown between stages
const TEMP_MD_DIR = path.join(os.tmpdir(), 'nanonets-invoices-failed-debug');

// Only test these specific invoices that failed
const FAILED_INVOICES = [
  'consensus_2021-09',
  'hetzner_2022-04',
  'qonto_2021-08',
  'qonto_2021-09',
];
|
/** Normalized invoice fields, compared against the expected *.json fixtures. */
interface IInvoice {
  invoice_number: string; // unique invoice/document number, all characters preserved
  invoice_date: string;   // YYYY-MM-DD after normalization
  vendor_name: string;
  currency: string;       // EUR | USD | GBP
  net_amount: number;     // amount before tax
  vat_amount: number;
  total_amount: number;   // gross amount
}

/** One rendered PDF page: base64-encoded JPEG plus its pixel dimensions. */
interface IImageData {
  base64: string;
  width: number;
  height: number;
  pageNum: number; // 1-based page index within the document
}

/** An invoice under test: source PDF, expected JSON, and the Stage-1 markdown. */
interface ITestCase {
  name: string;
  pdfPath: string;
  jsonPath: string;
  markdownPath?: string; // set once Stage 1 has written the OCR markdown
}
// Nanonets-specific prompt for document OCR to markdown
const NANONETS_OCR_PROMPT = `Extract the text from the above document as if you were reading it naturally.
Return the tables in html format.
Return the equations in LaTeX representation.
If there is an image in the document and image caption is not present, add a small description inside <img></img> tag.
Watermarks should be wrapped in brackets. Ex: <watermark>OFFICIAL COPY</watermark>.
Page numbers should be wrapped in brackets. Ex: <page_number>14</page_number>.`;

// JSON extraction prompt for GPT-OSS 20B.
// The OCR markdown is appended by the caller directly after the trailing
// "INVOICE TEXT:" line (the template ends with a newline for that reason).
const JSON_EXTRACTION_PROMPT = `You are an invoice data extractor. Below is an invoice document converted to text/markdown. Extract the key invoice fields as JSON.

IMPORTANT RULES:
1. invoice_number: The unique invoice/document number (NOT VAT ID, NOT customer ID). PRESERVE ALL CHARACTERS including slashes, dashes, and prefixes.
2. invoice_date: Format as YYYY-MM-DD
3. vendor_name: The company that issued the invoice
4. currency: EUR, USD, or GBP
5. net_amount: Amount before tax
6. vat_amount: Tax/VAT amount
7. total_amount: Final total (gross amount)

Return ONLY this JSON format, no explanation:
{
"invoice_number": "INV-2024-001",
"invoice_date": "2024-01-15",
"vendor_name": "Company Name",
"currency": "EUR",
"net_amount": 100.00,
"vat_amount": 19.00,
"total_amount": 119.00
}

INVOICE TEXT:
`;

// Vision-transformer patch edge length in pixels; used to estimate how many
// visual tokens an image of a given size costs (one token per patch).
const PATCH_SIZE = 14;
/**
|
|
* Estimate visual tokens for an image based on dimensions
|
|
*/
|
|
function estimateVisualTokens(width: number, height: number): number {
|
|
return Math.ceil((width * height) / (PATCH_SIZE * PATCH_SIZE));
|
|
}
|
|
|
|
/**
|
|
* Process images one page at a time for reliability
|
|
*/
|
|
function batchImages(images: IImageData[]): IImageData[][] {
|
|
return images.map(img => [img]);
|
|
}
|
|
|
|
/**
|
|
* Convert PDF to JPEG images using ImageMagick with dimension tracking
|
|
*/
|
|
function convertPdfToImages(pdfPath: string): IImageData[] {
|
|
const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
|
|
const outputPattern = path.join(tempDir, 'page-%d.jpg');
|
|
|
|
try {
|
|
execSync(
|
|
`convert -density 150 -quality 90 "${pdfPath}" -background white -alpha remove "${outputPattern}"`,
|
|
{ stdio: 'pipe' }
|
|
);
|
|
|
|
const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.jpg')).sort();
|
|
const images: IImageData[] = [];
|
|
|
|
for (let i = 0; i < files.length; i++) {
|
|
const file = files[i];
|
|
const imagePath = path.join(tempDir, file);
|
|
const imageData = fs.readFileSync(imagePath);
|
|
|
|
const dimensions = execSync(`identify -format "%w %h" "${imagePath}"`, { encoding: 'utf-8' }).trim();
|
|
const [width, height] = dimensions.split(' ').map(Number);
|
|
|
|
images.push({
|
|
base64: imageData.toString('base64'),
|
|
width,
|
|
height,
|
|
pageNum: i + 1,
|
|
});
|
|
}
|
|
|
|
return images;
|
|
} finally {
|
|
fs.rmSync(tempDir, { recursive: true, force: true });
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Convert a batch of pages to markdown using Nanonets-OCR-s
|
|
*/
|
|
async function convertBatchToMarkdown(batch: IImageData[]): Promise<string> {
|
|
const startTime = Date.now();
|
|
const pageNums = batch.map(img => img.pageNum).join(', ');
|
|
|
|
const content: Array<{ type: string; image_url?: { url: string }; text?: string }> = [];
|
|
|
|
for (const img of batch) {
|
|
content.push({
|
|
type: 'image_url',
|
|
image_url: { url: `data:image/jpeg;base64,${img.base64}` },
|
|
});
|
|
}
|
|
|
|
const promptText = batch.length > 1
|
|
? `${NANONETS_OCR_PROMPT}\n\nPlease clearly separate each page's content with "--- PAGE N ---" markers, where N is the page number starting from ${batch[0].pageNum}.`
|
|
: NANONETS_OCR_PROMPT;
|
|
|
|
content.push({ type: 'text', text: promptText });
|
|
|
|
const response = await fetch(`${NANONETS_URL}/chat/completions`, {
|
|
method: 'POST',
|
|
headers: {
|
|
'Content-Type': 'application/json',
|
|
'Authorization': 'Bearer dummy',
|
|
},
|
|
body: JSON.stringify({
|
|
model: NANONETS_MODEL,
|
|
messages: [{
|
|
role: 'user',
|
|
content,
|
|
}],
|
|
max_tokens: 4096 * batch.length,
|
|
temperature: 0.0,
|
|
}),
|
|
signal: AbortSignal.timeout(600000),
|
|
});
|
|
|
|
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
|
|
|
|
if (!response.ok) {
|
|
const errorText = await response.text();
|
|
throw new Error(`Nanonets API error: ${response.status} - ${errorText}`);
|
|
}
|
|
|
|
const data = await response.json();
|
|
let responseContent = (data.choices?.[0]?.message?.content || '').trim();
|
|
|
|
if (batch.length === 1 && !responseContent.includes('--- PAGE')) {
|
|
responseContent = `--- PAGE ${batch[0].pageNum} ---\n${responseContent}`;
|
|
}
|
|
|
|
console.log(` Pages [${pageNums}]: ${responseContent.length} chars (${elapsed}s)`);
|
|
return responseContent;
|
|
}
|
|
|
|
/**
|
|
* Convert all pages of a document to markdown using smart batching
|
|
*/
|
|
async function convertDocumentToMarkdown(images: IImageData[], docName: string): Promise<string> {
|
|
const batches = batchImages(images);
|
|
console.log(` [${docName}] Processing ${images.length} page(s) in ${batches.length} batch(es)...`);
|
|
|
|
const markdownParts: string[] = [];
|
|
|
|
for (let i = 0; i < batches.length; i++) {
|
|
const batch = batches[i];
|
|
const batchTokens = batch.reduce((sum, img) => sum + estimateVisualTokens(img.width, img.height), 0);
|
|
console.log(` Batch ${i + 1}: ${batch.length} page(s), ~${batchTokens} tokens`);
|
|
const markdown = await convertBatchToMarkdown(batch);
|
|
markdownParts.push(markdown);
|
|
}
|
|
|
|
const fullMarkdown = markdownParts.join('\n\n');
|
|
console.log(` [${docName}] Complete: ${fullMarkdown.length} chars total`);
|
|
return fullMarkdown;
|
|
}
|
|
|
|
/**
|
|
* Stop Nanonets container
|
|
*/
|
|
function stopNanonets(): void {
|
|
console.log(' [Docker] Stopping Nanonets container...');
|
|
try {
|
|
execSync('docker stop nanonets-test 2>/dev/null || true', { stdio: 'pipe' });
|
|
execSync('sleep 5', { stdio: 'pipe' });
|
|
console.log(' [Docker] Nanonets stopped');
|
|
} catch {
|
|
console.log(' [Docker] Nanonets was not running');
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Ensure GPT-OSS 20B model is available
|
|
*/
|
|
async function ensureExtractionModel(): Promise<boolean> {
|
|
try {
|
|
const response = await fetch(`${OLLAMA_URL}/api/tags`);
|
|
if (response.ok) {
|
|
const data = await response.json();
|
|
const models = data.models || [];
|
|
if (models.some((m: { name: string }) => m.name === EXTRACTION_MODEL)) {
|
|
console.log(` [Ollama] Model available: ${EXTRACTION_MODEL}`);
|
|
return true;
|
|
}
|
|
}
|
|
} catch {
|
|
return false;
|
|
}
|
|
|
|
console.log(` [Ollama] Pulling ${EXTRACTION_MODEL}...`);
|
|
const pullResponse = await fetch(`${OLLAMA_URL}/api/pull`, {
|
|
method: 'POST',
|
|
headers: { 'Content-Type': 'application/json' },
|
|
body: JSON.stringify({ name: EXTRACTION_MODEL, stream: false }),
|
|
});
|
|
|
|
return pullResponse.ok;
|
|
}
|
|
|
|
/**
|
|
* Parse amount from string (handles European format)
|
|
*/
|
|
function parseAmount(s: string | number | undefined): number {
|
|
if (s === undefined || s === null) return 0;
|
|
if (typeof s === 'number') return s;
|
|
const match = s.match(/([\d.,]+)/);
|
|
if (!match) return 0;
|
|
const numStr = match[1];
|
|
const normalized = numStr.includes(',') && numStr.indexOf(',') > numStr.lastIndexOf('.')
|
|
? numStr.replace(/\./g, '').replace(',', '.')
|
|
: numStr.replace(/,/g, '');
|
|
return parseFloat(normalized) || 0;
|
|
}
|
|
|
|
/**
|
|
* Extract invoice number - MINIMAL normalization for debugging
|
|
*/
|
|
function extractInvoiceNumber(s: string | undefined): string {
|
|
if (!s) return '';
|
|
// Only remove markdown formatting, preserve everything else
|
|
return s.replace(/\*\*/g, '').replace(/`/g, '').trim();
|
|
}
|
|
|
|
/**
|
|
* Extract date (YYYY-MM-DD) from response
|
|
*/
|
|
function extractDate(s: string | undefined): string {
|
|
if (!s) return '';
|
|
let clean = s.replace(/\*\*/g, '').replace(/`/g, '').trim();
|
|
const isoMatch = clean.match(/(\d{4}-\d{2}-\d{2})/);
|
|
if (isoMatch) return isoMatch[1];
|
|
const dmyMatch = clean.match(/(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})/);
|
|
if (dmyMatch) {
|
|
return `${dmyMatch[3]}-${dmyMatch[2].padStart(2, '0')}-${dmyMatch[1].padStart(2, '0')}`;
|
|
}
|
|
return clean.replace(/[^\d-]/g, '').trim();
|
|
}
|
|
|
|
/**
|
|
* Extract currency
|
|
*/
|
|
function extractCurrency(s: string | undefined): string {
|
|
if (!s) return 'EUR';
|
|
const upper = s.toUpperCase();
|
|
if (upper.includes('EUR') || upper.includes('€')) return 'EUR';
|
|
if (upper.includes('USD') || upper.includes('$')) return 'USD';
|
|
if (upper.includes('GBP') || upper.includes('£')) return 'GBP';
|
|
return 'EUR';
|
|
}
|
|
|
|
/**
|
|
* Extract JSON from response
|
|
*/
|
|
function extractJsonFromResponse(response: string): Record<string, unknown> | null {
|
|
let cleanResponse = response.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
|
|
const codeBlockMatch = cleanResponse.match(/```(?:json)?\s*([\s\S]*?)```/);
|
|
const jsonStr = codeBlockMatch ? codeBlockMatch[1].trim() : cleanResponse;
|
|
|
|
try {
|
|
return JSON.parse(jsonStr);
|
|
} catch {
|
|
const jsonMatch = jsonStr.match(/\{[\s\S]*\}/);
|
|
if (jsonMatch) {
|
|
try {
|
|
return JSON.parse(jsonMatch[0]);
|
|
} catch {
|
|
return null;
|
|
}
|
|
}
|
|
return null;
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Parse JSON response into IInvoice
|
|
*/
|
|
function parseJsonToInvoice(response: string): IInvoice | null {
|
|
const parsed = extractJsonFromResponse(response);
|
|
if (!parsed) return null;
|
|
|
|
return {
|
|
invoice_number: extractInvoiceNumber(String(parsed.invoice_number || '')),
|
|
invoice_date: extractDate(String(parsed.invoice_date || '')),
|
|
vendor_name: String(parsed.vendor_name || '').replace(/\*\*/g, '').replace(/`/g, '').trim(),
|
|
currency: extractCurrency(String(parsed.currency || '')),
|
|
net_amount: parseAmount(parsed.net_amount as string | number),
|
|
vat_amount: parseAmount(parsed.vat_amount as string | number),
|
|
total_amount: parseAmount(parsed.total_amount as string | number),
|
|
};
|
|
}
|
|
|
|
/**
 * Extract invoice fields from OCR markdown using GPT-OSS 20B via Ollama,
 * streaming thinking/output tokens to stdout for live debugging.
 *
 * Logs the full prompt and the raw response, then parses the accumulated
 * output into an IInvoice (null when no JSON object could be recovered).
 */
async function extractInvoiceFromMarkdown(markdown: string, queryId: string): Promise<IInvoice | null> {
  const startTime = Date.now();
  const fullPrompt = JSON_EXTRACTION_PROMPT + markdown;

  // Log exact prompt
  console.log(`\n  [${queryId}] ===== PROMPT =====`);
  console.log(fullPrompt);
  console.log(`  [${queryId}] ===== END PROMPT (${fullPrompt.length} chars) =====\n`);

  const response = await fetch(`${OLLAMA_URL}/api/chat`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: EXTRACTION_MODEL,
      // NOTE(review): the throwaway greeting exchange appears intended to
      // prime the chat template before the real prompt — confirm it is still
      // needed for this model.
      messages: [
        { role: 'user', content: 'Hi there, how are you?' },
        { role: 'assistant', content: 'Good, how can I help you today?' },
        { role: 'user', content: fullPrompt },
      ],
      stream: true, // Ollama streams NDJSON: one JSON object per line
    }),
    signal: AbortSignal.timeout(600000), // 10-minute cap; thinking can be slow
  });

  if (!response.ok) {
    const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
    console.log(`  [${queryId}] ERROR: ${response.status} (${elapsed}s)`);
    throw new Error(`Ollama API error: ${response.status}`);
  }

  // Stream the response
  let content = '';          // accumulated answer tokens
  let thinkingContent = '';  // accumulated chain-of-thought tokens
  let thinkingStarted = false;
  let outputStarted = false;
  const reader = response.body!.getReader();
  const decoder = new TextDecoder();

  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;

      const chunk = decoder.decode(value, { stream: true });

      // NOTE(review): a JSON line cut in half by a chunk boundary fails
      // JSON.parse and is dropped rather than re-joined with the next chunk,
      // so tokens can be silently lost — consider buffering the trailing
      // partial line across reads.
      for (const line of chunk.split('\n').filter(l => l.trim())) {
        try {
          const json = JSON.parse(line);

          // "thinking" tokens are echoed live, prefixed once per response.
          const thinking = json.message?.thinking || '';
          if (thinking) {
            if (!thinkingStarted) {
              process.stdout.write(`  [${queryId}] THINKING: `);
              thinkingStarted = true;
            }
            process.stdout.write(thinking);
            thinkingContent += thinking;
          }

          // Answer tokens; a newline separates them from the thinking stream.
          const token = json.message?.content || '';
          if (token) {
            if (!outputStarted) {
              if (thinkingStarted) process.stdout.write('\n');
              process.stdout.write(`  [${queryId}] OUTPUT: `);
              outputStarted = true;
            }
            process.stdout.write(token);
            content += token;
          }
        } catch {
          // Ignore parse errors for partial chunks
        }
      }
    }
  } finally {
    // Terminate the streamed line so later logs start on a fresh line.
    if (thinkingStarted || outputStarted) process.stdout.write('\n');
  }

  const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
  console.log(`  [${queryId}] Done: ${thinkingContent.length} thinking chars, ${content.length} output chars (${elapsed}s)`);

  // Log raw response for debugging
  console.log(`  [${queryId}] RAW RESPONSE: ${content}`);

  return parseJsonToInvoice(content);
}
/**
|
|
* Extract invoice (single pass)
|
|
*/
|
|
async function extractInvoice(markdown: string, docName: string): Promise<IInvoice> {
|
|
console.log(` [${docName}] Extracting...`);
|
|
const invoice = await extractInvoiceFromMarkdown(markdown, docName);
|
|
if (!invoice) {
|
|
return {
|
|
invoice_number: '',
|
|
invoice_date: '',
|
|
vendor_name: '',
|
|
currency: 'EUR',
|
|
net_amount: 0,
|
|
vat_amount: 0,
|
|
total_amount: 0,
|
|
};
|
|
}
|
|
console.log(` [${docName}] Extracted: ${JSON.stringify(invoice, null, 2)}`);
|
|
return invoice;
|
|
}
|
|
|
|
/**
|
|
* Normalize date to YYYY-MM-DD
|
|
*/
|
|
function normalizeDate(dateStr: string | null): string {
|
|
if (!dateStr) return '';
|
|
if (/^\d{4}-\d{2}-\d{2}$/.test(dateStr)) return dateStr;
|
|
|
|
const monthMap: Record<string, string> = {
|
|
JAN: '01', FEB: '02', MAR: '03', APR: '04', MAY: '05', JUN: '06',
|
|
JUL: '07', AUG: '08', SEP: '09', OCT: '10', NOV: '11', DEC: '12',
|
|
};
|
|
|
|
let match = dateStr.match(/^(\d{1,2})-([A-Z]{3})-(\d{4})$/i);
|
|
if (match) {
|
|
return `${match[3]}-${monthMap[match[2].toUpperCase()] || '01'}-${match[1].padStart(2, '0')}`;
|
|
}
|
|
|
|
match = dateStr.match(/^(\d{1,2})[\/.](\d{1,2})[\/.](\d{4})$/);
|
|
if (match) {
|
|
return `${match[3]}-${match[2].padStart(2, '0')}-${match[1].padStart(2, '0')}`;
|
|
}
|
|
|
|
return dateStr;
|
|
}
|
|
|
|
/**
|
|
* Compare extracted invoice against expected - detailed output
|
|
*/
|
|
function compareInvoice(
|
|
extracted: IInvoice,
|
|
expected: IInvoice
|
|
): { match: boolean; errors: string[] } {
|
|
const errors: string[] = [];
|
|
|
|
// Invoice number comparison - exact match after whitespace normalization
|
|
const extNum = extracted.invoice_number?.trim() || '';
|
|
const expNum = expected.invoice_number?.trim() || '';
|
|
if (extNum.toLowerCase() !== expNum.toLowerCase()) {
|
|
errors.push(`invoice_number: expected "${expected.invoice_number}", got "${extracted.invoice_number}"`);
|
|
}
|
|
|
|
if (normalizeDate(extracted.invoice_date) !== normalizeDate(expected.invoice_date)) {
|
|
errors.push(`invoice_date: expected "${expected.invoice_date}", got "${extracted.invoice_date}"`);
|
|
}
|
|
|
|
if (Math.abs(extracted.total_amount - expected.total_amount) > 0.02) {
|
|
errors.push(`total_amount: expected ${expected.total_amount}, got ${extracted.total_amount}`);
|
|
}
|
|
|
|
if (extracted.currency?.toUpperCase() !== expected.currency?.toUpperCase()) {
|
|
errors.push(`currency: expected "${expected.currency}", got "${extracted.currency}"`);
|
|
}
|
|
|
|
return { match: errors.length === 0, errors };
|
|
}
|
|
|
|
/**
|
|
* Find test cases for failed invoices only
|
|
*/
|
|
function findTestCases(): ITestCase[] {
|
|
const testDir = path.join(process.cwd(), '.nogit/invoices');
|
|
if (!fs.existsSync(testDir)) return [];
|
|
|
|
const files = fs.readdirSync(testDir);
|
|
const testCases: ITestCase[] = [];
|
|
|
|
for (const invoiceName of FAILED_INVOICES) {
|
|
const pdfFile = `${invoiceName}.pdf`;
|
|
const jsonFile = `${invoiceName}.json`;
|
|
|
|
if (files.includes(pdfFile) && files.includes(jsonFile)) {
|
|
testCases.push({
|
|
name: invoiceName,
|
|
pdfPath: path.join(testDir, pdfFile),
|
|
jsonPath: path.join(testDir, jsonFile),
|
|
});
|
|
} else {
|
|
console.warn(`Warning: Missing files for ${invoiceName}`);
|
|
}
|
|
}
|
|
|
|
return testCases;
|
|
}
|
|
|
|
// ============ TESTS ============

// Resolve test cases at module load so the per-invoice tests below can be
// registered statically with tap.
const testCases = findTestCases();
console.log(`\n========================================`);
console.log(`  FAILED INVOICES DEBUG TEST`);
console.log(`========================================`);
console.log(`  Testing ${testCases.length} failed invoices:`);
for (const tc of testCases) {
  console.log(`    - ${tc.name}`);
}
console.log(`========================================\n`);

// Ensure temp directory exists
if (!fs.existsSync(TEMP_MD_DIR)) {
  fs.mkdirSync(TEMP_MD_DIR, { recursive: true });
}

// -------- STAGE 1: OCR with Nanonets --------
tap.test('Stage 1: Setup Nanonets', async () => {
  console.log('\n========== STAGE 1: Nanonets OCR ==========\n');
  // Starts (or reuses) the Nanonets OCR container; aborts the run if it fails.
  const ok = await ensureNanonetsOcr();
  expect(ok).toBeTrue();
});

tap.test('Stage 1: Convert failed invoices to markdown', async () => {
  console.log('\n  Converting failed invoice PDFs to markdown with Nanonets-OCR-s...\n');

  for (const tc of testCases) {
    console.log(`\n  === ${tc.name} ===`);

    const images = convertPdfToImages(tc.pdfPath);
    console.log(`  Pages: ${images.length}`);

    const markdown = await convertDocumentToMarkdown(images, tc.name);

    // Persist markdown so Stage 2 can run after the OCR container is stopped.
    const mdPath = path.join(TEMP_MD_DIR, `${tc.name}.md`);
    fs.writeFileSync(mdPath, markdown);
    tc.markdownPath = mdPath;
    console.log(`  Saved: ${mdPath}`);

    // Also save to .nogit for inspection
    const debugMdPath = path.join(process.cwd(), '.nogit/invoices', `${tc.name}.debug.md`);
    fs.writeFileSync(debugMdPath, markdown);
    console.log(`  Debug copy: ${debugMdPath}`);
  }

  console.log('\n  Stage 1 complete: All failed invoices converted to markdown\n');
});

tap.test('Stage 1: Stop Nanonets', async () => {
  // Free GPU/RAM before Stage 2 spins up the extraction model.
  stopNanonets();
  await new Promise(resolve => setTimeout(resolve, 3000));
  expect(isContainerRunning('nanonets-test')).toBeFalse();
});
// -------- STAGE 2: Extraction with GPT-OSS 20B --------

tap.test('Stage 2: Setup Ollama + GPT-OSS 20B', async () => {
  console.log('\n========== STAGE 2: GPT-OSS 20B Extraction ==========\n');

  // ensureMiniCpm also verifies the Ollama daemon itself is reachable.
  const ollamaOk = await ensureMiniCpm();
  expect(ollamaOk).toBeTrue();

  const extractionOk = await ensureExtractionModel();
  expect(extractionOk).toBeTrue();
});

// Pass/fail tallies shared between the per-invoice tests and the summary below.
let passedCount = 0;
let failedCount = 0;
// Register one tap test per failed invoice. These intentionally never fail the
// suite (the final expect is commented out) — they exist to print diagnostics.
for (const tc of testCases) {
  tap.test(`Stage 2: Extract ${tc.name}`, async () => {
    const expected: IInvoice = JSON.parse(fs.readFileSync(tc.jsonPath, 'utf-8'));
    console.log(`\n  ========================================`);
    console.log(`  === ${tc.name} ===`);
    console.log(`  ========================================`);
    console.log(`  EXPECTED:`);
    console.log(`    invoice_number: "${expected.invoice_number}"`);
    console.log(`    invoice_date: "${expected.invoice_date}"`);
    console.log(`    vendor_name: "${expected.vendor_name}"`);
    console.log(`    total_amount: ${expected.total_amount} ${expected.currency}`);

    const startTime = Date.now();

    // Stage 2 reads the markdown Stage 1 wrote to the temp directory.
    const mdPath = path.join(TEMP_MD_DIR, `${tc.name}.md`);
    if (!fs.existsSync(mdPath)) {
      throw new Error(`Markdown not found: ${mdPath}. Run Stage 1 first.`);
    }
    const markdown = fs.readFileSync(mdPath, 'utf-8');
    console.log(`  Markdown: ${markdown.length} chars`);

    const extracted = await extractInvoice(markdown, tc.name);

    const elapsedMs = Date.now() - startTime;

    console.log(`\n  EXTRACTED:`);
    console.log(`    invoice_number: "${extracted.invoice_number}"`);
    console.log(`    invoice_date: "${extracted.invoice_date}"`);
    console.log(`    vendor_name: "${extracted.vendor_name}"`);
    console.log(`    total_amount: ${extracted.total_amount} ${extracted.currency}`);

    const result = compareInvoice(extracted, expected);

    if (result.match) {
      passedCount++;
      console.log(`\n  Result: ✓ MATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
    } else {
      failedCount++;
      console.log(`\n  Result: ✗ MISMATCH (${(elapsedMs / 1000).toFixed(1)}s)`);
      console.log(`  ERRORS:`);
      result.errors.forEach(e => console.log(`    - ${e}`));
    }

    // Don't fail the test - we're debugging
    // expect(result.match).toBeTrue();
  });
}
tap.test('Summary', async () => {
  const totalInvoices = testCases.length;
  // Guard against division by zero when no fixture files were found.
  const accuracy = totalInvoices > 0 ? (passedCount / totalInvoices) * 100 : 0;

  console.log(`\n========================================`);
  console.log(`  Failed Invoices Debug Summary`);
  console.log(`========================================`);
  console.log(`  Passed: ${passedCount}/${totalInvoices}`);
  console.log(`  Failed: ${failedCount}/${totalInvoices}`);
  console.log(`  Accuracy: ${accuracy.toFixed(1)}%`);
  console.log(`========================================`);
  console.log(`  Markdown files saved to: ${TEMP_MD_DIR}`);
  console.log(`  Debug copies in: .nogit/invoices/*.debug.md`);
  console.log(`========================================\n`);

  // Don't cleanup temp files for debugging
  console.log(`  Keeping temp files for debugging.\n`);
});

export default tap.start();