commit b58bcabc76 (parent 6dbd06073b)
Date: 2026-01-19 11:51:23 +00:00
3 changed files with 559 additions and 218 deletions


@@ -28,12 +28,19 @@ interface ITransaction {
   amount: number;
 }
+
+interface IImageData {
+  base64: string;
+  width: number;
+  height: number;
+  pageNum: number;
+}
 
 interface ITestCase {
   name: string;
   pdfPath: string;
   jsonPath: string;
   markdownPath?: string;
-  images?: string[];
+  images?: IImageData[];
 }
 
 // Nanonets-specific prompt for document OCR to markdown
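For reference, this is the shape a test case carries once pages are rasterized under the new interface; every value below is invented for illustration.

// Illustrative only: an ITestCase after the IImageData change
// (paths, dimensions, and page count are made up for the example).
const example: ITestCase = {
  name: 'statement-jan',
  pdfPath: 'fixtures/statement-jan.pdf',
  jsonPath: 'fixtures/statement-jan.json',
  images: [
    { base64: '/9j/4AAQ...', width: 1275, height: 1650, pageNum: 1 },
    { base64: '/9j/4AAQ...', width: 1275, height: 1650, pageNum: 2 },
  ],
};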
@@ -50,12 +57,48 @@ const JSON_EXTRACTION_PROMPT = `Extract ALL transactions from this bank statemen
 STATEMENT:
 `;
 
+// Constants for smart batching
+const MAX_VISUAL_TOKENS = 28000; // ~32K context minus prompt/output headroom
+const PATCH_SIZE = 14; // Qwen2.5-VL uses 14x14 patches
+
 /**
- * Convert PDF to PNG images using ImageMagick
+ * Estimate visual tokens for an image based on dimensions
  */
-function convertPdfToImages(pdfPath: string): string[] {
+function estimateVisualTokens(width: number, height: number): number {
+  return Math.ceil((width * height) / (PATCH_SIZE * PATCH_SIZE));
+}
+
+/**
+ * Batch images to fit within context window
+ */
+function batchImages(images: IImageData[]): IImageData[][] {
+  const batches: IImageData[][] = [];
+  let currentBatch: IImageData[] = [];
+  let currentTokens = 0;
+
+  for (const img of images) {
+    const imgTokens = estimateVisualTokens(img.width, img.height);
+    if (currentTokens + imgTokens > MAX_VISUAL_TOKENS && currentBatch.length > 0) {
+      batches.push(currentBatch);
+      currentBatch = [img];
+      currentTokens = imgTokens;
+    } else {
+      currentBatch.push(img);
+      currentTokens += imgTokens;
+    }
+  }
+  if (currentBatch.length > 0) batches.push(currentBatch);
+  return batches;
+}
+
+/**
+ * Convert PDF to JPEG images using ImageMagick with dimension tracking
+ */
+function convertPdfToImages(pdfPath: string): IImageData[] {
   const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pdf-convert-'));
-  const outputPattern = path.join(tempDir, 'page-%d.png');
+  const outputPattern = path.join(tempDir, 'page-%d.jpg');
 
   try {
     execSync(
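To make the batching concrete, here is the arithmetic for hypothetical 150 DPI letter-size scans (1275x1650 px). The estimate divides pixel area by 14x14 patches; if anything it is conservative, since Qwen2.5-VL additionally merges 2x2 adjacent patches into a single visual token.

// Worked example (hypothetical 1275x1650 px pages):
// estimateVisualTokens(1275, 1650) = ceil(2,103,750 / 196) = 10,734.
// Two pages fit under MAX_VISUAL_TOKENS (21,468 <= 28,000); a third would
// overflow (32,202 > 28,000), so a 5-page document batches as [2, 2, 1].
const pages: IImageData[] = [1, 2, 3, 4, 5].map(pageNum => ({
  base64: '', // payload elided for the example
  width: 1275,
  height: 1650,
  pageNum,
}));
console.log(batchImages(pages).map(b => b.length)); // -> [ 2, 2, 1 ]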
@@ -63,13 +106,24 @@ function convertPdfToImages(pdfPath: string): string[] {
       { stdio: 'pipe' }
     );
 
-    const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.png')).sort();
-    const images: string[] = [];
+    const files = fs.readdirSync(tempDir).filter((f: string) => f.endsWith('.jpg')).sort();
+    const images: IImageData[] = [];
 
-    for (const file of files) {
+    for (let i = 0; i < files.length; i++) {
+      const file = files[i];
       const imagePath = path.join(tempDir, file);
       const imageData = fs.readFileSync(imagePath);
-      images.push(imageData.toString('base64'));
+
+      // Get image dimensions using identify command
+      const dimensions = execSync(`identify -format "%w %h" "${imagePath}"`, { encoding: 'utf-8' }).trim();
+      const [width, height] = dimensions.split(' ').map(Number);
+
+      images.push({
+        base64: imageData.toString('base64'),
+        width,
+        height,
+        pageNum: i + 1,
+      });
     }
 
     return images;
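Shelling out to `identify` works, but costs one subprocess per page. A sketch of an in-process alternative that reads the dimensions straight from the JPEG's SOF header; the helper name is mine, and it assumes the baseline/progressive JPEGs that ImageMagick emits.

import * as fs from 'fs';

// Hypothetical helper: read width/height from the JPEG SOF marker instead of
// spawning `identify` once per page.
function jpegDimensions(filePath: string): { width: number; height: number } {
  const buf = fs.readFileSync(filePath);
  if (buf[0] !== 0xff || buf[1] !== 0xd8) throw new Error('Not a JPEG');
  let offset = 2;
  while (offset + 9 < buf.length) {
    if (buf[offset] !== 0xff) { offset++; continue; }
    const marker = buf[offset + 1];
    if (marker === 0xff) { offset++; continue; } // skip fill bytes
    // SOF0..SOF15, excluding DHT (C4), JPG (C8) and DAC (CC)
    if (marker >= 0xc0 && marker <= 0xcf && marker !== 0xc4 && marker !== 0xc8 && marker !== 0xcc) {
      // Segment layout: marker (2), length (2), precision (1), height (2), width (2)
      return { height: buf.readUInt16BE(offset + 5), width: buf.readUInt16BE(offset + 7) };
    }
    offset += 2 + buf.readUInt16BE(offset + 2); // skip marker + segment payload
  }
  throw new Error('No SOF marker found');
}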
@@ -79,10 +133,28 @@ function convertPdfToImages(pdfPath: string): string[] {
 }
 
 /**
- * Convert a single page to markdown using Nanonets-OCR-s
+ * Convert a batch of pages to markdown using Nanonets-OCR-s
  */
-async function convertPageToMarkdown(image: string, pageNum: number): Promise<string> {
+async function convertBatchToMarkdown(batch: IImageData[]): Promise<string> {
   const startTime = Date.now();
+  const pageNums = batch.map(img => img.pageNum).join(', ');
+
+  // Build content array with all images first, then the prompt
+  const content: Array<{ type: string; image_url?: { url: string }; text?: string }> = [];
+  for (const img of batch) {
+    content.push({
+      type: 'image_url',
+      image_url: { url: `data:image/jpeg;base64,${img.base64}` },
+    });
+  }
+
+  // Add prompt with page separator instruction if multiple pages
+  const promptText = batch.length > 1
+    ? `${NANONETS_OCR_PROMPT}\n\nPlease clearly separate each page's content with "--- PAGE N ---" markers, where N is the page number starting from ${batch[0].pageNum}.`
+    : NANONETS_OCR_PROMPT;
+  content.push({ type: 'text', text: promptText });
 
   const response = await fetch(`${NANONETS_URL}/chat/completions`, {
     method: 'POST',
@@ -94,12 +166,9 @@ async function convertPageToMarkdown(image: string, pageNum: number): Promise<st
       model: NANONETS_MODEL,
       messages: [{
         role: 'user',
-        content: [
-          { type: 'image_url', image_url: { url: `data:image/png;base64,${image}` }},
-          { type: 'text', text: NANONETS_OCR_PROMPT },
-        ],
+        content,
       }],
-      max_tokens: 4096,
+      max_tokens: 4096 * batch.length, // Scale output tokens with batch size
       temperature: 0.0,
     }),
   });
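Put together, the request follows the OpenAI-compatible multi-image convention: one user message whose content array carries every image part first and the text prompt last, with max_tokens scaled by batch size. Roughly, for a two-page batch (strings abbreviated, page numbers invented):

// Sketch of the request body for a hypothetical 2-page batch.
const exampleBody = {
  model: NANONETS_MODEL,
  messages: [{
    role: 'user',
    content: [
      { type: 'image_url', image_url: { url: 'data:image/jpeg;base64,/9j/...' } }, // page 3
      { type: 'image_url', image_url: { url: 'data:image/jpeg;base64,/9j/...' } }, // page 4
      { type: 'text', text: '<NANONETS_OCR_PROMPT + "--- PAGE N ---" separator instruction>' },
    ],
  }],
  max_tokens: 8192, // 4096 * batch.length with batch.length === 2
  temperature: 0.0,
};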
@@ -112,25 +181,35 @@ async function convertPageToMarkdown(image: string, pageNum: number): Promise<st
   }
 
   const data = await response.json();
-  const content = (data.choices?.[0]?.message?.content || '').trim();
-  console.log(` Page ${pageNum}: ${content.length} chars (${elapsed}s)`);
-  return content;
+  let responseContent = (data.choices?.[0]?.message?.content || '').trim();
+
+  // For single-page batches, add page marker if not present
+  if (batch.length === 1 && !responseContent.includes('--- PAGE')) {
+    responseContent = `--- PAGE ${batch[0].pageNum} ---\n${responseContent}`;
+  }
+
+  console.log(` Pages [${pageNums}]: ${responseContent.length} chars (${elapsed}s)`);
+  return responseContent;
 }
 
 /**
- * Convert all pages of a document to markdown
+ * Convert all pages of a document to markdown using smart batching
  */
-async function convertDocumentToMarkdown(images: string[], docName: string): Promise<string> {
-  console.log(` [${docName}] Converting ${images.length} page(s)...`);
+async function convertDocumentToMarkdown(images: IImageData[], docName: string): Promise<string> {
+  const batches = batchImages(images);
+  console.log(` [${docName}] Processing ${images.length} page(s) in ${batches.length} batch(es)...`);
 
-  const markdownPages: string[] = [];
+  const markdownParts: string[] = [];
 
-  for (let i = 0; i < images.length; i++) {
-    const markdown = await convertPageToMarkdown(images[i], i + 1);
-    markdownPages.push(`--- PAGE ${i + 1} ---\n${markdown}`);
+  for (let i = 0; i < batches.length; i++) {
+    const batch = batches[i];
+    const batchTokens = batch.reduce((sum, img) => sum + estimateVisualTokens(img.width, img.height), 0);
+    console.log(` Batch ${i + 1}: ${batch.length} page(s), ~${batchTokens} tokens`);
+    const markdown = await convertBatchToMarkdown(batch);
+    markdownParts.push(markdown);
   }
 
-  const fullMarkdown = markdownPages.join('\n\n');
+  const fullMarkdown = markdownParts.join('\n\n');
   console.log(` [${docName}] Complete: ${fullMarkdown.length} chars total`);
   return fullMarkdown;
 }
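Since per-page boundaries now live inside the markdown as "--- PAGE N ---" markers rather than in the array structure, downstream code that needs individual pages has to split on them. A hypothetical helper for that; the name and regex are mine, and it assumes the model emits the markers on their own line, which is requested in the prompt but not guaranteed.

// Hypothetical helper: split the combined markdown back into per-page chunks.
function splitByPageMarkers(fullMarkdown: string): Map<number, string> {
  const pages = new Map<number, string>();
  const matches = [...fullMarkdown.matchAll(/^--- PAGE (\d+) ---$/gm)];
  for (let i = 0; i < matches.length; i++) {
    const start = matches[i].index! + matches[i][0].length;
    const end = i + 1 < matches.length ? matches[i + 1].index! : fullMarkdown.length;
    pages.set(Number(matches[i][1]), fullMarkdown.slice(start, end).trim());
  }
  return pages;
}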
@@ -161,25 +240,6 @@ async function ensureExtractionModel(): Promise<boolean> {
     const models = data.models || [];
     if (models.some((m: { name: string }) => m.name === EXTRACTION_MODEL)) {
       console.log(` [Ollama] Model available: ${EXTRACTION_MODEL}`);
-
-      // Warmup: send a simple request to ensure model is loaded
-      console.log(` [Ollama] Warming up model...`);
-      const warmupResponse = await fetch(`${OLLAMA_URL}/api/chat`, {
-        method: 'POST',
-        headers: { 'Content-Type': 'application/json' },
-        body: JSON.stringify({
-          model: EXTRACTION_MODEL,
-          messages: [{ role: 'user', content: 'Return: [{"test": 1}]' }],
-          stream: false,
-        }),
-        signal: AbortSignal.timeout(120000),
-      });
-
-      if (warmupResponse.ok) {
-        const warmupData = await warmupResponse.json();
-        console.log(` [Ollama] Warmup complete (${warmupData.message?.content?.length || 0} chars)`);
-      }
-
       return true;
     }
   }
@@ -201,22 +261,24 @@ async function ensureExtractionModel(): Promise<boolean> {
  * Extract transactions from markdown using GPT-OSS 20B (streaming)
  */
 async function extractTransactionsFromMarkdown(markdown: string, queryId: string): Promise<ITransaction[]> {
   console.log(` [${queryId}] Sending to ${EXTRACTION_MODEL}...`);
+  console.log(` [${queryId}] Markdown length: ${markdown.length}`);
   const startTime = Date.now();
 
   const fullPrompt = JSON_EXTRACTION_PROMPT + markdown;
-  console.log(` [${queryId}] Prompt preview: ${fullPrompt.substring(0, 200)}...`);
+
+  // Log exact prompt
+  console.log(`\n [${queryId}] ===== PROMPT =====`);
+  console.log(fullPrompt);
+  console.log(` [${queryId}] ===== END PROMPT (${fullPrompt.length} chars) =====\n`);
 
   const response = await fetch(`${OLLAMA_URL}/api/chat`, {
     method: 'POST',
     headers: { 'Content-Type': 'application/json' },
     body: JSON.stringify({
       model: EXTRACTION_MODEL,
-      messages: [{
-        role: 'user',
-        content: fullPrompt,
-      }],
+      messages: [
+        { role: 'user', content: 'Hi there, how are you?' },
+        { role: 'assistant', content: 'Good, how can I help you today?' },
+        { role: 'user', content: fullPrompt },
+      ],
       stream: true,
     }),
     signal: AbortSignal.timeout(600000), // 10 minute timeout
@@ -228,35 +290,59 @@ async function extractTransactionsFromMarkdown(markdown: string, queryId: string
     throw new Error(`Ollama API error: ${response.status}`);
   }
 
-  // Stream the response and log to console
+  // Stream the response
   let content = '';
+  let thinkingContent = '';
+  let thinkingStarted = false;
+  let outputStarted = false;
   const reader = response.body!.getReader();
   const decoder = new TextDecoder();
 
-  process.stdout.write(` [${queryId}] `);
-
-  while (true) {
-    const { done, value } = await reader.read();
-    if (done) break;
-
-    const chunk = decoder.decode(value, { stream: true });
-
-    // Each line is a JSON object
-    for (const line of chunk.split('\n').filter(l => l.trim())) {
-      try {
-        const json = JSON.parse(line);
-        const token = json.message?.content || '';
-        if (token) {
-          process.stdout.write(token);
-          content += token;
-        }
-      } catch {
-        // Ignore parse errors for partial chunks
-      }
-    }
-  }
+  try {
+    while (true) {
+      const { done, value } = await reader.read();
+      if (done) break;
+
+      const chunk = decoder.decode(value, { stream: true });
+
+      // Each line is a JSON object
+      for (const line of chunk.split('\n').filter(l => l.trim())) {
+        try {
+          const json = JSON.parse(line);
+
+          // Stream thinking tokens
+          const thinking = json.message?.thinking || '';
+          if (thinking) {
+            if (!thinkingStarted) {
+              process.stdout.write(` [${queryId}] THINKING: `);
+              thinkingStarted = true;
+            }
+            process.stdout.write(thinking);
+            thinkingContent += thinking;
+          }
+
+          // Stream content tokens
+          const token = json.message?.content || '';
+          if (token) {
+            if (!outputStarted) {
+              if (thinkingStarted) process.stdout.write('\n');
+              process.stdout.write(` [${queryId}] OUTPUT: `);
+              outputStarted = true;
+            }
+            process.stdout.write(token);
+            content += token;
+          }
+        } catch {
+          // Ignore parse errors for partial chunks
+        }
+      }
+    }
+  } finally {
+    if (thinkingStarted || outputStarted) process.stdout.write('\n');
+  }
 
   const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
-  console.log(`\n [${queryId}] Done: ${content.length} chars (${elapsed}s)`);
+  console.log(` [${queryId}] Done: ${thinkingContent.length} thinking chars, ${content.length} output chars (${elapsed}s)`);
 
   return parseJsonResponse(content, queryId);
 }
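One caveat with the loop above: each network chunk is split on newlines independently, so an NDJSON object that straddles a chunk boundary fails JSON.parse and is silently swallowed by the catch, dropping its tokens. A sketch of a carry-buffer variant; the helper name is mine, and it assumes Ollama's one-JSON-object-per-line stream format.

// Hypothetical helper: line-buffered NDJSON reading that carries a partial
// tail line across chunk boundaries instead of discarding it.
async function readNdjsonStream(
  body: ReadableStream<Uint8Array>,
  onMessage: (msg: { message?: { content?: string; thinking?: string } }) => void,
): Promise<void> {
  const reader = body.getReader();
  const decoder = new TextDecoder();
  let buffered = '';
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buffered += decoder.decode(value, { stream: true });
    const lines = buffered.split('\n');
    buffered = lines.pop() ?? ''; // keep the possibly-partial tail for the next chunk
    for (const line of lines) {
      if (line.trim()) onMessage(JSON.parse(line)); // each complete line is one JSON object
    }
  }
  if (buffered.trim()) onMessage(JSON.parse(buffered)); // flush the final line
}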