// einvoice/test/suite/einvoice_corpus-validation/test.corp-04.peppol-large.ts

import { tap, expect } from '@git.zone/tstest/tapbundle';
import { EInvoice } from '../../../ts/index.js';
import { ValidationLevel } from '../../../ts/interfaces/common.js';
import { CorpusLoader } from '../../helpers/corpus.loader.js';
import { PerformanceTracker } from '../../helpers/performance.tracker.js';
import * as path from 'path';
/**
* Test ID: CORP-04
* Test Description: PEPPOL Large Files Processing
* Priority: High
*
* This test validates processing of large PEPPOL BIS 3.0 files
* to ensure scalability and performance with real-world data volumes.
*/
tap.test('CORP-04: PEPPOL Large Files Processing - should handle large PEPPOL files efficiently', async () => {
// Load PEPPOL test files
const peppolFiles = await CorpusLoader.loadCategory('PEPPOL');
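  // Each corpus entry exposes at least { path, size }, which the size-based
  // filtering below depends on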
// Handle case where no files are found
if (peppolFiles.length === 0) {
console.log('⚠ No PEPPOL files found in corpus - skipping test');
return;
}
  // Sort a copy by file size to process largest files first
  // (Array.prototype.sort mutates in place, so copy to keep peppolFiles intact)
  const sortedFiles = [...peppolFiles].sort((a, b) => b.size - a.size);

  // For CI/CD environments, check file sizes
  const maxFileSize = 5 * 1024 * 1024; // 5MB threshold for CI/CD
  const smallestFile = sortedFiles[sortedFiles.length - 1];
// Skip test if all files are too large for CI/CD environment
if (smallestFile.size > maxFileSize) {
console.log(`⚠ All PEPPOL files are larger than ${maxFileSize / 1024 / 1024}MB`);
console.log(` Smallest file: ${path.basename(smallestFile.path)} (${(smallestFile.size / 1024 / 1024).toFixed(1)}MB)`);
console.log(` This test is designed for large file handling but skipped in CI/CD to prevent timeouts`);
console.log(` ✓ Test skipped (all files too large for CI/CD environment)`);
return;
}
// Take files under the threshold, or just the smallest one
const filesToProcess = sortedFiles.filter(f => f.size <= maxFileSize).slice(0, 3);
if (filesToProcess.length === 0) {
filesToProcess.push(smallestFile);
}
console.log(`Testing ${filesToProcess.length} of ${peppolFiles.length} PEPPOL files`);
console.log(`Largest file in test: ${path.basename(filesToProcess[0].path)} (${(filesToProcess[0].size / 1024).toFixed(1)}KB)`);
const results = {
total: filesToProcess.length,
successful: 0,
failed: 0,
largeFiles: 0, // Files > 100KB
veryLargeFiles: 0, // Files > 500KB
processingTimes: [] as number[],
memorySamples: [] as number[],
fileSizes: [] as number[],
profiles: new Map<string, number>()
};
const failures: Array<{
file: string;
size: number;
error: string;
duration?: number;
}> = [];
// Process files
for (const file of filesToProcess) {
const isLarge = file.size > 100 * 1024;
const isVeryLarge = file.size > 500 * 1024;
if (isLarge) results.largeFiles++;
if (isVeryLarge) results.veryLargeFiles++;
try {
const xmlBuffer = await CorpusLoader.loadFile(file.path);
// Measure memory before processing
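      // Heap sampling is noisy; when node is started with --expose-gc, forcing a
      // collection first stabilizes the before/after delta (no-op otherwise)
      (globalThis as any).gc?.();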
const memBefore = process.memoryUsage().heapUsed;
// Track performance
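      // PerformanceTracker.track wraps the async work and returns both the
      // parsed invoice (result) and a timing metric (metric.duration, in ms)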
const { result: invoice, metric } = await PerformanceTracker.track(
'peppol-large-processing',
async () => {
const einvoice = new EInvoice();
const xmlString = xmlBuffer.toString('utf-8');
await einvoice.fromXmlString(xmlString);
return einvoice;
},
{ file: file.path, size: file.size }
);
// Measure memory after processing
const memAfter = process.memoryUsage().heapUsed;
const memoryUsed = memAfter - memBefore;
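      // The delta can come out negative if a GC cycle runs mid-parse; treat
      // these samples as approximate allocation figures rather than exact ones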
results.processingTimes.push(metric.duration);
results.memorySamples.push(memoryUsed);
results.fileSizes.push(file.size);
// Detect PEPPOL profile
let profile = 'unknown';
if (invoice.metadata?.profile) {
profile = invoice.metadata.profile;
} else if (invoice.metadata?.customizationId) {
// Extract profile from customization ID
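        // PEPPOL BIS customization IDs embed the profile name, e.g.
        // 'urn:cen.eu:en16931:2017#compliant#urn:fdc:peppol.eu:2017:poacc:billing:3.0',
        // so substring matching is a reasonable heuristic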
if (invoice.metadata.customizationId.includes('billing')) profile = 'billing';
else if (invoice.metadata.customizationId.includes('procurement')) profile = 'procurement';
}
results.profiles.set(profile, (results.profiles.get(profile) || 0) + 1);
// Validate the invoice
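      // ValidationLevel.BUSINESS is assumed to be the strictest level here,
      // layering business rules on top of structural checks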
try {
const validationResult = await invoice.validate(ValidationLevel.BUSINESS);
if (validationResult.valid) {
results.successful++;
// Log details for large files
if (isLarge) {
console.log(`✓ Large file ${path.basename(file.path)} (${(file.size/1024).toFixed(0)}KB):`);
console.log(` - Processing time: ${metric.duration.toFixed(0)}ms`);
console.log(` - Memory used: ${(memoryUsed/1024/1024).toFixed(1)}MB`);
console.log(` - Processing rate: ${(file.size/metric.duration).toFixed(0)} bytes/ms`);
} else {
console.log(`${path.basename(file.path)}: Processed successfully`);
}
} else {
results.failed++;
failures.push({
file: path.basename(file.path),
size: file.size,
error: validationResult.errors?.[0]?.message || 'Validation failed',
duration: metric.duration
});
}
} catch (validationError: any) {
results.failed++;
failures.push({
file: path.basename(file.path),
size: file.size,
error: validationError.message,
duration: metric.duration
});
}
} catch (error: any) {
results.failed++;
failures.push({
file: path.basename(file.path),
size: file.size,
error: error.message
});
console.log(`${path.basename(file.path)}: ${error.message}`);
}
}
  // Calculate aggregate performance metrics; guard the divisor so a run where
  // every file failed to load reports 0 instead of NaN
  const sampleCount = Math.max(results.processingTimes.length, 1);
  const avgProcessingTime = results.processingTimes.reduce((a, b) => a + b, 0) / sampleCount;
  const avgMemoryUsed = results.memorySamples.reduce((a, b) => a + b, 0) / sampleCount;

  // Calculate processing rate (bytes per millisecond)
  const processingRates = results.processingTimes.map((time, i) => results.fileSizes[i] / time);
  const avgProcessingRate = processingRates.reduce((a, b) => a + b, 0) / sampleCount;
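  // Note: this averages per-file rates, so every file counts equally regardless
  // of size; overall throughput would instead be total bytes / total time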
// Summary report
console.log('\n=== PEPPOL Large Files Processing Summary ===');
console.log(`Total files: ${results.total}`);
console.log(` - Large files (>100KB): ${results.largeFiles}`);
console.log(` - Very large files (>500KB): ${results.veryLargeFiles}`);
console.log(`Successful: ${results.successful} (${(results.successful/results.total*100).toFixed(1)}%)`);
console.log(`Failed: ${results.failed}`);
console.log('\nPEPPOL Profiles:');
results.profiles.forEach((count, profile) => {
console.log(` - ${profile}: ${count} files`);
});
if (failures.length > 0) {
console.log('\nFailures:');
failures.forEach(f => {
console.log(` ${f.file} (${(f.size/1024).toFixed(1)}KB): ${f.error}`);
});
}
console.log('\nPerformance Metrics:');
console.log(` Average processing time: ${avgProcessingTime.toFixed(2)}ms`);
console.log(` Average memory usage: ${(avgMemoryUsed/1024/1024).toFixed(2)}MB`);
console.log(` Average processing rate: ${(avgProcessingRate/1024).toFixed(2)} KB/ms`);
// Performance analysis for large files
if (results.largeFiles > 0) {
const largeFileIndices = results.fileSizes
.map((size, i) => ({ size, i }))
.filter(x => x.size > 100 * 1024)
.map(x => x.i);
const largeFileTimes = largeFileIndices.map(i => results.processingTimes[i]);
const largeFileAvgTime = largeFileTimes.reduce((a, b) => a + b, 0) / largeFileTimes.length;
console.log(`\nLarge File Performance:`);
console.log(` Average time for files >100KB: ${largeFileAvgTime.toFixed(2)}ms`);
// Check linear scaling
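    // Heuristic: if average time grows roughly in proportion to average size
    // (timeRatio close to sizeRatio), parsing scales about linearly; we allow
    // 2x slack before flagging super-linear growth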
    const smallFileStats = results.fileSizes
      .map((size, i) => ({ size, time: results.processingTimes[i] }))
      .filter(x => x.size < 50 * 1024);

    if (smallFileStats.length > 0) {
      const smallAvgSize = smallFileStats.reduce((a, x) => a + x.size, 0) / smallFileStats.length;
      const smallAvgTime = smallFileStats.reduce((a, x) => a + x.time, 0) / smallFileStats.length;
      const largeAvgSize = results.fileSizes
        .filter(s => s > 100 * 1024)
        .reduce((a, b) => a + b, 0) / results.largeFiles;

      const sizeRatio = largeAvgSize / smallAvgSize;
      const timeRatio = largeFileAvgTime / smallAvgTime;

      console.log(`  Size ratio (large/small): ${sizeRatio.toFixed(1)}x`);
      console.log(`  Time ratio (large/small): ${timeRatio.toFixed(1)}x`);

      if (timeRatio < sizeRatio * 2) {
        console.log(`  ✓ Acceptable scaling (time grows at most ~2x faster than size)`);
      } else {
        console.log(`  ⚠ Poor scaling performance (super-linear time growth)`);
      }
    }
}
// Success criteria
const successRate = results.successful / results.total;
expect(successRate).toBeGreaterThan(0.7);
// Performance criteria (relaxed for large files)
expect(avgProcessingTime).toBeLessThan(10000); // Average should be under 10 seconds
expect(avgProcessingRate).toBeGreaterThan(5); // At least 5 bytes/ms for large files
});
tap.start();