// Integration tests: compaction behavior of smartdb's file-storage backend.
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
|
import * as smartdb from '../ts/index.js';
|
|
import { MongoClient, Db } from 'mongodb';
|
|
import * as fs from 'fs';
|
|
import * as path from 'path';
|
|
import * as os from 'os';
|
|
|
|
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
// Shared fixtures: initialized by the setup test, replaced by the restart
// test, and torn down by the cleanup test. Tests in this file run in order
// and depend on this shared state.
let tmpDir: string; // temp directory used as the server's file-storage root
let server: smartdb.SmartdbServer; // server under test
let client: MongoClient; // driver connection to the running server
let db: Db; // handle to the 'compactdb' database
function makeTmpDir(): string {
|
|
return fs.mkdtempSync(path.join(os.tmpdir(), 'smartdb-compact-test-'));
|
|
}
|
|
|
|
function cleanTmpDir(dir: string): void {
|
|
if (fs.existsSync(dir)) {
|
|
fs.rmSync(dir, { recursive: true, force: true });
|
|
}
|
|
}
|
|
|
|
function getDataFileSize(storagePath: string, dbName: string, collName: string): number {
|
|
const dataPath = path.join(storagePath, dbName, collName, 'data.rdb');
|
|
if (!fs.existsSync(dataPath)) return 0;
|
|
return fs.statSync(dataPath).size;
|
|
}
// ============================================================================
// Compaction: Setup
// ============================================================================
tap.test('compaction: start server with file storage', async () => {
|
|
tmpDir = makeTmpDir();
|
|
server = new smartdb.SmartdbServer({
|
|
socketPath: path.join(os.tmpdir(), `smartdb-compact-${Date.now()}-${Math.random().toString(36).slice(2)}.sock`),
|
|
storage: 'file',
|
|
storagePath: tmpDir,
|
|
});
|
|
await server.start();
|
|
|
|
client = new MongoClient(server.getConnectionUri(), {
|
|
directConnection: true,
|
|
serverSelectionTimeoutMS: 5000,
|
|
});
|
|
await client.connect();
|
|
db = client.db('compactdb');
|
|
});
// ============================================================================
// Compaction: Updates grow the data file
// ============================================================================
tap.test('compaction: repeated updates grow the data file', async () => {
|
|
const coll = db.collection('growing');
|
|
|
|
// Insert a document
|
|
await coll.insertOne({ key: 'target', counter: 0, payload: 'x'.repeat(200) });
|
|
|
|
const sizeAfterInsert = getDataFileSize(tmpDir, 'compactdb', 'growing');
|
|
expect(sizeAfterInsert).toBeGreaterThan(0);
|
|
|
|
// Update the same document 50 times — each update appends a new record
|
|
for (let i = 1; i <= 50; i++) {
|
|
await coll.updateOne(
|
|
{ key: 'target' },
|
|
{ $set: { counter: i, payload: 'y'.repeat(200) } }
|
|
);
|
|
}
|
|
|
|
const sizeAfterUpdates = getDataFileSize(tmpDir, 'compactdb', 'growing');
|
|
// Compaction may have run during updates, so we can't assert the file is
|
|
// much larger. What matters is the data is correct.
|
|
|
|
// The collection still has just 1 document
|
|
const count = await coll.countDocuments();
|
|
expect(count).toEqual(1);
|
|
|
|
const doc = await coll.findOne({ key: 'target' });
|
|
expect(doc!.counter).toEqual(50);
|
|
});
// ============================================================================
// Compaction: Deletes create tombstones
// ============================================================================
tap.test('compaction: insert-then-delete creates dead space', async () => {
|
|
const coll = db.collection('tombstones');
|
|
|
|
// Insert 100 documents
|
|
const docs = [];
|
|
for (let i = 0; i < 100; i++) {
|
|
docs.push({ idx: i, data: 'delete-me-' + 'z'.repeat(100) });
|
|
}
|
|
await coll.insertMany(docs);
|
|
|
|
const sizeAfterInsert = getDataFileSize(tmpDir, 'compactdb', 'tombstones');
|
|
|
|
// Delete all 100
|
|
await coll.deleteMany({});
|
|
|
|
const sizeAfterDelete = getDataFileSize(tmpDir, 'compactdb', 'tombstones');
|
|
// File may have been compacted during deletes (dead > 50% threshold),
|
|
// but the operation itself should succeed regardless of file size.
|
|
// After deleting all docs, the file might be very small (just header + compacted).
|
|
|
|
// But count is 0
|
|
const count = await coll.countDocuments();
|
|
expect(count).toEqual(0);
|
|
});
// ============================================================================
// Compaction: Data integrity after compaction trigger
// ============================================================================
tap.test('compaction: data file shrinks after heavy updates trigger compaction', async () => {
|
|
const coll = db.collection('shrinktest');
|
|
|
|
// Insert 10 documents with large payloads
|
|
const docs = [];
|
|
for (let i = 0; i < 10; i++) {
|
|
docs.push({ idx: i, data: 'a'.repeat(500) });
|
|
}
|
|
await coll.insertMany(docs);
|
|
|
|
const sizeAfterInsert = getDataFileSize(tmpDir, 'compactdb', 'shrinktest');
|
|
|
|
// Update each document 20 times (creates 200 dead records vs 10 live)
|
|
// This should trigger compaction (dead > 50% threshold)
|
|
for (let round = 0; round < 20; round++) {
|
|
for (let i = 0; i < 10; i++) {
|
|
await coll.updateOne(
|
|
{ idx: i },
|
|
{ $set: { data: `round-${round}-` + 'b'.repeat(500) } }
|
|
);
|
|
}
|
|
}
|
|
|
|
// After compaction, file should be smaller than the pre-compaction peak
|
|
// (We can't measure the peak exactly, but the final size should be reasonable)
|
|
const sizeAfterCompaction = getDataFileSize(tmpDir, 'compactdb', 'shrinktest');
|
|
|
|
// The file should not be 20x the insert size since compaction should have run
|
|
// With 10 live records of ~530 bytes each, the file should be roughly that
|
|
// plus header overhead. Without compaction it would be 210 * ~530 bytes.
|
|
const maxExpectedSize = sizeAfterInsert * 5; // generous upper bound
|
|
expect(sizeAfterCompaction).toBeLessThanOrEqual(maxExpectedSize);
|
|
|
|
// All documents should still be readable and correct
|
|
const count = await coll.countDocuments();
|
|
expect(count).toEqual(10);
|
|
|
|
for (let i = 0; i < 10; i++) {
|
|
const doc = await coll.findOne({ idx: i });
|
|
expect(doc).toBeTruthy();
|
|
expect(doc!.data.startsWith('round-19-')).toBeTrue();
|
|
}
|
|
});
// ============================================================================
// Compaction: Persistence after compaction + restart
// ============================================================================
tap.test('compaction: data survives compaction + restart', async () => {
|
|
await client.close();
|
|
await server.stop();
|
|
|
|
server = new smartdb.SmartdbServer({
|
|
socketPath: path.join(os.tmpdir(), `smartdb-compact-${Date.now()}-${Math.random().toString(36).slice(2)}.sock`),
|
|
storage: 'file',
|
|
storagePath: tmpDir,
|
|
});
|
|
await server.start();
|
|
|
|
client = new MongoClient(server.getConnectionUri(), {
|
|
directConnection: true,
|
|
serverSelectionTimeoutMS: 5000,
|
|
});
|
|
await client.connect();
|
|
db = client.db('compactdb');
|
|
|
|
// Verify shrinktest data
|
|
const coll = db.collection('shrinktest');
|
|
const count = await coll.countDocuments();
|
|
expect(count).toEqual(10);
|
|
|
|
for (let i = 0; i < 10; i++) {
|
|
const doc = await coll.findOne({ idx: i });
|
|
expect(doc).toBeTruthy();
|
|
expect(doc!.data.startsWith('round-19-')).toBeTrue();
|
|
}
|
|
|
|
// Verify growing collection
|
|
const growing = db.collection('growing');
|
|
const growDoc = await growing.findOne({ key: 'target' });
|
|
expect(growDoc).toBeTruthy();
|
|
expect(growDoc!.counter).toEqual(50);
|
|
|
|
// Verify tombstones collection is empty
|
|
const tombCount = await db.collection('tombstones').countDocuments();
|
|
expect(tombCount).toEqual(0);
|
|
});
// ============================================================================
// Compaction: Mixed operations stress test
// ============================================================================
tap.test('compaction: mixed insert-update-delete stress test', async () => {
|
|
const coll = db.collection('stress');
|
|
|
|
// Phase 1: Insert 200 documents
|
|
const batch = [];
|
|
for (let i = 0; i < 200; i++) {
|
|
batch.push({ idx: i, value: `initial-${i}`, alive: true });
|
|
}
|
|
await coll.insertMany(batch);
|
|
|
|
// Phase 2: Update every even-indexed document
|
|
for (let i = 0; i < 200; i += 2) {
|
|
await coll.updateOne({ idx: i }, { $set: { value: `updated-${i}` } });
|
|
}
|
|
|
|
// Phase 3: Delete every document where idx % 3 === 0
|
|
await coll.deleteMany({ idx: { $in: Array.from({ length: 67 }, (_, k) => k * 3) } });
|
|
|
|
// Verify: documents where idx % 3 !== 0 should remain
|
|
const remaining = await coll.find({}).toArray();
|
|
for (const doc of remaining) {
|
|
expect(doc.idx % 3).not.toEqual(0);
|
|
if (doc.idx % 2 === 0) {
|
|
expect(doc.value).toEqual(`updated-${doc.idx}`);
|
|
} else {
|
|
expect(doc.value).toEqual(`initial-${doc.idx}`);
|
|
}
|
|
}
|
|
|
|
// Count should be 200 - 67 = 133
|
|
const count = await coll.countDocuments();
|
|
expect(count).toEqual(133);
|
|
});
// ============================================================================
// Cleanup
// ============================================================================
// Final teardown: close the shared client and server (in that order, so the
// driver disconnects before the socket goes away), then delete the temp
// storage directory created by the setup test.
tap.test('compaction: cleanup', async () => {
  await client.close();
  await server.stop();
  cleanTmpDir(tmpDir);
});
|
|
|
|
// Run all registered tap tests; exported as the module default per the
// tstest/tapbundle convention used by this suite.
export default tap.start();
|