feat(test): add integration coverage for file storage, compaction, migration, and LocalSmartDb workflows
This commit is contained in:
@@ -1,5 +1,13 @@
# Changelog

## 2026-04-04 - 2.3.0 - feat(test)
add integration coverage for file storage, compaction, migration, and LocalSmartDb workflows

- adds end-to-end tests for file-backed storage creation, CRUD operations, bulk updates, persistence, and index file generation
- adds compaction stress tests covering repeated updates, tombstones, file shrinking behavior, and restart integrity
- adds migration tests for automatic v0 JSON layout detection, v1 conversion, restart persistence, and post-migration writes
- adds LocalSmartDb lifecycle and unix socket tests, including restart persistence, custom socket paths, and database isolation

## 2026-04-04 - 2.2.0 - feat(storage)
add Bitcask storage migration, binary WAL, and data compaction support
|||||||
256
test/test.compaction.ts
Normal file
256
test/test.compaction.ts
Normal file
@@ -0,0 +1,256 @@
|
|||||||
|
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||||
|
import * as smartdb from '../ts/index.js';
|
||||||
|
import { MongoClient, Db } from 'mongodb';
|
||||||
|
import * as fs from 'fs';
|
||||||
|
import * as path from 'path';
|
||||||
|
import * as os from 'os';
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
// Helpers
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
let tmpDir: string;
|
||||||
|
let server: smartdb.SmartdbServer;
|
||||||
|
let client: MongoClient;
|
||||||
|
let db: Db;
|
||||||
|
|
||||||
|
function makeTmpDir(): string {
|
||||||
|
return fs.mkdtempSync(path.join(os.tmpdir(), 'smartdb-compact-test-'));
|
||||||
|
}
|
||||||
|
|
||||||
|
function cleanTmpDir(dir: string): void {
|
||||||
|
if (fs.existsSync(dir)) {
|
||||||
|
fs.rmSync(dir, { recursive: true, force: true });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function getDataFileSize(storagePath: string, dbName: string, collName: string): number {
|
||||||
|
const dataPath = path.join(storagePath, dbName, collName, 'data.rdb');
|
||||||
|
if (!fs.existsSync(dataPath)) return 0;
|
||||||
|
return fs.statSync(dataPath).size;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// Compaction: Setup
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('compaction: start server with file storage', async () => {
|
||||||
|
tmpDir = makeTmpDir();
|
||||||
|
server = new smartdb.SmartdbServer({
|
||||||
|
socketPath: path.join(os.tmpdir(), `smartdb-compact-${Date.now()}-${Math.random().toString(36).slice(2)}.sock`),
|
||||||
|
storage: 'file',
|
||||||
|
storagePath: tmpDir,
|
||||||
|
});
|
||||||
|
await server.start();
|
||||||
|
|
||||||
|
client = new MongoClient(server.getConnectionUri(), {
|
||||||
|
directConnection: true,
|
||||||
|
serverSelectionTimeoutMS: 5000,
|
||||||
|
});
|
||||||
|
await client.connect();
|
||||||
|
db = client.db('compactdb');
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// Compaction: Updates grow the data file
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('compaction: repeated updates grow the data file', async () => {
|
||||||
|
const coll = db.collection('growing');
|
||||||
|
|
||||||
|
// Insert a document
|
||||||
|
await coll.insertOne({ key: 'target', counter: 0, payload: 'x'.repeat(200) });
|
||||||
|
|
||||||
|
const sizeAfterInsert = getDataFileSize(tmpDir, 'compactdb', 'growing');
|
||||||
|
expect(sizeAfterInsert).toBeGreaterThan(0);
|
||||||
|
|
||||||
|
// Update the same document 50 times — each update appends a new record
|
||||||
|
for (let i = 1; i <= 50; i++) {
|
||||||
|
await coll.updateOne(
|
||||||
|
{ key: 'target' },
|
||||||
|
{ $set: { counter: i, payload: 'y'.repeat(200) } }
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
const sizeAfterUpdates = getDataFileSize(tmpDir, 'compactdb', 'growing');
|
||||||
|
// Compaction may have run during updates, so we can't assert the file is
|
||||||
|
// much larger. What matters is the data is correct.
|
||||||
|
|
||||||
|
// The collection still has just 1 document
|
||||||
|
const count = await coll.countDocuments();
|
||||||
|
expect(count).toEqual(1);
|
||||||
|
|
||||||
|
const doc = await coll.findOne({ key: 'target' });
|
||||||
|
expect(doc!.counter).toEqual(50);
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// Compaction: Deletes create tombstones
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('compaction: insert-then-delete creates dead space', async () => {
|
||||||
|
const coll = db.collection('tombstones');
|
||||||
|
|
||||||
|
// Insert 100 documents
|
||||||
|
const docs = [];
|
||||||
|
for (let i = 0; i < 100; i++) {
|
||||||
|
docs.push({ idx: i, data: 'delete-me-' + 'z'.repeat(100) });
|
||||||
|
}
|
||||||
|
await coll.insertMany(docs);
|
||||||
|
|
||||||
|
const sizeAfterInsert = getDataFileSize(tmpDir, 'compactdb', 'tombstones');
|
||||||
|
|
||||||
|
// Delete all 100
|
||||||
|
await coll.deleteMany({});
|
||||||
|
|
||||||
|
const sizeAfterDelete = getDataFileSize(tmpDir, 'compactdb', 'tombstones');
|
||||||
|
// File may have been compacted during deletes (dead > 50% threshold),
|
||||||
|
// but the operation itself should succeed regardless of file size.
|
||||||
|
// After deleting all docs, the file might be very small (just header + compacted).
|
||||||
|
|
||||||
|
// But count is 0
|
||||||
|
const count = await coll.countDocuments();
|
||||||
|
expect(count).toEqual(0);
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// Compaction: Data integrity after compaction trigger
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('compaction: data file shrinks after heavy updates trigger compaction', async () => {
|
||||||
|
const coll = db.collection('shrinktest');
|
||||||
|
|
||||||
|
// Insert 10 documents with large payloads
|
||||||
|
const docs = [];
|
||||||
|
for (let i = 0; i < 10; i++) {
|
||||||
|
docs.push({ idx: i, data: 'a'.repeat(500) });
|
||||||
|
}
|
||||||
|
await coll.insertMany(docs);
|
||||||
|
|
||||||
|
const sizeAfterInsert = getDataFileSize(tmpDir, 'compactdb', 'shrinktest');
|
||||||
|
|
||||||
|
// Update each document 20 times (creates 200 dead records vs 10 live)
|
||||||
|
// This should trigger compaction (dead > 50% threshold)
|
||||||
|
for (let round = 0; round < 20; round++) {
|
||||||
|
for (let i = 0; i < 10; i++) {
|
||||||
|
await coll.updateOne(
|
||||||
|
{ idx: i },
|
||||||
|
{ $set: { data: `round-${round}-` + 'b'.repeat(500) } }
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// After compaction, file should be smaller than the pre-compaction peak
|
||||||
|
// (We can't measure the peak exactly, but the final size should be reasonable)
|
||||||
|
const sizeAfterCompaction = getDataFileSize(tmpDir, 'compactdb', 'shrinktest');
|
||||||
|
|
||||||
|
// The file should not be 20x the insert size since compaction should have run
|
||||||
|
// With 10 live records of ~530 bytes each, the file should be roughly that
|
||||||
|
// plus header overhead. Without compaction it would be 210 * ~530 bytes.
|
||||||
|
const maxExpectedSize = sizeAfterInsert * 5; // generous upper bound
|
||||||
|
expect(sizeAfterCompaction).toBeLessThanOrEqual(maxExpectedSize);
|
||||||
|
|
||||||
|
// All documents should still be readable and correct
|
||||||
|
const count = await coll.countDocuments();
|
||||||
|
expect(count).toEqual(10);
|
||||||
|
|
||||||
|
for (let i = 0; i < 10; i++) {
|
||||||
|
const doc = await coll.findOne({ idx: i });
|
||||||
|
expect(doc).toBeTruthy();
|
||||||
|
expect(doc!.data.startsWith('round-19-')).toBeTrue();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// Compaction: Persistence after compaction + restart
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('compaction: data survives compaction + restart', async () => {
|
||||||
|
await client.close();
|
||||||
|
await server.stop();
|
||||||
|
|
||||||
|
server = new smartdb.SmartdbServer({
|
||||||
|
socketPath: path.join(os.tmpdir(), `smartdb-compact-${Date.now()}-${Math.random().toString(36).slice(2)}.sock`),
|
||||||
|
storage: 'file',
|
||||||
|
storagePath: tmpDir,
|
||||||
|
});
|
||||||
|
await server.start();
|
||||||
|
|
||||||
|
client = new MongoClient(server.getConnectionUri(), {
|
||||||
|
directConnection: true,
|
||||||
|
serverSelectionTimeoutMS: 5000,
|
||||||
|
});
|
||||||
|
await client.connect();
|
||||||
|
db = client.db('compactdb');
|
||||||
|
|
||||||
|
// Verify shrinktest data
|
||||||
|
const coll = db.collection('shrinktest');
|
||||||
|
const count = await coll.countDocuments();
|
||||||
|
expect(count).toEqual(10);
|
||||||
|
|
||||||
|
for (let i = 0; i < 10; i++) {
|
||||||
|
const doc = await coll.findOne({ idx: i });
|
||||||
|
expect(doc).toBeTruthy();
|
||||||
|
expect(doc!.data.startsWith('round-19-')).toBeTrue();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify growing collection
|
||||||
|
const growing = db.collection('growing');
|
||||||
|
const growDoc = await growing.findOne({ key: 'target' });
|
||||||
|
expect(growDoc).toBeTruthy();
|
||||||
|
expect(growDoc!.counter).toEqual(50);
|
||||||
|
|
||||||
|
// Verify tombstones collection is empty
|
||||||
|
const tombCount = await db.collection('tombstones').countDocuments();
|
||||||
|
expect(tombCount).toEqual(0);
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// Compaction: Mixed operations stress test
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('compaction: mixed insert-update-delete stress test', async () => {
|
||||||
|
const coll = db.collection('stress');
|
||||||
|
|
||||||
|
// Phase 1: Insert 200 documents
|
||||||
|
const batch = [];
|
||||||
|
for (let i = 0; i < 200; i++) {
|
||||||
|
batch.push({ idx: i, value: `initial-${i}`, alive: true });
|
||||||
|
}
|
||||||
|
await coll.insertMany(batch);
|
||||||
|
|
||||||
|
// Phase 2: Update every even-indexed document
|
||||||
|
for (let i = 0; i < 200; i += 2) {
|
||||||
|
await coll.updateOne({ idx: i }, { $set: { value: `updated-${i}` } });
|
||||||
|
}
|
||||||
|
|
||||||
|
// Phase 3: Delete every document where idx % 3 === 0
|
||||||
|
await coll.deleteMany({ idx: { $in: Array.from({ length: 67 }, (_, k) => k * 3) } });
|
||||||
|
|
||||||
|
// Verify: documents where idx % 3 !== 0 should remain
|
||||||
|
const remaining = await coll.find({}).toArray();
|
||||||
|
for (const doc of remaining) {
|
||||||
|
expect(doc.idx % 3).not.toEqual(0);
|
||||||
|
if (doc.idx % 2 === 0) {
|
||||||
|
expect(doc.value).toEqual(`updated-${doc.idx}`);
|
||||||
|
} else {
|
||||||
|
expect(doc.value).toEqual(`initial-${doc.idx}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count should be 200 - 67 = 133
|
||||||
|
const count = await coll.countDocuments();
|
||||||
|
expect(count).toEqual(133);
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// Cleanup
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('compaction: cleanup', async () => {
|
||||||
|
await client.close();
|
||||||
|
await server.stop();
|
||||||
|
cleanTmpDir(tmpDir);
|
||||||
|
});
|
||||||
|
|
||||||
|
export default tap.start();
|
||||||
394
test/test.file-storage.ts
Normal file
394
test/test.file-storage.ts
Normal file
@@ -0,0 +1,394 @@
|
|||||||
|
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||||
|
import * as smartdb from '../ts/index.js';
|
||||||
|
import { MongoClient, Db } from 'mongodb';
|
||||||
|
import * as fs from 'fs';
|
||||||
|
import * as path from 'path';
|
||||||
|
import * as os from 'os';
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
// Helpers
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
let tmpDir: string;
|
||||||
|
let server: smartdb.SmartdbServer;
|
||||||
|
let client: MongoClient;
|
||||||
|
let db: Db;
|
||||||
|
|
||||||
|
function makeTmpDir(): string {
|
||||||
|
return fs.mkdtempSync(path.join(os.tmpdir(), 'smartdb-test-'));
|
||||||
|
}
|
||||||
|
|
||||||
|
function cleanTmpDir(dir: string): void {
|
||||||
|
if (fs.existsSync(dir)) {
|
||||||
|
fs.rmSync(dir, { recursive: true, force: true });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// File Storage: Startup
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('file-storage: should start server with file storage', async () => {
|
||||||
|
tmpDir = makeTmpDir();
|
||||||
|
server = new smartdb.SmartdbServer({
|
||||||
|
port: 27118,
|
||||||
|
storage: 'file',
|
||||||
|
storagePath: tmpDir,
|
||||||
|
});
|
||||||
|
await server.start();
|
||||||
|
expect(server.running).toBeTrue();
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('file-storage: should connect MongoClient', async () => {
|
||||||
|
client = new MongoClient('mongodb://127.0.0.1:27118', {
|
||||||
|
directConnection: true,
|
||||||
|
serverSelectionTimeoutMS: 5000,
|
||||||
|
});
|
||||||
|
await client.connect();
|
||||||
|
db = client.db('filetest');
|
||||||
|
expect(db).toBeTruthy();
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// File Storage: Data files are created on disk
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('file-storage: inserting creates data files on disk', async () => {
|
||||||
|
const coll = db.collection('diskcheck');
|
||||||
|
await coll.insertOne({ name: 'disk-test', value: 42 });
|
||||||
|
|
||||||
|
// The storage directory should now contain a database directory
|
||||||
|
const dbDir = path.join(tmpDir, 'filetest');
|
||||||
|
expect(fs.existsSync(dbDir)).toBeTrue();
|
||||||
|
|
||||||
|
// Collection directory with data.rdb should exist
|
||||||
|
const collDir = path.join(dbDir, 'diskcheck');
|
||||||
|
expect(fs.existsSync(collDir)).toBeTrue();
|
||||||
|
|
||||||
|
const dataFile = path.join(collDir, 'data.rdb');
|
||||||
|
expect(fs.existsSync(dataFile)).toBeTrue();
|
||||||
|
|
||||||
|
// data.rdb should have the SMARTDB magic header
|
||||||
|
const header = Buffer.alloc(8);
|
||||||
|
const fd = fs.openSync(dataFile, 'r');
|
||||||
|
fs.readSync(fd, header, 0, 8, 0);
|
||||||
|
fs.closeSync(fd);
|
||||||
|
expect(header.toString('ascii')).toEqual('SMARTDB\0');
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// File Storage: Full CRUD cycle
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('file-storage: insertOne returns valid id', async () => {
|
||||||
|
const coll = db.collection('crud');
|
||||||
|
const result = await coll.insertOne({ name: 'Alice', age: 30 });
|
||||||
|
expect(result.acknowledged).toBeTrue();
|
||||||
|
expect(result.insertedId).toBeTruthy();
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('file-storage: insertMany returns all ids', async () => {
|
||||||
|
const coll = db.collection('crud');
|
||||||
|
const result = await coll.insertMany([
|
||||||
|
{ name: 'Bob', age: 25 },
|
||||||
|
{ name: 'Charlie', age: 35 },
|
||||||
|
{ name: 'Diana', age: 28 },
|
||||||
|
{ name: 'Eve', age: 32 },
|
||||||
|
]);
|
||||||
|
expect(result.insertedCount).toEqual(4);
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('file-storage: findOne retrieves correct document', async () => {
|
||||||
|
const coll = db.collection('crud');
|
||||||
|
const doc = await coll.findOne({ name: 'Alice' });
|
||||||
|
expect(doc).toBeTruthy();
|
||||||
|
expect(doc!.name).toEqual('Alice');
|
||||||
|
expect(doc!.age).toEqual(30);
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('file-storage: find with filter returns correct subset', async () => {
|
||||||
|
const coll = db.collection('crud');
|
||||||
|
const docs = await coll.find({ age: { $gte: 30 } }).toArray();
|
||||||
|
expect(docs.length).toEqual(3); // Alice(30), Charlie(35), Eve(32)
|
||||||
|
expect(docs.every(d => d.age >= 30)).toBeTrue();
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('file-storage: updateOne modifies document', async () => {
|
||||||
|
const coll = db.collection('crud');
|
||||||
|
const result = await coll.updateOne(
|
||||||
|
{ name: 'Alice' },
|
||||||
|
{ $set: { age: 31, updated: true } }
|
||||||
|
);
|
||||||
|
expect(result.modifiedCount).toEqual(1);
|
||||||
|
|
||||||
|
const doc = await coll.findOne({ name: 'Alice' });
|
||||||
|
expect(doc!.age).toEqual(31);
|
||||||
|
expect(doc!.updated).toBeTrue();
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('file-storage: deleteOne removes document', async () => {
|
||||||
|
const coll = db.collection('crud');
|
||||||
|
const result = await coll.deleteOne({ name: 'Eve' });
|
||||||
|
expect(result.deletedCount).toEqual(1);
|
||||||
|
|
||||||
|
const doc = await coll.findOne({ name: 'Eve' });
|
||||||
|
expect(doc).toBeNull();
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('file-storage: count reflects current state', async () => {
|
||||||
|
const coll = db.collection('crud');
|
||||||
|
const count = await coll.countDocuments();
|
||||||
|
expect(count).toEqual(4); // 5 inserted - 1 deleted = 4
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// File Storage: Persistence across server restart
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('file-storage: stop server for restart test', async () => {
|
||||||
|
await client.close();
|
||||||
|
await server.stop();
|
||||||
|
expect(server.running).toBeFalse();
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('file-storage: restart server with same data path', async () => {
|
||||||
|
server = new smartdb.SmartdbServer({
|
||||||
|
port: 27118,
|
||||||
|
storage: 'file',
|
||||||
|
storagePath: tmpDir,
|
||||||
|
});
|
||||||
|
await server.start();
|
||||||
|
expect(server.running).toBeTrue();
|
||||||
|
|
||||||
|
client = new MongoClient('mongodb://127.0.0.1:27118', {
|
||||||
|
directConnection: true,
|
||||||
|
serverSelectionTimeoutMS: 5000,
|
||||||
|
});
|
||||||
|
await client.connect();
|
||||||
|
db = client.db('filetest');
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('file-storage: data persists after restart', async () => {
|
||||||
|
const coll = db.collection('crud');
|
||||||
|
|
||||||
|
// Alice should still be there with updated age
|
||||||
|
const alice = await coll.findOne({ name: 'Alice' });
|
||||||
|
expect(alice).toBeTruthy();
|
||||||
|
expect(alice!.age).toEqual(31);
|
||||||
|
expect(alice!.updated).toBeTrue();
|
||||||
|
|
||||||
|
// Bob, Charlie, Diana should be there
|
||||||
|
const bob = await coll.findOne({ name: 'Bob' });
|
||||||
|
expect(bob).toBeTruthy();
|
||||||
|
expect(bob!.age).toEqual(25);
|
||||||
|
|
||||||
|
const charlie = await coll.findOne({ name: 'Charlie' });
|
||||||
|
expect(charlie).toBeTruthy();
|
||||||
|
|
||||||
|
const diana = await coll.findOne({ name: 'Diana' });
|
||||||
|
expect(diana).toBeTruthy();
|
||||||
|
|
||||||
|
// Eve should still be deleted
|
||||||
|
const eve = await coll.findOne({ name: 'Eve' });
|
||||||
|
expect(eve).toBeNull();
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('file-storage: count is correct after restart', async () => {
|
||||||
|
const coll = db.collection('crud');
|
||||||
|
const count = await coll.countDocuments();
|
||||||
|
expect(count).toEqual(4);
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('file-storage: can write new data after restart', async () => {
|
||||||
|
const coll = db.collection('crud');
|
||||||
|
const result = await coll.insertOne({ name: 'Frank', age: 45 });
|
||||||
|
expect(result.acknowledged).toBeTrue();
|
||||||
|
|
||||||
|
const doc = await coll.findOne({ name: 'Frank' });
|
||||||
|
expect(doc).toBeTruthy();
|
||||||
|
expect(doc!.age).toEqual(45);
|
||||||
|
|
||||||
|
const count = await coll.countDocuments();
|
||||||
|
expect(count).toEqual(5);
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// File Storage: Multiple collections in same database
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('file-storage: multiple collections are independent', async () => {
|
||||||
|
const products = db.collection('products');
|
||||||
|
const orders = db.collection('orders');
|
||||||
|
|
||||||
|
await products.insertMany([
|
||||||
|
{ sku: 'A001', name: 'Widget', price: 9.99 },
|
||||||
|
{ sku: 'A002', name: 'Gadget', price: 19.99 },
|
||||||
|
]);
|
||||||
|
|
||||||
|
await orders.insertMany([
|
||||||
|
{ orderId: 1, sku: 'A001', qty: 3 },
|
||||||
|
{ orderId: 2, sku: 'A002', qty: 1 },
|
||||||
|
{ orderId: 3, sku: 'A001', qty: 2 },
|
||||||
|
]);
|
||||||
|
|
||||||
|
const productCount = await products.countDocuments();
|
||||||
|
const orderCount = await orders.countDocuments();
|
||||||
|
expect(productCount).toEqual(2);
|
||||||
|
expect(orderCount).toEqual(3);
|
||||||
|
|
||||||
|
// Deleting from one collection doesn't affect the other
|
||||||
|
await products.deleteOne({ sku: 'A001' });
|
||||||
|
expect(await products.countDocuments()).toEqual(1);
|
||||||
|
expect(await orders.countDocuments()).toEqual(3);
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// File Storage: Multiple databases
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('file-storage: multiple databases are independent', async () => {
|
||||||
|
const db2 = client.db('filetest2');
|
||||||
|
const coll2 = db2.collection('items');
|
||||||
|
|
||||||
|
await coll2.insertOne({ name: 'cross-db-test', source: 'db2' });
|
||||||
|
|
||||||
|
// db2 has 1 doc
|
||||||
|
const count2 = await coll2.countDocuments();
|
||||||
|
expect(count2).toEqual(1);
|
||||||
|
|
||||||
|
// original db is unaffected
|
||||||
|
const crudCount = await db.collection('crud').countDocuments();
|
||||||
|
expect(crudCount).toEqual(5);
|
||||||
|
|
||||||
|
await db2.dropDatabase();
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// File Storage: Large batch insert and retrieval
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('file-storage: bulk insert 1000 documents', async () => {
|
||||||
|
const coll = db.collection('bulk');
|
||||||
|
const docs = [];
|
||||||
|
for (let i = 0; i < 1000; i++) {
|
||||||
|
docs.push({ index: i, data: `value-${i}`, timestamp: Date.now() });
|
||||||
|
}
|
||||||
|
const result = await coll.insertMany(docs);
|
||||||
|
expect(result.insertedCount).toEqual(1000);
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('file-storage: find all 1000 documents', async () => {
|
||||||
|
const coll = db.collection('bulk');
|
||||||
|
const docs = await coll.find({}).toArray();
|
||||||
|
expect(docs.length).toEqual(1000);
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('file-storage: range query on 1000 documents', async () => {
|
||||||
|
const coll = db.collection('bulk');
|
||||||
|
const docs = await coll.find({ index: { $gte: 500, $lt: 600 } }).toArray();
|
||||||
|
expect(docs.length).toEqual(100);
|
||||||
|
expect(docs.every(d => d.index >= 500 && d.index < 600)).toBeTrue();
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('file-storage: sorted retrieval with limit', async () => {
|
||||||
|
const coll = db.collection('bulk');
|
||||||
|
const docs = await coll.find({}).sort({ index: -1 }).limit(10).toArray();
|
||||||
|
expect(docs.length).toEqual(10);
|
||||||
|
expect(docs[0].index).toEqual(999);
|
||||||
|
expect(docs[9].index).toEqual(990);
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// File Storage: Update many and verify persistence
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('file-storage: updateMany on bulk collection', async () => {
|
||||||
|
const coll = db.collection('bulk');
|
||||||
|
const result = await coll.updateMany(
|
||||||
|
{ index: { $lt: 100 } },
|
||||||
|
{ $set: { batch: 'first-hundred' } }
|
||||||
|
);
|
||||||
|
expect(result.modifiedCount).toEqual(100);
|
||||||
|
|
||||||
|
const updated = await coll.find({ batch: 'first-hundred' }).toArray();
|
||||||
|
expect(updated.length).toEqual(100);
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// File Storage: Delete many and verify
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('file-storage: deleteMany removes correct documents', async () => {
|
||||||
|
const coll = db.collection('bulk');
|
||||||
|
const result = await coll.deleteMany({ index: { $gte: 900 } });
|
||||||
|
expect(result.deletedCount).toEqual(100);
|
||||||
|
|
||||||
|
const remaining = await coll.countDocuments();
|
||||||
|
expect(remaining).toEqual(900);
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// File Storage: Persistence of bulk data across restart
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('file-storage: stop server for bulk restart test', async () => {
|
||||||
|
await client.close();
|
||||||
|
await server.stop();
|
||||||
|
expect(server.running).toBeFalse();
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('file-storage: restart and verify bulk data', async () => {
|
||||||
|
server = new smartdb.SmartdbServer({
|
||||||
|
port: 27118,
|
||||||
|
storage: 'file',
|
||||||
|
storagePath: tmpDir,
|
||||||
|
});
|
||||||
|
await server.start();
|
||||||
|
|
||||||
|
client = new MongoClient('mongodb://127.0.0.1:27118', {
|
||||||
|
directConnection: true,
|
||||||
|
serverSelectionTimeoutMS: 5000,
|
||||||
|
});
|
||||||
|
await client.connect();
|
||||||
|
db = client.db('filetest');
|
||||||
|
|
||||||
|
const coll = db.collection('bulk');
|
||||||
|
const count = await coll.countDocuments();
|
||||||
|
expect(count).toEqual(900);
|
||||||
|
|
||||||
|
// Verify the updateMany persisted
|
||||||
|
const firstHundred = await coll.find({ batch: 'first-hundred' }).toArray();
|
||||||
|
expect(firstHundred.length).toEqual(100);
|
||||||
|
|
||||||
|
// Verify deleted docs are gone
|
||||||
|
const over900 = await coll.find({ index: { $gte: 900 } }).toArray();
|
||||||
|
expect(over900.length).toEqual(0);
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// File Storage: Index persistence
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('file-storage: default indexes.json exists on disk', async () => {
|
||||||
|
// The indexes.json is created when the collection is first created,
|
||||||
|
// containing the default _id_ index spec.
|
||||||
|
const indexFile = path.join(tmpDir, 'filetest', 'crud', 'indexes.json');
|
||||||
|
expect(fs.existsSync(indexFile)).toBeTrue();
|
||||||
|
|
||||||
|
const indexData = JSON.parse(fs.readFileSync(indexFile, 'utf-8'));
|
||||||
|
const names = indexData.map((i: any) => i.name);
|
||||||
|
expect(names).toContain('_id_');
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// Cleanup
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('file-storage: cleanup', async () => {
|
||||||
|
await client.close();
|
||||||
|
await server.stop();
|
||||||
|
expect(server.running).toBeFalse();
|
||||||
|
cleanTmpDir(tmpDir);
|
||||||
|
});
|
||||||
|
|
||||||
|
export default tap.start();
|
||||||
235
test/test.localsmartdb.ts
Normal file
235
test/test.localsmartdb.ts
Normal file
@@ -0,0 +1,235 @@
|
|||||||
|
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||||
|
import * as smartdb from '../ts/index.js';
|
||||||
|
import { MongoClient, Db } from 'mongodb';
|
||||||
|
import * as fs from 'fs';
|
||||||
|
import * as path from 'path';
|
||||||
|
import * as os from 'os';
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
// Helpers
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
let tmpDir: string;
|
||||||
|
let localDb: smartdb.LocalSmartDb;
|
||||||
|
let client: MongoClient;
|
||||||
|
let db: Db;
|
||||||
|
|
||||||
|
function makeTmpDir(): string {
|
||||||
|
return fs.mkdtempSync(path.join(os.tmpdir(), 'smartdb-local-test-'));
|
||||||
|
}
|
||||||
|
|
||||||
|
function cleanTmpDir(dir: string): void {
|
||||||
|
if (fs.existsSync(dir)) {
|
||||||
|
fs.rmSync(dir, { recursive: true, force: true });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// LocalSmartDb: Lifecycle
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('localsmartdb: should start with just a folder path', async () => {
|
||||||
|
tmpDir = makeTmpDir();
|
||||||
|
localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
|
||||||
|
const info = await localDb.start();
|
||||||
|
|
||||||
|
expect(localDb.running).toBeTrue();
|
||||||
|
expect(info.socketPath).toBeTruthy();
|
||||||
|
expect(info.connectionUri).toBeTruthy();
|
||||||
|
expect(info.connectionUri.startsWith('mongodb://')).toBeTrue();
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('localsmartdb: should connect via returned connectionUri', async () => {
|
||||||
|
const info = localDb.getConnectionInfo();
|
||||||
|
client = new MongoClient(info.connectionUri, {
|
||||||
|
directConnection: true,
|
||||||
|
serverSelectionTimeoutMS: 5000,
|
||||||
|
});
|
||||||
|
await client.connect();
|
||||||
|
db = client.db('localtest');
|
||||||
|
expect(db).toBeTruthy();
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('localsmartdb: should reject double start', async () => {
|
||||||
|
let threw = false;
|
||||||
|
try {
|
||||||
|
await localDb.start();
|
||||||
|
} catch {
|
||||||
|
threw = true;
|
||||||
|
}
|
||||||
|
expect(threw).toBeTrue();
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// LocalSmartDb: CRUD via Unix socket
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('localsmartdb: insert and find documents', async () => {
|
||||||
|
const coll = db.collection('notes');
|
||||||
|
await coll.insertMany([
|
||||||
|
{ title: 'Note 1', body: 'First note', priority: 1 },
|
||||||
|
{ title: 'Note 2', body: 'Second note', priority: 2 },
|
||||||
|
{ title: 'Note 3', body: 'Third note', priority: 3 },
|
||||||
|
]);
|
||||||
|
|
||||||
|
const all = await coll.find({}).toArray();
|
||||||
|
expect(all.length).toEqual(3);
|
||||||
|
|
||||||
|
const high = await coll.findOne({ priority: 3 });
|
||||||
|
expect(high).toBeTruthy();
|
||||||
|
expect(high!.title).toEqual('Note 3');
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('localsmartdb: update and verify', async () => {
|
||||||
|
const coll = db.collection('notes');
|
||||||
|
await coll.updateOne(
|
||||||
|
{ title: 'Note 2' },
|
||||||
|
{ $set: { body: 'Updated second note', edited: true } }
|
||||||
|
);
|
||||||
|
|
||||||
|
const doc = await coll.findOne({ title: 'Note 2' });
|
||||||
|
expect(doc!.body).toEqual('Updated second note');
|
||||||
|
expect(doc!.edited).toBeTrue();
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('localsmartdb: delete and verify', async () => {
|
||||||
|
const coll = db.collection('notes');
|
||||||
|
await coll.deleteOne({ title: 'Note 1' });
|
||||||
|
|
||||||
|
const count = await coll.countDocuments();
|
||||||
|
expect(count).toEqual(2);
|
||||||
|
|
||||||
|
const deleted = await coll.findOne({ title: 'Note 1' });
|
||||||
|
expect(deleted).toBeNull();
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// LocalSmartDb: Persistence across restart
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('localsmartdb: stop for restart', async () => {
  // Close the driver first so the server can shut its socket down cleanly.
  await client.close();
  await localDb.stop();
  expect(localDb.running).toBeFalse();
});
|
||||||
|
|
||||||
|
tap.test('localsmartdb: restart with same folder', async () => {
|
||||||
|
localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
|
||||||
|
const info = await localDb.start();
|
||||||
|
expect(localDb.running).toBeTrue();
|
||||||
|
|
||||||
|
client = new MongoClient(info.connectionUri, {
|
||||||
|
directConnection: true,
|
||||||
|
serverSelectionTimeoutMS: 5000,
|
||||||
|
});
|
||||||
|
await client.connect();
|
||||||
|
db = client.db('localtest');
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('localsmartdb: data persists after restart', async () => {
|
||||||
|
const coll = db.collection('notes');
|
||||||
|
|
||||||
|
const count = await coll.countDocuments();
|
||||||
|
expect(count).toEqual(2); // 3 inserted - 1 deleted
|
||||||
|
|
||||||
|
const note2 = await coll.findOne({ title: 'Note 2' });
|
||||||
|
expect(note2!.body).toEqual('Updated second note');
|
||||||
|
expect(note2!.edited).toBeTrue();
|
||||||
|
|
||||||
|
const note3 = await coll.findOne({ title: 'Note 3' });
|
||||||
|
expect(note3!.priority).toEqual(3);
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// LocalSmartDb: Custom socket path
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('localsmartdb: works with custom socket path', async () => {
|
||||||
|
await client.close();
|
||||||
|
await localDb.stop();
|
||||||
|
|
||||||
|
const customSocket = path.join(os.tmpdir(), `smartdb-custom-${Date.now()}.sock`);
|
||||||
|
const tmpDir2 = makeTmpDir();
|
||||||
|
const localDb2 = new smartdb.LocalSmartDb({
|
||||||
|
folderPath: tmpDir2,
|
||||||
|
socketPath: customSocket,
|
||||||
|
});
|
||||||
|
|
||||||
|
const info = await localDb2.start();
|
||||||
|
expect(info.socketPath).toEqual(customSocket);
|
||||||
|
|
||||||
|
const client2 = new MongoClient(info.connectionUri, {
|
||||||
|
directConnection: true,
|
||||||
|
serverSelectionTimeoutMS: 5000,
|
||||||
|
});
|
||||||
|
await client2.connect();
|
||||||
|
const testDb = client2.db('customsock');
|
||||||
|
await testDb.collection('test').insertOne({ x: 1 });
|
||||||
|
const doc = await testDb.collection('test').findOne({ x: 1 });
|
||||||
|
expect(doc).toBeTruthy();
|
||||||
|
|
||||||
|
await client2.close();
|
||||||
|
await localDb2.stop();
|
||||||
|
cleanTmpDir(tmpDir2);
|
||||||
|
|
||||||
|
// Reconnect original for remaining tests
|
||||||
|
localDb = new smartdb.LocalSmartDb({ folderPath: tmpDir });
|
||||||
|
const origInfo = await localDb.start();
|
||||||
|
client = new MongoClient(origInfo.connectionUri, {
|
||||||
|
directConnection: true,
|
||||||
|
serverSelectionTimeoutMS: 5000,
|
||||||
|
});
|
||||||
|
await client.connect();
|
||||||
|
db = client.db('localtest');
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// LocalSmartDb: getConnectionUri and getServer helpers
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('localsmartdb: getConnectionUri returns valid uri', async () => {
|
||||||
|
const uri = localDb.getConnectionUri();
|
||||||
|
expect(uri.startsWith('mongodb://')).toBeTrue();
|
||||||
|
});
|
||||||
|
|
||||||
|
tap.test('localsmartdb: getServer returns the SmartdbServer', async () => {
|
||||||
|
const srv = localDb.getServer();
|
||||||
|
expect(srv).toBeTruthy();
|
||||||
|
expect(srv.running).toBeTrue();
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// LocalSmartDb: Data isolation between databases
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('localsmartdb: databases are isolated', async () => {
|
||||||
|
const dbA = client.db('isoA');
|
||||||
|
const dbB = client.db('isoB');
|
||||||
|
|
||||||
|
await dbA.collection('shared').insertOne({ source: 'A', val: 1 });
|
||||||
|
await dbB.collection('shared').insertOne({ source: 'B', val: 2 });
|
||||||
|
|
||||||
|
const docsA = await dbA.collection('shared').find({}).toArray();
|
||||||
|
const docsB = await dbB.collection('shared').find({}).toArray();
|
||||||
|
|
||||||
|
expect(docsA.length).toEqual(1);
|
||||||
|
expect(docsA[0].source).toEqual('A');
|
||||||
|
expect(docsB.length).toEqual(1);
|
||||||
|
expect(docsB[0].source).toEqual('B');
|
||||||
|
|
||||||
|
await dbA.dropDatabase();
|
||||||
|
await dbB.dropDatabase();
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// Cleanup
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('localsmartdb: cleanup', async () => {
  // Final teardown: disconnect, stop the server, delete the scratch folder.
  await client.close();
  await localDb.stop();
  expect(localDb.running).toBeFalse();
  cleanTmpDir(tmpDir);
});

export default tap.start();
|
||||||
269
test/test.migration.ts
Normal file
269
test/test.migration.ts
Normal file
@@ -0,0 +1,269 @@
|
|||||||
|
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||||
|
import * as smartdb from '../ts/index.js';
|
||||||
|
import { MongoClient, Db } from 'mongodb';
|
||||||
|
import * as fs from 'fs';
|
||||||
|
import * as path from 'path';
|
||||||
|
import * as os from 'os';
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
// Helpers
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
let tmpDir: string;
|
||||||
|
|
||||||
|
function makeTmpDir(): string {
|
||||||
|
return fs.mkdtempSync(path.join(os.tmpdir(), 'smartdb-migration-test-'));
|
||||||
|
}
|
||||||
|
|
||||||
|
function cleanTmpDir(dir: string): void {
|
||||||
|
if (fs.existsSync(dir)) {
|
||||||
|
fs.rmSync(dir, { recursive: true, force: true });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create a v0 (legacy JSON) storage layout:
|
||||||
|
* {base}/{db}/{coll}.json
|
||||||
|
* {base}/{db}/{coll}.indexes.json
|
||||||
|
*/
|
||||||
|
function createV0Layout(basePath: string, dbName: string, collName: string, docs: any[]): void {
|
||||||
|
const dbDir = path.join(basePath, dbName);
|
||||||
|
fs.mkdirSync(dbDir, { recursive: true });
|
||||||
|
|
||||||
|
// Convert docs to the extended JSON format that the old Rust engine wrote:
|
||||||
|
// ObjectId is stored as { "$oid": "hex" }
|
||||||
|
const jsonDocs = docs.map(doc => {
|
||||||
|
const clone = { ...doc };
|
||||||
|
if (!clone._id) {
|
||||||
|
// Generate a fake ObjectId-like hex string
|
||||||
|
const hex = [...Array(24)].map(() => Math.floor(Math.random() * 16).toString(16)).join('');
|
||||||
|
clone._id = { '$oid': hex };
|
||||||
|
}
|
||||||
|
return clone;
|
||||||
|
});
|
||||||
|
|
||||||
|
const collPath = path.join(dbDir, `${collName}.json`);
|
||||||
|
fs.writeFileSync(collPath, JSON.stringify(jsonDocs, null, 2));
|
||||||
|
|
||||||
|
const indexPath = path.join(dbDir, `${collName}.indexes.json`);
|
||||||
|
fs.writeFileSync(indexPath, JSON.stringify([
|
||||||
|
{ name: '_id_', key: { _id: 1 } },
|
||||||
|
], null, 2));
|
||||||
|
}
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// Migration: v0 → v1 basic
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('migration: detects v0 format and migrates on startup', async () => {
|
||||||
|
tmpDir = makeTmpDir();
|
||||||
|
|
||||||
|
// Create v0 layout with test data
|
||||||
|
createV0Layout(tmpDir, 'mydb', 'users', [
|
||||||
|
{ name: 'Alice', age: 30, email: 'alice@test.com' },
|
||||||
|
{ name: 'Bob', age: 25, email: 'bob@test.com' },
|
||||||
|
{ name: 'Charlie', age: 35, email: 'charlie@test.com' },
|
||||||
|
]);
|
||||||
|
|
||||||
|
createV0Layout(tmpDir, 'mydb', 'products', [
|
||||||
|
{ sku: 'W001', name: 'Widget', price: 9.99 },
|
||||||
|
{ sku: 'G001', name: 'Gadget', price: 19.99 },
|
||||||
|
]);
|
||||||
|
|
||||||
|
// Verify v0 files exist
|
||||||
|
expect(fs.existsSync(path.join(tmpDir, 'mydb', 'users.json'))).toBeTrue();
|
||||||
|
expect(fs.existsSync(path.join(tmpDir, 'mydb', 'products.json'))).toBeTrue();
|
||||||
|
|
||||||
|
// Start server — migration should run automatically
|
||||||
|
const server = new smartdb.SmartdbServer({
|
||||||
|
socketPath: path.join(os.tmpdir(), `smartdb-mig-${Date.now()}-${Math.random().toString(36).slice(2)}.sock`),
|
||||||
|
storage: 'file',
|
||||||
|
storagePath: tmpDir,
|
||||||
|
});
|
||||||
|
await server.start();
|
||||||
|
|
||||||
|
// v1 directories should now exist
|
||||||
|
expect(fs.existsSync(path.join(tmpDir, 'mydb', 'users', 'data.rdb'))).toBeTrue();
|
||||||
|
expect(fs.existsSync(path.join(tmpDir, 'mydb', 'products', 'data.rdb'))).toBeTrue();
|
||||||
|
|
||||||
|
// v0 files should still exist (not deleted)
|
||||||
|
expect(fs.existsSync(path.join(tmpDir, 'mydb', 'users.json'))).toBeTrue();
|
||||||
|
expect(fs.existsSync(path.join(tmpDir, 'mydb', 'products.json'))).toBeTrue();
|
||||||
|
|
||||||
|
// Connect and verify data is accessible
|
||||||
|
const client = new MongoClient(server.getConnectionUri(), {
|
||||||
|
directConnection: true,
|
||||||
|
serverSelectionTimeoutMS: 5000,
|
||||||
|
});
|
||||||
|
await client.connect();
|
||||||
|
const db = client.db('mydb');
|
||||||
|
|
||||||
|
// Users collection
|
||||||
|
const users = await db.collection('users').find({}).toArray();
|
||||||
|
expect(users.length).toEqual(3);
|
||||||
|
const alice = users.find(u => u.name === 'Alice');
|
||||||
|
expect(alice).toBeTruthy();
|
||||||
|
expect(alice!.age).toEqual(30);
|
||||||
|
expect(alice!.email).toEqual('alice@test.com');
|
||||||
|
|
||||||
|
// Products collection
|
||||||
|
const products = await db.collection('products').find({}).toArray();
|
||||||
|
expect(products.length).toEqual(2);
|
||||||
|
const widget = products.find(p => p.sku === 'W001');
|
||||||
|
expect(widget).toBeTruthy();
|
||||||
|
expect(widget!.price).toEqual(9.99);
|
||||||
|
|
||||||
|
await client.close();
|
||||||
|
await server.stop();
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// Migration: migrated data survives another restart
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('migration: migrated data persists across restart', async () => {
|
||||||
|
const server = new smartdb.SmartdbServer({
|
||||||
|
socketPath: path.join(os.tmpdir(), `smartdb-mig-${Date.now()}-${Math.random().toString(36).slice(2)}.sock`),
|
||||||
|
storage: 'file',
|
||||||
|
storagePath: tmpDir,
|
||||||
|
});
|
||||||
|
await server.start();
|
||||||
|
|
||||||
|
const client = new MongoClient(server.getConnectionUri(), {
|
||||||
|
directConnection: true,
|
||||||
|
serverSelectionTimeoutMS: 5000,
|
||||||
|
});
|
||||||
|
await client.connect();
|
||||||
|
const db = client.db('mydb');
|
||||||
|
|
||||||
|
const users = await db.collection('users').find({}).toArray();
|
||||||
|
expect(users.length).toEqual(3);
|
||||||
|
|
||||||
|
const products = await db.collection('products').find({}).toArray();
|
||||||
|
expect(products.length).toEqual(2);
|
||||||
|
|
||||||
|
await client.close();
|
||||||
|
await server.stop();
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// Migration: can write new data after migration
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('migration: new writes work after migration', async () => {
|
||||||
|
const server = new smartdb.SmartdbServer({
|
||||||
|
socketPath: path.join(os.tmpdir(), `smartdb-mig-${Date.now()}-${Math.random().toString(36).slice(2)}.sock`),
|
||||||
|
storage: 'file',
|
||||||
|
storagePath: tmpDir,
|
||||||
|
});
|
||||||
|
await server.start();
|
||||||
|
|
||||||
|
const client = new MongoClient(server.getConnectionUri(), {
|
||||||
|
directConnection: true,
|
||||||
|
serverSelectionTimeoutMS: 5000,
|
||||||
|
});
|
||||||
|
await client.connect();
|
||||||
|
const db = client.db('mydb');
|
||||||
|
|
||||||
|
// Insert new documents
|
||||||
|
await db.collection('users').insertOne({ name: 'Diana', age: 28 });
|
||||||
|
const count = await db.collection('users').countDocuments();
|
||||||
|
expect(count).toEqual(4);
|
||||||
|
|
||||||
|
// Update existing migrated document
|
||||||
|
await db.collection('users').updateOne(
|
||||||
|
{ name: 'Alice' },
|
||||||
|
{ $set: { age: 31 } }
|
||||||
|
);
|
||||||
|
const alice = await db.collection('users').findOne({ name: 'Alice' });
|
||||||
|
expect(alice!.age).toEqual(31);
|
||||||
|
|
||||||
|
// Delete a migrated document
|
||||||
|
await db.collection('products').deleteOne({ sku: 'G001' });
|
||||||
|
const prodCount = await db.collection('products').countDocuments();
|
||||||
|
expect(prodCount).toEqual(1);
|
||||||
|
|
||||||
|
await client.close();
|
||||||
|
await server.stop();
|
||||||
|
cleanTmpDir(tmpDir);
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// Migration: skips already-migrated data
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('migration: no-op for v1 format', async () => {
|
||||||
|
tmpDir = makeTmpDir();
|
||||||
|
|
||||||
|
// Start fresh to create v1 layout
|
||||||
|
const server = new smartdb.SmartdbServer({
|
||||||
|
socketPath: path.join(os.tmpdir(), `smartdb-mig-${Date.now()}-${Math.random().toString(36).slice(2)}.sock`),
|
||||||
|
storage: 'file',
|
||||||
|
storagePath: tmpDir,
|
||||||
|
});
|
||||||
|
await server.start();
|
||||||
|
|
||||||
|
const client = new MongoClient(server.getConnectionUri(), {
|
||||||
|
directConnection: true,
|
||||||
|
serverSelectionTimeoutMS: 5000,
|
||||||
|
});
|
||||||
|
await client.connect();
|
||||||
|
const db = client.db('v1test');
|
||||||
|
await db.collection('items').insertOne({ x: 1 });
|
||||||
|
await client.close();
|
||||||
|
await server.stop();
|
||||||
|
|
||||||
|
// Restart — migration should detect v1 and skip
|
||||||
|
const server2 = new smartdb.SmartdbServer({
|
||||||
|
socketPath: path.join(os.tmpdir(), `smartdb-mig-${Date.now()}-${Math.random().toString(36).slice(2)}.sock`),
|
||||||
|
storage: 'file',
|
||||||
|
storagePath: tmpDir,
|
||||||
|
});
|
||||||
|
await server2.start();
|
||||||
|
|
||||||
|
const client2 = new MongoClient(server2.getConnectionUri(), {
|
||||||
|
directConnection: true,
|
||||||
|
serverSelectionTimeoutMS: 5000,
|
||||||
|
});
|
||||||
|
await client2.connect();
|
||||||
|
const db2 = client2.db('v1test');
|
||||||
|
const doc = await db2.collection('items').findOne({ x: 1 });
|
||||||
|
expect(doc).toBeTruthy();
|
||||||
|
|
||||||
|
await client2.close();
|
||||||
|
await server2.stop();
|
||||||
|
cleanTmpDir(tmpDir);
|
||||||
|
});
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// Migration: empty storage is handled gracefully
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
tap.test('migration: empty storage directory works', async () => {
|
||||||
|
tmpDir = makeTmpDir();
|
||||||
|
|
||||||
|
const server = new smartdb.SmartdbServer({
|
||||||
|
socketPath: path.join(os.tmpdir(), `smartdb-mig-${Date.now()}-${Math.random().toString(36).slice(2)}.sock`),
|
||||||
|
storage: 'file',
|
||||||
|
storagePath: tmpDir,
|
||||||
|
});
|
||||||
|
await server.start();
|
||||||
|
|
||||||
|
const client = new MongoClient(server.getConnectionUri(), {
|
||||||
|
directConnection: true,
|
||||||
|
serverSelectionTimeoutMS: 5000,
|
||||||
|
});
|
||||||
|
await client.connect();
|
||||||
|
|
||||||
|
// Should work fine with empty storage
|
||||||
|
const db = client.db('emptytest');
|
||||||
|
await db.collection('first').insertOne({ hello: 'world' });
|
||||||
|
const doc = await db.collection('first').findOne({ hello: 'world' });
|
||||||
|
expect(doc).toBeTruthy();
|
||||||
|
|
||||||
|
await client.close();
|
||||||
|
await server.stop();
|
||||||
|
cleanTmpDir(tmpDir);
|
||||||
|
});
|
||||||
|
|
||||||
|
export default tap.start();
|
||||||
@@ -3,6 +3,6 @@
|
|||||||
*/
|
*/
|
||||||
export const commitinfo = {
|
export const commitinfo = {
|
||||||
name: '@push.rocks/smartdb',
|
name: '@push.rocks/smartdb',
|
||||||
version: '2.2.0',
|
version: '2.3.0',
|
||||||
description: 'A MongoDB-compatible embedded database server with wire protocol support, backed by a high-performance Rust engine.'
|
description: 'A MongoDB-compatible embedded database server with wire protocol support, backed by a high-performance Rust engine.'
|
||||||
}
|
}
|
||||||
|
|||||||
File diff suppressed because one or more lines are too long
Reference in New Issue
Block a user