0e9862efca
- Introduced new data structures for bucket and storage statistics, including BucketSummary, StorageStats, and ClusterHealth.
- Implemented runtime statistics tracking for buckets, including object count and total size.
- Added methods to retrieve storage stats and bucket summaries in the FileStore.
- Enhanced the SmartStorage interface to expose storage stats and cluster health.
- Implemented tests for runtime stats, cluster health, and credential management.
- Added support for runtime-managed credentials with atomic replacement.
- Improved filesystem usage reporting for storage locations.
85 lines
3.1 KiB
TypeScript
/// <reference types="node" />
|
|
|
|
import { rm } from 'fs/promises';
|
|
import { join } from 'path';
|
|
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
|
import * as smartstorage from '../ts/index.js';
|
|
|
|
let clusterStorage: smartstorage.SmartStorage;
|
|
const baseDir = join(process.cwd(), '.nogit', `cluster-health-${Date.now()}`);
|
|
const drivePaths = Array.from({ length: 6 }, (_value, index) => {
|
|
return join(baseDir, `drive-${index + 1}`);
|
|
});
|
|
const storageDir = join(baseDir, 'storage');
|
|
|
|
tap.test('setup: start clustered storage server', async () => {
|
|
clusterStorage = await smartstorage.SmartStorage.createAndStart({
|
|
server: {
|
|
port: 3348,
|
|
silent: true,
|
|
},
|
|
storage: {
|
|
directory: storageDir,
|
|
},
|
|
cluster: {
|
|
enabled: true,
|
|
nodeId: 'cluster-health-node',
|
|
quicPort: 4348,
|
|
seedNodes: [],
|
|
erasure: {
|
|
dataShards: 4,
|
|
parityShards: 2,
|
|
chunkSizeBytes: 1024 * 1024,
|
|
},
|
|
drives: {
|
|
paths: drivePaths,
|
|
},
|
|
},
|
|
});
|
|
});
|
|
|
|
tap.test('should expose clustered runtime health', async () => {
|
|
const health = await clusterStorage.getClusterHealth();
|
|
|
|
expect(health.enabled).toEqual(true);
|
|
expect(health.nodeId).toEqual('cluster-health-node');
|
|
expect(health.quorumHealthy).toEqual(true);
|
|
expect(health.majorityHealthy).toEqual(true);
|
|
expect(Array.isArray(health.peers)).toEqual(true);
|
|
expect(health.peers!.length).toEqual(0);
|
|
expect(Array.isArray(health.drives)).toEqual(true);
|
|
expect(health.drives!.length).toEqual(6);
|
|
expect(health.drives!.every((drive) => drive.status === 'online')).toEqual(true);
|
|
expect(health.drives!.every((drive) => drivePaths.includes(drive.path))).toEqual(true);
|
|
expect(health.drives!.every((drive) => drive.totalBytes !== undefined)).toEqual(true);
|
|
expect(health.drives!.every((drive) => drive.usedBytes !== undefined)).toEqual(true);
|
|
expect(health.drives!.every((drive) => drive.lastCheck !== undefined)).toEqual(true);
|
|
expect(health.drives!.every((drive) => drive.erasureSetId === 0)).toEqual(true);
|
|
expect(health.erasure?.dataShards).toEqual(4);
|
|
expect(health.erasure?.parityShards).toEqual(2);
|
|
expect(health.erasure?.chunkSizeBytes).toEqual(1024 * 1024);
|
|
expect(health.erasure?.totalShards).toEqual(6);
|
|
expect(health.erasure?.readQuorum).toEqual(4);
|
|
expect(health.erasure?.writeQuorum).toEqual(5);
|
|
expect(health.erasure?.erasureSetCount).toEqual(1);
|
|
expect(health.repairs?.active).toEqual(false);
|
|
expect(health.repairs?.scanIntervalMs).toEqual(24 * 60 * 60 * 1000);
|
|
});
|
|
|
|
tap.test('should expose cluster health after bucket creation', async () => {
|
|
const bucket = await clusterStorage.createBucket('cluster-health-bucket');
|
|
const health = await clusterStorage.getClusterHealth();
|
|
|
|
expect(bucket.name).toEqual('cluster-health-bucket');
|
|
expect(health.enabled).toEqual(true);
|
|
expect(health.quorumHealthy).toEqual(true);
|
|
expect(health.drives!.length).toEqual(6);
|
|
});
|
|
|
|
tap.test('teardown: stop clustered server and clean files', async () => {
|
|
await clusterStorage.stop();
|
|
await rm(baseDir, { recursive: true, force: true });
|
|
});
|
|
|
|
export default tap.start()
|