feat(platform-services): Add platform service log streaming, improve health checks and provisioning robustness
This commit is contained in:
  ts/00_commitinfo_data.ts (new file, +8 lines)
@@ -0,0 +1,8 @@
|
||||
/**
|
||||
* autocreated commitinfo by @push.rocks/commitinfo
|
||||
*/
|
||||
export const commitinfo = {
|
||||
name: '@serve.zone/onebox',
|
||||
version: '1.1.0',
|
||||
description: 'Self-hosted container platform with automatic SSL and DNS - a mini Heroku for single servers'
|
||||
}
|
||||
@@ -78,9 +78,26 @@ export class OneboxDockerManager {
|
||||
* Pull an image from a registry
|
||||
*/
|
||||
async pullImage(image: string, registry?: string): Promise<void> {
|
||||
// Skip manual image pulling - Docker will automatically pull when creating container
|
||||
const fullImage = registry ? `${registry}/${image}` : image;
|
||||
logger.debug(`Skipping manual pull for ${fullImage} - Docker will auto-pull on container creation`);
|
||||
logger.info(`Pulling image: ${fullImage}`);
|
||||
|
||||
try {
|
||||
// Parse image name and tag (e.g., "nginx:alpine" -> imageUrl: "nginx", imageTag: "alpine")
|
||||
const [imageUrl, imageTag] = fullImage.includes(':')
|
||||
? fullImage.split(':')
|
||||
: [fullImage, 'latest'];
|
||||
|
||||
// Use the library's built-in createImageFromRegistry method
|
||||
await this.dockerClient!.createImageFromRegistry({
|
||||
imageUrl,
|
||||
imageTag,
|
||||
});
|
||||
|
||||
logger.success(`Image pulled successfully: ${fullImage}`);
|
||||
} catch (error) {
|
||||
logger.error(`Failed to pull image ${fullImage}: ${getErrorMessage(error)}`);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -796,6 +813,34 @@ export class OneboxDockerManager {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get host port binding for a container's exposed port
|
||||
* @returns The host port number, or null if not bound
|
||||
*/
|
||||
async getContainerHostPort(containerID: string, containerPort: number): Promise<number | null> {
|
||||
try {
|
||||
const container = await this.dockerClient!.getContainerById(containerID);
|
||||
|
||||
if (!container) {
|
||||
throw new Error(`Container not found: ${containerID}`);
|
||||
}
|
||||
|
||||
const info = await container.inspect();
|
||||
|
||||
const portKey = `${containerPort}/tcp`;
|
||||
const bindings = info.NetworkSettings.Ports?.[portKey];
|
||||
|
||||
if (bindings && bindings.length > 0 && bindings[0].HostPort) {
|
||||
return parseInt(bindings[0].HostPort, 10);
|
||||
}
|
||||
|
||||
return null;
|
||||
} catch (error) {
|
||||
logger.error(`Failed to get container host port ${containerID}:${containerPort}: ${getErrorMessage(error)}`);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a command in a running container
|
||||
*/
|
||||
@@ -829,8 +874,11 @@ export class OneboxDockerManager {
|
||||
}
|
||||
});
|
||||
|
||||
// Wait for completion
|
||||
await new Promise((resolve) => stream.on('end', resolve));
|
||||
// Wait for completion with timeout
|
||||
await Promise.race([
|
||||
new Promise<void>((resolve) => stream.on('end', resolve)),
|
||||
new Promise<void>((_, reject) => setTimeout(() => reject(new Error('Exec timeout after 30s')), 30000))
|
||||
]);
|
||||
|
||||
const execInfo = await inspect();
|
||||
const exitCode = execInfo.ExitCode || 0;
|
||||
@@ -859,6 +907,10 @@ export class OneboxDockerManager {
|
||||
try {
|
||||
logger.info(`Creating platform container: ${options.name}`);
|
||||
|
||||
// Pull the image first to ensure it's available
|
||||
logger.info(`Pulling image for platform service: ${options.image}`);
|
||||
await this.pullImage(options.image);
|
||||
|
||||
// Check if container already exists
|
||||
const existingContainers = await this.dockerClient!.listContainers();
|
||||
const existing = existingContainers.find((c: any) =>
|
||||
@@ -877,8 +929,8 @@ export class OneboxDockerManager {
|
||||
const portsToExpose = options.exposePorts || [options.port];
|
||||
for (const port of portsToExpose) {
|
||||
exposedPorts[`${port}/tcp`] = {};
|
||||
// Don't bind to host ports by default - services communicate via Docker network
|
||||
portBindings[`${port}/tcp`] = [];
|
||||
// Bind to random host port so we can access from host (for provisioning)
|
||||
portBindings[`${port}/tcp`] = [{ HostIp: '127.0.0.1', HostPort: '' }];
|
||||
}
|
||||
|
||||
// Prepare volume bindings
|
||||
|
||||
@@ -83,6 +83,12 @@ export class OneboxHttpServer {
|
||||
return this.handleLogStreamUpgrade(req, serviceName);
|
||||
}
|
||||
|
||||
// Platform service log streaming WebSocket
|
||||
if (path.startsWith('/api/platform-services/') && path.endsWith('/logs/stream') && req.headers.get('upgrade') === 'websocket') {
|
||||
const platformType = path.split('/')[3];
|
||||
return this.handlePlatformLogStreamUpgrade(req, platformType);
|
||||
}
|
||||
|
||||
// Network access logs WebSocket
|
||||
if (path === '/api/network/logs/stream' && req.headers.get('upgrade') === 'websocket') {
|
||||
return this.handleNetworkLogStreamUpgrade(req, new URL(req.url));
|
||||
@@ -1060,6 +1066,123 @@ export class OneboxHttpServer {
|
||||
return response;
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle WebSocket upgrade for platform service log streaming
|
||||
*/
|
||||
private handlePlatformLogStreamUpgrade(req: Request, platformType: string): Response {
|
||||
const { socket, response } = Deno.upgradeWebSocket(req);
|
||||
|
||||
socket.onopen = async () => {
|
||||
logger.info(`Platform log stream WebSocket connected for: ${platformType}`);
|
||||
|
||||
try {
|
||||
// Get the platform service from database
|
||||
const platformService = this.oneboxRef.database.getPlatformServiceByType(platformType as any);
|
||||
if (!platformService) {
|
||||
socket.send(JSON.stringify({ error: 'Platform service not found' }));
|
||||
socket.close();
|
||||
return;
|
||||
}
|
||||
|
||||
if (!platformService.containerId) {
|
||||
socket.send(JSON.stringify({ error: 'Platform service has no container' }));
|
||||
socket.close();
|
||||
return;
|
||||
}
|
||||
|
||||
// Get the container
|
||||
logger.info(`Looking up container for platform service ${platformType}, containerID: ${platformService.containerId}`);
|
||||
const container = await this.oneboxRef.docker.getContainerById(platformService.containerId);
|
||||
|
||||
if (!container) {
|
||||
logger.error(`Container not found for platform service ${platformType}, containerID: ${platformService.containerId}`);
|
||||
socket.send(JSON.stringify({ error: 'Container not found' }));
|
||||
socket.close();
|
||||
return;
|
||||
}
|
||||
|
||||
// Start streaming logs
|
||||
const logStream = await container.streamLogs({
|
||||
stdout: true,
|
||||
stderr: true,
|
||||
timestamps: true,
|
||||
tail: 100, // Start with last 100 lines
|
||||
});
|
||||
|
||||
// Send initial connection message
|
||||
socket.send(JSON.stringify({
|
||||
type: 'connected',
|
||||
serviceName: platformType,
|
||||
}));
|
||||
|
||||
// Demultiplex and pipe log data to WebSocket
|
||||
// Docker streams use 8-byte headers: [STREAM_TYPE, 0, 0, 0, SIZE_BYTE1, SIZE_BYTE2, SIZE_BYTE3, SIZE_BYTE4]
|
||||
let buffer = new Uint8Array(0);
|
||||
|
||||
logStream.on('data', (chunk: Uint8Array) => {
|
||||
if (socket.readyState !== WebSocket.OPEN) return;
|
||||
|
||||
// Append new data to buffer
|
||||
const newBuffer = new Uint8Array(buffer.length + chunk.length);
|
||||
newBuffer.set(buffer);
|
||||
newBuffer.set(chunk, buffer.length);
|
||||
buffer = newBuffer;
|
||||
|
||||
// Process complete frames
|
||||
while (buffer.length >= 8) {
|
||||
// Read frame size from header (bytes 4-7, big-endian)
|
||||
const frameSize = (buffer[4] << 24) | (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
|
||||
|
||||
// Check if we have the complete frame
|
||||
if (buffer.length < 8 + frameSize) {
|
||||
break; // Wait for more data
|
||||
}
|
||||
|
||||
// Extract the frame data (skip 8-byte header)
|
||||
const frameData = buffer.slice(8, 8 + frameSize);
|
||||
|
||||
// Send the clean log line
|
||||
socket.send(new TextDecoder().decode(frameData));
|
||||
|
||||
// Remove processed frame from buffer
|
||||
buffer = buffer.slice(8 + frameSize);
|
||||
}
|
||||
});
|
||||
|
||||
logStream.on('error', (error: Error) => {
|
||||
logger.error(`Platform log stream error for ${platformType}: ${getErrorMessage(error)}`);
|
||||
if (socket.readyState === WebSocket.OPEN) {
|
||||
socket.send(JSON.stringify({ error: getErrorMessage(error) }));
|
||||
}
|
||||
});
|
||||
|
||||
logStream.on('end', () => {
|
||||
logger.info(`Platform log stream ended for ${platformType}`);
|
||||
socket.close();
|
||||
});
|
||||
|
||||
// Clean up on close
|
||||
socket.onclose = () => {
|
||||
logger.info(`Platform log stream WebSocket closed for ${platformType}`);
|
||||
logStream.destroy();
|
||||
};
|
||||
|
||||
} catch (error) {
|
||||
logger.error(`Failed to start platform log stream for ${platformType}: ${getErrorMessage(error)}`);
|
||||
if (socket.readyState === WebSocket.OPEN) {
|
||||
socket.send(JSON.stringify({ error: getErrorMessage(error) }));
|
||||
socket.close();
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
socket.onerror = (error) => {
|
||||
logger.error(`Platform log stream WebSocket error: ${error}`);
|
||||
};
|
||||
|
||||
return response;
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle WebSocket upgrade for network access log streaming
|
||||
*/
|
||||
|
||||
@@ -93,6 +93,8 @@ export class PlatformServicesManager {
|
||||
}
|
||||
|
||||
// Check if already running
|
||||
let needsDeploy = platformService.status !== 'running';
|
||||
|
||||
if (platformService.status === 'running') {
|
||||
// Verify it's actually healthy
|
||||
const isHealthy = await provider.healthCheck();
|
||||
@@ -100,11 +102,14 @@ export class PlatformServicesManager {
|
||||
logger.debug(`${provider.displayName} is already running and healthy`);
|
||||
return platformService;
|
||||
}
|
||||
logger.warn(`${provider.displayName} reports running but health check failed, restarting...`);
|
||||
logger.warn(`${provider.displayName} reports running but health check failed, will redeploy...`);
|
||||
// Mark status as needing redeploy - container may have been recreated with different credentials
|
||||
this.oneboxRef.database.updatePlatformService(platformService.id!, { status: 'stopped' });
|
||||
needsDeploy = true;
|
||||
}
|
||||
|
||||
// Deploy if not running
|
||||
if (platformService.status !== 'running') {
|
||||
// Deploy if needed
|
||||
if (needsDeploy) {
|
||||
logger.info(`Starting ${provider.displayName} platform service...`);
|
||||
|
||||
try {
|
||||
@@ -143,19 +148,28 @@ export class PlatformServicesManager {
|
||||
*/
|
||||
private async waitForHealthy(type: TPlatformServiceType, timeoutMs: number): Promise<boolean> {
|
||||
const provider = this.providers.get(type);
|
||||
if (!provider) return false;
|
||||
if (!provider) {
|
||||
logger.warn(`waitForHealthy: no provider for type ${type}`);
|
||||
return false;
|
||||
}
|
||||
|
||||
logger.info(`waitForHealthy: starting health check loop for ${type} (timeout: ${timeoutMs}ms)`);
|
||||
const startTime = Date.now();
|
||||
const checkInterval = 2000; // Check every 2 seconds
|
||||
let checkCount = 0;
|
||||
|
||||
while (Date.now() - startTime < timeoutMs) {
|
||||
checkCount++;
|
||||
logger.info(`waitForHealthy: health check attempt #${checkCount} for ${type}`);
|
||||
const isHealthy = await provider.healthCheck();
|
||||
if (isHealthy) {
|
||||
logger.info(`waitForHealthy: ${type} became healthy after ${checkCount} attempts`);
|
||||
return true;
|
||||
}
|
||||
await new Promise((resolve) => setTimeout(resolve, checkInterval));
|
||||
}
|
||||
|
||||
logger.warn(`waitForHealthy: ${type} did not become healthy after ${checkCount} attempts (${timeoutMs}ms)`);
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
@@ -118,8 +118,19 @@ export class MinioProvider extends BasePlatformServiceProvider {
|
||||
|
||||
async healthCheck(): Promise<boolean> {
|
||||
try {
|
||||
const containerName = this.getContainerName();
|
||||
const endpoint = `http://${containerName}:9000/minio/health/live`;
|
||||
const platformService = this.oneboxRef.database.getPlatformServiceByType(this.type);
|
||||
if (!platformService || !platformService.containerId) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Get container IP for health check (hostname won't resolve from host)
|
||||
const containerIP = await this.oneboxRef.docker.getContainerIP(platformService.containerId);
|
||||
if (!containerIP) {
|
||||
logger.debug('MinIO health check: could not get container IP');
|
||||
return false;
|
||||
}
|
||||
|
||||
const endpoint = `http://${containerIP}:9000/minio/health/live`;
|
||||
|
||||
const response = await fetch(endpoint, {
|
||||
method: 'GET',
|
||||
|
||||
@@ -52,21 +52,52 @@ export class MongoDBProvider extends BasePlatformServiceProvider {
|
||||
async deployContainer(): Promise<string> {
|
||||
const config = this.getDefaultConfig();
|
||||
const containerName = this.getContainerName();
|
||||
|
||||
// Generate admin password
|
||||
const adminPassword = credentialEncryption.generatePassword(32);
|
||||
|
||||
// Store admin credentials encrypted in the platform service record
|
||||
const adminCredentials = {
|
||||
username: 'admin',
|
||||
password: adminPassword,
|
||||
};
|
||||
const dataDir = '/var/lib/onebox/mongodb';
|
||||
|
||||
logger.info(`Deploying MongoDB platform service as ${containerName}...`);
|
||||
|
||||
// Check if we have existing data and stored credentials
|
||||
const platformService = this.oneboxRef.database.getPlatformServiceByType(this.type);
|
||||
let adminCredentials: { username: string; password: string };
|
||||
let dataExists = false;
|
||||
|
||||
// Check if data directory has existing MongoDB data
|
||||
try {
|
||||
const stat = await Deno.stat(`${dataDir}/WiredTiger`);
|
||||
dataExists = stat.isFile;
|
||||
logger.info(`MongoDB data directory exists with WiredTiger file`);
|
||||
} catch {
|
||||
// WiredTiger file doesn't exist, this is a fresh install
|
||||
dataExists = false;
|
||||
}
|
||||
|
||||
if (dataExists && platformService?.adminCredentialsEncrypted) {
|
||||
// Reuse existing credentials from database
|
||||
logger.info('Reusing existing MongoDB credentials (data directory already initialized)');
|
||||
adminCredentials = await credentialEncryption.decrypt(platformService.adminCredentialsEncrypted);
|
||||
} else {
|
||||
// Generate new credentials for fresh deployment
|
||||
logger.info('Generating new MongoDB admin credentials');
|
||||
adminCredentials = {
|
||||
username: 'admin',
|
||||
password: credentialEncryption.generatePassword(32),
|
||||
};
|
||||
|
||||
// If data exists but we don't have credentials, we need to wipe the data
|
||||
if (dataExists) {
|
||||
logger.warn('MongoDB data exists but no credentials in database - wiping data directory');
|
||||
try {
|
||||
await Deno.remove(dataDir, { recursive: true });
|
||||
} catch (e) {
|
||||
logger.error(`Failed to wipe MongoDB data directory: ${getErrorMessage(e)}`);
|
||||
throw new Error('Cannot deploy MongoDB: data directory exists without credentials');
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure data directory exists
|
||||
try {
|
||||
await Deno.mkdir('/var/lib/onebox/mongodb', { recursive: true });
|
||||
await Deno.mkdir(dataDir, { recursive: true });
|
||||
} catch (e) {
|
||||
// Directory might already exist
|
||||
if (!(e instanceof Deno.errors.AlreadyExists)) {
|
||||
@@ -90,9 +121,8 @@ export class MongoDBProvider extends BasePlatformServiceProvider {
|
||||
network: this.getNetworkName(),
|
||||
});
|
||||
|
||||
// Store encrypted admin credentials
|
||||
// Store encrypted admin credentials (only update if new or changed)
|
||||
const encryptedCreds = await credentialEncryption.encrypt(adminCredentials);
|
||||
const platformService = this.oneboxRef.database.getPlatformServiceByType(this.type);
|
||||
if (platformService) {
|
||||
this.oneboxRef.database.updatePlatformService(platformService.id!, {
|
||||
containerId,
|
||||
@@ -113,43 +143,59 @@ export class MongoDBProvider extends BasePlatformServiceProvider {
|
||||
|
||||
async healthCheck(): Promise<boolean> {
|
||||
try {
|
||||
logger.info('MongoDB health check: starting...');
|
||||
const platformService = this.oneboxRef.database.getPlatformServiceByType(this.type);
|
||||
if (!platformService || !platformService.adminCredentialsEncrypted) {
|
||||
if (!platformService) {
|
||||
logger.info('MongoDB health check: platform service not found in database');
|
||||
return false;
|
||||
}
|
||||
if (!platformService.adminCredentialsEncrypted) {
|
||||
logger.info('MongoDB health check: no admin credentials stored');
|
||||
return false;
|
||||
}
|
||||
if (!platformService.containerId) {
|
||||
logger.info('MongoDB health check: no container ID in database record');
|
||||
return false;
|
||||
}
|
||||
|
||||
logger.info(`MongoDB health check: using container ID ${platformService.containerId.substring(0, 12)}...`);
|
||||
const adminCreds = await credentialEncryption.decrypt(platformService.adminCredentialsEncrypted);
|
||||
const containerName = this.getContainerName();
|
||||
|
||||
// Try to connect to MongoDB using mongosh ping
|
||||
const { MongoClient } = await import('npm:mongodb@6');
|
||||
const uri = `mongodb://${adminCreds.username}:${adminCreds.password}@${containerName}:27017/?authSource=admin`;
|
||||
// Use docker exec to run health check inside the container
|
||||
// This avoids network issues with overlay networks
|
||||
const result = await this.oneboxRef.docker.execInContainer(
|
||||
platformService.containerId,
|
||||
['mongosh', '--eval', 'db.adminCommand("ping")', '--username', adminCreds.username, '--password', adminCreds.password, '--authenticationDatabase', 'admin', '--quiet']
|
||||
);
|
||||
|
||||
const client = new MongoClient(uri, {
|
||||
serverSelectionTimeoutMS: 5000,
|
||||
connectTimeoutMS: 5000,
|
||||
});
|
||||
|
||||
await client.connect();
|
||||
await client.db('admin').command({ ping: 1 });
|
||||
await client.close();
|
||||
|
||||
return true;
|
||||
if (result.exitCode === 0) {
|
||||
logger.info('MongoDB health check: success');
|
||||
return true;
|
||||
} else {
|
||||
logger.info(`MongoDB health check failed: exit code ${result.exitCode}, stderr: ${result.stderr.substring(0, 200)}`);
|
||||
return false;
|
||||
}
|
||||
} catch (error) {
|
||||
logger.debug(`MongoDB health check failed: ${getErrorMessage(error)}`);
|
||||
logger.info(`MongoDB health check exception: ${getErrorMessage(error)}`);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
async provisionResource(userService: IService): Promise<IProvisionedResource> {
|
||||
const platformService = this.oneboxRef.database.getPlatformServiceByType(this.type);
|
||||
if (!platformService || !platformService.adminCredentialsEncrypted) {
|
||||
if (!platformService || !platformService.adminCredentialsEncrypted || !platformService.containerId) {
|
||||
throw new Error('MongoDB platform service not found or not configured');
|
||||
}
|
||||
|
||||
const adminCreds = await credentialEncryption.decrypt(platformService.adminCredentialsEncrypted);
|
||||
const containerName = this.getContainerName();
|
||||
|
||||
// Get container host port for connection from host (overlay network IPs not accessible from host)
|
||||
const hostPort = await this.oneboxRef.docker.getContainerHostPort(platformService.containerId, 27017);
|
||||
if (!hostPort) {
|
||||
throw new Error('Could not get MongoDB container host port');
|
||||
}
|
||||
|
||||
// Generate resource names and credentials
|
||||
const dbName = this.generateResourceName(userService.name);
|
||||
const username = this.generateResourceName(userService.name);
|
||||
@@ -157,9 +203,9 @@ export class MongoDBProvider extends BasePlatformServiceProvider {
|
||||
|
||||
logger.info(`Provisioning MongoDB database '${dbName}' for service '${userService.name}'...`);
|
||||
|
||||
// Connect to MongoDB and create database/user
|
||||
// Connect to MongoDB via localhost and the mapped host port
|
||||
const { MongoClient } = await import('npm:mongodb@6');
|
||||
const adminUri = `mongodb://${adminCreds.username}:${adminCreds.password}@${containerName}:27017/?authSource=admin`;
|
||||
const adminUri = `mongodb://${adminCreds.username}:${adminCreds.password}@127.0.0.1:${hostPort}/?authSource=admin`;
|
||||
|
||||
const client = new MongoClient(adminUri);
|
||||
await client.connect();
|
||||
@@ -211,17 +257,22 @@ export class MongoDBProvider extends BasePlatformServiceProvider {
|
||||
|
||||
async deprovisionResource(resource: IPlatformResource, credentials: Record<string, string>): Promise<void> {
|
||||
const platformService = this.oneboxRef.database.getPlatformServiceByType(this.type);
|
||||
if (!platformService || !platformService.adminCredentialsEncrypted) {
|
||||
if (!platformService || !platformService.adminCredentialsEncrypted || !platformService.containerId) {
|
||||
throw new Error('MongoDB platform service not found or not configured');
|
||||
}
|
||||
|
||||
const adminCreds = await credentialEncryption.decrypt(platformService.adminCredentialsEncrypted);
|
||||
const containerName = this.getContainerName();
|
||||
|
||||
// Get container host port for connection from host (overlay network IPs not accessible from host)
|
||||
const hostPort = await this.oneboxRef.docker.getContainerHostPort(platformService.containerId, 27017);
|
||||
if (!hostPort) {
|
||||
throw new Error('Could not get MongoDB container host port');
|
||||
}
|
||||
|
||||
logger.info(`Deprovisioning MongoDB database '${resource.resourceName}'...`);
|
||||
|
||||
const { MongoClient } = await import('npm:mongodb@6');
|
||||
const adminUri = `mongodb://${adminCreds.username}:${adminCreds.password}@${containerName}:27017/?authSource=admin`;
|
||||
const adminUri = `mongodb://${adminCreds.username}:${adminCreds.password}@127.0.0.1:${hostPort}/?authSource=admin`;
|
||||
|
||||
const client = new MongoClient(adminUri);
|
||||
await client.connect();
|
||||
|
||||
Reference in New Issue
Block a user