import * as plugins from './tsdocker.plugins.js';
|
|
import * as paths from './tsdocker.paths.js';
|
|
import { logger, formatDuration } from './tsdocker.logging.js';
|
|
import { Dockerfile } from './classes.dockerfile.js';
|
|
import { DockerRegistry } from './classes.dockerregistry.js';
|
|
import { RegistryStorage } from './classes.registrystorage.js';
|
|
import { TsDockerCache } from './classes.tsdockercache.js';
|
|
import { DockerContext } from './classes.dockercontext.js';
|
|
import { TsDockerSession } from './classes.tsdockersession.js';
|
|
import { RegistryCopy } from './classes.registrycopy.js';
|
|
import type { ITsDockerConfig, IBuildCommandOptions } from './interfaces/index.js';
|
|
|
|
// Shared shell executor for direct `docker ...` CLI invocations
// (image tagging, buildx builder management, CI cleanup). Uses bash.
const smartshellInstance = new plugins.smartshell.Smartshell({
  executor: 'bash',
});
|
|
|
|
/**
 * Main orchestrator class for Docker operations.
 *
 * Coordinates registry credentials, Dockerfile discovery/sorting,
 * (optionally cached and/or parallel) builds, pushes, pulls, tests,
 * and session-scoped cleanup. Typical flow: prepare() -> login() ->
 * build() -> push() / test().
 */
export class TsDockerManager {
  public registryStorage: RegistryStorage;
  public config: ITsDockerConfig;
  // Loaded lazily in prepare(); null when project info is unavailable.
  public projectInfo: any;
  public dockerContext: DockerContext;
  // Assigned in prepare(); definite-assignment asserted, so prepare()
  // must run before any method that reads this.session.
  public session!: TsDockerSession;
  // Populated by discoverDockerfiles(); cached for subsequent calls.
  private dockerfiles: Dockerfile[] = [];

  constructor(config: ITsDockerConfig) {
    this.config = config;
    this.registryStorage = new RegistryStorage();
    this.dockerContext = new DockerContext();
  }

  /**
   * Prepares the manager by loading project info and registries.
   *
   * @param contextArg - optional Docker context name to force instead of auto-detection
   *
   * Side effects: detects the Docker context, reads project info from the
   * current working directory, loads registry credentials from env vars
   * (with a ~/.docker/config.json fallback), and creates the session
   * identity used for CI-safe unique ports/names.
   */
  public async prepare(contextArg?: string): Promise<void> {
    // Detect Docker context
    if (contextArg) {
      this.dockerContext.setContext(contextArg);
    }
    await this.dockerContext.detect();
    this.dockerContext.logContextInfo();
    this.dockerContext.logRootlessWarnings();

    // Load project info; failure is non-fatal (projectInfo stays null).
    try {
      const projectinfoInstance = new plugins.projectinfo.ProjectInfo(paths.cwd);
      this.projectInfo = {
        npm: {
          name: projectinfoInstance.npm.name,
          version: projectinfoInstance.npm.version,
        },
      };
    } catch (err) {
      logger.log('warn', 'Could not load project info');
      this.projectInfo = null;
    }

    // Load registries from environment
    this.registryStorage.loadFromEnv();

    // Add registries from config if specified
    if (this.config.registries) {
      for (const registryUrl of this.config.registries) {
        // Check if already loaded from env
        if (!this.registryStorage.getRegistryByUrl(registryUrl)) {
          // Try to load credentials for this registry from env.
          // Env var naming: dots in the URL become underscores, uppercased,
          // prefixed with DOCKER_REGISTRY_ (e.g. registry.example.com ->
          // DOCKER_REGISTRY_REGISTRY_EXAMPLE_COM).
          const envVarName = registryUrl.replace(/\./g, '_').toUpperCase();
          const envString = process.env[`DOCKER_REGISTRY_${envVarName}`];
          if (envString) {
            try {
              const registry = DockerRegistry.fromEnvString(envString);
              this.registryStorage.addRegistry(registry);
            } catch (err) {
              logger.log('warn', `Could not load credentials for registry ${registryUrl}`);
            }
          }
        }

        // Fallback: check ~/.docker/config.json if env vars didn't provide credentials
        if (!this.registryStorage.getRegistryByUrl(registryUrl)) {
          const dockerConfigCreds = RegistryCopy.getDockerConfigCredentials(registryUrl);
          if (dockerConfigCreds) {
            const registry = new DockerRegistry({
              registryUrl,
              username: dockerConfigCreds.username,
              password: dockerConfigCreds.password,
            });
            this.registryStorage.addRegistry(registry);
            logger.log('info', `Loaded credentials for ${registryUrl} from ~/.docker/config.json`);
          } else {
            logger.log('warn', `No credentials found for ${registryUrl} (checked env vars and ~/.docker/config.json)`);
          }
        }
      }
    }

    // Create session identity (unique ports, names for CI concurrency)
    this.session = await TsDockerSession.create();

    logger.log('info', `Prepared TsDockerManager with ${this.registryStorage.getAllRegistries().length} registries`);
  }

  /**
   * Logs in to all configured registries.
   * No-op (with a warning) when no registries are configured.
   */
  public async login(): Promise<void> {
    if (this.registryStorage.getAllRegistries().length === 0) {
      logger.log('warn', 'No registries configured');
      return;
    }
    await this.registryStorage.loginAll();
  }

  /**
   * Discovers and sorts Dockerfiles in the current directory.
   *
   * Pipeline: read -> topological sort (so local base images come first)
   * -> map. The manager's session is injected into each Dockerfile so
   * downstream build/push helpers can use session-unique names/ports.
   *
   * @returns the discovered, dependency-ordered Dockerfiles (also cached
   *          on this.dockerfiles)
   */
  public async discoverDockerfiles(): Promise<Dockerfile[]> {
    this.dockerfiles = await Dockerfile.readDockerfiles(this);
    this.dockerfiles = await Dockerfile.sortDockerfiles(this.dockerfiles);
    this.dockerfiles = await Dockerfile.mapDockerfiles(this.dockerfiles);
    // Inject session into each Dockerfile
    for (const df of this.dockerfiles) {
      df.session = this.session;
    }
    return this.dockerfiles;
  }

  /**
   * Filters discovered Dockerfiles by name patterns (glob-style).
   * Mutates this.dockerfiles in place.
   *
   * Patterns containing '*' or '?' are converted to regexes ('*' -> '.*',
   * '?' -> '.'); all other patterns must match the file basename exactly.
   * NOTE(review): other regex metacharacters (e.g. '.') are NOT escaped,
   * so a literal dot in a glob pattern matches any character — confirm
   * this is acceptable for expected Dockerfile naming.
   */
  public filterDockerfiles(patterns: string[]): void {
    const matched = this.dockerfiles.filter((df) => {
      const basename = plugins.path.basename(df.filePath);
      return patterns.some((pattern) => {
        if (pattern.includes('*') || pattern.includes('?')) {
          const regexStr = '^' + pattern.replace(/\*/g, '.*').replace(/\?/g, '.') + '$';
          return new RegExp(regexStr).test(basename);
        }
        return basename === pattern;
      });
    });
    if (matched.length === 0) {
      logger.log('warn', `No Dockerfiles matched patterns: ${patterns.join(', ')}`);
    }
    this.dockerfiles = matched;
  }

  /**
   * Builds discovered Dockerfiles in dependency order.
   * When options.patterns is provided, only matching Dockerfiles (and their dependencies) are built.
   *
   * Modes (combinable):
   *  - buildx multi-platform when a platform is requested or >1 platform configured
   *  - cached mode (options.cached): skips builds whose Dockerfile content is
   *    unchanged according to TsDockerCache, running with a session-local registry
   *  - parallel mode (options.parallel): builds dependency levels concurrently
   *
   * @returns the list of Dockerfiles that were (or would have been) built,
   *          in topological order; empty array when nothing matched.
   */
  public async build(options?: IBuildCommandOptions): Promise<Dockerfile[]> {
    if (this.dockerfiles.length === 0) {
      await this.discoverDockerfiles();
    }

    if (this.dockerfiles.length === 0) {
      logger.log('warn', 'No Dockerfiles found');
      return [];
    }

    // Determine which Dockerfiles to build
    let toBuild = this.dockerfiles;

    if (options?.patterns && options.patterns.length > 0) {
      // Filter to matching Dockerfiles.
      // NOTE(review): this duplicates the matching logic in
      // filterDockerfiles(), including its unescaped-metacharacter
      // behavior — consider extracting a shared helper.
      const matched = this.dockerfiles.filter((df) => {
        const basename = plugins.path.basename(df.filePath);
        return options.patterns!.some((pattern) => {
          if (pattern.includes('*') || pattern.includes('?')) {
            // Convert glob pattern to regex
            const regexStr = '^' + pattern.replace(/\*/g, '.*').replace(/\?/g, '.') + '$';
            return new RegExp(regexStr).test(basename);
          }
          return basename === pattern;
        });
      });

      if (matched.length === 0) {
        logger.log('warn', `No Dockerfiles matched patterns: ${options.patterns.join(', ')}`);
        return [];
      }

      // Resolve dependency chain and preserve topological order
      toBuild = this.resolveWithDependencies(matched, this.dockerfiles);
      logger.log('info', `Matched ${matched.length} Dockerfile(s), building ${toBuild.length} (including dependencies)`);
    }

    // Check if buildx is needed
    const useBuildx = !!(options?.platform || (this.config.platforms && this.config.platforms.length > 1));
    if (useBuildx) {
      await this.ensureBuildx();
    }

    logger.log('info', '');
    logger.log('info', '=== BUILD PHASE ===');

    if (useBuildx) {
      const platforms = options?.platform || this.config.platforms!.join(', ');
      logger.log('info', `Build mode: buildx multi-platform [${platforms}]`);
    } else {
      logger.log('info', 'Build mode: standard docker build');
    }

    const localDeps = toBuild.filter(df => df.localBaseImageDependent);
    if (localDeps.length > 0) {
      logger.log('info', `Local dependencies: ${localDeps.map(df => `${df.cleanTag} -> ${df.localBaseDockerfile?.cleanTag}`).join(', ')}`);
    }

    if (options?.noCache) {
      logger.log('info', 'Cache: disabled (--no-cache)');
    }

    if (options?.parallel) {
      const concurrency = options.parallelConcurrency ?? 4;
      const levels = Dockerfile.computeLevels(toBuild);
      logger.log('info', `Parallel build: ${levels.length} level(s), concurrency ${concurrency}`);
      for (let l = 0; l < levels.length; l++) {
        const level = levels[l];
        logger.log('info', ` Level ${l} (${level.length}): ${level.map(df => df.cleanTag).join(', ')}`);
      }
    }

    logger.log('info', `Building ${toBuild.length} Dockerfile(s)...`);

    if (options?.cached) {
      // === CACHED MODE: skip builds for unchanged Dockerfiles ===
      logger.log('info', '(cached mode active)');
      const cache = new TsDockerCache();
      cache.load();

      const total = toBuild.length;
      const overallStart = Date.now();
      // Local registry is needed so dependent images / multi-platform
      // results can be resolved; always stopped in the finally below.
      await Dockerfile.startLocalRegistry(this.session, this.dockerContext.contextInfo?.isRootless);

      try {
        if (options?.parallel) {
          // === PARALLEL CACHED MODE ===
          const concurrency = options.parallelConcurrency ?? 4;
          const levels = Dockerfile.computeLevels(toBuild);

          let built = 0;
          for (let l = 0; l < levels.length; l++) {
            const level = levels[l];
            logger.log('info', `--- Level ${l}: building ${level.length} image(s) in parallel ---`);

            const tasks = level.map((df) => {
              // Capture the progress index at task-creation time so log
              // lines stay stable even though tasks run concurrently.
              const myIndex = ++built;
              return async () => {
                const progress = `(${myIndex}/${total})`;
                const skip = await cache.shouldSkipBuild(df.cleanTag, df.content);

                if (skip) {
                  logger.log('ok', `${progress} Skipped ${df.cleanTag} (cached)`);
                } else {
                  logger.log('info', `${progress} Building ${df.cleanTag}...`);
                  const elapsed = await df.build({
                    platform: options?.platform,
                    timeout: options?.timeout,
                    noCache: options?.noCache,
                    verbose: options?.verbose,
                  });
                  logger.log('ok', `${progress} Built ${df.cleanTag} in ${formatDuration(elapsed)}`);
                  const imageId = await df.getId();
                  cache.recordBuild(df.cleanTag, df.content, imageId, df.buildTag);
                }
                return df;
              };
            });

            await Dockerfile.runWithConcurrency(tasks, concurrency);

            // After the entire level completes, push all to local registry + tag for deps
            for (const df of level) {
              const dependentBaseImages = new Set<string>();
              for (const other of toBuild) {
                if (other.localBaseDockerfile === df && other.baseImage !== df.buildTag) {
                  dependentBaseImages.add(other.baseImage);
                }
              }
              for (const fullTag of dependentBaseImages) {
                logger.log('info', `Tagging ${df.buildTag} as ${fullTag} for local dependency resolution`);
                await smartshellInstance.exec(`docker tag ${df.buildTag} ${fullTag}`);
              }
              // Push ALL images to local registry (skip if already pushed via buildx)
              if (!df.localRegistryTag) {
                await Dockerfile.pushToLocalRegistry(this.session, df);
              }
            }
          }
        } else {
          // === SEQUENTIAL CACHED MODE ===
          for (let i = 0; i < total; i++) {
            const dockerfileArg = toBuild[i];
            const progress = `(${i + 1}/${total})`;
            const skip = await cache.shouldSkipBuild(dockerfileArg.cleanTag, dockerfileArg.content);

            if (skip) {
              logger.log('ok', `${progress} Skipped ${dockerfileArg.cleanTag} (cached)`);
            } else {
              logger.log('info', `${progress} Building ${dockerfileArg.cleanTag}...`);
              const elapsed = await dockerfileArg.build({
                platform: options?.platform,
                timeout: options?.timeout,
                noCache: options?.noCache,
                verbose: options?.verbose,
              });
              logger.log('ok', `${progress} Built ${dockerfileArg.cleanTag} in ${formatDuration(elapsed)}`);
              const imageId = await dockerfileArg.getId();
              cache.recordBuild(dockerfileArg.cleanTag, dockerfileArg.content, imageId, dockerfileArg.buildTag);
            }

            // Tag for dependents IMMEDIATELY (not after all builds)
            const dependentBaseImages = new Set<string>();
            for (const other of toBuild) {
              if (other.localBaseDockerfile === dockerfileArg && other.baseImage !== dockerfileArg.buildTag) {
                dependentBaseImages.add(other.baseImage);
              }
            }
            for (const fullTag of dependentBaseImages) {
              logger.log('info', `Tagging ${dockerfileArg.buildTag} as ${fullTag} for local dependency resolution`);
              await smartshellInstance.exec(`docker tag ${dockerfileArg.buildTag} ${fullTag}`);
            }

            // Push ALL images to local registry (skip if already pushed via buildx)
            if (!dockerfileArg.localRegistryTag) {
              await Dockerfile.pushToLocalRegistry(this.session, dockerfileArg);
            }
          }
        }
      } finally {
        await Dockerfile.stopLocalRegistry(this.session);
      }

      logger.log('info', `Total build time: ${formatDuration(Date.now() - overallStart)}`);
      cache.save();
    } else {
      // === STANDARD MODE: build all via static helper ===
      await Dockerfile.buildDockerfiles(toBuild, this.session, {
        platform: options?.platform,
        timeout: options?.timeout,
        noCache: options?.noCache,
        verbose: options?.verbose,
        isRootless: this.dockerContext.contextInfo?.isRootless,
        parallel: options?.parallel,
        parallelConcurrency: options?.parallelConcurrency,
      });
    }

    logger.log('success', 'All Dockerfiles built successfully');

    return toBuild;
  }

  /**
   * Resolves a set of target Dockerfiles to include all their local base image dependencies,
   * preserving the original topological build order.
   *
   * @param targets - the user-selected Dockerfiles
   * @param allSorted - the full, already topologically sorted Dockerfile list
   * @returns the subset of allSorted containing targets plus their transitive
   *          local base-image dependencies, in allSorted's order
   */
  private resolveWithDependencies(targets: Dockerfile[], allSorted: Dockerfile[]): Dockerfile[] {
    const needed = new Set<Dockerfile>();
    const addWithDeps = (df: Dockerfile) => {
      if (needed.has(df)) return;
      needed.add(df);
      if (df.localBaseImageDependent && df.localBaseDockerfile) {
        addWithDeps(df.localBaseDockerfile);
      }
    };
    for (const df of targets) addWithDeps(df);
    // Filtering allSorted (rather than collecting from the set) preserves
    // the topological order computed during discovery.
    return allSorted.filter((df) => needed.has(df));
  }

  /**
   * Ensures Docker buildx is set up for multi-architecture builds.
   *
   * Creates the builder (docker-container driver, host networking) if it
   * does not exist; recreates it if it exists without network=host
   * (one-time migration); otherwise just selects it via `buildx use`.
   */
  private async ensureBuildx(): Promise<void> {
    // Session suffix keeps builders unique per CI run.
    const builderName = this.dockerContext.getBuilderName() + (this.session?.config.builderSuffix || '');
    const platforms = this.config.platforms?.join(', ') || 'default';
    logger.log('info', `Setting up Docker buildx [${platforms}]...`);
    logger.log('info', `Builder: ${builderName}`);
    const inspectResult = await smartshellInstance.exec(`docker buildx inspect ${builderName} 2>/dev/null`);

    if (inspectResult.exitCode !== 0) {
      logger.log('info', 'Creating new buildx builder with host network...');
      await smartshellInstance.exec(
        `docker buildx create --name ${builderName} --driver docker-container --driver-opt network=host --use`
      );
      await smartshellInstance.exec('docker buildx inspect --bootstrap');
    } else {
      const inspectOutput = inspectResult.stdout || '';
      if (!inspectOutput.includes('network=host')) {
        logger.log('info', 'Recreating buildx builder with host network (migration)...');
        await smartshellInstance.exec(`docker buildx rm ${builderName} 2>/dev/null`);
        await smartshellInstance.exec(
          `docker buildx create --name ${builderName} --driver docker-container --driver-opt network=host --use`
        );
        await smartshellInstance.exec('docker buildx inspect --bootstrap');
      } else {
        await smartshellInstance.exec(`docker buildx use ${builderName}`);
      }
    }
    logger.log('ok', `Docker buildx ready (builder: ${builderName}, platforms: ${platforms})`);
  }

  /**
   * Pushes all built images to specified registries.
   *
   * @param registryUrls - optional subset of registry URLs; when omitted,
   *                       pushes to every registry in storage
   *
   * Unknown URLs are warned about and skipped. The session-local registry
   * is started for the duration of the push and always stopped afterwards.
   */
  public async push(registryUrls?: string[]): Promise<void> {
    if (this.dockerfiles.length === 0) {
      await this.discoverDockerfiles();
    }

    if (this.dockerfiles.length === 0) {
      logger.log('warn', 'No Dockerfiles found to push');
      return;
    }

    // Determine which registries to push to
    let registriesToPush: DockerRegistry[] = [];

    if (registryUrls && registryUrls.length > 0) {
      // Push to specified registries
      for (const url of registryUrls) {
        const registry = this.registryStorage.getRegistryByUrl(url);
        if (registry) {
          registriesToPush.push(registry);
        } else {
          logger.log('warn', `Registry ${url} not found in storage`);
        }
      }
    } else {
      // Push to all configured registries
      registriesToPush = this.registryStorage.getAllRegistries();
    }

    if (registriesToPush.length === 0) {
      logger.log('warn', 'No registries available to push to');
      return;
    }

    // Start local registry (reads from persistent .nogit/docker-registry/)
    await Dockerfile.startLocalRegistry(this.session, this.dockerContext.contextInfo?.isRootless);
    try {
      // Push each Dockerfile to each registry via OCI copy
      for (const dockerfile of this.dockerfiles) {
        for (const registry of registriesToPush) {
          await dockerfile.push(registry);
        }
      }
    } finally {
      await Dockerfile.stopLocalRegistry(this.session);
    }

    logger.log('success', 'All images pushed successfully');
  }

  /**
   * Pulls images from a specified registry.
   *
   * @param registryUrl - URL of a registry already known to registryStorage
   * @throws Error when the registry is not found in storage
   */
  public async pull(registryUrl: string): Promise<void> {
    if (this.dockerfiles.length === 0) {
      await this.discoverDockerfiles();
    }

    const registry = this.registryStorage.getRegistryByUrl(registryUrl);
    if (!registry) {
      throw new Error(`Registry ${registryUrl} not found`);
    }

    for (const dockerfile of this.dockerfiles) {
      await dockerfile.pull(registry);
    }

    logger.log('success', 'All images pulled successfully');
  }

  /**
   * Runs tests for all Dockerfiles.
   * Starts the local registry so multi-platform images can be auto-pulled.
   */
  public async test(): Promise<void> {
    if (this.dockerfiles.length === 0) {
      await this.discoverDockerfiles();
    }

    if (this.dockerfiles.length === 0) {
      logger.log('warn', 'No Dockerfiles found to test');
      return;
    }

    logger.log('info', '');
    logger.log('info', '=== TEST PHASE ===');

    await Dockerfile.startLocalRegistry(this.session, this.dockerContext.contextInfo?.isRootless);
    try {
      await Dockerfile.testDockerfiles(this.dockerfiles);
    } finally {
      await Dockerfile.stopLocalRegistry(this.session);
    }

    logger.log('success', 'All tests completed');
  }

  /**
   * Lists all discovered Dockerfiles and their info
   * (path, tag, base image, version, and local dependency if any).
   *
   * @returns the discovered Dockerfiles
   */
  public async list(): Promise<Dockerfile[]> {
    if (this.dockerfiles.length === 0) {
      await this.discoverDockerfiles();
    }

    logger.log('info', '');
    logger.log('info', 'Discovered Dockerfiles:');
    logger.log('info', '========================');
    logger.log('info', '');

    for (let i = 0; i < this.dockerfiles.length; i++) {
      const df = this.dockerfiles[i];
      logger.log('info', `${i + 1}. ${df.filePath}`);
      logger.log('info', ` Tag: ${df.cleanTag}`);
      logger.log('info', ` Base Image: ${df.baseImage}`);
      logger.log('info', ` Version: ${df.version}`);
      if (df.localBaseImageDependent) {
        logger.log('info', ` Depends on: ${df.localBaseDockerfile?.cleanTag}`);
      }
      logger.log('info', '');
    }

    return this.dockerfiles;
  }

  /**
   * Gets the cached Dockerfiles (after discovery).
   * Returns an empty array if discoverDockerfiles() has not run yet.
   */
  public getDockerfiles(): Dockerfile[] {
    return this.dockerfiles;
  }

  /**
   * Cleans up session-specific resources.
   * In CI, removes the session-specific buildx builder to avoid accumulation.
   * Errors from `buildx rm` are suppressed (|| true) — best-effort cleanup.
   */
  public async cleanup(): Promise<void> {
    if (this.session?.config.isCI && this.session.config.builderSuffix) {
      const builderName = this.dockerContext.getBuilderName() + this.session.config.builderSuffix;
      logger.log('info', `CI cleanup: removing buildx builder ${builderName}`);
      await smartshellInstance.execSilent(`docker buildx rm ${builderName} 2>/dev/null || true`);
    }
  }
}
|