feat: add registry deploy-on-push scenario
This commit is contained in:
@@ -0,0 +1,5 @@
|
||||
node_modules/
|
||||
.nogit/
|
||||
.vagrant/
|
||||
dist/
|
||||
*.log
|
||||
Vendored
+47
@@ -0,0 +1,47 @@
|
||||
# frozen_string_literal: true

# Vagrant definition for the serve.zone integration-test VM.
# Mounts the whole serve.zone checkout at /serve.zone via one-shot rsync and
# supports both the VirtualBox and libvirt providers.

# Root of the serve.zone checkout on the host; overridable via SERVEZONE_ROOT.
servezone_root = ENV.fetch('SERVEZONE_ROOT', File.expand_path('..', __dir__))

Vagrant.configure('2') do |config|
  config.vm.box = ENV.fetch('SERVEZONE_VAGRANT_BOX', 'bento/ubuntu-24.04')
  config.vm.hostname = 'servezone-testing'

  # Disable the default /vagrant share; only the explicit /serve.zone mount is used.
  config.vm.synced_folder '.', '/vagrant', disabled: true

  # One-shot rsync (rsync__auto: false) of the full serve.zone tree.
  # Excludes VCS metadata, per-component build output, caches, and components
  # not exercised by these scenarios.
  config.vm.synced_folder servezone_root, '/serve.zone',
    type: 'rsync',
    owner: 'vagrant',
    group: 'vagrant',
    rsync__auto: false,
    rsync__exclude: [
      '.git/',
      '.nogit/',
      '.vagrant/',
      'node_modules/',
      'containerarchive/',
      'remoteingress/',
      'siprouter/',
      '**/.git/',
      '**/.nogit/',
      '**/.vagrant/',
      '**/node_modules/',
      '**/.cache/',
      '**/.pnpm-store/',
      '**/dist/',
      '**/dist_*/',
      '**/target/',
    ]

  # Resource sizing is shared between providers via the same env variables.
  config.vm.provider 'virtualbox' do |vb|
    vb.name = 'servezone-testing'
    vb.cpus = ENV.fetch('SERVEZONE_VAGRANT_CPUS', '4').to_i
    vb.memory = ENV.fetch('SERVEZONE_VAGRANT_MEMORY', '8192').to_i
  end

  config.vm.provider 'libvirt' do |lv|
    lv.cpus = ENV.fetch('SERVEZONE_VAGRANT_CPUS', '4').to_i
    lv.memory = ENV.fetch('SERVEZONE_VAGRANT_MEMORY', '8192').to_i
  end

  # Installs Docker, Node.js, pnpm and activates Swarm inside the VM.
  config.vm.provision 'shell', path: 'scripts/provision-vm.sh'
end
|
||||
@@ -0,0 +1,29 @@
|
||||
{
|
||||
"name": "@serve.zone/testing",
|
||||
"version": "0.1.0",
|
||||
"private": true,
|
||||
"type": "module",
|
||||
"description": "Whole-system integration scenarios for serve.zone components.",
|
||||
"scripts": {
|
||||
"bootstrap:components": "pnpm --dir ../interfaces install && pnpm --dir ../api install && pnpm --dir ../cloudly install && pnpm --dir ../coreflow install && pnpm --dir ../coretraffic install && pnpm install",
|
||||
"test": "pnpm scenario:registry-deploy-on-push",
|
||||
"scenario:registry-deploy-on-push": "tsx --tsconfig ../cloudly/tsconfig.json scenarios/registry-deploy-on-push/scenario.ts",
|
||||
"vagrant:up": "vagrant up",
|
||||
"vagrant:test": "vagrant ssh -c 'cd /serve.zone/testing && pnpm bootstrap:components && pnpm scenario:registry-deploy-on-push'",
|
||||
"vagrant:destroy": "vagrant destroy -f"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@git.zone/tstest": "2.3.8",
|
||||
"@push.rocks/smartnetwork": "4.4.0",
|
||||
"@types/node": "^22.0.0",
|
||||
"tsx": "^4.20.6",
|
||||
"typescript": "^5.9.3"
|
||||
},
|
||||
"pnpm": {
|
||||
"overrides": {
|
||||
"@push.rocks/smarts3": "2.2.6",
|
||||
"is-generator-function": "1.1.0"
|
||||
}
|
||||
},
|
||||
"packageManager": "pnpm@10.7.0+sha512.6b865ad4b62a1d9842b61d674a393903b871d9244954f652b8842c2b553c72176b278f64c463e52d40fff8aba385c235c8c9ecf5cc7de4fd78b8bb6d49633ab6"
|
||||
}
|
||||
Generated
+9214
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,40 @@
|
||||
# serve.zone Testing
|
||||
|
||||
Whole-system integration scenarios for serve.zone components.
|
||||
|
||||
Fast package tests stay in each component repo. This repo is for stateful cross-component tests that need Docker, Swarm, Cloudly, Coreflow, registry behavior, and later Coretraffic routing.
|
||||
|
||||
## Scenarios
|
||||
|
||||
- `registry-deploy-on-push`: starts Cloudly with isolated Mongo/S3 helpers, connects Coreflow as a cluster, pushes a Docker image to Cloudly's built-in registry, verifies Cloudly metadata updates, verifies Coreflow creates the workload service, verifies Coretraffic HTTPS routing, then pushes the same tag again and verifies service recreation and routing through the new digest.
|
||||
|
||||
## Host Run
|
||||
|
||||
Requires Docker with Swarm already active.
|
||||
|
||||
```bash
|
||||
pnpm bootstrap:components
|
||||
pnpm scenario:registry-deploy-on-push
|
||||
```
|
||||
|
||||
## Vagrant Run
|
||||
|
||||
Requires Vagrant plus a provider on the host. The Vagrant VM installs Docker, Node.js, pnpm, initializes Docker Swarm, and mounts the full `serve.zone` directory at `/serve.zone`.
|
||||
|
||||
```bash
|
||||
pnpm vagrant:up
|
||||
pnpm vagrant:test
|
||||
pnpm vagrant:destroy
|
||||
```
|
||||
|
||||
Provider options:
|
||||
|
||||
- VirtualBox: install `vagrant` and `virtualbox` on the host.
|
||||
- libvirt: install `vagrant`, `libvirt`, and the `vagrant-libvirt` plugin on the host.
|
||||
|
||||
Useful environment variables:
|
||||
|
||||
- `SERVEZONE_ROOT`: host path mounted as `/serve.zone`.
|
||||
- `SERVEZONE_VAGRANT_BOX`: defaults to `bento/ubuntu-24.04`.
|
||||
- `SERVEZONE_VAGRANT_CPUS`: defaults to `4`.
|
||||
- `SERVEZONE_VAGRANT_MEMORY`: defaults to `8192`.
|
||||
@@ -0,0 +1,13 @@
|
||||
# registry-deploy-on-push
|
||||
|
||||
This scenario verifies the Cloudly registry to Coreflow deploy-on-push path.
|
||||
|
||||
Assertions:
|
||||
|
||||
- Cloudly starts with isolated MongoDB and S3 helpers.
|
||||
- Coreflow authenticates with the cluster machine token.
|
||||
- Docker login and push to Cloudly's OCI registry work.
|
||||
- Cloudly records the pushed image digest and pushes config to connected Coreflow.
|
||||
- Coreflow creates the workload service from the registry-backed image.
|
||||
- Coretraffic receives the route config and serves HTTPS traffic to the workload.
|
||||
- Re-pushing `latest` with a new digest recreates the Docker service.
|
||||
@@ -0,0 +1,694 @@
|
||||
import { execFile } from 'node:child_process';
|
||||
import { mkdirSync, readFileSync, rmSync, writeFileSync } from 'node:fs';
|
||||
import { dirname, join, resolve } from 'node:path';
|
||||
import { fileURLToPath } from 'node:url';
|
||||
import { promisify } from 'node:util';
|
||||
|
||||
import { tapNodeTools } from '@git.zone/tstest/tapbundle_node';
|
||||
import { SmartNetwork } from '@push.rocks/smartnetwork';
|
||||
|
||||
import * as cloudlyApiClient from '../../../api/ts/index.js';
|
||||
import { Cloudly, type ICloudlyConfig } from '../../../cloudly/ts/index.js';
|
||||
import { Coreflow } from '../../../coreflow/ts/coreflow.classes.coreflow.js';
|
||||
|
||||
// Promisified execFile: rejects on non-zero exit, resolves with { stdout, stderr }.
const execFileAsync = promisify(execFile);
// Directory of this scenario file, resolved from the ESM module URL.
const scenarioDir = dirname(fileURLToPath(import.meta.url));
// <root>/testing — two levels above scenarios/registry-deploy-on-push.
const testingDir = resolve(scenarioDir, '../..');
// serve.zone root containing the sibling component checkouts (cloudly, coreflow, ...).
const repoRoot = resolve(testingDir, '..');
// Unique per-run id; base36 timestamp keeps derived resource names short.
const smokeId = `szn-e2e-smoke-${Date.now().toString(36)}`;
// Scratch directory for this run's Dockerfiles, certificates, and Caddyfiles.
const buildDir = join(testingDir, '.nogit', 'registry-deploy-on-push', smokeId);
// Name of the per-run Coretraffic Swarm service.
const coretrafficServiceName = `${smokeId}-coretraffic`;
// NOTE(review): fixed name 'coreflow' — presumably the DNS name Coretraffic
// dials on the corechat network to reach Coreflow; confirm against Coretraffic.
const coreflowProxyServiceName = 'coreflow';
// Async cleanup callbacks (Mongo/S3 helpers) drained in main()'s finally block.
const stopFunctions: Array<() => Promise<void>> = [];
|
||||
|
||||
const delayFor = async (millisecondsArg: number) => {
|
||||
await new Promise((resolveArg) => setTimeout(resolveArg, millisecondsArg));
|
||||
};
|
||||
|
||||
/**
 * Runs a command via execFile (no shell involved) and echoes any trimmed
 * stdout/stderr to the console. Rejects when the command exits non-zero.
 */
const run = async (commandArg: string, argsArg: string[]) => {
  const { stdout, stderr } = await execFileAsync(commandArg, argsArg, {
    // Docker builds/pushes are chatty; allow up to 20 MiB of captured output.
    maxBuffer: 1024 * 1024 * 20,
  });
  if (stdout.trim()) {
    console.log(stdout.trim());
  }
  if (stderr.trim()) {
    console.log(stderr.trim());
  }
};
|
||||
|
||||
const waitFor = async (checkFunctionArg: () => boolean | Promise<boolean>, messageArg: string) => {
|
||||
const startTime = Date.now();
|
||||
while (Date.now() - startTime < 90000) {
|
||||
if (await checkFunctionArg()) {
|
||||
return;
|
||||
}
|
||||
await delayFor(500);
|
||||
}
|
||||
throw new Error(`Timed out waiting for ${messageArg}`);
|
||||
};
|
||||
|
||||
const getDockerSafeName = (valueArg: string, maxLengthArg = 64) => {
|
||||
const safeName = valueArg
|
||||
.replace(/[^a-zA-Z0-9_.-]+/g, '-')
|
||||
.replace(/^[^a-zA-Z0-9]+|[^a-zA-Z0-9]+$/g, '')
|
||||
.slice(0, maxLengthArg)
|
||||
.replace(/[^a-zA-Z0-9]+$/g, '');
|
||||
return safeName || 'resource';
|
||||
};
|
||||
|
||||
const getWorkloadSecretName = (serviceArg: { id: string; data: { name: string } }) => {
|
||||
const serviceName = getDockerSafeName(serviceArg.data.name, 36);
|
||||
const serviceId = getDockerSafeName(serviceArg.id, 20);
|
||||
return getDockerSafeName(`${serviceName}-${serviceId}-secret`);
|
||||
};
|
||||
|
||||
/** Returns true when a Docker Swarm service with the given name exists. */
const dockerServiceExists = async (serviceNameArg: string) => {
  try {
    await execFileAsync('docker', ['service', 'inspect', serviceNameArg]);
    return true;
  } catch {
    // `docker service inspect` exits non-zero for unknown services.
    return false;
  }
};
|
||||
|
||||
/**
 * Removes a Docker Swarm service if present (best effort — removal errors are
 * swallowed), then pauses 2 s to give Swarm time to tear down its tasks.
 */
const removeDockerService = async (serviceNameArg: string) => {
  if (await dockerServiceExists(serviceNameArg)) {
    await execFileAsync('docker', ['service', 'rm', serviceNameArg]).catch(() => null);
    await delayFor(2000);
  }
};
|
||||
|
||||
/**
 * Failure-diagnostics helper: prints the last 120 raw log lines of a Docker
 * service. No-ops when the service does not exist; logs a note instead of
 * throwing when logs cannot be read.
 */
const printDockerServiceLogs = async (serviceNameArg: string) => {
  if (!(await dockerServiceExists(serviceNameArg))) {
    return;
  }
  console.log(`[registry-deploy-on-push] Logs for Docker service ${serviceNameArg}:`);
  await execFileAsync('docker', ['service', 'logs', '--raw', '--tail', '120', serviceNameArg], {
    maxBuffer: 1024 * 1024 * 5,
  })
    .then(({ stdout, stderr }) => {
      if (stdout.trim()) {
        console.log(stdout.trim());
      }
      if (stderr.trim()) {
        console.log(stderr.trim());
      }
    })
    .catch((error) => {
      // Diagnostics must never mask the original scenario failure.
      console.log(`[registry-deploy-on-push] Could not read ${serviceNameArg} logs: ${(error as Error).message}`);
    });
};
|
||||
|
||||
/**
 * Failure-diagnostics helper: prints the task list (`docker service ps
 * --no-trunc`) of a Docker service. No-ops when the service does not exist;
 * logs a note instead of throwing on read errors.
 */
const printDockerServicePs = async (serviceNameArg: string) => {
  if (!(await dockerServiceExists(serviceNameArg))) {
    return;
  }
  console.log(`[registry-deploy-on-push] Tasks for Docker service ${serviceNameArg}:`);
  await execFileAsync('docker', ['service', 'ps', '--no-trunc', serviceNameArg], {
    maxBuffer: 1024 * 1024 * 5,
  })
    .then(({ stdout, stderr }) => {
      if (stdout.trim()) {
        console.log(stdout.trim());
      }
      if (stderr.trim()) {
        console.log(stderr.trim());
      }
    })
    .catch((error) => {
      // Diagnostics must never mask the original scenario failure.
      console.log(`[registry-deploy-on-push] Could not read ${serviceNameArg} tasks: ${(error as Error).message}`);
    });
};
|
||||
|
||||
/**
 * Failure-diagnostics helper: dumps the `.Containers` map of a Docker network
 * as JSON so it is visible which containers are actually attached. Logs a
 * note instead of throwing when the network cannot be inspected.
 */
const printDockerNetworkContainers = async (networkNameArg: string) => {
  console.log(`[registry-deploy-on-push] Containers on Docker network ${networkNameArg}:`);
  await execFileAsync('docker', ['network', 'inspect', networkNameArg, '--format', '{{json .Containers}}'], {
    maxBuffer: 1024 * 1024 * 5,
  })
    .then(({ stdout, stderr }) => {
      if (stdout.trim()) {
        console.log(stdout.trim());
      }
      if (stderr.trim()) {
        console.log(stderr.trim());
      }
    })
    .catch((error) => {
      // Diagnostics must never mask the original scenario failure.
      console.log(`[registry-deploy-on-push] Could not inspect ${networkNameArg}: ${(error as Error).message}`);
    });
};
|
||||
|
||||
/**
 * Reads the gateway IP of the built-in docker_gwbridge network. The
 * Coreflow proxy service uses this address to reverse-proxy traffic from the
 * corechat network back to port 3000 on the host side of the bridge.
 * @throws when the network has no gateway configured.
 */
const getDockerGwbridgeGatewayIp = async () => {
  const { stdout } = await execFileAsync('docker', [
    'network',
    'inspect',
    'docker_gwbridge',
    '--format',
    '{{(index .IPAM.Config 0).Gateway}}',
  ]);
  const gatewayIp = stdout.trim();
  if (!gatewayIp) {
    throw new Error('Could not determine docker_gwbridge gateway IP');
  }
  return gatewayIp;
};
|
||||
|
||||
/**
 * Generates a throwaway self-signed RSA-2048 key/certificate pair for
 * `domainNameArg` via openssl (valid for 1 day) inside buildDir and returns
 * both as PEM strings. Used to stub Cloudly's certificate provisioning.
 */
const createSelfSignedCertificate = async (domainNameArg: string) => {
  mkdirSync(buildDir, { recursive: true });
  const keyPath = join(buildDir, 'route.key');
  const certPath = join(buildDir, 'route.crt');
  await run('openssl', [
    'req',
    '-x509',
    '-newkey',
    'rsa:2048',
    '-nodes',
    '-keyout',
    keyPath,
    '-out',
    certPath,
    '-subj',
    `/CN=${domainNameArg}`,
    '-days',
    '1',
  ]);
  return {
    privateKey: readFileSync(keyPath, 'utf8'),
    publicKey: readFileSync(certPath, 'utf8'),
  };
};
|
||||
|
||||
/**
 * Blocks until at least one container of `serviceArg` shows up on
 * `networkArg` (the web gateway network), or the waitFor timeout elapses.
 * @throws when networkArg is missing or the wait times out.
 */
const waitForWorkloadContainer = async (
  networkArg: Awaited<ReturnType<Coreflow['dockerHost']['getNetworkByName']>>,
  serviceArg: Awaited<ReturnType<Coreflow['dockerHost']['getServiceByName']>>,
) => {
  if (!networkArg) {
    throw new Error('Missing Docker network while waiting for workload container');
  }
  await waitFor(async () => {
    const containers = await networkArg.getContainersOnNetworkForService(serviceArg);
    return containers.length > 0;
  }, 'workload container on web gateway network');
};
|
||||
|
||||
/**
 * Creates a Caddy-based Swarm service named `coreflow` on the corechat
 * network that reverse-proxies :3000 to the docker_gwbridge gateway IP —
 * i.e. containers on the network reach port 3000 on the host through it.
 * NOTE(review): 3000 is presumably Coreflow's internal server port — confirm.
 * Refuses to run when a service with that name already exists, so a real
 * Coreflow deployment is never clobbered.
 */
const createCoreflowProxyService = async (corechatNetworkNameArg: string) => {
  if (await dockerServiceExists(coreflowProxyServiceName)) {
    throw new Error(`Docker service ${coreflowProxyServiceName} already exists; refusing to overwrite it`);
  }
  mkdirSync(buildDir, { recursive: true });
  const gatewayIp = await getDockerGwbridgeGatewayIp();
  const caddyfilePath = join(buildDir, 'coreflow-proxy.Caddyfile');
  writeFileSync(caddyfilePath, `:3000 {\n  reverse_proxy ${gatewayIp}:3000\n}\n`);
  await run('docker', [
    'service',
    'create',
    '--name',
    coreflowProxyServiceName,
    // Run-scoped label so leftovers can be attributed to this scenario run.
    '--label',
    `serve.zone.testing.id=${smokeId}`,
    '--network',
    corechatNetworkNameArg,
    '--mount',
    `type=bind,source=${caddyfilePath},target=/etc/caddy/Caddyfile,readonly`,
    'caddy:2-alpine',
  ]);
};
|
||||
|
||||
/**
 * Builds the sibling coretraffic component with pnpm, then launches it as a
 * Swarm service attached to both the corechat and web-gateway networks. The
 * published host port `httpsPortArg` maps to container port 8000, and the
 * coretraffic checkout is bind-mounted at /app and started via `node cli.js`.
 */
const createCoretrafficService = async (
  corechatNetworkNameArg: string,
  webGatewayNetworkNameArg: string,
  httpsPortArg: number,
) => {
  const coretrafficDir = join(repoRoot, 'coretraffic');
  await run('pnpm', ['--dir', coretrafficDir, 'build']);
  await run('docker', [
    'service',
    'create',
    '--name',
    coretrafficServiceName,
    // Run-scoped label so leftovers can be attributed to this scenario run.
    '--label',
    `serve.zone.testing.id=${smokeId}`,
    '--network',
    corechatNetworkNameArg,
    '--network',
    webGatewayNetworkNameArg,
    '--publish',
    `published=${httpsPortArg},target=8000,protocol=tcp`,
    '--mount',
    `type=bind,source=${coretrafficDir},target=/app`,
    'node:22-trixie-slim',
    'sh',
    '-lc',
    'cd /app && node cli.js',
  ]);
};
|
||||
|
||||
/**
 * Performs one HTTPS request against the local Coretraffic endpoint via curl,
 * resolving `domainNameArg` to 127.0.0.1 and skipping certificate
 * verification (-k; the route serves a self-signed cert). The `-w` format
 * appends the HTTP status code as a final line, which is split back off here.
 * @returns statusCode (string), response body, and curl's stderr.
 */
const requestCoretrafficRoute = async (domainNameArg: string, httpsPortArg: number) => {
  const curlArgs = [
    '-k',
    '-sS',
    // Ignore any proxy env vars so the request really hits 127.0.0.1.
    '--noproxy',
    '*',
    '--max-time',
    '10',
    '--resolve',
    `${domainNameArg}:${httpsPortArg}:127.0.0.1`,
    '-o',
    '-',
    '-w',
    '\n%{http_code}',
    `https://${domainNameArg}:${httpsPortArg}/`,
  ];
  const { stdout, stderr } = await execFileAsync('curl', curlArgs);
  const lines = stdout.trim().split('\n');
  // Last line is the status code injected by -w; the rest is the body.
  const statusCode = lines[lines.length - 1];
  const body = lines.slice(0, -1).join('\n');
  return {
    statusCode,
    body,
    stderr,
  };
};
|
||||
|
||||
/**
 * Polls the Coretraffic HTTPS route until it answers 200 with a body matching
 * /Caddy|serve/i. On timeout it dumps extensive diagnostics (last response,
 * last error, service tasks/logs, gateway network membership) before
 * rethrowing, so CI failures are debuggable from logs alone.
 */
const waitForCoretrafficRoute = async (
  domainNameArg: string,
  httpsPortArg: number,
  messageArg: string,
  backendServiceNameArg?: string,
) => {
  let lastResponse: Awaited<ReturnType<typeof requestCoretrafficRoute>> | undefined;
  let lastError: Error | undefined;
  try {
    await waitFor(async () => {
      try {
        lastResponse = await requestCoretrafficRoute(domainNameArg, httpsPortArg);
        lastError = undefined;
        // Body check guards against a 200 coming from something other than
        // the workload (smoke image is Caddy-based).
        return lastResponse.statusCode === '200' && /Caddy|serve/i.test(lastResponse.body);
      } catch (error) {
        lastError = error as Error;
        return false;
      }
    }, messageArg);
  } catch (error) {
    console.log(`[registry-deploy-on-push] Last route response: ${JSON.stringify(lastResponse)}`);
    if (lastError) {
      console.log(`[registry-deploy-on-push] Last route error: ${lastError.message}`);
    }
    if (backendServiceNameArg) {
      await printDockerServicePs(backendServiceNameArg);
      await printDockerServiceLogs(backendServiceNameArg);
    }
    await printDockerServicePs(coretrafficServiceName);
    await printDockerServiceLogs(coretrafficServiceName);
    await printDockerServicePs(coreflowProxyServiceName);
    await printDockerServiceLogs(coreflowProxyServiceName);
    await printDockerNetworkContainers('sznwebgateway');
    throw error;
  }
};
|
||||
|
||||
/**
 * Preflight check: verifies the Docker daemon is reachable and that this
 * node's Swarm state is 'active'. Fails fast with guidance otherwise.
 */
const ensureDockerReady = async () => {
  await run('docker', ['version']);
  const { stdout } = await execFileAsync('docker', ['info', '--format', '{{.Swarm.LocalNodeState}}']);
  if (stdout.trim() !== 'active') {
    throw new Error('Docker Swarm must be active. In Vagrant this is handled by scripts/provision-vm.sh.');
  }
};
|
||||
|
||||
/**
 * Builds a tiny caddy-based smoke image tagged `<smokeId>:<revisionArg>`.
 * The revision label makes each build produce a distinct digest, which is
 * what the re-push assertions in main() rely on.
 * @returns the local image tag.
 */
const buildSmokeImage = async (revisionArg: string) => {
  mkdirSync(buildDir, { recursive: true });
  const imageTag = `${smokeId}:${revisionArg}`;
  writeFileSync(
    join(buildDir, 'Dockerfile'),
    `FROM caddy:2-alpine\nLABEL serve.zone.smoke.id="${smokeId}"\nLABEL serve.zone.smoke.revision="${revisionArg}"\n`,
  );
  await run('docker', ['build', '-t', imageTag, buildDir]);
  return imageTag;
};
|
||||
|
||||
/** Best-effort local image removal; errors (e.g. image in use) are ignored. */
const dockerImageRemove = async (imageArg: string) => {
  await execFileAsync('docker', ['image', 'rm', imageArg]).catch(() => null);
};
|
||||
|
||||
/**
 * Runs `docker login` against `registryHostArg`, feeding the password via
 * --password-stdin so it never appears in the process argument list.
 * Buffers combined stdout/stderr and logs it once on exit.
 * @throws when docker login fails to spawn or exits non-zero.
 */
const dockerLogin = async (registryHostArg: string, usernameArg: string, passwordArg: string) => {
  await new Promise<void>((resolveArg, rejectArg) => {
    const childProcess = execFile('docker', [
      'login',
      registryHostArg,
      '-u',
      usernameArg,
      '--password-stdin',
    ]);
    // Pipe the secret through stdin, then close it so docker can proceed.
    childProcess.stdin?.write(passwordArg);
    childProcess.stdin?.end();
    let output = '';
    childProcess.stdout?.on('data', (dataArg) => {
      output += dataArg.toString();
    });
    childProcess.stderr?.on('data', (dataArg) => {
      output += dataArg.toString();
    });
    childProcess.on('error', rejectArg);
    childProcess.on('exit', (codeArg) => {
      console.log(output.trim());
      if (codeArg === 0) {
        resolveArg();
      } else {
        rejectArg(new Error(`docker login exited with ${codeArg}`));
      }
    });
  });
};
|
||||
|
||||
/**
 * Spins up isolated MongoDB and S3 helpers for this run, registers their
 * teardown in stopFunctions (Mongo dumps to .nogit/mongodump/<smokeId> on
 * stop), creates a dedicated bucket, picks a free public port, and returns a
 * complete Cloudly config pointing at all of it.
 * @throws when no free port is found in the 30000-40000 range.
 */
const createCloudlyConfig = async (): Promise<ICloudlyConfig> => {
  console.log('[registry-deploy-on-push] Starting isolated MongoDB and S3 helpers');
  const smartmongo = await tapNodeTools.createSmartmongo();
  stopFunctions.push(async () => {
    await smartmongo.stopAndDumpToDir(join(testingDir, '.nogit', 'mongodump', smokeId));
  });
  const smarts3 = await tapNodeTools.createSmarts3();
  stopFunctions.push(async () => {
    await smarts3.stop();
  });

  const bucketName = `${smokeId}-bucket`;
  await smarts3.createBucket(bucketName);
  const smartnetwork = new SmartNetwork();
  const publicPort = await smartnetwork.findFreePort(30000, 40000, { randomize: true });
  if (!publicPort) {
    throw new Error('Could not find a free Cloudly scenario port');
  }

  return {
    environment: 'integration',
    letsEncryptEmail: 'test@serve.zone',
    publicUrl: '127.0.0.1',
    publicPort: String(publicPort),
    mongoDescriptor: await smartmongo.getMongoDescriptor(),
    s3Descriptor: await smarts3.getS3Descriptor({
      bucketName,
    }),
    // No real TLS: the scenario talks to Cloudly over plain HTTP locally.
    sslMode: 'none',
    servezoneAdminaccount: 'smokeadmin:smokepassword',
  };
};
|
||||
|
||||
/**
 * End-to-end scenario driver. Phases:
 *  1. Preflight Docker/Swarm, start isolated Cloudly (Mongo + S3).
 *  2. Seed a machine admin user + API client, create cluster/image/secret
 *     bundle/service, resolve the registry push target.
 *  3. Start Coreflow against this Cloudly, ensure the shared networks and
 *     the proxy/coretraffic helper services exist.
 *  4. Push the smoke image twice (two revisions of the same tag) and assert
 *     config updates, service provisioning, HTTPS routing, and that the
 *     Docker service + registry digest change on re-push.
 * The finally block tears everything down in reverse, guarded by the
 * created* flags so only resources this run created are removed.
 */
const main = async () => {
  let testCloudly: Cloudly | undefined;
  let testClient: cloudlyApiClient.CloudlyApiClient | undefined;
  let coreflow: Coreflow | undefined;
  let subscription: { unsubscribe: () => void } | undefined;
  // Track what this run created so cleanup never removes pre-existing resources.
  let createdWebGatewayNetwork = false;
  let createdCorechatNetwork = false;
  let createdCoreflowProxyService = false;
  let createdCoretrafficService = false;
  let startedCoreflowInternalServer = false;
  let serviceName = '';
  let registryImageUrl = '';
  let localImageRevision1 = '';
  let localImageRevision2 = '';
  let serviceForCleanup: { id: string; data: { name: string } } | undefined;
  const routeDomain = `${smokeId}.test`;

  try {
    await ensureDockerReady();

    // Phase 1: isolated Cloudly.
    const cloudlyConfig = await createCloudlyConfig();
    testCloudly = new Cloudly(cloudlyConfig);
    console.log('[registry-deploy-on-push] Starting Cloudly');
    await testCloudly.start();

    // Seed a machine user whose static token the API client and docker login use.
    const machineUser = new testCloudly.authManager.CUser();
    machineUser.id = await testCloudly.authManager.CUser.getNewId();
    machineUser.data = {
      type: 'machine',
      username: 'smoke-admin',
      password: 'smoke-admin-token',
      tokens: [
        {
          token: 'smoke-admin-token',
          expiresAt: Date.now() + 3600 * 1000,
          assignedRoles: ['admin'],
        },
      ],
      role: 'admin',
    };
    await machineUser.save();

    const cloudlyUrl = `http://${cloudlyConfig.publicUrl}:${cloudlyConfig.publicPort}`;
    testClient = new cloudlyApiClient.CloudlyApiClient({
      registerAs: 'api',
      cloudlyUrl,
    });
    await testClient.start();
    await testClient.getIdentityByToken('smoke-admin-token');
    console.log(`[registry-deploy-on-push] Cloudly started at ${cloudlyUrl}`);

    // Phase 2: cluster + service metadata. The cluster's machine user token
    // is what Coreflow later uses to authenticate (JUMPCODE).
    const cluster = await testClient.cluster.createCluster(`${smokeId} cluster`);
    const persistedCluster = await testCloudly.clusterManager.getConfigBy_ConfigID(cluster.id);
    const clusterUser = await testCloudly.authManager.CUser.getInstance({
      id: persistedCluster.data.userId,
    });
    const clusterToken = clusterUser.data.tokens?.[0]?.token;
    if (!clusterToken) {
      throw new Error('Cluster token was not created');
    }

    const image = await testClient.image.createImage({
      name: `${smokeId} image`,
      description: 'End-to-end registry/Coreflow smoke image',
    });
    const secretBundle = await testClient.secretbundle.createSecretBundle({
      name: `${smokeId} secrets`,
      description: 'End-to-end registry/Coreflow smoke secrets',
      type: 'service',
      includedSecretGroupIds: [],
      includedTags: [],
      imageClaims: [],
      authorizations: [
        {
          environment: 'production',
          secretAccessKey: `${smokeId}-secret-access`,
        },
      ],
    });

    serviceName = smokeId;
    const service = await testClient.services.createService({
      name: serviceName,
      description: 'End-to-end registry/Coreflow smoke service',
      imageId: image.id,
      imageVersion: 'latest',
      environment: {},
      secretBundleId: secretBundle.id,
      serviceCategory: 'workload',
      deploymentStrategy: 'custom',
      scaleFactor: 1,
      balancingStrategy: 'round-robin',
      ports: {
        web: 80,
      },
      domains: [
        {
          name: routeDomain,
          port: 80,
          protocol: 'https',
        },
      ],
      deploymentIds: [],
    });
    serviceForCleanup = service;

    const registryTarget = await testClient.services.getRegistryTarget(service.id, 'latest');
    registryImageUrl = registryTarget.imageUrl;
    console.log(`[registry-deploy-on-push] Registry target: ${registryImageUrl}`);

    // Phase 3: Coreflow, networks, helper services. Coreflow reads its
    // Cloudly endpoint and auth token from these env vars.
    process.env.CLOUDLY_URL = cloudlyUrl;
    process.env.JUMPCODE = clusterToken;
    coreflow = new Coreflow();
    await coreflow.dockerHost.start();
    await coreflow.internalServer.start();
    startedCoreflowInternalServer = true;
    // Stub certificate retrieval so no real ACME flow is needed.
    coreflow.cloudlyConnector.getCertificateForDomainFromCloudly = (async () => {
      return await createSelfSignedCertificate(routeDomain);
    }) as any;

    let webGatewayNetwork = await coreflow.dockerHost.getNetworkByName(
      coreflow.clusterManager.commonDockerData.networkNames.sznWebgateway,
    );
    if (!webGatewayNetwork) {
      webGatewayNetwork = await coreflow.dockerHost.createNetwork({
        Name: coreflow.clusterManager.commonDockerData.networkNames.sznWebgateway,
      });
      createdWebGatewayNetwork = true;
    }
    let corechatNetwork = await coreflow.dockerHost.getNetworkByName(
      coreflow.clusterManager.commonDockerData.networkNames.sznCorechat,
    );
    if (!corechatNetwork) {
      corechatNetwork = await coreflow.dockerHost.createNetwork({
        Name: coreflow.clusterManager.commonDockerData.networkNames.sznCorechat,
      });
      createdCorechatNetwork = true;
    }

    const smartnetwork = new SmartNetwork();
    const coretrafficHttpsPort = await smartnetwork.findFreePort(41000, 43000, { randomize: true });
    if (!coretrafficHttpsPort) {
      throw new Error('Could not find a free Coretraffic HTTPS test port');
    }

    await createCoreflowProxyService(coreflow.clusterManager.commonDockerData.networkNames.sznCorechat);
    createdCoreflowProxyService = true;
    await createCoretrafficService(
      coreflow.clusterManager.commonDockerData.networkNames.sznCorechat,
      coreflow.clusterManager.commonDockerData.networkNames.sznWebgateway,
      coretrafficHttpsPort,
    );
    createdCoretrafficService = true;
    await coreflow.cloudlyConnector.start();
    console.log('[registry-deploy-on-push] Coreflow connector authenticated and tagged');
    // Wait until Coretraffic is actually connected; probe via a no-op config push.
    try {
      await waitFor(async () => {
        try {
          await coreflow!.corechatConnector.setReverseConfigs([]);
          return true;
        } catch {
          return false;
        }
      }, 'coretraffic connection to coreflow');
    } catch (error) {
      await printDockerServiceLogs(coretrafficServiceName);
      await printDockerServiceLogs(coreflowProxyServiceName);
      throw error;
    }

    // Collect config updates that mention our service, so the push assertions
    // below can count them.
    const configUpdates: Array<Record<string, any>> = [];
    subscription = coreflow.cloudlyConnector.cloudlyApiClient.configUpdateSubject.subscribe((updateArg) => {
      if (updateArg.services?.some((serviceArg: any) => serviceArg.id === service.id)) {
        configUpdates.push(updateArg);
      }
    });

    // Phase 4a: first push -> config update -> provision -> routed.
    await dockerLogin(registryTarget.registryHost, 'smoke-admin', 'smoke-admin-token');
    localImageRevision1 = await buildSmokeImage('revision1');
    await run('docker', ['tag', localImageRevision1, registryImageUrl]);
    await run('docker', ['push', registryImageUrl]);
    await waitFor(() => configUpdates.length >= 1, 'first registry config update');
    console.log('[registry-deploy-on-push] First docker push produced a Cloudly config update');

    let refreshedService = await testClient.services.getServiceById(service.id);
    await coreflow.clusterManager.provisionWorkloadService(refreshedService as any);
    let dockerService = await coreflow.dockerHost.getServiceByName(serviceName);
    await waitForWorkloadContainer(webGatewayNetwork, dockerService);
    await coreflow.clusterManager.updateTrafficRouting(persistedCluster as any);
    await waitForCoretrafficRoute(
      routeDomain,
      coretrafficHttpsPort,
      'coretraffic HTTPS route to first deployment',
      serviceName,
    );
    console.log(`[registry-deploy-on-push] Coretraffic routed ${routeDomain} to first deployment`);
    const firstDockerServiceId = dockerService.ID;
    const firstDigest = dockerService.Spec.Labels['serve.zone.registryDigest'];
    if (!firstDigest) {
      throw new Error('First deployment did not record a registry digest label');
    }
    console.log(`[registry-deploy-on-push] First deployment: ${firstDockerServiceId} digest=${firstDigest}`);

    // Phase 4b: second push of the SAME tag with a new digest must recreate
    // the Docker service and change the recorded digest.
    localImageRevision2 = await buildSmokeImage('revision2');
    await run('docker', ['tag', localImageRevision2, registryImageUrl]);
    await run('docker', ['push', registryImageUrl]);
    await waitFor(() => configUpdates.length >= 2, 'second registry config update');
    console.log('[registry-deploy-on-push] Second docker push produced a Cloudly config update');

    refreshedService = await testClient.services.getServiceById(service.id);
    await coreflow.clusterManager.provisionWorkloadService(refreshedService as any);
    dockerService = await coreflow.dockerHost.getServiceByName(serviceName);
    await waitForWorkloadContainer(webGatewayNetwork, dockerService);
    await coreflow.clusterManager.updateTrafficRouting(persistedCluster as any);
    await waitForCoretrafficRoute(
      routeDomain,
      coretrafficHttpsPort,
      'coretraffic HTTPS route to redeployment',
      serviceName,
    );
    const secondDockerServiceId = dockerService.ID;
    const secondDigest = dockerService.Spec.Labels['serve.zone.registryDigest'];
    if (firstDockerServiceId === secondDockerServiceId) {
      throw new Error('Docker service ID did not change after same-tag digest update');
    }
    if (firstDigest === secondDigest) {
      throw new Error('Registry digest label did not change after second push');
    }
    console.log(`[registry-deploy-on-push] Redeployment: ${secondDockerServiceId} digest=${secondDigest}`);
    console.log(`[registry-deploy-on-push] Coretraffic routed ${routeDomain} to redeployment`);
    console.log('[registry-deploy-on-push] PASS');
  } finally {
    // Teardown: reverse order of creation, each step best-effort so one
    // failure does not strand the remaining resources.
    subscription?.unsubscribe();
    if (coreflow) {
      if (createdCoretrafficService) {
        await removeDockerService(coretrafficServiceName);
      }
      if (createdCoreflowProxyService) {
        await removeDockerService(coreflowProxyServiceName);
      }
      if (serviceName) {
        const dockerService = await coreflow.dockerHost.getServiceByName(serviceName).catch(() => null);
        if (dockerService) {
          await dockerService.remove();
          // Give Swarm time to release the service's network attachments.
          await delayFor(5000);
        }
      }
      if (serviceForCleanup) {
        const dockerSecret = await coreflow.dockerHost
          .getSecretByName(getWorkloadSecretName(serviceForCleanup))
          .catch(() => null);
        if (dockerSecret) {
          await dockerSecret.remove();
        }
      }
      if (createdWebGatewayNetwork) {
        const webGatewayNetwork = await coreflow.dockerHost
          .getNetworkByName(coreflow.clusterManager.commonDockerData.networkNames.sznWebgateway)
          .catch(() => null);
        if (webGatewayNetwork) {
          await webGatewayNetwork.remove();
        }
      }
      if (createdCorechatNetwork) {
        const corechatNetwork = await coreflow.dockerHost
          .getNetworkByName(coreflow.clusterManager.commonDockerData.networkNames.sznCorechat)
          .catch(() => null);
        if (corechatNetwork) {
          await corechatNetwork.remove();
        }
      }
      await coreflow.cloudlyConnector.stop().catch(() => null);
      if (startedCoreflowInternalServer) {
        await coreflow.internalServer.stop().catch(() => null);
      }
      await coreflow.dockerHost.stop().catch(() => null);
    }
    if (testClient) {
      await testClient.stop().catch(() => null);
    }
    if (testCloudly) {
      await testCloudly.stop().catch(() => null);
    }
    // Stop Mongo/S3 helpers registered by createCloudlyConfig.
    await Promise.all(stopFunctions.map((stopFunction) => stopFunction().catch(() => null)));
    if (registryImageUrl) {
      await dockerImageRemove(registryImageUrl);
    }
    if (localImageRevision1) {
      await dockerImageRemove(localImageRevision1);
    }
    if (localImageRevision2) {
      await dockerImageRemove(localImageRevision2);
    }
    rmSync(buildDir, { force: true, recursive: true });
  }
};

// ESM top-level await: the script's exit code reflects main()'s outcome.
await main();
|
||||
@@ -0,0 +1,30 @@
|
||||
#!/usr/bin/env bash
# Provisions the Vagrant test VM: Docker, Node.js 22, pnpm 10.7.0, and an
# active single-node Docker Swarm, plus the base images the scenarios use.
set -euo pipefail

export DEBIAN_FRONTEND=noninteractive

apt-get update
apt-get install -y ca-certificates curl git docker.io openssl

# The rsync-synced tree arrives root-owned; hand it to the vagrant user.
if [ -d /serve.zone ]; then
  chown -R vagrant:vagrant /serve.zone
fi

systemctl enable --now docker
# Allow docker without sudo; tolerate re-provisioning when already a member.
usermod -aG docker vagrant || true

# Install Node.js 22 from NodeSource only when node is not already present.
if ! command -v node >/dev/null 2>&1; then
  curl -fsSL https://deb.nodesource.com/setup_22.x | bash -
  apt-get install -y nodejs
fi

# Pin the pnpm version to match the repo's packageManager field.
corepack enable
corepack prepare pnpm@10.7.0 --activate

# Initialize Swarm once; re-provisioning leaves an active swarm untouched.
swarm_state="$(docker info --format '{{.Swarm.LocalNodeState}}' 2>/dev/null || true)"
if [ "${swarm_state}" = "inactive" ]; then
  docker swarm init --advertise-addr 127.0.0.1
fi

# Pre-pull images used by the scenario helper services.
docker pull caddy:2-alpine
docker pull node:22-trixie-slim
|
||||
@@ -0,0 +1,20 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
"target": "ES2022",
|
||||
"module": "NodeNext",
|
||||
"moduleResolution": "NodeNext",
|
||||
"experimentalDecorators": true,
|
||||
"useDefineForClassFields": false,
|
||||
"verbatimModuleSyntax": true,
|
||||
"strict": true,
|
||||
"esModuleInterop": true,
|
||||
"resolveJsonModule": true,
|
||||
"baseUrl": "..",
|
||||
"paths": {
|
||||
"ts/*": ["cloudly/ts/*"]
|
||||
},
|
||||
"types": ["node"],
|
||||
"noEmit": true
|
||||
},
|
||||
"include": ["scenarios/**/*.ts", "scripts/**/*.ts"]
|
||||
}
|
||||
Reference in New Issue
Block a user