813 lines
25 KiB
TypeScript
813 lines
25 KiB
TypeScript
import * as fs from 'node:fs';
|
|
import * as os from 'node:os';
|
|
import process from 'node:process';
|
|
import { execFile } from 'node:child_process';
|
|
import { promisify } from 'node:util';
|
|
import { Action, type IActionContext } from './base-action.ts';
|
|
import { logger } from '../logger.ts';
|
|
import { PROXMOX, UI } from '../constants.ts';
|
|
|
|
// Promisified execFile so CLI invocations can be awaited; execFile (unlike exec)
// passes arguments as an array, avoiding shell interpolation of VM IDs/paths.
const execFileAsync = promisify(execFile);

// Minimal structural view of a Node-like global. apiRequest() reads `process`
// off globalThis through this type (instead of the imported `process`) so the
// TLS toggle degrades gracefully on runtimes where `globalThis.process` may be
// absent — presumably for Deno compatibility; TODO confirm.
type TNodeLikeGlobal = typeof globalThis & {
  process?: {
    env: Record<string, string | undefined>;
  };
};
|
|
|
|
/**
|
|
* ProxmoxAction - Gracefully shuts down Proxmox VMs and LXC containers
|
|
*
|
|
* Supports two operation modes:
|
|
* - CLI mode: Uses qm/pct commands directly (requires running as root on a Proxmox host)
|
|
* - API mode: Uses the Proxmox REST API via HTTPS with API token authentication
|
|
*
|
|
* In 'auto' mode (default), CLI is preferred when available, falling back to API.
|
|
*
|
|
* This action should be placed BEFORE shutdown actions in the action chain
|
|
* so that VMs are stopped before the host is shut down.
|
|
*/
|
|
export class ProxmoxAction extends Action {
|
|
readonly type = 'proxmox';
|
|
private static readonly activeRunKeys = new Set<string>();
|
|
|
|
private static findCliTool(command: string): string | null {
|
|
for (const dir of PROXMOX.CLI_TOOL_PATHS) {
|
|
const candidate = `${dir}/${command}`;
|
|
try {
|
|
if (fs.existsSync(candidate)) {
|
|
return candidate;
|
|
}
|
|
} catch (_e) {
|
|
// continue
|
|
}
|
|
}
|
|
|
|
return null;
|
|
}
|
|
|
|
/**
|
|
* Check if Proxmox CLI tools (qm, pct) are available on the system
|
|
* Used by CLI wizards and by execute() for auto-detection
|
|
*/
|
|
static detectCliAvailability(): {
|
|
available: boolean;
|
|
qmPath: string | null;
|
|
pctPath: string | null;
|
|
haManagerPath: string | null;
|
|
isRoot: boolean;
|
|
} {
|
|
const qmPath = this.findCliTool('qm');
|
|
const pctPath = this.findCliTool('pct');
|
|
const haManagerPath = this.findCliTool('ha-manager');
|
|
|
|
const isRoot = !!(process.getuid && process.getuid() === 0);
|
|
|
|
return {
|
|
available: qmPath !== null && pctPath !== null && isRoot,
|
|
qmPath,
|
|
pctPath,
|
|
haManagerPath,
|
|
isRoot,
|
|
};
|
|
}
|
|
|
|
/**
|
|
* Resolve the operation mode based on config and environment
|
|
*/
|
|
private resolveMode(): { mode: 'api' | 'cli'; qmPath: string; pctPath: string } | {
|
|
mode: 'api';
|
|
qmPath?: undefined;
|
|
pctPath?: undefined;
|
|
} {
|
|
const configuredMode = this.config.proxmoxMode || 'auto';
|
|
|
|
if (configuredMode === 'api') {
|
|
return { mode: 'api' };
|
|
}
|
|
|
|
const detection = ProxmoxAction.detectCliAvailability();
|
|
|
|
if (configuredMode === 'cli') {
|
|
if (!detection.qmPath || !detection.pctPath) {
|
|
throw new Error('CLI mode requested but qm/pct not found. Are you on a Proxmox host?');
|
|
}
|
|
if (!detection.isRoot) {
|
|
throw new Error('CLI mode requires root access');
|
|
}
|
|
return { mode: 'cli', qmPath: detection.qmPath, pctPath: detection.pctPath };
|
|
}
|
|
|
|
// Auto-detect
|
|
if (detection.available && detection.qmPath && detection.pctPath) {
|
|
return { mode: 'cli', qmPath: detection.qmPath, pctPath: detection.pctPath };
|
|
}
|
|
return { mode: 'api' };
|
|
}
|
|
|
|
/**
|
|
* Execute the Proxmox shutdown action
|
|
*/
|
|
async execute(context: IActionContext): Promise<void> {
|
|
if (!this.shouldExecute(context)) {
|
|
logger.info(
|
|
`Proxmox action skipped (trigger mode: ${
|
|
this.config.triggerMode || 'powerChangesAndThresholds'
|
|
})`,
|
|
);
|
|
return;
|
|
}
|
|
|
|
const resolved = this.resolveMode();
|
|
const node = this.config.proxmoxNode || os.hostname();
|
|
const excludeIds = new Set(this.config.proxmoxExcludeIds || []);
|
|
const stopTimeout = (this.config.proxmoxStopTimeout || PROXMOX.DEFAULT_STOP_TIMEOUT_SECONDS) *
|
|
1000;
|
|
const forceStop = this.config.proxmoxForceStop !== false; // default true
|
|
const haPolicy = this.config.proxmoxHaPolicy || 'none';
|
|
const host = this.config.proxmoxHost || PROXMOX.DEFAULT_HOST;
|
|
const port = this.config.proxmoxPort || PROXMOX.DEFAULT_PORT;
|
|
const runKey = `${resolved.mode}:${node}:${
|
|
resolved.mode === 'api' ? `${host}:${port}` : 'local'
|
|
}`;
|
|
|
|
if (ProxmoxAction.activeRunKeys.has(runKey)) {
|
|
logger.info(`Proxmox action skipped: shutdown sequence already running for node ${node}`);
|
|
return;
|
|
}
|
|
|
|
ProxmoxAction.activeRunKeys.add(runKey);
|
|
|
|
logger.log('');
|
|
logger.logBoxTitle('Proxmox VM Shutdown', UI.WIDE_BOX_WIDTH, 'warning');
|
|
logger.logBoxLine(`Mode: ${resolved.mode === 'cli' ? 'CLI (qm/pct)' : 'API (REST)'}`);
|
|
logger.logBoxLine(`Node: ${node}`);
|
|
logger.logBoxLine(`HA Policy: ${haPolicy}`);
|
|
if (resolved.mode === 'api') {
|
|
logger.logBoxLine(`API: ${host}:${port}`);
|
|
}
|
|
logger.logBoxLine(`UPS: ${context.upsName} (${context.powerStatus})`);
|
|
logger.logBoxLine(`Trigger: ${context.triggerReason}`);
|
|
if (excludeIds.size > 0) {
|
|
logger.logBoxLine(`Excluded IDs: ${[...excludeIds].join(', ')}`);
|
|
}
|
|
logger.logBoxEnd();
|
|
logger.log('');
|
|
|
|
try {
|
|
let apiContext: {
|
|
baseUrl: string;
|
|
headers: Record<string, string>;
|
|
insecure: boolean;
|
|
} | null = null;
|
|
let runningVMs: Array<{ vmid: number; name: string }>;
|
|
let runningCTs: Array<{ vmid: number; name: string }>;
|
|
|
|
if (resolved.mode === 'cli') {
|
|
runningVMs = await this.getRunningVMsCli(resolved.qmPath);
|
|
runningCTs = await this.getRunningCTsCli(resolved.pctPath);
|
|
} else {
|
|
// API mode - validate token
|
|
const tokenId = this.config.proxmoxTokenId;
|
|
const tokenSecret = this.config.proxmoxTokenSecret;
|
|
const insecure = this.config.proxmoxInsecure !== false;
|
|
|
|
if (!tokenId || !tokenSecret) {
|
|
logger.error('Proxmox API token ID and secret are required for API mode');
|
|
logger.error('Either provide tokens or run on a Proxmox host as root for CLI mode');
|
|
return;
|
|
}
|
|
|
|
apiContext = {
|
|
baseUrl: `https://${host}:${port}${PROXMOX.API_BASE}`,
|
|
headers: {
|
|
'Authorization': `PVEAPIToken=${tokenId}=${tokenSecret}`,
|
|
},
|
|
insecure,
|
|
};
|
|
|
|
runningVMs = await this.getRunningVMsApi(
|
|
apiContext.baseUrl,
|
|
node,
|
|
apiContext.headers,
|
|
apiContext.insecure,
|
|
);
|
|
runningCTs = await this.getRunningCTsApi(
|
|
apiContext.baseUrl,
|
|
node,
|
|
apiContext.headers,
|
|
apiContext.insecure,
|
|
);
|
|
}
|
|
|
|
// Filter out excluded IDs
|
|
const vmsToStop = runningVMs.filter((vm) => !excludeIds.has(vm.vmid));
|
|
const ctsToStop = runningCTs.filter((ct) => !excludeIds.has(ct.vmid));
|
|
|
|
const totalToStop = vmsToStop.length + ctsToStop.length;
|
|
if (totalToStop === 0) {
|
|
logger.info('No running VMs or containers to shut down');
|
|
return;
|
|
}
|
|
|
|
const haManagedResources = haPolicy === 'haStop'
|
|
? await this.getHaManagedResources(resolved, apiContext)
|
|
: { qemu: new Set<number>(), lxc: new Set<number>() };
|
|
const haVmsToStop = vmsToStop.filter((vm) => haManagedResources.qemu.has(vm.vmid));
|
|
const haCtsToStop = ctsToStop.filter((ct) => haManagedResources.lxc.has(ct.vmid));
|
|
let directVmsToStop = vmsToStop.filter((vm) => !haManagedResources.qemu.has(vm.vmid));
|
|
let directCtsToStop = ctsToStop.filter((ct) => !haManagedResources.lxc.has(ct.vmid));
|
|
|
|
logger.info(`Shutting down ${vmsToStop.length} VMs and ${ctsToStop.length} containers...`);
|
|
|
|
if (resolved.mode === 'cli') {
|
|
const { haManagerPath } = ProxmoxAction.detectCliAvailability();
|
|
if (haPolicy === 'haStop' && (haVmsToStop.length > 0 || haCtsToStop.length > 0)) {
|
|
if (!haManagerPath) {
|
|
logger.warn(
|
|
'ha-manager not found, falling back to direct guest shutdown for HA-managed resources',
|
|
);
|
|
directVmsToStop = [...haVmsToStop, ...directVmsToStop];
|
|
directCtsToStop = [...haCtsToStop, ...directCtsToStop];
|
|
} else {
|
|
for (const vm of haVmsToStop) {
|
|
await this.requestHaStopCli(haManagerPath, `vm:${vm.vmid}`);
|
|
logger.dim(` HA stop requested for VM ${vm.vmid} (${vm.name || 'unnamed'})`);
|
|
}
|
|
for (const ct of haCtsToStop) {
|
|
await this.requestHaStopCli(haManagerPath, `ct:${ct.vmid}`);
|
|
logger.dim(` HA stop requested for CT ${ct.vmid} (${ct.name || 'unnamed'})`);
|
|
}
|
|
}
|
|
}
|
|
|
|
for (const vm of directVmsToStop) {
|
|
await this.shutdownVMCli(resolved.qmPath, vm.vmid);
|
|
logger.dim(` Shutdown sent to VM ${vm.vmid} (${vm.name || 'unnamed'})`);
|
|
}
|
|
for (const ct of directCtsToStop) {
|
|
await this.shutdownCTCli(resolved.pctPath, ct.vmid);
|
|
logger.dim(` Shutdown sent to CT ${ct.vmid} (${ct.name || 'unnamed'})`);
|
|
}
|
|
} else if (apiContext) {
|
|
for (const vm of haVmsToStop) {
|
|
await this.requestHaStopApi(
|
|
apiContext.baseUrl,
|
|
`vm:${vm.vmid}`,
|
|
apiContext.headers,
|
|
apiContext.insecure,
|
|
);
|
|
logger.dim(` HA stop requested for VM ${vm.vmid} (${vm.name || 'unnamed'})`);
|
|
}
|
|
for (const ct of haCtsToStop) {
|
|
await this.requestHaStopApi(
|
|
apiContext.baseUrl,
|
|
`ct:${ct.vmid}`,
|
|
apiContext.headers,
|
|
apiContext.insecure,
|
|
);
|
|
logger.dim(` HA stop requested for CT ${ct.vmid} (${ct.name || 'unnamed'})`);
|
|
}
|
|
|
|
for (const vm of directVmsToStop) {
|
|
await this.shutdownVMApi(
|
|
apiContext.baseUrl,
|
|
node,
|
|
vm.vmid,
|
|
apiContext.headers,
|
|
apiContext.insecure,
|
|
);
|
|
logger.dim(` Shutdown sent to VM ${vm.vmid} (${vm.name || 'unnamed'})`);
|
|
}
|
|
for (const ct of directCtsToStop) {
|
|
await this.shutdownCTApi(
|
|
apiContext.baseUrl,
|
|
node,
|
|
ct.vmid,
|
|
apiContext.headers,
|
|
apiContext.insecure,
|
|
);
|
|
logger.dim(` Shutdown sent to CT ${ct.vmid} (${ct.name || 'unnamed'})`);
|
|
}
|
|
}
|
|
|
|
// Poll until all stopped or timeout
|
|
const allIds = [
|
|
...vmsToStop.map((vm) => ({ type: 'qemu' as const, vmid: vm.vmid, name: vm.name })),
|
|
...ctsToStop.map((ct) => ({ type: 'lxc' as const, vmid: ct.vmid, name: ct.name })),
|
|
];
|
|
|
|
const remaining = await this.waitForShutdown(allIds, resolved, node, stopTimeout);
|
|
|
|
if (remaining.length > 0 && forceStop) {
|
|
logger.warn(`${remaining.length} VMs/CTs didn't shut down gracefully, force-stopping...`);
|
|
for (const item of remaining) {
|
|
try {
|
|
if (resolved.mode === 'cli') {
|
|
if (item.type === 'qemu') {
|
|
await this.stopVMCli(resolved.qmPath, item.vmid);
|
|
} else {
|
|
await this.stopCTCli(resolved.pctPath, item.vmid);
|
|
}
|
|
} else if (apiContext) {
|
|
if (item.type === 'qemu') {
|
|
await this.stopVMApi(
|
|
apiContext.baseUrl,
|
|
node,
|
|
item.vmid,
|
|
apiContext.headers,
|
|
apiContext.insecure,
|
|
);
|
|
} else {
|
|
await this.stopCTApi(
|
|
apiContext.baseUrl,
|
|
node,
|
|
item.vmid,
|
|
apiContext.headers,
|
|
apiContext.insecure,
|
|
);
|
|
}
|
|
}
|
|
logger.dim(` Force-stopped ${item.type} ${item.vmid} (${item.name || 'unnamed'})`);
|
|
} catch (error) {
|
|
logger.error(
|
|
` Failed to force-stop ${item.type} ${item.vmid}: ${
|
|
error instanceof Error ? error.message : String(error)
|
|
}`,
|
|
);
|
|
}
|
|
}
|
|
} else if (remaining.length > 0) {
|
|
logger.warn(`${remaining.length} VMs/CTs still running (force-stop disabled)`);
|
|
}
|
|
|
|
logger.success('Proxmox shutdown sequence completed');
|
|
} catch (error) {
|
|
logger.error(
|
|
`Proxmox action failed: ${error instanceof Error ? error.message : String(error)}`,
|
|
);
|
|
} finally {
|
|
ProxmoxAction.activeRunKeys.delete(runKey);
|
|
}
|
|
}
|
|
|
|
// ─── CLI-based methods ─────────────────────────────────────────────
|
|
|
|
/**
|
|
* Get list of running QEMU VMs via qm list
|
|
*/
|
|
private async getRunningVMsCli(
|
|
qmPath: string,
|
|
): Promise<Array<{ vmid: number; name: string }>> {
|
|
try {
|
|
const { stdout } = await execFileAsync(qmPath, ['list']);
|
|
return this.parseQmList(stdout);
|
|
} catch (error) {
|
|
logger.error(
|
|
`Failed to list VMs via CLI: ${error instanceof Error ? error.message : String(error)}`,
|
|
);
|
|
return [];
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Get list of running LXC containers via pct list
|
|
*/
|
|
private async getRunningCTsCli(
|
|
pctPath: string,
|
|
): Promise<Array<{ vmid: number; name: string }>> {
|
|
try {
|
|
const { stdout } = await execFileAsync(pctPath, ['list']);
|
|
return this.parsePctList(stdout);
|
|
} catch (error) {
|
|
logger.error(
|
|
`Failed to list CTs via CLI: ${error instanceof Error ? error.message : String(error)}`,
|
|
);
|
|
return [];
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Parse qm list output
|
|
* Format: VMID NAME STATUS MEM(MB) BOOTDISK(GB) PID
|
|
*/
|
|
private parseQmList(output: string): Array<{ vmid: number; name: string }> {
|
|
const results: Array<{ vmid: number; name: string }> = [];
|
|
const lines = output.trim().split('\n');
|
|
|
|
// Skip header line
|
|
for (let i = 1; i < lines.length; i++) {
|
|
const match = lines[i].match(/^\s*(\d+)\s+(\S+)\s+(running|stopped|paused)/);
|
|
if (match && match[3] === 'running') {
|
|
results.push({ vmid: parseInt(match[1], 10), name: match[2] });
|
|
}
|
|
}
|
|
return results;
|
|
}
|
|
|
|
/**
|
|
* Parse pct list output
|
|
* Format: VMID Status Lock Name
|
|
*/
|
|
private parsePctList(output: string): Array<{ vmid: number; name: string }> {
|
|
const results: Array<{ vmid: number; name: string }> = [];
|
|
const lines = output.trim().split('\n');
|
|
|
|
// Skip header line
|
|
for (let i = 1; i < lines.length; i++) {
|
|
const match = lines[i].match(/^\s*(\d+)\s+(running|stopped)\s+\S*\s*(.*)/);
|
|
if (match && match[2] === 'running') {
|
|
results.push({ vmid: parseInt(match[1], 10), name: match[3]?.trim() || '' });
|
|
}
|
|
}
|
|
return results;
|
|
}
|
|
|
|
private async shutdownVMCli(qmPath: string, vmid: number): Promise<void> {
|
|
await execFileAsync(qmPath, ['shutdown', String(vmid)]);
|
|
}
|
|
|
|
private async shutdownCTCli(pctPath: string, vmid: number): Promise<void> {
|
|
await execFileAsync(pctPath, ['shutdown', String(vmid)]);
|
|
}
|
|
|
|
private async stopVMCli(qmPath: string, vmid: number): Promise<void> {
|
|
await execFileAsync(qmPath, ['stop', String(vmid)]);
|
|
}
|
|
|
|
private async stopCTCli(pctPath: string, vmid: number): Promise<void> {
|
|
await execFileAsync(pctPath, ['stop', String(vmid)]);
|
|
}
|
|
|
|
/**
|
|
* Get VM/CT status via CLI
|
|
* Returns the status string (e.g., 'running', 'stopped')
|
|
*/
|
|
private async getStatusCli(
|
|
toolPath: string,
|
|
vmid: number,
|
|
): Promise<string> {
|
|
const { stdout } = await execFileAsync(toolPath, ['status', String(vmid)]);
|
|
// Output format: "status: running\n"
|
|
const status = stdout.trim().split(':')[1]?.trim() || 'unknown';
|
|
return status;
|
|
}
|
|
|
|
private async getHaManagedResources(
|
|
resolved: { mode: 'api' | 'cli'; qmPath?: string; pctPath?: string },
|
|
apiContext: {
|
|
baseUrl: string;
|
|
headers: Record<string, string>;
|
|
insecure: boolean;
|
|
} | null,
|
|
): Promise<{ qemu: Set<number>; lxc: Set<number> }> {
|
|
if (resolved.mode === 'cli') {
|
|
const { haManagerPath } = ProxmoxAction.detectCliAvailability();
|
|
if (!haManagerPath) {
|
|
return { qemu: new Set<number>(), lxc: new Set<number>() };
|
|
}
|
|
|
|
return await this.getHaManagedResourcesCli(haManagerPath);
|
|
}
|
|
|
|
if (!apiContext) {
|
|
return { qemu: new Set<number>(), lxc: new Set<number>() };
|
|
}
|
|
|
|
return await this.getHaManagedResourcesApi(
|
|
apiContext.baseUrl,
|
|
apiContext.headers,
|
|
apiContext.insecure,
|
|
);
|
|
}
|
|
|
|
private async getHaManagedResourcesCli(
|
|
haManagerPath: string,
|
|
): Promise<{ qemu: Set<number>; lxc: Set<number> }> {
|
|
try {
|
|
const { stdout } = await execFileAsync(haManagerPath, ['config']);
|
|
return this.parseHaManagerConfig(stdout);
|
|
} catch (error) {
|
|
logger.warn(
|
|
`Failed to list HA resources via CLI: ${
|
|
error instanceof Error ? error.message : String(error)
|
|
}`,
|
|
);
|
|
return { qemu: new Set<number>(), lxc: new Set<number>() };
|
|
}
|
|
}
|
|
|
|
private parseHaManagerConfig(output: string): { qemu: Set<number>; lxc: Set<number> } {
|
|
const resources = {
|
|
qemu: new Set<number>(),
|
|
lxc: new Set<number>(),
|
|
};
|
|
|
|
for (const line of output.trim().split('\n')) {
|
|
const match = line.match(/^\s*(vm|ct)\s*:\s*(\d+)\s*$/i);
|
|
if (!match) {
|
|
continue;
|
|
}
|
|
|
|
const vmid = parseInt(match[2], 10);
|
|
if (match[1].toLowerCase() === 'vm') {
|
|
resources.qemu.add(vmid);
|
|
} else {
|
|
resources.lxc.add(vmid);
|
|
}
|
|
}
|
|
|
|
return resources;
|
|
}
|
|
|
|
private async requestHaStopCli(haManagerPath: string, sid: string): Promise<void> {
|
|
await execFileAsync(haManagerPath, ['set', sid, '--state', 'stopped']);
|
|
}
|
|
|
|
// ─── API-based methods ─────────────────────────────────────────────
|
|
|
|
/**
|
|
* Make an API request to the Proxmox server
|
|
*/
|
|
private async apiRequest(
|
|
url: string,
|
|
method: string,
|
|
headers: Record<string, string>,
|
|
insecure: boolean,
|
|
body?: URLSearchParams,
|
|
): Promise<unknown> {
|
|
const requestHeaders = { ...headers };
|
|
const fetchOptions: RequestInit = {
|
|
method,
|
|
headers: requestHeaders,
|
|
};
|
|
|
|
if (body) {
|
|
requestHeaders['Content-Type'] = 'application/x-www-form-urlencoded;charset=UTF-8';
|
|
fetchOptions.body = body.toString();
|
|
}
|
|
|
|
// Use NODE_TLS_REJECT_UNAUTHORIZED for insecure mode (self-signed certs)
|
|
const nodeProcess = (globalThis as TNodeLikeGlobal).process;
|
|
if (insecure && nodeProcess?.env) {
|
|
nodeProcess.env.NODE_TLS_REJECT_UNAUTHORIZED = '0';
|
|
}
|
|
|
|
try {
|
|
const response = await fetch(url, fetchOptions);
|
|
|
|
if (!response.ok) {
|
|
const body = await response.text();
|
|
throw new Error(`Proxmox API error ${response.status}: ${body}`);
|
|
}
|
|
|
|
return await response.json();
|
|
} finally {
|
|
// Restore TLS verification
|
|
if (insecure && nodeProcess?.env) {
|
|
nodeProcess.env.NODE_TLS_REJECT_UNAUTHORIZED = '1';
|
|
}
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Get list of running QEMU VMs via API
|
|
*/
|
|
private async getRunningVMsApi(
|
|
baseUrl: string,
|
|
node: string,
|
|
headers: Record<string, string>,
|
|
insecure: boolean,
|
|
): Promise<Array<{ vmid: number; name: string }>> {
|
|
try {
|
|
const response = await this.apiRequest(
|
|
`${baseUrl}/nodes/${node}/qemu`,
|
|
'GET',
|
|
headers,
|
|
insecure,
|
|
) as { data: Array<{ vmid: number; name: string; status: string }> };
|
|
|
|
return (response.data || [])
|
|
.filter((vm) => vm.status === 'running')
|
|
.map((vm) => ({ vmid: vm.vmid, name: vm.name || '' }));
|
|
} catch (error) {
|
|
logger.error(
|
|
`Failed to list VMs: ${error instanceof Error ? error.message : String(error)}`,
|
|
);
|
|
return [];
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Get list of running LXC containers via API
|
|
*/
|
|
private async getRunningCTsApi(
|
|
baseUrl: string,
|
|
node: string,
|
|
headers: Record<string, string>,
|
|
insecure: boolean,
|
|
): Promise<Array<{ vmid: number; name: string }>> {
|
|
try {
|
|
const response = await this.apiRequest(
|
|
`${baseUrl}/nodes/${node}/lxc`,
|
|
'GET',
|
|
headers,
|
|
insecure,
|
|
) as { data: Array<{ vmid: number; name: string; status: string }> };
|
|
|
|
return (response.data || [])
|
|
.filter((ct) => ct.status === 'running')
|
|
.map((ct) => ({ vmid: ct.vmid, name: ct.name || '' }));
|
|
} catch (error) {
|
|
logger.error(
|
|
`Failed to list CTs: ${error instanceof Error ? error.message : String(error)}`,
|
|
);
|
|
return [];
|
|
}
|
|
}
|
|
|
|
private async getHaManagedResourcesApi(
|
|
baseUrl: string,
|
|
headers: Record<string, string>,
|
|
insecure: boolean,
|
|
): Promise<{ qemu: Set<number>; lxc: Set<number> }> {
|
|
try {
|
|
const response = await this.apiRequest(
|
|
`${baseUrl}/cluster/ha/resources`,
|
|
'GET',
|
|
headers,
|
|
insecure,
|
|
) as { data: Array<{ sid?: string }> };
|
|
const resources = {
|
|
qemu: new Set<number>(),
|
|
lxc: new Set<number>(),
|
|
};
|
|
|
|
for (const item of response.data || []) {
|
|
const match = item.sid?.match(/^(vm|ct):(\d+)$/i);
|
|
if (!match) {
|
|
continue;
|
|
}
|
|
|
|
const vmid = parseInt(match[2], 10);
|
|
if (match[1].toLowerCase() === 'vm') {
|
|
resources.qemu.add(vmid);
|
|
} else {
|
|
resources.lxc.add(vmid);
|
|
}
|
|
}
|
|
|
|
return resources;
|
|
} catch (error) {
|
|
logger.warn(
|
|
`Failed to list HA resources via API: ${
|
|
error instanceof Error ? error.message : String(error)
|
|
}`,
|
|
);
|
|
return { qemu: new Set<number>(), lxc: new Set<number>() };
|
|
}
|
|
}
|
|
|
|
private async requestHaStopApi(
|
|
baseUrl: string,
|
|
sid: string,
|
|
headers: Record<string, string>,
|
|
insecure: boolean,
|
|
): Promise<void> {
|
|
await this.apiRequest(
|
|
`${baseUrl}/cluster/ha/resources/${encodeURIComponent(sid)}`,
|
|
'PUT',
|
|
headers,
|
|
insecure,
|
|
new URLSearchParams({ state: 'stopped' }),
|
|
);
|
|
}
|
|
|
|
private async shutdownVMApi(
|
|
baseUrl: string,
|
|
node: string,
|
|
vmid: number,
|
|
headers: Record<string, string>,
|
|
insecure: boolean,
|
|
): Promise<void> {
|
|
await this.apiRequest(
|
|
`${baseUrl}/nodes/${node}/qemu/${vmid}/status/shutdown`,
|
|
'POST',
|
|
headers,
|
|
insecure,
|
|
);
|
|
}
|
|
|
|
private async shutdownCTApi(
|
|
baseUrl: string,
|
|
node: string,
|
|
vmid: number,
|
|
headers: Record<string, string>,
|
|
insecure: boolean,
|
|
): Promise<void> {
|
|
await this.apiRequest(
|
|
`${baseUrl}/nodes/${node}/lxc/${vmid}/status/shutdown`,
|
|
'POST',
|
|
headers,
|
|
insecure,
|
|
);
|
|
}
|
|
|
|
private async stopVMApi(
|
|
baseUrl: string,
|
|
node: string,
|
|
vmid: number,
|
|
headers: Record<string, string>,
|
|
insecure: boolean,
|
|
): Promise<void> {
|
|
await this.apiRequest(
|
|
`${baseUrl}/nodes/${node}/qemu/${vmid}/status/stop`,
|
|
'POST',
|
|
headers,
|
|
insecure,
|
|
);
|
|
}
|
|
|
|
private async stopCTApi(
|
|
baseUrl: string,
|
|
node: string,
|
|
vmid: number,
|
|
headers: Record<string, string>,
|
|
insecure: boolean,
|
|
): Promise<void> {
|
|
await this.apiRequest(
|
|
`${baseUrl}/nodes/${node}/lxc/${vmid}/status/stop`,
|
|
'POST',
|
|
headers,
|
|
insecure,
|
|
);
|
|
}
|
|
|
|
// ─── Shared methods ────────────────────────────────────────────────
|
|
|
|
/**
|
|
* Wait for VMs/CTs to shut down, return any that are still running after timeout
|
|
*/
|
|
private async waitForShutdown(
|
|
items: Array<{ type: 'qemu' | 'lxc'; vmid: number; name: string }>,
|
|
resolved: { mode: 'api' | 'cli'; qmPath?: string; pctPath?: string },
|
|
node: string,
|
|
timeout: number,
|
|
): Promise<Array<{ type: 'qemu' | 'lxc'; vmid: number; name: string }>> {
|
|
const startTime = Date.now();
|
|
let remaining = [...items];
|
|
|
|
while (remaining.length > 0 && (Date.now() - startTime) < timeout) {
|
|
// Wait before polling
|
|
await new Promise((resolve) =>
|
|
setTimeout(resolve, PROXMOX.STATUS_POLL_INTERVAL_SECONDS * 1000)
|
|
);
|
|
|
|
// Check which are still running
|
|
const stillRunning: typeof remaining = [];
|
|
|
|
for (const item of remaining) {
|
|
try {
|
|
let status: string;
|
|
|
|
if (resolved.mode === 'cli') {
|
|
const toolPath = item.type === 'qemu' ? resolved.qmPath! : resolved.pctPath!;
|
|
status = await this.getStatusCli(toolPath, item.vmid);
|
|
} else {
|
|
const host = this.config.proxmoxHost || PROXMOX.DEFAULT_HOST;
|
|
const port = this.config.proxmoxPort || PROXMOX.DEFAULT_PORT;
|
|
const insecure = this.config.proxmoxInsecure !== false;
|
|
const baseUrl = `https://${host}:${port}${PROXMOX.API_BASE}`;
|
|
const headers: Record<string, string> = {
|
|
'Authorization':
|
|
`PVEAPIToken=${this.config.proxmoxTokenId}=${this.config.proxmoxTokenSecret}`,
|
|
};
|
|
const statusUrl = `${baseUrl}/nodes/${node}/${item.type}/${item.vmid}/status/current`;
|
|
const response = await this.apiRequest(statusUrl, 'GET', headers, insecure) as {
|
|
data: { status: string };
|
|
};
|
|
status = response.data?.status || 'unknown';
|
|
}
|
|
|
|
if (status === 'running') {
|
|
stillRunning.push(item);
|
|
} else {
|
|
logger.dim(` ${item.type} ${item.vmid} (${item.name}) stopped`);
|
|
}
|
|
} catch (_error) {
|
|
// If we can't check status, assume it might still be running
|
|
stillRunning.push(item);
|
|
}
|
|
}
|
|
|
|
remaining = stillRunning;
|
|
|
|
if (remaining.length > 0) {
|
|
const elapsed = Math.round((Date.now() - startTime) / 1000);
|
|
logger.dim(` Waiting... ${remaining.length} still running (${elapsed}s elapsed)`);
|
|
}
|
|
}
|
|
|
|
return remaining;
|
|
}
|
|
}
|