Compare commits
32 Commits
| SHA1 |
|---|
| d01da261ee |
| 2c5ea744f1 |
| 2d4846cfed |
| 2b87d63121 |
| 140ce716f2 |
| b9b7f2b4a3 |
| aedcc3f875 |
| f85241dcd5 |
| 45b593cd7c |
| 352562b1a5 |
| e02b5b7046 |
| 7727fafeec |
| 0539d183b1 |
| ec4eed38e4 |
| c8ab9afbc6 |
| 3125b77020 |
| de10e1dd1f |
| 21f7a44a53 |
| 98398e962f |
| 06cea4bb37 |
| ee631c21c4 |
| 50d437aed7 |
| dd5ea36636 |
| 7d6aace6d9 |
| 99a04df8b0 |
| ee3b6dd6ae |
| 708917bb9b |
| 5c6d4f4802 |
| 31cc3af1b4 |
| 6a3be55cee |
| 1e86acff55 |
| fabd6e9e3d |
.gitea/release-upload.ts (new file, 108 lines)
@@ -0,0 +1,108 @@
/**
 * Release asset uploader for Gitea
 * Streams large files without loading them into memory (bypasses curl's 2GB multipart limit)
 *
 * Usage: GITEA_TOKEN=xxx RELEASE_ID=123 GITEA_REPO=owner/repo tsx release-upload.ts
 */

import * as fs from 'fs';
import * as path from 'path';
import * as https from 'https';

const token = process.env.GITEA_TOKEN;
const releaseId = process.env.RELEASE_ID;
const repo = process.env.GITEA_REPO;

if (!token || !releaseId || !repo) {
  console.error('Missing required env vars: GITEA_TOKEN, RELEASE_ID, GITEA_REPO');
  process.exit(1);
}

const boundary = '----FormBoundary' + Date.now().toString(16);

async function uploadFile(filepath: string): Promise<void> {
  const filename = path.basename(filepath);
  const stats = fs.statSync(filepath);
  console.log(`Uploading ${filename} (${stats.size} bytes)...`);

  const header = Buffer.from(
    `--${boundary}\r\n` +
    `Content-Disposition: form-data; name="attachment"; filename="${filename}"\r\n` +
    `Content-Type: application/octet-stream\r\n\r\n`
  );
  const footer = Buffer.from(`\r\n--${boundary}--\r\n`);
  const contentLength = header.length + stats.size + footer.length;

  return new Promise((resolve, reject) => {
    const req = https.request({
      hostname: 'code.foss.global',
      path: `/api/v1/repos/${repo}/releases/${releaseId}/assets?name=${encodeURIComponent(filename)}`,
      method: 'POST',
      headers: {
        'Authorization': `token ${token}`,
        'Content-Type': `multipart/form-data; boundary=${boundary}`,
        'Content-Length': contentLength
      }
    }, (res) => {
      let data = '';
      res.on('data', chunk => data += chunk);
      res.on('end', () => {
        clearInterval(progressInterval);
        console.log(data);
        if (res.statusCode && res.statusCode >= 200 && res.statusCode < 300) {
          console.log(`✓ ${filename} uploaded successfully`);
          resolve();
        } else {
          reject(new Error(`Upload failed: ${res.statusCode} ${data}`));
        }
      });
    });

    req.on('error', (err) => {
      clearInterval(progressInterval);
      reject(err);
    });

    // Track upload progress
    let bytesWritten = header.length;
    const progressInterval = setInterval(() => {
      const percent = Math.round((bytesWritten / contentLength) * 100);
      console.log(` ${filename}: ${percent}% (${Math.round(bytesWritten / 1024 / 1024)}MB / ${Math.round(contentLength / 1024 / 1024)}MB)`);
    }, 10000);

    // Stream: write header, pipe file, write footer
    req.write(header);
    const stream = fs.createReadStream(filepath);
    stream.on('data', (chunk) => {
      bytesWritten += chunk.length;
    });
    stream.on('error', (err) => {
      clearInterval(progressInterval);
      reject(err);
    });
    stream.on('end', () => {
      bytesWritten += footer.length;
      req.write(footer);
      req.end();
    });
    stream.pipe(req, { end: false });
  });
}

async function main() {
  const distDir = 'dist';
  const files = fs.readdirSync(distDir)
    .map(f => path.join(distDir, f))
    .filter(f => fs.statSync(f).isFile());

  for (const file of files) {
    await uploadFile(file);
  }

  console.log('All assets uploaded successfully');
}

main().catch(err => {
  console.error(err);
  process.exit(1);
});
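The framing assembled in `uploadFile()` above is what makes the exact Content-Length known up front, so the file can be streamed instead of buffered. For reference, the body looks roughly like this on the wire (the filename and size below are made-up examples, not values from the workflow):

```ts
// Byte layout of the multipart body built by uploadFile() above:
//
//   header: `--${boundary}\r\n`
//           `Content-Disposition: form-data; name="attachment"; filename="ecoos.iso"\r\n`
//           `Content-Type: application/octet-stream\r\n\r\n`
//   file:   raw bytes piped from fs.createReadStream(filepath)   e.g. 3_500_000_000 bytes
//   footer: `\r\n--${boundary}--\r\n`
//
// Content-Length = header.length + stats.size + footer.length, so the request size is
// known before any file data is read and the body never has to fit in memory
// (which is what lets uploads exceed curl's 2GB multipart limit).
```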
.gitea/workflows/release.yml

@@ -32,15 +32,8 @@ jobs:
          npm version ${{ steps.version.outputs.version_number }} --no-git-tag-version --allow-same-version
          echo "export const VERSION = \"${{ steps.version.outputs.version_number }}\";" > ecoos_daemon/ts/version.ts

      - name: Build daemon binary
        run: pnpm run daemon:bundle

      - name: Build ISO with Docker
        run: |
          cp ecoos_daemon/bundle/eco-daemon isobuild/config/includes.chroot/opt/eco/bin/
          mkdir -p .nogit/iso
          docker build -t ecoos-builder -f isobuild/Dockerfile .
          docker run --rm --privileged -v $(pwd)/.nogit/iso:/output ecoos-builder
      - name: Build ISO
        run: pnpm run build

      - name: Prepare release assets
        run: |
@@ -82,16 +75,11 @@ jobs:

          echo "Created release with ID: $RELEASE_ID"

          # Upload assets
          for asset in dist/*; do
            filename=$(basename "$asset")
            echo "Uploading $filename..."
            curl -X POST -s \
              -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
              -H "Content-Type: application/octet-stream" \
              --data-binary "@$asset" \
              "https://code.foss.global/api/v1/repos/${{ gitea.repository }}/releases/$RELEASE_ID/assets?name=$filename"
          done
          # Upload assets using TypeScript (curl has 2GB multipart limit)
          GITEA_TOKEN="${{ secrets.GITHUB_TOKEN }}" \
          GITEA_REPO="${{ gitea.repository }}" \
          RELEASE_ID="$RELEASE_ID" \
          npx tsx .gitea/release-upload.ts

      - name: Cleanup old releases (keep 3 latest)
        run: |

.gitignore (vendored, 2 lines changed)
@@ -1,3 +1,5 @@
node_modules/

# Build outputs
isobuild/output/
*.iso

changelog.md (103 lines changed)
@@ -1,5 +1,108 @@
# Changelog

## 2026-01-13 - 0.7.0 - feat(isobuild)
add multi-architecture build and Raspberry Pi support in installer and build tooling

- Bump package version to 0.6.6 and update ecoos_daemon version export
- Installer: detect target architecture (amd64, arm64, rpi) and adapt partitioning, formatting, mounting, fstab and boot configuration accordingly
- Installer: add full Raspberry Pi support (MBR partitioning for Pi, boot partition layout, config.txt and cmdline.txt generation, copying kernel/initrd/DTBs/firmware) and conditional GRUB vs native Pi boot handling
- Add create-rpi-image.sh to generate Raspberry Pi bootable .img from squashfs or chroot
- Dockerfile: add TARGET_ARCH build arg and conditional package installation and build steps for amd64, arm64 and rpi flows (including producing ecoos.iso, ecoos-arm64.iso or ecoos-rpi.img)
- Add architecture-specific package lists (base-amd64, base-arm64, base-rpi) and update base.list.chroot to delegate EFI packages to arch-specific lists
- Build/test tooling: update docker-build.sh, isotest/run-test.sh and package.json scripts to support build:amd64|arm64|rpi and corresponding test targets; improve output naming and automation
- Installer and scripts: improved device name handling (nvme/mmcblk), boot partition naming, mount/unmount cleanup, and logging

## 2026-01-12 - 0.6.0 - feat(ecoos-daemon)
integrate a bundled daemon web UI with components, interfaces, styles, bundling config, and server support

- Adds a new TypeScript UI bundle package (@ecobridge/ecoos-daemon-ui) and build config (npmextra.json) to produce a bundled /app.js
- Implements web components: ecoos-app, ecoos-overview, ecoos-devices, ecoos-displays, ecoos-updates, ecoos-logs and shared styles/utilities
- Introduces TypeScript interfaces for status, display and updates under ts_interfaces for API contracts
- Server integration: UIServer now serves the bundled JS at /app.js and includes the app version in the HTML title
- Updates root package.json (adds dependencies key) and .gitignore to ignore node_modules

## 2026-01-10 - 0.5.0 - feat(ui,isotest)
Group disabled displays into a collapsible section and refactor display item rendering; start a background screenshot loop during isotest and improve test-run cleanup

- Refactored display rendering: introduced renderDisplayItem() and simplified updateDisplaysUI() to separate enabled/disabled displays
- Disabled displays are collapsed under a <details> summary showing count ("Disabled Displays (N)")
- Added a background screenshot loop in isotest/run-test.sh that runs screenshot.sh every 5 seconds and records SCREENSHOT_LOOP_PID
- Improved cleanup in isotest/run-test.sh to kill SCREENSHOT_LOOP_PID and ENABLE_PID if they are running

## 2026-01-10 - 0.4.15 - fix(isotest)
Improve robustness of SPICE display enabler: add logging, wait-for-port and URI parsing, retries and reconnection logic, stabilization delay before configuring, and verification/retry of monitor configuration

- Add immediate-flush logging helper for clearer background output
- Wait for SPICE TCP port (wait_for_port) and parse spice:// URIs before connecting
- Add stabilization delay before sending monitor config and track retry counts
- Add verify_and_retry to confirm configuration or retry up to configurable attempts
- Detect agent disconnects (VM reboots) and keep running to reconfigure on reconnect; add reconnect and periodic health checks

## 2026-01-09 - 0.4.1 - fix(release-upload)
clear progress timer on upload completion/error and add periodic upload progress reporting

- Clear the progress interval on response end and on stream/error to avoid leaking timers.
- Track bytesWritten (header + stream chunks + footer) to compute accurate progress percentages.
- Log upload progress (percent and MB) every 10 seconds for visibility.
- Handle stream errors by clearing the progress timer and rejecting with the error.

## 2026-01-09 - 0.4.0 - feat(displays)
add display detection and management (sway) with daemon APIs and UI controls

- Introduce DisplayInfo type in system-info.ts
- Add ProcessManager methods: getDisplays, setDisplayEnabled, setKioskDisplay (invoke swaymsg via runuser)
- Add daemon methods to expose getDisplays, setDisplayEnabled and setKioskDisplay with runtime/Wayland context and status checks
- Add UI server endpoints: GET /api/displays and POST /api/displays/{name}/(enable|disable|primary) and frontend UI to list and control displays (polling + buttons)
- Bump VERSION and package.json to 0.3.9
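For orientation, a client for the endpoints listed in this entry could look roughly like the sketch below. The base URL, the trimmed-down display shape and the helper names are assumptions for illustration; the real DisplayInfo interface and the routes are in system-info.ts and server.ts further down in this diff.

```ts
// Hypothetical client-side helpers for the display API described above.
const base = 'http://localhost:8080'; // assumed daemon UI address (config.uiPort)

async function listDisplays(): Promise<Array<{ name: string; active: boolean; isPrimary: boolean }>> {
  const res = await fetch(`${base}/api/displays`); // GET /api/displays -> { displays: DisplayInfo[] }
  const body = await res.json();
  return body.displays;
}

async function setDisplay(name: string, action: 'enable' | 'disable' | 'primary'): Promise<void> {
  // POST /api/displays/{name}/{action}, mirroring the route added in server.ts
  await fetch(`${base}/api/displays/${encodeURIComponent(name)}/${action}`, { method: 'POST' });
}
```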

## 2026-01-09 - 0.3.8 - fix(ci(release-workflow))
use npx tsx to run release-upload.ts in the Gitea release workflow instead of installing tsx globally

- Removed 'pnpm install -g tsx' to avoid global installs in CI
- Replaced direct 'tsx' invocation with 'npx tsx' to run .gitea/release-upload.ts
- Reduces CI image footprint and avoids unnecessary global package installation

## 2026-01-09 - 0.3.7 - fix(daemon)
Point updater at the correct repository API (code.foss.global ecobridge.xyz/eco_os) and bump project/daemon versions to 0.3.6

- Updated repo API URL in ecoos_daemon/ts/daemon/updater.ts from 'https://code.foss.global/api/v1/repos/ecobridge/eco-os/releases' to 'https://code.foss.global/api/v1/repos/ecobridge.xyz/eco_os/releases'
- Bumped daemon version in ecoos_daemon/ts/version.ts from 0.3.4 to 0.3.6
- Bumped package version in package.json from 0.3.5 to 0.3.6
- Included rebuilt daemon binary at isobuild/config/includes.chroot/opt/eco/bin/eco-daemon (bundle updated)

## 2026-01-09 - 0.3.5 - fix(ci)
add Gitea release asset uploader and switch release workflow to use it; bump package and daemon versions to 0.3.4

- Add .gitea/release-upload.ts: streams assets to Gitea to avoid curl's 2GB multipart limit
- Update CI workflow (.gitea/workflows/release.yml) to run the TypeScript uploader via tsx
- Bump package.json and ecoos_daemon/ts/version.ts to 0.3.4
- Update bundled eco-daemon binary in isobuild/config/includes.chroot/opt/eco/bin/

## 2026-01-09 - 0.3.2 - fix(release)
bump package and daemon to v0.3.1, add project README, and fix Gitea release upload flag

- package.json version updated from 0.3.0 to 0.3.1
- ecoos_daemon/ts/version.ts updated to export VERSION = "0.3.1"
- Added comprehensive readme.md documenting the project, development and release workflow
- Fix .gitea/workflows/release.yml: use curl -T for uploading release assets instead of --data-binary
- Updated bundled eco-daemon binary in isobuild/config/includes.chroot/opt/eco/bin/ (new build artifact)

## 2026-01-09 - 0.3.0 - feat(daemon)
add automatic update mechanism (Updater), switch to system journal logs, and expose update controls in the UI

- Introduce Updater class: fetches releases from Gitea, computes auto-upgrade eligibility, downloads daemon binary, replaces binary and restarts service.
- Integrate updater into EcoDaemon: new methods getUpdateInfo, checkForUpdates, upgradeToVersion; run initial update check on startup and periodic auto-upgrade checks (hourly).
- Replace serial console reader with a journalctl-based system journal reader; rename serialLogs → systemLogs and update related logic and limits.
- UI/server: add API endpoints /api/updates, /api/updates/check and /api/upgrade; add an Updates panel to show current version, available releases, auto-upgrade status, and client-side actions to check and trigger upgrades; poll update info periodically.
- Version bump to 0.2.2 (package.json and ecoos_daemon/ts/version.ts).
- Build/workflow changes: release workflow now runs build step (Build ISO) and package.json build script adjusted for CI and updated Docker build/run handling.

## 2026-01-09 - 0.2.1 - fix(ci)
use GitHub Actions workspace for docker volume and add listing of build output directory for debugging

- Replace $(pwd) with ${{ github.workspace }} in docker run volume mount to work correctly in GitHub Actions runner
- Add ls -la .nogit/iso/ to list generated artifacts and aid debugging of release workflow

## 2026-01-09 - 0.2.0 - feat(daemon)
add serial console reader and UI tab for serial logs; add version propagation and CI/release workflows

ecoos_daemon/npmextra.json (new file, 13 lines)
@@ -0,0 +1,13 @@
{
  "@git.zone/tsbundle": {
    "bundles": [
      {
        "from": "./ts_web/index.ts",
        "to": "./ts/daemon/bundledui.ts",
        "bundler": "esbuild",
        "outputMode": "base64ts",
        "production": true
      }
    ]
  }
}
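The generated bundle module itself is suppressed later in this diff (its lines are too long to display), but given the `"outputMode": "base64ts"` setting here and the way server.ts consumes it, ts/daemon/bundledui.ts presumably has a shape along these lines (a sketch, not the actual generated file):

```ts
// Assumed shape of the generated ecoos_daemon/ts/daemon/bundledui.ts
// (inferred from serveAppJs() in server.ts, which reads f.path and f.contentBase64).
export const files: Array<{ path: string; contentBase64: string }> = [
  { path: 'bundle.js', contentBase64: '...' }, // base64-encoded esbuild output of ts_web/index.ts
];
```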
ecoos_daemon/package.json (new file, 17 lines)
@@ -0,0 +1,17 @@
{
  "name": "@ecobridge/ecoos-daemon-ui",
  "version": "0.0.1",
  "private": true,
  "type": "module",
  "scripts": {
    "build": "tsbundle"
  },
  "dependencies": {
    "@design.estate/dees-catalog": "^3.34.1",
    "@design.estate/dees-element": "^2.0.0"
  },
  "devDependencies": {
    "@git.zone/tsbundle": "^2.8.0",
    "typescript": "^5.0.0"
  }
}
ecoos_daemon/pnpm-lock.yaml (generated, new file, 4528 lines)
File diff suppressed because it is too large
ecoos_daemon/ts/daemon/bundledui.ts (new file, 7 lines)
File diff suppressed because one or more lines are too long
ecoos_daemon/ts/daemon/index.ts

@@ -5,7 +5,8 @@
 */

import { ProcessManager } from './process-manager.ts';
import { SystemInfo } from './system-info.ts';
import { SystemInfo, type DisplayInfo } from './system-info.ts';
import { Updater } from './updater.ts';
import { UIServer } from '../ui/server.ts';
import { runCommand } from '../utils/command.ts';
import { VERSION } from '../version.ts';
@@ -28,12 +29,14 @@ export class EcoDaemon {
  private config: DaemonConfig;
  private processManager: ProcessManager;
  private systemInfo: SystemInfo;
  private updater: Updater;
  private uiServer: UIServer;
  private logs: string[] = [];
  private serialLogs: string[] = [];
  private systemLogs: string[] = [];
  private swayStatus: ServiceStatus = { state: 'stopped' };
  private chromiumStatus: ServiceStatus = { state: 'stopped' };
  private manualRestartUntil: number = 0; // Timestamp until which auto-restart is disabled
  private lastAutoUpgradeCheck: number = 0; // Timestamp of last auto-upgrade check

  constructor(config?: Partial<DaemonConfig>) {
    this.config = {
@@ -45,6 +48,7 @@ export class EcoDaemon {

    this.processManager = new ProcessManager(this.config.user);
    this.systemInfo = new SystemInfo();
    this.updater = new Updater((msg) => this.log(msg));
    this.uiServer = new UIServer(this.config.uiPort, this);
  }

@@ -64,8 +68,8 @@ export class EcoDaemon {
    return [...this.logs];
  }

  getSerialLogs(): string[] {
    return [...this.serialLogs];
  getSystemLogs(): string[] {
    return [...this.systemLogs];
  }

  async getStatus(): Promise<Record<string, unknown>> {
@@ -78,7 +82,7 @@ export class EcoDaemon {
      chromiumStatus: this.chromiumStatus,
      systemInfo,
      logs: this.logs.slice(-50),
      serialLogs: this.serialLogs.slice(-50),
      systemLogs: this.systemLogs.slice(-50),
    };
  }

@@ -131,6 +135,60 @@ export class EcoDaemon {
    }
  }

  async getUpdateInfo(): Promise<unknown> {
    return this.updater.getUpdateInfo();
  }

  async checkForUpdates(): Promise<void> {
    await this.updater.checkForUpdates();
  }

  async upgradeToVersion(version: string): Promise<{ success: boolean; message: string }> {
    return this.updater.upgradeToVersion(version);
  }

  async getDisplays(): Promise<DisplayInfo[]> {
    if (this.swayStatus.state !== 'running') {
      this.log(`[displays] Sway not running (state: ${this.swayStatus.state}), skipping display query`);
      return [];
    }
    const uid = await this.getUserUid();
    return this.processManager.getDisplays({
      runtimeDir: `/run/user/${uid}`,
      waylandDisplay: this.config.waylandDisplay,
    });
  }

  async setDisplayEnabled(name: string, enabled: boolean): Promise<{ success: boolean; message: string }> {
    if (this.swayStatus.state !== 'running') {
      return { success: false, message: 'Sway is not running' };
    }
    this.log(`${enabled ? 'Enabling' : 'Disabling'} display ${name}`);
    const uid = await this.getUserUid();
    const result = await this.processManager.setDisplayEnabled(
      { runtimeDir: `/run/user/${uid}`, waylandDisplay: this.config.waylandDisplay },
      name,
      enabled
    );
    return { success: result, message: result ? `Display ${name} ${enabled ? 'enabled' : 'disabled'}` : 'Failed' };
  }

  async setKioskDisplay(name: string): Promise<{ success: boolean; message: string }> {
    if (this.swayStatus.state !== 'running') {
      return { success: false, message: 'Sway is not running' };
    }
    if (this.chromiumStatus.state !== 'running') {
      return { success: false, message: 'Chromium is not running' };
    }
    this.log(`Moving kiosk to display ${name}`);
    const uid = await this.getUserUid();
    const result = await this.processManager.setKioskDisplay(
      { runtimeDir: `/run/user/${uid}`, waylandDisplay: this.config.waylandDisplay },
      name
    );
    return { success: result, message: result ? `Kiosk moved to ${name}` : 'Failed' };
  }

  async start(): Promise<void> {
    this.log('EcoOS Daemon starting...');

@@ -139,8 +197,11 @@ export class EcoDaemon {
    await this.uiServer.start();
    this.log('Management UI started successfully');

    // Start serial console reader in the background
    this.startSerialReader();
    // Start system journal reader in the background
    this.startJournalReader();

    // Check for updates on startup
    this.updater.checkForUpdates().catch((e) => this.log(`Initial update check failed: ${e}`));

    // Start the Sway/Chromium initialization in the background
    // This allows the UI server to remain responsive even if Sway fails
@@ -244,12 +305,13 @@ export class EcoDaemon {

  private async startSwayWithMode(mode: 'drm' | 'headless'): Promise<void> {
    const uid = await this.getUserUid();

    // Ensure XDG_RUNTIME_DIR exists
    const gid = await this.getUserGid();
    const runtimeDir = `/run/user/${uid}`;
    await runCommand('mkdir', ['-p', runtimeDir]);
    await runCommand('chown', [`${this.config.user}:${this.config.user}`, runtimeDir]);
    await runCommand('chmod', ['700', runtimeDir]);

    // Ensure XDG_RUNTIME_DIR exists as a proper tmpfs mount
    // This is critical - if Sway creates sockets before the tmpfs is mounted,
    // they become hidden when systemd-logind mounts the tmpfs later
    await this.ensureRuntimeDirTmpfs(runtimeDir, uid, gid);

    if (mode === 'drm') {
      this.log('Starting Sway with DRM backend (hardware rendering)');
@@ -313,12 +375,69 @@ export class EcoDaemon {
    return parseInt(result.stdout.trim(), 10);
  }

  private startSerialReader(): void {
  private async getUserGid(): Promise<number> {
    const result = await runCommand('id', ['-g', this.config.user]);
    if (!result.success) {
      throw new Error('Failed to get user GID: ' + result.stderr);
    }
    return parseInt(result.stdout.trim(), 10);
  }

  /**
   * Ensure the user runtime directory exists as a proper tmpfs mount.
   * This prevents race conditions where Sway creates sockets before
   * systemd-logind mounts the tmpfs, causing sockets to be hidden.
   */
  private async ensureRuntimeDirTmpfs(runtimeDir: string, uid: number, gid: number): Promise<void> {
    // Check if runtime dir is already a tmpfs mount
    const mountCheck = await runCommand('findmnt', ['-n', '-o', 'FSTYPE', runtimeDir]);
    if (mountCheck.success && mountCheck.stdout.trim() === 'tmpfs') {
      this.log(`Runtime directory ${runtimeDir} is already a tmpfs mount`);
      return;
    }

    // Create the directory if it doesn't exist
    await runCommand('mkdir', ['-p', runtimeDir]);

    // Mount a tmpfs if not already mounted
    this.log(`Mounting tmpfs on ${runtimeDir}`);
    const mountResult = await runCommand('mount', [
      '-t', 'tmpfs',
      '-o', `mode=700,uid=${uid},gid=${gid},size=100M`,
      'tmpfs',
      runtimeDir
    ]);

    if (!mountResult.success) {
      // If mount fails, maybe it's already mounted by systemd-logind
      // Double-check and continue if it's now a tmpfs
      const recheckMount = await runCommand('findmnt', ['-n', '-o', 'FSTYPE', runtimeDir]);
      if (recheckMount.success && recheckMount.stdout.trim() === 'tmpfs') {
        this.log(`Runtime directory ${runtimeDir} was mounted by another process`);
        return;
      }
      this.log(`Warning: Failed to mount tmpfs on ${runtimeDir}: ${mountResult.stderr}`);
      // Fall back to just ensuring the directory exists with correct permissions
      await runCommand('chown', [`${uid}:${gid}`, runtimeDir]);
      await runCommand('chmod', ['700', runtimeDir]);
    } else {
      this.log(`Successfully mounted tmpfs on ${runtimeDir}`);
    }
  }

  private startJournalReader(): void {
    (async () => {
      try {
        const file = await Deno.open('/dev/ttyS0', { read: true });
        this.log('Serial console reader started on /dev/ttyS0');
        const reader = file.readable.getReader();
        const cmd = new Deno.Command('journalctl', {
          args: ['-f', '--no-pager', '-n', '100', '-o', 'short-iso'],
          stdout: 'piped',
          stderr: 'piped',
        });

        const process = cmd.spawn();
        this.log('System journal reader started');

        const reader = process.stdout.getReader();
        const decoder = new TextDecoder();

        while (true) {
@@ -326,14 +445,14 @@ export class EcoDaemon {
          if (done) break;
          const text = decoder.decode(value);
          for (const line of text.split('\n').filter((l) => l.trim())) {
            this.serialLogs.push(line);
            if (this.serialLogs.length > 1000) {
              this.serialLogs = this.serialLogs.slice(-1000);
            this.systemLogs.push(line);
            if (this.systemLogs.length > 1000) {
              this.systemLogs = this.systemLogs.slice(-1000);
            }
          }
        }
      } catch (error) {
        this.log(`Serial reader not available: ${error}`);
        this.log(`Journal reader not available: ${error}`);
      }
    })();
  }
@@ -383,6 +502,16 @@ export class EcoDaemon {
    await this.tryStartSwayAndChromium();
  }
  }

    // Check for auto-upgrades every hour
    const now = Date.now();
    const oneHour = 60 * 60 * 1000;
    if (now - this.lastAutoUpgradeCheck > oneHour) {
      this.lastAutoUpgradeCheck = now;
      this.updater.checkAutoUpgrade().catch((e) =>
        this.log(`Auto-upgrade check failed: ${e}`)
      );
    }
  } catch (error) {
    this.log(`Error in monitoring loop: ${error}`);
  }

ecoos_daemon/ts/daemon/process-manager.ts

@@ -5,6 +5,7 @@
 */

import { runCommand } from '../utils/command.ts';
import type { DisplayInfo } from './system-info.ts';

export interface SwayConfig {
  runtimeDir: string;
@@ -27,11 +28,39 @@ export class ProcessManager {
  private user: string;
  private swayProcess: Deno.ChildProcess | null = null;
  private browserProcess: Deno.ChildProcess | null = null;
  private swaySocket: string | null = null;

  constructor(user: string) {
    this.user = user;
  }

  /**
   * Find the Sway IPC socket path in the runtime directory
   * Sway creates sockets like: sway-ipc.$UID.$PID.sock
   */
  async findSwaySocket(runtimeDir: string): Promise<string | null> {
    try {
      for await (const entry of Deno.readDir(runtimeDir)) {
        if (entry.name.startsWith('sway-ipc.') && entry.name.endsWith('.sock')) {
          const socketPath = `${runtimeDir}/${entry.name}`;
          console.log(`[sway] Found IPC socket: ${socketPath}`);
          return socketPath;
        }
      }
    } catch (error) {
      console.error(`[sway] Error finding socket: ${error}`);
    }
    return null;
  }

  getSwaySocket(): string | null {
    return this.swaySocket;
  }

  setSwaySocket(socket: string | null): void {
    this.swaySocket = socket;
  }

  /**
   * Generate Sway configuration content for kiosk mode
   */
@@ -102,9 +131,14 @@ for_window [app_id="chromium-browser"] fullscreen enable
    // Write sway config before starting
    const configPath = await this.writeSwayConfig(config);

    // Use a fixed socket path so we can reliably connect
    const swaySocketPath = `${config.runtimeDir}/sway-ipc.sock`;
    this.swaySocket = swaySocketPath;

    const env: Record<string, string> = {
      XDG_RUNTIME_DIR: config.runtimeDir,
      WLR_BACKENDS: config.backends,
      SWAYSOCK: swaySocketPath,
    };

    if (config.allowSoftwareRendering) {
@@ -145,9 +179,19 @@ for_window [app_id="chromium-browser"] fullscreen enable
   * Run a swaymsg command to control Sway
   */
  async swaymsg(config: { runtimeDir: string; waylandDisplay: string }, command: string): Promise<boolean> {
    // Find socket if not already found
    if (!this.swaySocket) {
      this.swaySocket = await this.findSwaySocket(config.runtimeDir);
    }

    if (!this.swaySocket) {
      console.error('[swaymsg] No Sway IPC socket found');
      return false;
    }

    const env: Record<string, string> = {
      XDG_RUNTIME_DIR: config.runtimeDir,
      WAYLAND_DISPLAY: config.waylandDisplay,
      SWAYSOCK: this.swaySocket,
    };

    const envString = Object.entries(env)
@@ -291,6 +335,7 @@ for_window [app_id="chromium-browser"] fullscreen enable
      // Process may already be dead
    }
    this.swayProcess = null;
    this.swaySocket = null; // Reset socket so we find new one on restart
  }
}

@@ -306,6 +351,112 @@ for_window [app_id="chromium-browser"] fullscreen enable
  }
}

  /**
   * Get connected displays via swaymsg
   */
  async getDisplays(config: { runtimeDir: string; waylandDisplay: string }): Promise<DisplayInfo[]> {
    // Find socket if not already found
    if (!this.swaySocket) {
      this.swaySocket = await this.findSwaySocket(config.runtimeDir);
    }

    if (!this.swaySocket) {
      console.error('[displays] No Sway IPC socket found');
      return [];
    }

    const env: Record<string, string> = {
      XDG_RUNTIME_DIR: config.runtimeDir,
      SWAYSOCK: this.swaySocket,
    };

    const envString = Object.entries(env)
      .map(([k, v]) => `${k}=${v}`)
      .join(' ');

    const cmd = new Deno.Command('runuser', {
      args: ['-u', this.user, '--', 'sh', '-c', `${envString} swaymsg -t get_outputs`],
      stdout: 'piped',
      stderr: 'piped',
    });

    try {
      const result = await cmd.output();
      if (!result.success) {
        const stderr = new TextDecoder().decode(result.stderr);
        console.error(`[displays] Failed to get outputs: ${stderr}`);
        return [];
      }

      const outputs = JSON.parse(new TextDecoder().decode(result.stdout));
      return outputs.map((output: {
        name: string;
        make: string;
        model: string;
        serial: string;
        active: boolean;
        current_mode?: { width: number; height: number; refresh: number };
        focused: boolean;
      }) => ({
        name: output.name,
        make: output.make || 'Unknown',
        model: output.model || 'Unknown',
        serial: output.serial || '',
        active: output.active,
        width: output.current_mode?.width || 0,
        height: output.current_mode?.height || 0,
        refreshRate: Math.round((output.current_mode?.refresh || 0) / 1000),
        isPrimary: output.focused,
      }));
    } catch (error) {
      console.error(`[displays] Error: ${error}`);
      return [];
    }
  }

  /**
   * Enable or disable a display
   */
  async setDisplayEnabled(
    config: { runtimeDir: string; waylandDisplay: string },
    name: string,
    enabled: boolean
  ): Promise<boolean> {
    if (enabled) {
      console.log(`[displays] Enabling ${name}`);
      // First try to set resolution, then enable
      await this.swaymsg(config, `output ${name} resolution 1920x1080`);
      return this.swaymsg(config, `output ${name} enable`);
    } else {
      console.log(`[displays] Disabling ${name}`);
      return this.swaymsg(config, `output ${name} disable`);
    }
  }

  /**
   * Move the kiosk browser to a specific display
   */
  async setKioskDisplay(
    config: { runtimeDir: string; waylandDisplay: string },
    name: string
  ): Promise<boolean> {
    console.log(`[displays] Setting primary display to ${name}`);

    // Focus the chromium window and move it to the target output
    const commands = [
      `[app_id="chromium-browser"] focus`,
      `move container to output ${name}`,
      `focus output ${name}`,
      `[app_id="chromium-browser"] fullscreen enable`,
    ];

    for (const cmd of commands) {
      await this.swaymsg(config, cmd);
    }

    return true;
  }

  private async pipeOutput(
    process: Deno.ChildProcess,
    name: string
@@ -337,6 +488,7 @@ for_window [app_id="chromium-browser"] fullscreen enable
    console.log(`[${name}] Process exited with code ${status.code}`);
    if (name === 'sway' && this.swayProcess === process) {
      this.swayProcess = null;
      this.swaySocket = null; // Reset socket so we find new one on restart
    } else if (name === 'chromium' && this.browserProcess === process) {
      this.browserProcess = null;
    }

ecoos_daemon/ts/daemon/system-info.ts

@@ -52,6 +52,18 @@ export interface AudioDevice {
  isDefault: boolean;
}

export interface DisplayInfo {
  name: string; // e.g., "DP-1", "HDMI-A-1", "HEADLESS-1"
  make: string; // Manufacturer
  model: string; // Model name
  serial: string; // Serial number
  active: boolean; // Currently enabled
  width: number; // Resolution width
  height: number; // Resolution height
  refreshRate: number; // Hz
  isPrimary: boolean; // Has the focused window (kiosk)
}

export interface SystemInfoData {
  hostname: string;
  cpu: CpuInfo;
@@ -66,6 +78,8 @@ export interface SystemInfoData {
}

export class SystemInfo {
  private lastCpuStats: { total: number; idle: number } | null = null;

  async getInfo(): Promise<SystemInfoData> {
    const [hostname, cpu, memory, disks, network, gpu, uptime, inputDevices, speakers, microphones] =
      await Promise.all([
@@ -100,13 +114,23 @@ export class SystemInfo {
    const modelMatch = cpuinfo.match(/model name\s*:\s*(.+)/);
    const coreMatches = cpuinfo.match(/processor\s*:/g);

    // Get CPU usage from /proc/stat
    // Get CPU usage from /proc/stat (delta between readings)
    const stat = await Deno.readTextFile('/proc/stat');
    const cpuLine = stat.split('\n')[0];
    const values = cpuLine.split(/\s+/).slice(1).map(Number);
    const total = values.reduce((a, b) => a + b, 0);
    const idle = values[3];
    const usage = ((total - idle) / total) * 100;
    const idle = values[3] + values[4]; // idle + iowait

    let usage = 0;
    if (this.lastCpuStats) {
      const totalDelta = total - this.lastCpuStats.total;
      const idleDelta = idle - this.lastCpuStats.idle;
      if (totalDelta > 0) {
        usage = ((totalDelta - idleDelta) / totalDelta) * 100;
      }
    }

    this.lastCpuStats = { total, idle };

    return {
      model: modelMatch ? modelMatch[1] : 'Unknown',

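The switch to delta-based CPU sampling above means the first reading reports 0% and every later reading reflects activity since the previous call. A worked example with made-up jiffy counts:

```ts
// Two hypothetical /proc/stat samples (numbers invented for illustration).
const prev = { total: 100_000, idle: 80_000 }; // stored in lastCpuStats on the first call
const curr = { total: 100_400, idle: 80_240 }; // values read on the next call

const totalDelta = curr.total - prev.total; // 400 jiffies elapsed
const idleDelta = curr.idle - prev.idle;    // 240 of them spent idle + iowait
const usage = ((totalDelta - idleDelta) / totalDelta) * 100;
console.log(usage); // 40 (% busy since the previous sample)
```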
ecoos_daemon/ts/daemon/updater.ts (new file, 270 lines)
@@ -0,0 +1,270 @@
/**
 * Updater
 *
 * Handles checking for updates, downloading new versions, and performing upgrades
 */

import { VERSION } from '../version.ts';
import { runCommand } from '../utils/command.ts';

export interface Release {
  version: string;
  tagName: string;
  publishedAt: Date;
  downloadUrl: string;
  isCurrent: boolean;
  isNewer: boolean;
  ageHours: number;
}

export interface AutoUpgradeStatus {
  enabled: boolean;
  targetVersion: string | null;
  scheduledIn: string | null;
  waitingForStability: boolean;
}

export interface UpdateInfo {
  currentVersion: string;
  releases: Release[];
  autoUpgrade: AutoUpgradeStatus;
  lastCheck: string | null;
}

interface GiteaRelease {
  id: number;
  tag_name: string;
  name: string;
  body: string;
  published_at: string;
  assets: GiteaAsset[];
}

interface GiteaAsset {
  id: number;
  name: string;
  browser_download_url: string;
  size: number;
}

export class Updater {
  private repoApiUrl = 'https://code.foss.global/api/v1/repos/ecobridge.xyz/eco_os/releases';
  private binaryPath = '/opt/eco/bin/eco-daemon';
  private releases: Release[] = [];
  private lastCheck: Date | null = null;
  private logFn: (msg: string) => void;

  constructor(logFn: (msg: string) => void) {
    this.logFn = logFn;
  }

  private log(message: string): void {
    this.logFn(`[Updater] ${message}`);
  }

  /**
   * Compare semantic versions
   * Returns: -1 if a < b, 0 if a == b, 1 if a > b
   */
  private compareVersions(a: string, b: string): number {
    const partsA = a.replace(/^v/, '').split('.').map(Number);
    const partsB = b.replace(/^v/, '').split('.').map(Number);

    for (let i = 0; i < Math.max(partsA.length, partsB.length); i++) {
      const numA = partsA[i] || 0;
      const numB = partsB[i] || 0;
      if (numA < numB) return -1;
      if (numA > numB) return 1;
    }
    return 0;
  }

  /**
   * Fetch available releases from Gitea
   */
  async checkForUpdates(): Promise<Release[]> {
    this.log('Checking for updates...');

    try {
      const response = await fetch(this.repoApiUrl);
      if (!response.ok) {
        throw new Error(`HTTP ${response.status}: ${response.statusText}`);
      }

      const giteaReleases: GiteaRelease[] = await response.json();
      const currentVersion = VERSION;
      const now = new Date();

      this.releases = giteaReleases
        .filter((r) => r.tag_name.startsWith('v'))
        .map((r) => {
          const version = r.tag_name.replace(/^v/, '');
          const publishedAt = new Date(r.published_at);
          const ageMs = now.getTime() - publishedAt.getTime();
          const ageHours = ageMs / (1000 * 60 * 60);

          // Find the daemon binary asset
          const daemonAsset = r.assets.find((a) =>
            a.name.includes('eco-daemon')
          );

          return {
            version,
            tagName: r.tag_name,
            publishedAt,
            downloadUrl: daemonAsset?.browser_download_url || '',
            isCurrent: version === currentVersion,
            isNewer: this.compareVersions(version, currentVersion) > 0,
            ageHours: Math.round(ageHours * 10) / 10,
          };
        })
        .filter((r) => r.downloadUrl) // Only include releases with daemon binary
        .sort((a, b) => this.compareVersions(b.version, a.version)); // Newest first

      this.lastCheck = now;
      this.log(`Found ${this.releases.length} releases, ${this.releases.filter((r) => r.isNewer).length} newer than current`);

      return this.releases;
    } catch (error) {
      this.log(`Failed to check for updates: ${error}`);
      return this.releases;
    }
  }

  /**
   * Get cached releases (call checkForUpdates first)
   */
  getReleases(): Release[] {
    return this.releases;
  }

  /**
   * Determine if auto-upgrade should happen and to which version
   */
  getAutoUpgradeStatus(): AutoUpgradeStatus {
    const newerReleases = this.releases.filter((r) => r.isNewer);

    if (newerReleases.length === 0) {
      return {
        enabled: true,
        targetVersion: null,
        scheduledIn: null,
        waitingForStability: false,
      };
    }

    // Find the latest newer release
    const latest = newerReleases[0];
    const hoursUntilUpgrade = 24 - latest.ageHours;

    if (hoursUntilUpgrade <= 0) {
      // Ready to upgrade now
      return {
        enabled: true,
        targetVersion: latest.version,
        scheduledIn: 'now',
        waitingForStability: false,
      };
    }

    // Still waiting for stability period
    const hours = Math.floor(hoursUntilUpgrade);
    const minutes = Math.round((hoursUntilUpgrade - hours) * 60);
    const scheduledIn = hours > 0 ? `${hours}h ${minutes}m` : `${minutes}m`;

    return {
      enabled: true,
      targetVersion: latest.version,
      scheduledIn,
      waitingForStability: true,
    };
  }

  /**
   * Get full update info for API response
   */
  getUpdateInfo(): UpdateInfo {
    return {
      currentVersion: VERSION,
      releases: this.releases,
      autoUpgrade: this.getAutoUpgradeStatus(),
      lastCheck: this.lastCheck?.toISOString() || null,
    };
  }

  /**
   * Download and install a specific version
   */
  async upgradeToVersion(version: string): Promise<{ success: boolean; message: string }> {
    const release = this.releases.find((r) => r.version === version);

    if (!release) {
      return { success: false, message: `Version ${version} not found` };
    }

    if (release.isCurrent) {
      return { success: false, message: `Already running version ${version}` };
    }

    this.log(`Starting upgrade to version ${version}...`);

    try {
      // Download new binary
      const tempPath = '/tmp/eco-daemon-new';
      this.log(`Downloading from ${release.downloadUrl}...`);

      const response = await fetch(release.downloadUrl);
      if (!response.ok) {
        throw new Error(`Download failed: HTTP ${response.status}`);
      }

      const data = await response.arrayBuffer();
      await Deno.writeFile(tempPath, new Uint8Array(data));

      // Verify download
      const stat = await Deno.stat(tempPath);
      if (stat.size < 1000000) {
        // Daemon should be at least 1MB
        throw new Error(`Downloaded file too small: ${stat.size} bytes`);
      }
      this.log(`Downloaded ${stat.size} bytes`);

      // Make executable
      await Deno.chmod(tempPath, 0o755);

      // Replace binary
      this.log('Replacing binary...');
      await runCommand('mv', [tempPath, this.binaryPath]);
      await Deno.chmod(this.binaryPath, 0o755);

      // Restart daemon via systemd
      this.log('Restarting daemon...');
      // Use spawn to avoid waiting for the restart
      const restartCmd = new Deno.Command('systemctl', {
        args: ['restart', 'eco-daemon'],
        stdout: 'null',
        stderr: 'null',
      });
      restartCmd.spawn();

      return { success: true, message: `Upgrading to v${version}...` };
    } catch (error) {
      this.log(`Upgrade failed: ${error}`);
      return { success: false, message: String(error) };
    }
  }

  /**
   * Check and perform auto-upgrade if conditions are met
   */
  async checkAutoUpgrade(): Promise<void> {
    await this.checkForUpdates();

    const status = this.getAutoUpgradeStatus();

    if (status.targetVersion && status.scheduledIn === 'now') {
      this.log(`Auto-upgrading to version ${status.targetVersion}...`);
      await this.upgradeToVersion(status.targetVersion);
    }
  }
}
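As a quick sanity check of the comparison and stability-window logic above (versions and ages here are hypothetical):

```ts
// compareVersions('0.7.0', '0.6.6') ->  1  (newer, so isNewer === true)
// compareVersions('0.6.6', '0.6.6') ->  0  (current release)
// compareVersions('0.6.0', '0.6.6') -> -1  (older)
//
// A newer release with ageHours = 5 gives hoursUntilUpgrade = 24 - 5 = 19, so
// getAutoUpgradeStatus() returns { waitingForStability: true, scheduledIn: '19h 0m' };
// once ageHours reaches 24, scheduledIn becomes 'now' and checkAutoUpgrade()
// calls upgradeToVersion() with that release.
```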
ecoos_daemon/ts/ui/server.ts

@@ -6,6 +6,7 @@

import type { EcoDaemon } from '../daemon/index.ts';
import { VERSION } from '../version.ts';
import { files as bundledFiles } from '../daemon/bundledui.ts';

export class UIServer {
  private port: number;
@@ -43,6 +44,11 @@ export class UIServer {
      return this.serveHtml();
    }

    // Bundled JavaScript
    if (path === '/app.js') {
      return this.serveAppJs();
    }

    return new Response('Not Found', { status: 404 });
  }

@@ -104,532 +110,101 @@ export class UIServer {
      return new Response(JSON.stringify(result), { headers });
    }

    if (path === '/api/updates') {
      const updates = await this.daemon.getUpdateInfo();
      return new Response(JSON.stringify(updates), { headers });
    }

    if (path === '/api/updates/check' && req.method === 'POST') {
      await this.daemon.checkForUpdates();
      const updates = await this.daemon.getUpdateInfo();
      return new Response(JSON.stringify(updates), { headers });
    }

    if (path === '/api/upgrade' && req.method === 'POST') {
      try {
        const body = await req.json();
        const version = body.version;
        if (!version) {
          return new Response(JSON.stringify({ success: false, message: 'Version required' }), { headers });
        }
        const result = await this.daemon.upgradeToVersion(version);
        return new Response(JSON.stringify(result), { headers });
      } catch (error) {
        return new Response(JSON.stringify({ success: false, message: String(error) }), { headers });
      }
    }

    if (path === '/api/displays') {
      const displays = await this.daemon.getDisplays();
      return new Response(JSON.stringify({ displays }), { headers });
    }

    // Display control endpoints: /api/displays/{name}/{action}
    const displayMatch = path.match(/^\/api\/displays\/([^/]+)\/(enable|disable|primary)$/);
    if (displayMatch && req.method === 'POST') {
      const name = decodeURIComponent(displayMatch[1]);
      const action = displayMatch[2];

      let result;
      if (action === 'enable') {
        result = await this.daemon.setDisplayEnabled(name, true);
      } else if (action === 'disable') {
        result = await this.daemon.setDisplayEnabled(name, false);
      } else if (action === 'primary') {
        result = await this.daemon.setKioskDisplay(name);
      }
      return new Response(JSON.stringify(result), { headers });
    }

    return new Response(JSON.stringify({ error: 'Not Found' }), {
      status: 404,
      headers,
    });
  }

  private serveAppJs(): Response {
    // Find the bundle.js file in the bundled content
    const bundleFile = bundledFiles.find(f => f.path === 'bundle.js');
    if (!bundleFile) {
      return new Response('Bundle not found', { status: 500 });
    }

    // Decode base64 content
    const jsContent = atob(bundleFile.contentBase64);

    return new Response(jsContent, {
      headers: {
        'Content-Type': 'application/javascript; charset=utf-8',
        'Cache-Control': 'no-cache',
      },
    });
  }

  private serveHtml(): Response {
    const html = `<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>EcoOS Management</title>
  <title>EcoOS Management v${VERSION}</title>
  <style>
    :root {
      --bg: #0a0a0a;
      --card: #141414;
      --border: #2a2a2a;
      --text: #e0e0e0;
      --text-dim: #888;
      --accent: #3b82f6;
      --success: #22c55e;
      --warning: #f59e0b;
      --error: #ef4444;
    }
    * { box-sizing: border-box; margin: 0; padding: 0; }
    body {
      font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
      background: var(--bg);
      color: var(--text);
      min-height: 100vh;
      padding: 20px;
    }
    .container { max-width: 1200px; margin: 0 auto; }
    .header {
      display: flex;
      justify-content: space-between;
      align-items: center;
      margin-bottom: 20px;
    }
    h1 { font-size: 24px; margin: 0; }
    .clock {
      font-size: 18px;
      font-weight: 500;
      color: var(--text);
      font-variant-numeric: tabular-nums;
    }
    .grid {
      display: grid;
      grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));
      gap: 20px;
    }
    .card {
      background: var(--card);
      border: 1px solid var(--border);
      border-radius: 8px;
      padding: 16px;
    }
    .card h2 {
      font-size: 14px;
      text-transform: uppercase;
      color: var(--text-dim);
      margin-bottom: 12px;
    }
    .stat { margin-bottom: 8px; }
    .stat-label { color: var(--text-dim); font-size: 12px; }
    .stat-value { font-size: 18px; font-weight: 600; }
    .progress-bar {
      background: var(--border);
      height: 6px;
      border-radius: 3px;
      margin-top: 4px;
    html, body {
      width: 100%;
      height: 100%;
      overflow: hidden;
    }
    .progress-fill {
    ecoos-app {
      display: block;
      width: 100%;
      height: 100%;
      background: var(--accent);
      border-radius: 3px;
      transition: width 0.3s;
    }
    .logs {
      height: 300px;
      overflow-y: auto;
      font-family: 'SF Mono', Monaco, monospace;
      font-size: 12px;
      line-height: 1.6;
      background: #0d0d0d;
      padding: 12px;
      border-radius: 4px;
    }
    .log-entry { white-space: pre-wrap; word-break: break-all; }
    .status-dot {
      display: inline-block;
      width: 8px;
      height: 8px;
      border-radius: 50%;
      margin-right: 8px;
    }
    .status-dot.running { background: var(--success); }
    .status-dot.stopped { background: var(--error); }
    .network-item {
      display: flex;
      justify-content: space-between;
      padding: 8px 0;
      border-bottom: 1px solid var(--border);
    }
    .network-item:last-child { border-bottom: none; }
    .btn {
      padding: 10px 16px;
      border: none;
      border-radius: 6px;
      font-size: 14px;
      font-weight: 500;
      cursor: pointer;
      transition: opacity 0.2s;
      margin-right: 8px;
      margin-bottom: 8px;
    }
    .btn:hover { opacity: 0.85; }
    .btn:disabled { opacity: 0.5; cursor: not-allowed; }
    .btn-primary { background: var(--accent); color: white; }
    .btn-danger { background: var(--error); color: white; }
    .device-item {
      display: flex;
      justify-content: space-between;
      align-items: center;
      padding: 8px 0;
      border-bottom: 1px solid var(--border);
    }
    .device-item:last-child { border-bottom: none; }
    .device-name { font-weight: 500; }
    .device-type {
      font-size: 11px;
      padding: 2px 6px;
      border-radius: 4px;
      background: var(--border);
      color: var(--text-dim);
    }
    .device-default {
      font-size: 11px;
      padding: 2px 6px;
      border-radius: 4px;
      background: var(--success);
      color: white;
    }
    .tabs {
      display: flex;
      border-bottom: 1px solid var(--border);
      margin-bottom: 12px;
    }
    .tab {
      padding: 8px 16px;
      cursor: pointer;
      color: var(--text-dim);
      border-bottom: 2px solid transparent;
      margin-bottom: -1px;
      font-size: 12px;
      text-transform: uppercase;
    }
    .tab:hover { color: var(--text); }
    .tab.active {
      color: var(--accent);
      border-bottom-color: var(--accent);
    }
    .tab-content { display: none; }
    .tab-content.active { display: block; }
  </style>
</head>
<body>
  <div class="container">
    <div class="header">
      <h1>EcoOS Management <span style="font-size: 12px; color: var(--text-dim); font-weight: normal;">v${VERSION}</span></h1>
      <div class="clock" id="clock"></div>
    </div>
    <div class="grid">
      <div class="card">
        <h2>Services</h2>
        <div class="stat">
          <span class="status-dot" id="sway-status"></span>
          Sway Compositor
        </div>
        <div class="stat">
          <span class="status-dot" id="chromium-status"></span>
          Chromium Browser
        </div>
      </div>
      <div class="card">
        <h2>CPU</h2>
        <div class="stat">
          <div class="stat-label">Model</div>
          <div class="stat-value" id="cpu-model">-</div>
        </div>
        <div class="stat">
          <div class="stat-label">Cores</div>
          <div class="stat-value" id="cpu-cores">-</div>
        </div>
        <div class="stat">
          <div class="stat-label">Usage</div>
          <div class="stat-value" id="cpu-usage">-</div>
          <div class="progress-bar"><div class="progress-fill" id="cpu-bar"></div></div>
        </div>
      </div>
      <div class="card">
        <h2>Memory</h2>
        <div class="stat">
          <div class="stat-label">Used / Total</div>
          <div class="stat-value" id="memory-usage">-</div>
          <div class="progress-bar"><div class="progress-fill" id="memory-bar"></div></div>
        </div>
      </div>
      <div class="card">
        <h2>Network</h2>
        <div id="network-list"></div>
      </div>
      <div class="card">
        <h2>Disks</h2>
        <div id="disk-list"></div>
      </div>
      <div class="card">
        <h2>System</h2>
        <div class="stat">
          <div class="stat-label">Hostname</div>
          <div class="stat-value" id="hostname">-</div>
        </div>
        <div class="stat">
          <div class="stat-label">Uptime</div>
          <div class="stat-value" id="uptime">-</div>
        </div>
        <div class="stat">
          <div class="stat-label">GPU</div>
          <div class="stat-value" id="gpu">-</div>
        </div>
      </div>
      <div class="card">
        <h2>Controls</h2>
        <button class="btn btn-primary" id="btn-restart-chromium" onclick="restartChromium()">
          Restart Browser
        </button>
        <button class="btn btn-danger" id="btn-reboot" onclick="rebootSystem()">
          Reboot System
        </button>
        <div id="control-status" style="margin-top: 8px; font-size: 12px; color: var(--text-dim);"></div>
      </div>
      <div class="card">
        <h2>Input Devices</h2>
        <div id="input-devices-list"></div>
      </div>
      <div class="card">
        <h2>Speakers</h2>
        <div id="speakers-list"></div>
      </div>
      <div class="card">
        <h2>Microphones</h2>
        <div id="microphones-list"></div>
      </div>
      <div class="card" style="grid-column: 1 / -1;">
        <div class="tabs">
          <div class="tab active" onclick="switchTab('daemon')">Daemon Logs</div>
          <div class="tab" onclick="switchTab('serial')">Serial Console</div>
        </div>
        <div id="daemon-tab" class="tab-content active">
          <div class="logs" id="logs"></div>
        </div>
        <div id="serial-tab" class="tab-content">
          <div class="logs" id="serial-logs"></div>
        </div>
      </div>
    </div>
  </div>
  <script>
    function formatBytes(bytes) {
      if (bytes === 0) return '0 B';
      const k = 1024;
      const sizes = ['B', 'KB', 'MB', 'GB', 'TB'];
      const i = Math.floor(Math.log(bytes) / Math.log(k));
      return parseFloat((bytes / Math.pow(k, i)).toFixed(1)) + ' ' + sizes[i];
    }

    function formatUptime(seconds) {
      const days = Math.floor(seconds / 86400);
      const hours = Math.floor((seconds % 86400) / 3600);
      const mins = Math.floor((seconds % 3600) / 60);
      if (days > 0) return days + 'd ' + hours + 'h ' + mins + 'm';
      if (hours > 0) return hours + 'h ' + mins + 'm';
      return mins + 'm';
    }

    let initialVersion = null;

    function updateStatus(data) {
      // Check for version change and reload if needed
      if (data.version) {
        if (initialVersion === null) {
          initialVersion = data.version;
        } else if (data.version !== initialVersion) {
          console.log('Server version changed from ' + initialVersion + ' to ' + data.version + ', reloading...');
          location.reload();
          return;
        }
      }

      // Services
      document.getElementById('sway-status').className =
        'status-dot ' + (data.sway ? 'running' : 'stopped');
      document.getElementById('chromium-status').className =
        'status-dot ' + (data.chromium ? 'running' : 'stopped');

      // System info
      if (data.systemInfo) {
        const info = data.systemInfo;

        // CPU
        if (info.cpu) {
          document.getElementById('cpu-model').textContent = info.cpu.model;
          document.getElementById('cpu-cores').textContent = info.cpu.cores;
          document.getElementById('cpu-usage').textContent = info.cpu.usage + '%';
          document.getElementById('cpu-bar').style.width = info.cpu.usage + '%';
        }

        // Memory
        if (info.memory) {
          document.getElementById('memory-usage').textContent =
            formatBytes(info.memory.used) + ' / ' + formatBytes(info.memory.total);
          document.getElementById('memory-bar').style.width = info.memory.usagePercent + '%';
        }

        // Network
        if (info.network) {
          const list = document.getElementById('network-list');
          list.innerHTML = info.network.map(n =>
            '<div class="network-item"><span>' + n.name + '</span><span>' + n.ip + '</span></div>'
          ).join('');
        }

        // Disks
        if (info.disks) {
          const list = document.getElementById('disk-list');
          list.innerHTML = info.disks.map(d =>
            '<div class="stat" style="margin-bottom: 12px;">' +
            '<div class="stat-label">' + d.mountpoint + '</div>' +
            '<div class="stat-value">' + formatBytes(d.used) + ' / ' + formatBytes(d.total) + '</div>' +
            '<div class="progress-bar"><div class="progress-fill" style="width: ' + d.usagePercent + '%"></div></div>' +
            '</div>'
          ).join('');
        }

        // Hostname
        if (info.hostname) {
          document.getElementById('hostname').textContent = info.hostname;
        }

        // Uptime
        if (info.uptime !== undefined) {
          document.getElementById('uptime').textContent = formatUptime(info.uptime);
        }

        // GPU
        if (info.gpu && info.gpu.length > 0) {
          document.getElementById('gpu').textContent = info.gpu.map(g => g.name).join(', ');
        } else {
          document.getElementById('gpu').textContent = 'None detected';
        }

        // Input Devices
        if (info.inputDevices) {
          const list = document.getElementById('input-devices-list');
          if (info.inputDevices.length === 0) {
            list.innerHTML = '<div style="color: var(--text-dim);">No input devices detected</div>';
          } else {
            list.innerHTML = info.inputDevices.map(d =>
              '<div class="device-item">' +
              '<span class="device-name">' + d.name + '</span>' +
              '<span class="device-type">' + d.type + '</span>' +
              '</div>'
            ).join('');
          }
        }

        // Speakers
        if (info.speakers) {
          const list = document.getElementById('speakers-list');
          if (info.speakers.length === 0) {
            list.innerHTML = '<div style="color: var(--text-dim);">No speakers detected</div>';
          } else {
            list.innerHTML = info.speakers.map(s =>
              '<div class="device-item">' +
              '<span class="device-name">' + s.description + '</span>' +
              (s.isDefault ? '<span class="device-default">Default</span>' : '') +
              '</div>'
            ).join('');
          }
        }

        // Microphones
        if (info.microphones) {
|
||||
const list = document.getElementById('microphones-list');
|
||||
if (info.microphones.length === 0) {
|
||||
list.innerHTML = '<div style="color: var(--text-dim);">No microphones detected</div>';
|
||||
} else {
|
||||
list.innerHTML = info.microphones.map(m =>
|
||||
'<div class="device-item">' +
|
||||
'<span class="device-name">' + m.description + '</span>' +
|
||||
(m.isDefault ? '<span class="device-default">Default</span>' : '') +
|
||||
'</div>'
|
||||
).join('');
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Daemon Logs
|
||||
if (data.logs) {
|
||||
const logsEl = document.getElementById('logs');
|
||||
logsEl.innerHTML = data.logs.map(l =>
|
||||
'<div class="log-entry">' + l + '</div>'
|
||||
).join('');
|
||||
logsEl.scrollTop = logsEl.scrollHeight;
|
||||
}
|
||||
|
||||
// Serial Logs
|
||||
if (data.serialLogs) {
|
||||
const serialEl = document.getElementById('serial-logs');
|
||||
if (data.serialLogs.length === 0) {
|
||||
serialEl.innerHTML = '<div style="color: var(--text-dim);">No serial data available</div>';
|
||||
} else {
|
||||
serialEl.innerHTML = data.serialLogs.map(l =>
|
||||
'<div class="log-entry">' + l + '</div>'
|
||||
).join('');
|
||||
serialEl.scrollTop = serialEl.scrollHeight;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function switchTab(tab) {
|
||||
document.querySelectorAll('.tab').forEach(t => t.classList.remove('active'));
|
||||
document.querySelectorAll('.tab-content').forEach(c => c.classList.remove('active'));
|
||||
if (tab === 'daemon') {
|
||||
document.querySelector('.tab:first-child').classList.add('active');
|
||||
document.getElementById('daemon-tab').classList.add('active');
|
||||
} else {
|
||||
document.querySelector('.tab:last-child').classList.add('active');
|
||||
document.getElementById('serial-tab').classList.add('active');
|
||||
}
|
||||
}
|
||||
|
||||
function setControlStatus(msg, isError) {
|
||||
const el = document.getElementById('control-status');
|
||||
el.textContent = msg;
|
||||
el.style.color = isError ? 'var(--error)' : 'var(--success)';
|
||||
}
|
||||
|
||||
function restartChromium() {
|
||||
const btn = document.getElementById('btn-restart-chromium');
|
||||
btn.disabled = true;
|
||||
setControlStatus('Restarting browser...', false);
|
||||
|
||||
fetch('/api/restart-chromium', { method: 'POST' })
|
||||
.then(r => r.json())
|
||||
.then(result => {
|
||||
setControlStatus(result.message, !result.success);
|
||||
btn.disabled = false;
|
||||
})
|
||||
.catch(err => {
|
||||
setControlStatus('Error: ' + err, true);
|
||||
btn.disabled = false;
|
||||
});
|
||||
}
|
||||
|
||||
function rebootSystem() {
|
||||
if (!confirm('Are you sure you want to reboot the system?')) return;
|
||||
|
||||
const btn = document.getElementById('btn-reboot');
|
||||
btn.disabled = true;
|
||||
setControlStatus('Rebooting system...', false);
|
||||
|
||||
fetch('/api/reboot', { method: 'POST' })
|
||||
.then(r => r.json())
|
||||
.then(result => {
|
||||
setControlStatus(result.message, !result.success);
|
||||
if (!result.success) btn.disabled = false;
|
||||
})
|
||||
.catch(err => {
|
||||
setControlStatus('Error: ' + err, true);
|
||||
btn.disabled = false;
|
||||
});
|
||||
}
|
||||
|
||||
// Initial fetch
|
||||
fetch('/api/status')
|
||||
.then(r => r.json())
|
||||
.then(updateStatus)
|
||||
.catch(console.error);
|
||||
|
||||
// Periodic refresh
|
||||
setInterval(() => {
|
||||
fetch('/api/status')
|
||||
.then(r => r.json())
|
||||
.then(updateStatus)
|
||||
.catch(console.error);
|
||||
}, 3000);
|
||||
|
||||
// WebSocket for live updates
|
||||
const wsProtocol = location.protocol === 'https:' ? 'wss:' : 'ws:';
const ws = new WebSocket(wsProtocol + '//' + location.host + '/ws');
|
||||
ws.onmessage = (e) => {
|
||||
try {
|
||||
updateStatus(JSON.parse(e.data));
|
||||
} catch {}
|
||||
};
|
||||
|
||||
// Clock update
|
||||
function updateClock() {
|
||||
const now = new Date();
|
||||
const options = {
|
||||
weekday: 'short',
|
||||
year: 'numeric',
|
||||
month: 'short',
|
||||
day: 'numeric',
|
||||
hour: '2-digit',
|
||||
minute: '2-digit',
|
||||
second: '2-digit',
|
||||
hour12: false
|
||||
};
|
||||
document.getElementById('clock').textContent = now.toLocaleString('en-US', options);
|
||||
}
|
||||
updateClock();
|
||||
setInterval(updateClock, 1000);
|
||||
</script>
|
||||
<ecoos-app></ecoos-app>
|
||||
<script type="module" src="/app.js"></script>
|
||||
</body>
|
||||
</html>`;
|
||||
|
||||
|
||||
@@ -1 +1 @@
-export const VERSION = "0.1.3";
+export const VERSION = "0.6.7";
|
||||
|
||||
24
ecoos_daemon/ts_interfaces/display.ts
Normal file
@@ -0,0 +1,24 @@
|
||||
/**
|
||||
* Display interfaces - API contracts for display management
|
||||
*/
|
||||
|
||||
export interface IDisplayInfo {
|
||||
name: string; // e.g., "DP-1", "HDMI-A-1", "HEADLESS-1"
|
||||
make: string; // Manufacturer
|
||||
model: string; // Model name
|
||||
serial: string; // Serial number
|
||||
active: boolean; // Currently enabled
|
||||
width: number; // Resolution width
|
||||
height: number; // Resolution height
|
||||
refreshRate: number; // Hz
|
||||
isPrimary: boolean; // Has the focused window (kiosk)
|
||||
}
|
||||
|
||||
export interface IDisplaysResponse {
|
||||
displays: IDisplayInfo[];
|
||||
}
|
||||
|
||||
export interface IDisplayActionResult {
|
||||
success: boolean;
|
||||
message: string;
|
||||
}
|
||||
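These contracts map onto the display endpoints that the ecoos-displays component calls later in this changeset (GET /api/displays, and POST /api/displays/:name/enable, disable, or primary). Purely as a sketch, a typed client built on these interfaces could look like the following; the fetchJson helper and its error handling are assumptions, not part of the daemon.

import type { IDisplaysResponse, IDisplayActionResult } from './display.ts';

// Hypothetical helper, shown only to illustrate how the interfaces type the wire format.
async function fetchJson<T>(url: string, init?: RequestInit): Promise<T> {
  const response = await fetch(url, init);
  if (!response.ok) {
    throw new Error(`${url} failed with HTTP ${response.status}`);
  }
  return response.json() as Promise<T>;
}

// List all outputs known to the compositor.
export function listDisplays(): Promise<IDisplaysResponse> {
  return fetchJson<IDisplaysResponse>('/api/displays');
}

// Enable or disable a single output by name, e.g. "HDMI-A-1".
export function setDisplayEnabled(name: string, enable: boolean): Promise<IDisplayActionResult> {
  const action = enable ? 'enable' : 'disable';
  return fetchJson<IDisplayActionResult>(
    `/api/displays/${encodeURIComponent(name)}/${action}`,
    { method: 'POST' },
  );
}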
7
ecoos_daemon/ts_interfaces/index.ts
Normal file
@@ -0,0 +1,7 @@
|
||||
/**
|
||||
* Re-export all interfaces
|
||||
*/
|
||||
|
||||
export * from './status.ts';
|
||||
export * from './display.ts';
|
||||
export * from './updates.ts';
|
||||
81
ecoos_daemon/ts_interfaces/status.ts
Normal file
@@ -0,0 +1,81 @@
|
||||
/**
|
||||
* Status interfaces - API contracts for system status data
|
||||
*/
|
||||
|
||||
export type TServiceState = 'stopped' | 'starting' | 'running' | 'failed';
|
||||
|
||||
export interface IServiceStatus {
|
||||
state: TServiceState;
|
||||
error?: string;
|
||||
lastAttempt?: string;
|
||||
}
|
||||
|
||||
export interface ICpuInfo {
|
||||
model: string;
|
||||
cores: number;
|
||||
usage: number;
|
||||
}
|
||||
|
||||
export interface IMemoryInfo {
|
||||
total: number;
|
||||
used: number;
|
||||
free: number;
|
||||
usagePercent: number;
|
||||
}
|
||||
|
||||
export interface IDiskInfo {
|
||||
device: string;
|
||||
mountpoint: string;
|
||||
total: number;
|
||||
used: number;
|
||||
free: number;
|
||||
usagePercent: number;
|
||||
}
|
||||
|
||||
export interface INetworkInterface {
|
||||
name: string;
|
||||
ip: string;
|
||||
mac: string;
|
||||
state: 'up' | 'down';
|
||||
}
|
||||
|
||||
export interface IGpuInfo {
|
||||
name: string;
|
||||
driver: string;
|
||||
}
|
||||
|
||||
export interface IInputDevice {
|
||||
name: string;
|
||||
type: 'keyboard' | 'mouse' | 'touchpad' | 'other';
|
||||
path: string;
|
||||
}
|
||||
|
||||
export interface IAudioDevice {
|
||||
name: string;
|
||||
description: string;
|
||||
isDefault: boolean;
|
||||
}
|
||||
|
||||
export interface ISystemInfo {
|
||||
hostname: string;
|
||||
cpu: ICpuInfo;
|
||||
memory: IMemoryInfo;
|
||||
disks: IDiskInfo[];
|
||||
network: INetworkInterface[];
|
||||
gpu: IGpuInfo[];
|
||||
uptime: number;
|
||||
inputDevices: IInputDevice[];
|
||||
speakers: IAudioDevice[];
|
||||
microphones: IAudioDevice[];
|
||||
}
|
||||
|
||||
export interface IStatus {
|
||||
version: string;
|
||||
sway: boolean;
|
||||
swayStatus: IServiceStatus;
|
||||
chromium: boolean;
|
||||
chromiumStatus: IServiceStatus;
|
||||
systemInfo: ISystemInfo;
|
||||
logs: string[];
|
||||
systemLogs: string[];
|
||||
}
|
||||
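The sway and chromium booleans in IStatus are convenience flags, while the IServiceStatus objects carry the failure detail the overview panel renders. Below is a minimal sketch of how a consumer might collapse them into a single health check; the helper names are illustrative, not part of the API.

import type { IStatus, IServiceStatus } from './status.ts';

function isRunning(service: IServiceStatus | undefined): boolean {
  return service?.state === 'running';
}

// True only when both the compositor and the browser report a running state.
export function isKioskHealthy(status: IStatus): boolean {
  return isRunning(status.swayStatus) && isRunning(status.chromiumStatus);
}

// Usage sketch against the daemon's status endpoint:
//   const status = (await (await fetch('/api/status')).json()) as IStatus;
//   console.log(isKioskHealthy(status) ? 'kiosk OK' : 'kiosk degraded');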
32
ecoos_daemon/ts_interfaces/updates.ts
Normal file
@@ -0,0 +1,32 @@
|
||||
/**
|
||||
* Update interfaces - API contracts for update/upgrade system
|
||||
*/
|
||||
|
||||
export interface IRelease {
|
||||
version: string;
|
||||
tagName: string;
|
||||
publishedAt: Date;
|
||||
downloadUrl: string;
|
||||
isCurrent: boolean;
|
||||
isNewer: boolean;
|
||||
ageHours: number;
|
||||
}
|
||||
|
||||
export interface IAutoUpgradeStatus {
|
||||
enabled: boolean;
|
||||
targetVersion: string | null;
|
||||
scheduledIn: string | null;
|
||||
waitingForStability: boolean;
|
||||
}
|
||||
|
||||
export interface IUpdateInfo {
|
||||
currentVersion: string;
|
||||
releases: IRelease[];
|
||||
autoUpgrade: IAutoUpgradeStatus;
|
||||
lastCheck: string | null;
|
||||
}
|
||||
|
||||
export interface IUpgradeResult {
|
||||
success: boolean;
|
||||
message: string;
|
||||
}
|
||||
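Because IRelease already carries isNewer and publishedAt, choosing an upgrade candidate reduces to filtering and sorting. The sketch below is illustrative only and assumes publishedAt arrives as an ISO string after JSON parsing even though the interface declares Date.

import type { IUpdateInfo, IRelease } from './updates.ts';

// Illustrative selection logic: newest release that is flagged newer than the
// running version, or null when the device is already up to date.
export function pickUpgradeCandidate(info: IUpdateInfo): IRelease | null {
  const candidates = info.releases.filter((release) => release.isNewer);
  if (candidates.length === 0) {
    return null;
  }
  candidates.sort(
    (a, b) => new Date(b.publishedAt).getTime() - new Date(a.publishedAt).getTime(),
  );
  return candidates[0];
}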
265
ecoos_daemon/ts_web/elements/ecoos-app.ts
Normal file
@@ -0,0 +1,265 @@
|
||||
/**
|
||||
* EcoOS App - Main application component
|
||||
* Uses dees-simple-appdash as the dashboard shell
|
||||
*/
|
||||
|
||||
import {
|
||||
html,
|
||||
DeesElement,
|
||||
customElement,
|
||||
state,
|
||||
css,
|
||||
type TemplateResult,
|
||||
} from '@design.estate/dees-element';
|
||||
|
||||
import { DeesSimpleAppdash, type IView } from '@design.estate/dees-catalog';
|
||||
|
||||
import type { IStatus } from '../../ts_interfaces/status.js';
|
||||
import type { IDisplayInfo } from '../../ts_interfaces/display.js';
|
||||
import type { IUpdateInfo } from '../../ts_interfaces/updates.js';
|
||||
|
||||
import { EcoosOverview } from './ecoos-overview.js';
|
||||
import { EcoosDevices } from './ecoos-devices.js';
|
||||
import { EcoosDisplays } from './ecoos-displays.js';
|
||||
import { EcoosUpdates } from './ecoos-updates.js';
|
||||
import { EcoosLogs } from './ecoos-logs.js';
|
||||
|
||||
@customElement('ecoos-app')
|
||||
export class EcoosApp extends DeesElement {
|
||||
@state()
|
||||
private accessor status: IStatus | null = null;
|
||||
|
||||
@state()
|
||||
private accessor displays: IDisplayInfo[] = [];
|
||||
|
||||
@state()
|
||||
private accessor updateInfo: IUpdateInfo | null = null;
|
||||
|
||||
@state()
|
||||
private accessor initialVersion: string | null = null;
|
||||
|
||||
private ws: WebSocket | null = null;
|
||||
private statusInterval: number | null = null;
|
||||
private displaysInterval: number | null = null;
|
||||
private updatesInterval: number | null = null;
|
||||
|
||||
public static styles = [
|
||||
css`
|
||||
:host {
|
||||
display: block;
|
||||
width: 100vw;
|
||||
height: 100vh;
|
||||
background: #0a0a0a;
|
||||
}
|
||||
|
||||
dees-simple-appdash {
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
}
|
||||
`,
|
||||
];
|
||||
|
||||
private viewTabs: IView[] = [
|
||||
{
|
||||
name: 'Overview',
|
||||
iconName: 'lucide:layoutGrid',
|
||||
element: EcoosOverview,
|
||||
},
|
||||
{
|
||||
name: 'Devices',
|
||||
iconName: 'lucide:cpu',
|
||||
element: EcoosDevices,
|
||||
},
|
||||
{
|
||||
name: 'Displays',
|
||||
iconName: 'lucide:monitor',
|
||||
element: EcoosDisplays,
|
||||
},
|
||||
{
|
||||
name: 'Updates',
|
||||
iconName: 'lucide:download',
|
||||
element: EcoosUpdates,
|
||||
},
|
||||
{
|
||||
name: 'Logs',
|
||||
iconName: 'lucide:scrollText',
|
||||
element: EcoosLogs,
|
||||
},
|
||||
];
|
||||
|
||||
connectedCallback(): void {
|
||||
super.connectedCallback();
|
||||
this.startPolling();
|
||||
this.connectWebSocket();
|
||||
}
|
||||
|
||||
disconnectedCallback(): void {
|
||||
super.disconnectedCallback();
|
||||
this.stopPolling();
|
||||
this.disconnectWebSocket();
|
||||
}
|
||||
|
||||
render(): TemplateResult {
|
||||
return html`
|
||||
<dees-simple-appdash
|
||||
name="EcoOS Management"
|
||||
.viewTabs=${this.viewTabs}
|
||||
@view-select=${this.handleViewSelect}
|
||||
></dees-simple-appdash>
|
||||
`;
|
||||
}
|
||||
|
||||
updated(changedProperties: Map<string, unknown>): void {
|
||||
super.updated(changedProperties);
|
||||
|
||||
// Pass data to view components when they're rendered
|
||||
this.updateViewData();
|
||||
}
|
||||
|
||||
private updateViewData(): void {
|
||||
// Find and update the active view component
|
||||
const appdash = this.shadowRoot?.querySelector('dees-simple-appdash');
|
||||
if (!appdash) return;
|
||||
|
||||
// Get the current view content
|
||||
const overview = appdash.shadowRoot?.querySelector('ecoos-overview') as EcoosOverview;
|
||||
const devices = appdash.shadowRoot?.querySelector('ecoos-devices') as EcoosDevices;
|
||||
const displays = appdash.shadowRoot?.querySelector('ecoos-displays') as EcoosDisplays;
|
||||
const updates = appdash.shadowRoot?.querySelector('ecoos-updates') as EcoosUpdates;
|
||||
const logs = appdash.shadowRoot?.querySelector('ecoos-logs') as EcoosLogs;
|
||||
|
||||
if (overview && this.status) {
|
||||
overview.status = this.status;
|
||||
}
|
||||
|
||||
if (devices && this.status?.systemInfo) {
|
||||
devices.systemInfo = this.status.systemInfo;
|
||||
}
|
||||
|
||||
if (displays) {
|
||||
displays.displays = this.displays;
|
||||
}
|
||||
|
||||
if (updates && this.updateInfo) {
|
||||
updates.updateInfo = this.updateInfo;
|
||||
}
|
||||
|
||||
if (logs && this.status) {
|
||||
logs.daemonLogs = this.status.logs || [];
|
||||
logs.systemLogs = this.status.systemLogs || [];
|
||||
}
|
||||
}
|
||||
|
||||
private handleViewSelect(event: CustomEvent): void {
|
||||
console.log('View selected:', event.detail.view.name);
|
||||
// Trigger a data update for the new view
|
||||
setTimeout(() => this.updateViewData(), 100);
|
||||
}
|
||||
|
||||
private startPolling(): void {
|
||||
// Initial fetches
|
||||
this.fetchStatus();
|
||||
this.fetchDisplays();
|
||||
this.fetchUpdates();
|
||||
|
||||
// Periodic polling
|
||||
this.statusInterval = window.setInterval(() => this.fetchStatus(), 3000);
|
||||
this.displaysInterval = window.setInterval(() => this.fetchDisplays(), 5000);
|
||||
this.updatesInterval = window.setInterval(() => this.fetchUpdates(), 60000);
|
||||
}
|
||||
|
||||
private stopPolling(): void {
|
||||
if (this.statusInterval) {
|
||||
clearInterval(this.statusInterval);
|
||||
this.statusInterval = null;
|
||||
}
|
||||
if (this.displaysInterval) {
|
||||
clearInterval(this.displaysInterval);
|
||||
this.displaysInterval = null;
|
||||
}
|
||||
if (this.updatesInterval) {
|
||||
clearInterval(this.updatesInterval);
|
||||
this.updatesInterval = null;
|
||||
}
|
||||
}
|
||||
|
||||
private connectWebSocket(): void {
|
||||
const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
|
||||
const wsUrl = `${protocol}//${window.location.host}/ws`;
|
||||
|
||||
this.ws = new WebSocket(wsUrl);
|
||||
|
||||
this.ws.onmessage = (event) => {
|
||||
try {
|
||||
const data = JSON.parse(event.data) as IStatus;
|
||||
this.handleStatusUpdate(data);
|
||||
} catch (e) {
|
||||
console.error('WebSocket message parse error:', e);
|
||||
}
|
||||
};
|
||||
|
||||
this.ws.onclose = () => {
|
||||
console.log('WebSocket disconnected, reconnecting in 3s...');
|
||||
setTimeout(() => this.connectWebSocket(), 3000);
|
||||
};
|
||||
|
||||
this.ws.onerror = (error) => {
|
||||
console.error('WebSocket error:', error);
|
||||
};
|
||||
}
|
||||
|
||||
private disconnectWebSocket(): void {
|
||||
if (this.ws) {
|
||||
this.ws.onclose = null; // prevent the auto-reconnect handler from firing after intentional teardown
this.ws.close();
|
||||
this.ws = null;
|
||||
}
|
||||
}
|
||||
|
||||
private handleStatusUpdate(data: IStatus): void {
|
||||
// Check for version change and reload if needed
|
||||
if (data.version) {
|
||||
if (this.initialVersion === null) {
|
||||
this.initialVersion = data.version;
|
||||
} else if (data.version !== this.initialVersion) {
|
||||
console.log(`Version changed from ${this.initialVersion} to ${data.version}, reloading...`);
|
||||
window.location.reload();
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
this.status = data;
|
||||
this.updateViewData();
|
||||
}
|
||||
|
||||
private async fetchStatus(): Promise<void> {
|
||||
try {
|
||||
const response = await fetch('/api/status');
|
||||
const data = await response.json() as IStatus;
|
||||
this.handleStatusUpdate(data);
|
||||
} catch (error) {
|
||||
console.error('Failed to fetch status:', error);
|
||||
}
|
||||
}
|
||||
|
||||
private async fetchDisplays(): Promise<void> {
|
||||
try {
|
||||
const response = await fetch('/api/displays');
|
||||
const data = await response.json();
|
||||
this.displays = data.displays || [];
|
||||
this.updateViewData();
|
||||
} catch (error) {
|
||||
console.error('Failed to fetch displays:', error);
|
||||
}
|
||||
}
|
||||
|
||||
private async fetchUpdates(): Promise<void> {
|
||||
try {
|
||||
const response = await fetch('/api/updates');
|
||||
const data = await response.json() as IUpdateInfo;
|
||||
this.updateInfo = data;
|
||||
this.updateViewData();
|
||||
} catch (error) {
|
||||
console.error('Failed to fetch updates:', error);
|
||||
}
|
||||
}
|
||||
}
|
||||
217
ecoos_daemon/ts_web/elements/ecoos-devices.ts
Normal file
@@ -0,0 +1,217 @@
|
||||
/**
|
||||
* EcoOS Devices View
|
||||
* Card-based view for network, storage, input, and audio devices
|
||||
*/
|
||||
|
||||
import {
|
||||
html,
|
||||
DeesElement,
|
||||
customElement,
|
||||
property,
|
||||
css,
|
||||
type TemplateResult,
|
||||
} from '@design.estate/dees-element';
|
||||
import { DeesPanel, DeesBadge } from '@design.estate/dees-catalog';
|
||||
|
||||
import { sharedStyles, formatBytes } from '../styles/shared.js';
|
||||
import type { ISystemInfo } from '../../ts_interfaces/status.js';
|
||||
|
||||
@customElement('ecoos-devices')
|
||||
export class EcoosDevices extends DeesElement {
|
||||
@property({ type: Object })
|
||||
public accessor systemInfo: ISystemInfo | null = null;
|
||||
|
||||
public static styles = [
|
||||
sharedStyles,
|
||||
css`
|
||||
:host {
|
||||
display: block;
|
||||
padding: 16px;
|
||||
}
|
||||
|
||||
.cards-grid {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(2, 1fr);
|
||||
gap: 16px;
|
||||
}
|
||||
|
||||
@media (max-width: 768px) {
|
||||
.cards-grid {
|
||||
grid-template-columns: 1fr;
|
||||
}
|
||||
}
|
||||
|
||||
.device-row {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
padding: 8px 0;
|
||||
border-bottom: 1px solid var(--border);
|
||||
gap: 12px;
|
||||
}
|
||||
|
||||
.device-row:last-child {
|
||||
border-bottom: none;
|
||||
padding-bottom: 0;
|
||||
}
|
||||
|
||||
.device-row:first-child {
|
||||
padding-top: 0;
|
||||
}
|
||||
|
||||
.device-name {
|
||||
font-weight: 500;
|
||||
font-size: var(--text-sm);
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
||||
.device-info {
|
||||
flex: 1;
|
||||
text-align: right;
|
||||
font-family: 'SF Mono', monospace;
|
||||
font-size: var(--text-xs);
|
||||
color: var(--text-secondary);
|
||||
overflow: hidden;
|
||||
text-overflow: ellipsis;
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
.device-secondary {
|
||||
font-size: var(--text-xs);
|
||||
color: var(--text-tertiary);
|
||||
margin-top: 2px;
|
||||
}
|
||||
|
||||
.progress-mini {
|
||||
width: 60px;
|
||||
height: 4px;
|
||||
background: var(--border);
|
||||
border-radius: 2px;
|
||||
overflow: hidden;
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
||||
.progress-mini-bar {
|
||||
height: 100%;
|
||||
background: var(--accent);
|
||||
transition: width 300ms ease;
|
||||
}
|
||||
|
||||
.progress-mini-bar.warning {
|
||||
background: var(--warning);
|
||||
}
|
||||
|
||||
.progress-mini-bar.error {
|
||||
background: var(--error);
|
||||
}
|
||||
|
||||
.usage-info {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 8px;
|
||||
}
|
||||
|
||||
.usage-text {
|
||||
font-size: var(--text-xs);
|
||||
color: var(--text-secondary);
|
||||
font-family: 'SF Mono', monospace;
|
||||
}
|
||||
|
||||
.empty-text {
|
||||
font-size: var(--text-sm);
|
||||
color: var(--text-tertiary);
|
||||
padding: 8px 0;
|
||||
}
|
||||
`,
|
||||
];
|
||||
|
||||
render(): TemplateResult {
|
||||
if (!this.systemInfo) {
|
||||
return html`<div class="empty">Loading...</div>`;
|
||||
}
|
||||
|
||||
return html`
|
||||
<div class="cards-grid">
|
||||
<!-- Network -->
|
||||
<dees-panel .title=${'Network'}>
|
||||
${this.systemInfo.network?.length
|
||||
? this.systemInfo.network.map(n => html`
|
||||
<div class="device-row">
|
||||
<span class="device-name">${n.name}</span>
|
||||
<span class="device-info">${n.ip || '—'}</span>
|
||||
<dees-badge .type=${n.state === 'up' ? 'success' : 'error'}>${n.state}</dees-badge>
|
||||
</div>
|
||||
`)
|
||||
: html`<div class="empty-text">No network interfaces</div>`
|
||||
}
|
||||
</dees-panel>
|
||||
|
||||
<!-- Storage -->
|
||||
<dees-panel .title=${'Storage'}>
|
||||
${this.systemInfo.disks?.length
|
||||
? this.systemInfo.disks.map(d => html`
|
||||
<div class="device-row">
|
||||
<div>
|
||||
<div class="device-name">${d.mountpoint}</div>
|
||||
<div class="device-secondary">${d.device}</div>
|
||||
</div>
|
||||
<div class="usage-info">
|
||||
<span class="usage-text">${formatBytes(d.used)} / ${formatBytes(d.total)}</span>
|
||||
<div class="progress-mini">
|
||||
<div class="progress-mini-bar ${this.getUsageClass(d.usagePercent || 0)}" style="width: ${d.usagePercent || 0}%"></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
`)
|
||||
: html`<div class="empty-text">No disks</div>`
|
||||
}
|
||||
</dees-panel>
|
||||
|
||||
<!-- Input Devices -->
|
||||
<dees-panel .title=${'Input Devices'}>
|
||||
${this.systemInfo.inputDevices?.length
|
||||
? this.systemInfo.inputDevices.map(d => html`
|
||||
<div class="device-row">
|
||||
<span class="device-name">${d.name}</span>
|
||||
<dees-badge .type=${'default'}>${d.type}</dees-badge>
|
||||
</div>
|
||||
`)
|
||||
: html`<div class="empty-text">No input devices</div>`
|
||||
}
|
||||
</dees-panel>
|
||||
|
||||
<!-- Audio Output -->
|
||||
<dees-panel .title=${'Audio Output'}>
|
||||
${this.systemInfo.speakers?.length
|
||||
? this.systemInfo.speakers.map(s => html`
|
||||
<div class="device-row">
|
||||
<span class="device-name">${s.description}</span>
|
||||
${s.isDefault ? html`<dees-badge .type=${'success'}>Default</dees-badge>` : ''}
|
||||
</div>
|
||||
`)
|
||||
: html`<div class="empty-text">No speakers</div>`
|
||||
}
|
||||
</dees-panel>
|
||||
|
||||
<!-- Audio Input -->
|
||||
<dees-panel .title=${'Audio Input'}>
|
||||
${this.systemInfo.microphones?.length
|
||||
? this.systemInfo.microphones.map(m => html`
|
||||
<div class="device-row">
|
||||
<span class="device-name">${m.description}</span>
|
||||
${m.isDefault ? html`<dees-badge .type=${'success'}>Default</dees-badge>` : ''}
|
||||
</div>
|
||||
`)
|
||||
: html`<div class="empty-text">No microphones</div>`
|
||||
}
|
||||
</dees-panel>
|
||||
</div>
|
||||
`;
|
||||
}
|
||||
|
||||
private getUsageClass(usage: number): string {
|
||||
if (usage > 90) return 'error';
|
||||
if (usage > 75) return 'warning';
|
||||
return '';
|
||||
}
|
||||
}
|
||||
233
ecoos_daemon/ts_web/elements/ecoos-displays.ts
Normal file
@@ -0,0 +1,233 @@
|
||||
/**
|
||||
* EcoOS Displays View
|
||||
* Card-based display management
|
||||
*/
|
||||
|
||||
import {
|
||||
html,
|
||||
DeesElement,
|
||||
customElement,
|
||||
property,
|
||||
state,
|
||||
css,
|
||||
type TemplateResult,
|
||||
} from '@design.estate/dees-element';
|
||||
import { DeesButton, DeesPanel, DeesBadge } from '@design.estate/dees-catalog';
|
||||
|
||||
import { sharedStyles } from '../styles/shared.js';
|
||||
import type { IDisplayInfo } from '../../ts_interfaces/display.js';
|
||||
|
||||
@customElement('ecoos-displays')
|
||||
export class EcoosDisplays extends DeesElement {
|
||||
@property({ type: Array })
|
||||
public accessor displays: IDisplayInfo[] = [];
|
||||
|
||||
@state()
|
||||
private accessor loading: boolean = false;
|
||||
|
||||
@state()
|
||||
private accessor message: string = '';
|
||||
|
||||
@state()
|
||||
private accessor messageError: boolean = false;
|
||||
|
||||
public static styles = [
|
||||
sharedStyles,
|
||||
css`
|
||||
:host {
|
||||
display: block;
|
||||
padding: 16px;
|
||||
}
|
||||
|
||||
.display-grid {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(auto-fit, minmax(280px, 1fr));
|
||||
gap: 16px;
|
||||
}
|
||||
|
||||
.display-card {
|
||||
opacity: 1;
|
||||
transition: opacity 0.2s ease;
|
||||
}
|
||||
|
||||
.display-card.disabled {
|
||||
opacity: 0.5;
|
||||
}
|
||||
|
||||
.display-meta {
|
||||
font-size: var(--text-xs);
|
||||
color: var(--text-tertiary);
|
||||
margin-bottom: 12px;
|
||||
}
|
||||
|
||||
.badge-row {
|
||||
margin-bottom: 12px;
|
||||
}
|
||||
|
||||
.actions-row {
|
||||
display: flex;
|
||||
gap: 8px;
|
||||
flex-wrap: wrap;
|
||||
}
|
||||
|
||||
.message-bar {
|
||||
margin-top: 16px;
|
||||
padding: 8px 12px;
|
||||
border-radius: 6px;
|
||||
font-size: var(--text-sm);
|
||||
}
|
||||
|
||||
.message-bar.success {
|
||||
background: hsla(142.1, 76.2%, 36.3%, 0.15);
|
||||
color: var(--success);
|
||||
}
|
||||
|
||||
.message-bar.error {
|
||||
background: hsla(0, 84.2%, 60.2%, 0.15);
|
||||
color: var(--error);
|
||||
}
|
||||
|
||||
.empty-state {
|
||||
text-align: center;
|
||||
padding: 32px;
|
||||
color: var(--text-tertiary);
|
||||
}
|
||||
|
||||
.disabled-section {
|
||||
margin-top: 16px;
|
||||
}
|
||||
|
||||
.disabled-header {
|
||||
font-size: var(--text-sm);
|
||||
color: var(--text-tertiary);
|
||||
margin-bottom: 12px;
|
||||
cursor: pointer;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 6px;
|
||||
}
|
||||
|
||||
.disabled-header::before {
|
||||
content: '▶';
|
||||
font-size: 8px;
|
||||
transition: transform 150ms ease;
|
||||
}
|
||||
|
||||
.disabled-header.open::before {
|
||||
transform: rotate(90deg);
|
||||
}
|
||||
`,
|
||||
];
|
||||
|
||||
render(): TemplateResult {
|
||||
const enabledDisplays = this.displays.filter(d => d.active);
|
||||
const disabledDisplays = this.displays.filter(d => !d.active);
|
||||
|
||||
if (this.displays.length === 0) {
|
||||
return html`
|
||||
<div class="empty-state">
|
||||
No displays detected
|
||||
</div>
|
||||
`;
|
||||
}
|
||||
|
||||
return html`
|
||||
<div class="display-grid">
|
||||
${enabledDisplays.map(d => this.renderDisplayCard(d))}
|
||||
</div>
|
||||
|
||||
${disabledDisplays.length > 0 ? html`
|
||||
<details class="disabled-section">
|
||||
<summary class="disabled-header">
|
||||
Disabled Displays (${disabledDisplays.length})
|
||||
</summary>
|
||||
<div class="display-grid" style="margin-top: 12px;">
|
||||
${disabledDisplays.map(d => this.renderDisplayCard(d))}
|
||||
</div>
|
||||
</details>
|
||||
` : ''}
|
||||
|
||||
${this.message ? html`
|
||||
<div class="message-bar ${this.messageError ? 'error' : 'success'}">${this.message}</div>
|
||||
` : ''}
|
||||
`;
|
||||
}
|
||||
|
||||
private renderDisplayCard(display: IDisplayInfo): TemplateResult {
|
||||
return html`
|
||||
<dees-panel
|
||||
class="display-card ${display.active ? '' : 'disabled'}"
|
||||
.title=${display.name}
|
||||
.subtitle=${`${display.width}×${display.height} @ ${display.refreshRate}Hz`}
|
||||
.variant=${display.active ? 'default' : 'ghost'}
|
||||
>
|
||||
${display.make && display.make !== 'Unknown' ? html`
|
||||
<div class="display-meta">${display.make}${display.model ? ` ${display.model}` : ''}</div>
|
||||
` : ''}
|
||||
|
||||
${display.isPrimary ? html`
|
||||
<div class="badge-row">
|
||||
<dees-badge .type=${'primary'}>Primary</dees-badge>
|
||||
</div>
|
||||
` : ''}
|
||||
|
||||
<div class="actions-row">
|
||||
${display.active && !display.isPrimary ? html`
|
||||
<dees-button
|
||||
.type=${'default'}
|
||||
.text=${'Set Primary'}
|
||||
.disabled=${this.loading}
|
||||
@click=${() => this.setPrimary(display.name)}
|
||||
></dees-button>
|
||||
` : ''}
|
||||
<dees-button
|
||||
.type=${'default'}
|
||||
.status=${display.active ? 'error' : 'success'}
|
||||
.text=${display.active ? 'Disable' : 'Enable'}
|
||||
.disabled=${this.loading}
|
||||
@click=${() => this.toggleDisplay(display.name, !display.active)}
|
||||
></dees-button>
|
||||
</div>
|
||||
</dees-panel>
|
||||
`;
|
||||
}
|
||||
|
||||
private async toggleDisplay(name: string, enable: boolean): Promise<void> {
|
||||
this.loading = true;
|
||||
this.message = '';
|
||||
try {
|
||||
const action = enable ? 'enable' : 'disable';
|
||||
const response = await fetch(`/api/displays/${encodeURIComponent(name)}/${action}`, {
|
||||
method: 'POST',
|
||||
});
|
||||
const result = await response.json();
|
||||
this.message = result.message;
|
||||
this.messageError = !result.success;
|
||||
this.dispatchEvent(new CustomEvent('refresh-displays'));
|
||||
} catch (error) {
|
||||
this.message = `Error: ${error}`;
|
||||
this.messageError = true;
|
||||
} finally {
|
||||
this.loading = false;
|
||||
}
|
||||
}
|
||||
|
||||
private async setPrimary(name: string): Promise<void> {
|
||||
this.loading = true;
|
||||
this.message = '';
|
||||
try {
|
||||
const response = await fetch(`/api/displays/${encodeURIComponent(name)}/primary`, {
|
||||
method: 'POST',
|
||||
});
|
||||
const result = await response.json();
|
||||
this.message = result.message;
|
||||
this.messageError = !result.success;
|
||||
this.dispatchEvent(new CustomEvent('refresh-displays'));
|
||||
} catch (error) {
|
||||
this.message = `Error: ${error}`;
|
||||
this.messageError = true;
|
||||
} finally {
|
||||
this.loading = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
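The toggle and set-primary handlers above dispatch a refresh-displays CustomEvent, but nothing else in this changeset listens for it, so the host relies on its periodic /api/displays polling. Purely as an assumed sketch of tighter wiring, a hosting component that already holds a reference to the view could refetch immediately:

// Sketch only: ecoos-app.ts already locates the view via
// appdash.shadowRoot.querySelector('ecoos-displays'); this helper is hypothetical.
function wireDisplayRefresh(
  displaysView: HTMLElement,
  refetch: () => Promise<void>,
): void {
  displaysView.addEventListener('refresh-displays', () => {
    // Refresh right away instead of waiting for the next polling tick.
    void refetch();
  });
}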
242
ecoos_daemon/ts_web/elements/ecoos-logs.ts
Normal file
@@ -0,0 +1,242 @@
|
||||
/**
|
||||
* EcoOS Logs View
|
||||
* Panel-wrapped terminal-style log viewer
|
||||
*/
|
||||
|
||||
import {
|
||||
html,
|
||||
DeesElement,
|
||||
customElement,
|
||||
property,
|
||||
state,
|
||||
css,
|
||||
type TemplateResult,
|
||||
} from '@design.estate/dees-element';
|
||||
import { DeesPanel } from '@design.estate/dees-catalog';
|
||||
|
||||
import { sharedStyles } from '../styles/shared.js';
|
||||
|
||||
@customElement('ecoos-logs')
|
||||
export class EcoosLogs extends DeesElement {
|
||||
@property({ type: Array })
|
||||
public accessor daemonLogs: string[] = [];
|
||||
|
||||
@property({ type: Array })
|
||||
public accessor systemLogs: string[] = [];
|
||||
|
||||
@state()
|
||||
private accessor activeTab: 'daemon' | 'system' = 'daemon';
|
||||
|
||||
@state()
|
||||
private accessor autoScroll: boolean = true;
|
||||
|
||||
public static styles = [
|
||||
sharedStyles,
|
||||
css`
|
||||
:host {
|
||||
display: block;
|
||||
padding: 16px;
|
||||
}
|
||||
|
||||
.container {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
height: calc(100vh - 140px);
|
||||
min-height: 300px;
|
||||
}
|
||||
|
||||
.tabs {
|
||||
display: flex;
|
||||
gap: 0;
|
||||
border-bottom: 1px solid var(--border);
|
||||
margin-bottom: 12px;
|
||||
}
|
||||
|
||||
.tab {
|
||||
padding: 8px 16px;
|
||||
font-size: var(--text-sm);
|
||||
font-weight: 500;
|
||||
color: var(--text-tertiary);
|
||||
cursor: pointer;
|
||||
border-bottom: 2px solid transparent;
|
||||
margin-bottom: -1px;
|
||||
transition: color 150ms ease;
|
||||
}
|
||||
|
||||
.tab:hover {
|
||||
color: var(--text);
|
||||
}
|
||||
|
||||
.tab.active {
|
||||
color: var(--text);
|
||||
border-bottom-color: var(--accent);
|
||||
}
|
||||
|
||||
.header-row {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
margin-bottom: 8px;
|
||||
}
|
||||
|
||||
.count {
|
||||
font-size: var(--text-xs);
|
||||
color: var(--text-tertiary);
|
||||
font-family: 'SF Mono', monospace;
|
||||
}
|
||||
|
||||
.auto-scroll {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 6px;
|
||||
font-size: var(--text-xs);
|
||||
color: var(--text-tertiary);
|
||||
cursor: pointer;
|
||||
padding: 4px 8px;
|
||||
border-radius: 4px;
|
||||
transition: background 150ms ease;
|
||||
}
|
||||
|
||||
.auto-scroll:hover {
|
||||
background: var(--bg-hover);
|
||||
}
|
||||
|
||||
.auto-scroll.active {
|
||||
color: var(--accent);
|
||||
}
|
||||
|
||||
.auto-scroll .indicator {
|
||||
width: 6px;
|
||||
height: 6px;
|
||||
border-radius: 50%;
|
||||
background: var(--text-tertiary);
|
||||
}
|
||||
|
||||
.auto-scroll.active .indicator {
|
||||
background: var(--accent);
|
||||
}
|
||||
|
||||
.terminal {
|
||||
flex: 1;
|
||||
background: hsl(0 0% 2%);
|
||||
font-family: 'SF Mono', 'Fira Code', 'Consolas', monospace;
|
||||
font-size: 11px;
|
||||
line-height: 1.5;
|
||||
padding: 12px;
|
||||
overflow-y: auto;
|
||||
overflow-x: hidden;
|
||||
border-radius: 6px;
|
||||
}
|
||||
|
||||
.line {
|
||||
white-space: pre-wrap;
|
||||
word-break: break-all;
|
||||
padding: 1px 0;
|
||||
color: var(--text-secondary);
|
||||
}
|
||||
|
||||
.line.error {
|
||||
color: var(--error);
|
||||
}
|
||||
|
||||
.line.warning {
|
||||
color: var(--warning);
|
||||
}
|
||||
|
||||
.empty-logs {
|
||||
color: var(--text-tertiary);
|
||||
font-style: italic;
|
||||
}
|
||||
`,
|
||||
];
|
||||
|
||||
render(): TemplateResult {
|
||||
const logs = this.activeTab === 'daemon' ? this.daemonLogs : this.systemLogs;
|
||||
|
||||
return html`
|
||||
<dees-panel .title=${'Logs'}>
|
||||
<div class="container">
|
||||
<div class="tabs">
|
||||
<div
|
||||
class="tab ${this.activeTab === 'daemon' ? 'active' : ''}"
|
||||
@click=${() => this.switchTab('daemon')}
|
||||
>Daemon</div>
|
||||
<div
|
||||
class="tab ${this.activeTab === 'system' ? 'active' : ''}"
|
||||
@click=${() => this.switchTab('system')}
|
||||
>System</div>
|
||||
</div>
|
||||
|
||||
<div class="header-row">
|
||||
<span class="count">${logs.length} lines</span>
|
||||
<div
|
||||
class="auto-scroll ${this.autoScroll ? 'active' : ''}"
|
||||
@click=${this.toggleAutoScroll}
|
||||
>
|
||||
<span class="indicator"></span>
|
||||
Auto-scroll
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="terminal" id="terminal" @scroll=${this.handleScroll}>
|
||||
${logs.length === 0
|
||||
? html`<div class="empty-logs">No logs</div>`
|
||||
: logs.map(log => html`<div class="line ${this.getLogLevel(log)}">${log}</div>`)
|
||||
}
|
||||
</div>
|
||||
</div>
|
||||
</dees-panel>
|
||||
`;
|
||||
}
|
||||
|
||||
private getLogLevel(log: string): string {
|
||||
const lower = log.toLowerCase();
|
||||
if (lower.includes('error') || lower.includes('fail') || lower.includes('fatal')) {
|
||||
return 'error';
|
||||
}
|
||||
if (lower.includes('warn')) {
|
||||
return 'warning';
|
||||
}
|
||||
return '';
|
||||
}
|
||||
|
||||
private switchTab(tab: 'daemon' | 'system'): void {
|
||||
this.activeTab = tab;
|
||||
if (this.autoScroll) {
|
||||
this.scrollToBottom();
|
||||
}
|
||||
}
|
||||
|
||||
private toggleAutoScroll(): void {
|
||||
this.autoScroll = !this.autoScroll;
|
||||
if (this.autoScroll) {
|
||||
this.scrollToBottom();
|
||||
}
|
||||
}
|
||||
|
||||
private handleScroll(): void {
|
||||
const terminal = this.shadowRoot?.getElementById('terminal');
|
||||
if (terminal) {
|
||||
const isAtBottom = terminal.scrollHeight - terminal.scrollTop <= terminal.clientHeight + 50;
|
||||
if (!isAtBottom && this.autoScroll) {
|
||||
this.autoScroll = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
updated(changedProperties: Map<string, unknown>): void {
|
||||
super.updated(changedProperties);
|
||||
if ((changedProperties.has('daemonLogs') || changedProperties.has('systemLogs')) && this.autoScroll) {
|
||||
this.scrollToBottom();
|
||||
}
|
||||
}
|
||||
|
||||
private scrollToBottom(): void {
|
||||
requestAnimationFrame(() => {
|
||||
const terminal = this.shadowRoot?.getElementById('terminal');
|
||||
if (terminal) {
|
||||
terminal.scrollTop = terminal.scrollHeight;
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
304
ecoos_daemon/ts_web/elements/ecoos-overview.ts
Normal file
@@ -0,0 +1,304 @@
|
||||
/**
|
||||
* EcoOS Overview View
|
||||
* Dashboard with stats grid, service panels, and system info
|
||||
*/
|
||||
|
||||
import {
|
||||
html,
|
||||
DeesElement,
|
||||
customElement,
|
||||
property,
|
||||
css,
|
||||
type TemplateResult,
|
||||
} from '@design.estate/dees-element';
|
||||
import {
|
||||
DeesButton,
|
||||
DeesPanel,
|
||||
DeesStatsgrid,
|
||||
DeesBadge,
|
||||
type IStatsTile,
|
||||
} from '@design.estate/dees-catalog';
|
||||
|
||||
import { sharedStyles, formatBytes, formatUptime } from '../styles/shared.js';
|
||||
import type { IStatus, IServiceStatus } from '../../ts_interfaces/status.js';
|
||||
|
||||
@customElement('ecoos-overview')
|
||||
export class EcoosOverview extends DeesElement {
|
||||
@property({ type: Object })
|
||||
public accessor status: IStatus | null = null;
|
||||
|
||||
@property({ type: Boolean })
|
||||
public accessor loading: boolean = false;
|
||||
|
||||
@property({ type: String })
|
||||
public accessor controlMessage: string = '';
|
||||
|
||||
@property({ type: Boolean })
|
||||
public accessor controlError: boolean = false;
|
||||
|
||||
public static styles = [
|
||||
sharedStyles,
|
||||
css`
|
||||
:host {
|
||||
display: block;
|
||||
padding: 16px;
|
||||
}
|
||||
|
||||
.page {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 16px;
|
||||
}
|
||||
|
||||
.cards-row {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(2, 1fr);
|
||||
gap: 16px;
|
||||
}
|
||||
|
||||
@media (max-width: 768px) {
|
||||
.cards-row {
|
||||
grid-template-columns: 1fr;
|
||||
}
|
||||
}
|
||||
|
||||
.service-row {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
padding: 8px 0;
|
||||
border-bottom: 1px solid var(--border);
|
||||
}
|
||||
|
||||
.service-row:last-child {
|
||||
border-bottom: none;
|
||||
padding-bottom: 0;
|
||||
}
|
||||
|
||||
.service-row:first-child {
|
||||
padding-top: 0;
|
||||
}
|
||||
|
||||
.service-name {
|
||||
font-weight: 500;
|
||||
font-size: var(--text-sm);
|
||||
}
|
||||
|
||||
.service-error {
|
||||
font-size: var(--text-xs);
|
||||
color: var(--error);
|
||||
margin-top: 2px;
|
||||
}
|
||||
|
||||
.info-grid {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(2, 1fr);
|
||||
gap: 12px;
|
||||
}
|
||||
|
||||
.info-item {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 2px;
|
||||
}
|
||||
|
||||
.info-label {
|
||||
font-size: var(--text-xs);
|
||||
color: var(--text-tertiary);
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 0.03em;
|
||||
}
|
||||
|
||||
.info-value {
|
||||
font-size: var(--text-sm);
|
||||
font-family: 'SF Mono', monospace;
|
||||
}
|
||||
|
||||
.actions-row {
|
||||
display: flex;
|
||||
gap: 8px;
|
||||
align-items: center;
|
||||
flex-wrap: wrap;
|
||||
}
|
||||
|
||||
.message {
|
||||
font-size: var(--text-xs);
|
||||
padding: 4px 8px;
|
||||
border-radius: 4px;
|
||||
}
|
||||
|
||||
.message.success {
|
||||
background: hsla(142.1, 76.2%, 36.3%, 0.15);
|
||||
color: var(--success);
|
||||
}
|
||||
|
||||
.message.error {
|
||||
background: hsla(0, 84.2%, 60.2%, 0.15);
|
||||
color: var(--error);
|
||||
}
|
||||
`,
|
||||
];
|
||||
|
||||
render(): TemplateResult {
|
||||
if (!this.status) {
|
||||
return html`<div class="empty">Loading...</div>`;
|
||||
}
|
||||
|
||||
const { systemInfo, swayStatus, chromiumStatus } = this.status;
|
||||
const cpuUsage = systemInfo?.cpu?.usage || 0;
|
||||
const memUsage = systemInfo?.memory?.usagePercent || 0;
|
||||
|
||||
const statsTiles: IStatsTile[] = [
|
||||
{
|
||||
id: 'cpu',
|
||||
title: 'CPU',
|
||||
value: Math.round(cpuUsage),
|
||||
type: 'percentage',
|
||||
icon: 'lucide:cpu',
|
||||
description: `${systemInfo?.cpu?.cores || 0} cores`,
|
||||
},
|
||||
{
|
||||
id: 'memory',
|
||||
title: 'Memory',
|
||||
value: Math.round(memUsage),
|
||||
type: 'percentage',
|
||||
icon: 'lucide:database',
|
||||
description: `${formatBytes(systemInfo?.memory?.used || 0)} / ${formatBytes(systemInfo?.memory?.total || 0)}`,
|
||||
},
|
||||
{
|
||||
id: 'uptime',
|
||||
title: 'Uptime',
|
||||
value: formatUptime(systemInfo?.uptime || 0),
|
||||
type: 'text',
|
||||
icon: 'lucide:clock',
|
||||
},
|
||||
];
|
||||
|
||||
return html`
|
||||
<div class="page">
|
||||
<!-- Stats Grid -->
|
||||
<dees-statsgrid
|
||||
.tiles=${statsTiles}
|
||||
.minTileWidth=${200}
|
||||
.gap=${16}
|
||||
></dees-statsgrid>
|
||||
|
||||
<!-- Services & System Info -->
|
||||
<div class="cards-row">
|
||||
<dees-panel .title=${'Services'}>
|
||||
<div class="service-row">
|
||||
<div>
|
||||
<div class="service-name">Sway Compositor</div>
|
||||
${swayStatus?.error ? html`<div class="service-error">${swayStatus.error}</div>` : ''}
|
||||
</div>
|
||||
${this.renderStatusBadge(swayStatus)}
|
||||
</div>
|
||||
<div class="service-row">
|
||||
<div>
|
||||
<div class="service-name">Chromium Browser</div>
|
||||
${chromiumStatus?.error ? html`<div class="service-error">${chromiumStatus.error}</div>` : ''}
|
||||
</div>
|
||||
${this.renderStatusBadge(chromiumStatus)}
|
||||
</div>
|
||||
</dees-panel>
|
||||
|
||||
<dees-panel .title=${'System'}>
|
||||
<div class="info-grid">
|
||||
<div class="info-item">
|
||||
<span class="info-label">Hostname</span>
|
||||
<span class="info-value">${systemInfo?.hostname || '—'}</span>
|
||||
</div>
|
||||
<div class="info-item">
|
||||
<span class="info-label">CPU Model</span>
|
||||
<span class="info-value">${this.truncate(systemInfo?.cpu?.model || '—', 20)}</span>
|
||||
</div>
|
||||
<div class="info-item" style="grid-column: span 2;">
|
||||
<span class="info-label">GPU</span>
|
||||
<span class="info-value">${systemInfo?.gpu?.length ? systemInfo.gpu.map(g => g.name).join(', ') : 'None'}</span>
|
||||
</div>
|
||||
</div>
|
||||
</dees-panel>
|
||||
</div>
|
||||
|
||||
<!-- Actions -->
|
||||
<dees-panel .title=${'Actions'}>
|
||||
<div class="actions-row">
|
||||
<dees-button
|
||||
.type=${'default'}
|
||||
.text=${'Restart Browser'}
|
||||
.disabled=${this.loading}
|
||||
@click=${this.restartChromium}
|
||||
></dees-button>
|
||||
<dees-button
|
||||
.type=${'default'}
|
||||
.status=${'error'}
|
||||
.text=${'Reboot System'}
|
||||
.disabled=${this.loading}
|
||||
@click=${this.rebootSystem}
|
||||
></dees-button>
|
||||
${this.controlMessage ? html`
|
||||
<span class="message ${this.controlError ? 'error' : 'success'}">${this.controlMessage}</span>
|
||||
` : ''}
|
||||
</div>
|
||||
</dees-panel>
|
||||
</div>
|
||||
`;
|
||||
}
|
||||
|
||||
private renderStatusBadge(status: IServiceStatus): TemplateResult {
|
||||
const state = status?.state || 'stopped';
|
||||
let badgeType: 'default' | 'success' | 'warning' | 'error' = 'default';
|
||||
let label = 'Stopped';
|
||||
|
||||
if (state === 'running') {
|
||||
badgeType = 'success';
|
||||
label = 'Running';
|
||||
} else if (state === 'starting') {
|
||||
badgeType = 'warning';
|
||||
label = 'Starting';
|
||||
} else if (state === 'failed') {
|
||||
badgeType = 'error';
|
||||
label = 'Failed';
|
||||
}
|
||||
|
||||
return html`<dees-badge .type=${badgeType}>${label}</dees-badge>`;
|
||||
}
|
||||
|
||||
private truncate(str: string, len: number): string {
|
||||
return str.length > len ? str.substring(0, len) + '...' : str;
|
||||
}
|
||||
|
||||
private async restartChromium(): Promise<void> {
|
||||
this.loading = true;
|
||||
this.controlMessage = '';
|
||||
try {
|
||||
const response = await fetch('/api/restart-chromium', { method: 'POST' });
|
||||
const result = await response.json();
|
||||
this.controlMessage = result.message;
|
||||
this.controlError = !result.success;
|
||||
} catch (error) {
|
||||
this.controlMessage = `Error: ${error}`;
|
||||
this.controlError = true;
|
||||
} finally {
|
||||
this.loading = false;
|
||||
}
|
||||
}
|
||||
|
||||
private async rebootSystem(): Promise<void> {
|
||||
if (!confirm('Are you sure you want to reboot?')) return;
|
||||
|
||||
this.loading = true;
|
||||
this.controlMessage = '';
|
||||
try {
|
||||
const response = await fetch('/api/reboot', { method: 'POST' });
|
||||
const result = await response.json();
|
||||
this.controlMessage = result.message;
|
||||
this.controlError = !result.success;
|
||||
} catch (error) {
|
||||
this.controlMessage = `Error: ${error}`;
|
||||
this.controlError = true;
|
||||
} finally {
|
||||
this.loading = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
237
ecoos_daemon/ts_web/elements/ecoos-updates.ts
Normal file
@@ -0,0 +1,237 @@
|
||||
/**
|
||||
* EcoOS Updates View
|
||||
* Card-based update management
|
||||
*/
|
||||
|
||||
import {
|
||||
html,
|
||||
DeesElement,
|
||||
customElement,
|
||||
property,
|
||||
state,
|
||||
css,
|
||||
type TemplateResult,
|
||||
} from '@design.estate/dees-element';
|
||||
import { DeesButton, DeesPanel, DeesBadge } from '@design.estate/dees-catalog';
|
||||
|
||||
import { sharedStyles, formatAge } from '../styles/shared.js';
|
||||
import type { IUpdateInfo } from '../../ts_interfaces/updates.js';
|
||||
|
||||
@customElement('ecoos-updates')
|
||||
export class EcoosUpdates extends DeesElement {
|
||||
@property({ type: Object })
|
||||
public accessor updateInfo: IUpdateInfo | null = null;
|
||||
|
||||
@state()
|
||||
private accessor loading: boolean = false;
|
||||
|
||||
@state()
|
||||
private accessor message: string = '';
|
||||
|
||||
@state()
|
||||
private accessor messageError: boolean = false;
|
||||
|
||||
public static styles = [
|
||||
sharedStyles,
|
||||
css`
|
||||
:host {
|
||||
display: block;
|
||||
padding: 16px;
|
||||
}
|
||||
|
||||
.page {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 16px;
|
||||
}
|
||||
|
||||
.version-display {
|
||||
font-size: var(--text-2xl);
|
||||
font-weight: 600;
|
||||
font-family: 'SF Mono', monospace;
|
||||
margin-bottom: 8px;
|
||||
}
|
||||
|
||||
.last-check {
|
||||
font-size: var(--text-xs);
|
||||
color: var(--text-tertiary);
|
||||
margin-bottom: 12px;
|
||||
}
|
||||
|
||||
.banner-upgrade {
|
||||
background: hsla(217.2, 91.2%, 59.8%, 0.1);
|
||||
border-color: hsla(217.2, 91.2%, 59.8%, 0.3);
|
||||
}
|
||||
|
||||
.banner-content {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 8px;
|
||||
font-size: var(--text-sm);
|
||||
}
|
||||
|
||||
.banner-content strong {
|
||||
font-family: 'SF Mono', monospace;
|
||||
}
|
||||
|
||||
.update-row {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
padding: 10px 0;
|
||||
border-bottom: 1px solid var(--border);
|
||||
gap: 12px;
|
||||
}
|
||||
|
||||
.update-row:last-child {
|
||||
border-bottom: none;
|
||||
padding-bottom: 0;
|
||||
}
|
||||
|
||||
.update-row:first-child {
|
||||
padding-top: 0;
|
||||
}
|
||||
|
||||
.update-version {
|
||||
font-family: 'SF Mono', monospace;
|
||||
font-size: var(--text-sm);
|
||||
font-weight: 500;
|
||||
}
|
||||
|
||||
.update-age {
|
||||
font-size: var(--text-xs);
|
||||
color: var(--text-tertiary);
|
||||
flex: 1;
|
||||
}
|
||||
|
||||
.empty-text {
|
||||
font-size: var(--text-sm);
|
||||
color: var(--text-tertiary);
|
||||
padding: 8px 0;
|
||||
}
|
||||
|
||||
.message-bar {
|
||||
padding: 8px 12px;
|
||||
border-radius: 6px;
|
||||
font-size: var(--text-sm);
|
||||
}
|
||||
|
||||
.message-bar.success {
|
||||
background: hsla(142.1, 76.2%, 36.3%, 0.15);
|
||||
color: var(--success);
|
||||
}
|
||||
|
||||
.message-bar.error {
|
||||
background: hsla(0, 84.2%, 60.2%, 0.15);
|
||||
color: var(--error);
|
||||
}
|
||||
`,
|
||||
];
|
||||
|
||||
render(): TemplateResult {
|
||||
if (!this.updateInfo) {
|
||||
return html`<div class="empty">Loading...</div>`;
|
||||
}
|
||||
|
||||
const newerReleases = this.updateInfo.releases.filter(r => r.isNewer);
|
||||
const { autoUpgrade, lastCheck } = this.updateInfo;
|
||||
|
||||
return html`
|
||||
<div class="page">
|
||||
<!-- Current Version -->
|
||||
<dees-panel .title=${'Current Version'}>
|
||||
<div class="version-display">v${this.updateInfo.currentVersion}</div>
|
||||
${lastCheck ? html`<div class="last-check">Last check: ${new Date(lastCheck).toLocaleString()}</div>` : ''}
|
||||
<dees-button
|
||||
.type=${'default'}
|
||||
.text=${this.loading ? 'Checking...' : 'Check for Updates'}
|
||||
.disabled=${this.loading}
|
||||
@click=${this.checkForUpdates}
|
||||
></dees-button>
|
||||
</dees-panel>
|
||||
|
||||
<!-- Auto-upgrade Banner -->
|
||||
${autoUpgrade?.targetVersion ? html`
|
||||
<dees-panel .variant=${'outline'} class="banner-upgrade">
|
||||
<div class="banner-content">
|
||||
${autoUpgrade.waitingForStability
|
||||
? html`Auto-upgrade to <strong>v${autoUpgrade.targetVersion}</strong> in ${autoUpgrade.scheduledIn}`
|
||||
: html`Auto-upgrade to <strong>v${autoUpgrade.targetVersion}</strong> pending`
|
||||
}
|
||||
</div>
|
||||
</dees-panel>
|
||||
` : ''}
|
||||
|
||||
<!-- Available Updates -->
|
||||
<dees-panel .title=${'Available Updates'}>
|
||||
${newerReleases.length === 0
|
||||
? html`<div class="empty-text">You're up to date</div>`
|
||||
: newerReleases.map(r => html`
|
||||
<div class="update-row">
|
||||
<span class="update-version">v${r.version}</span>
|
||||
<span class="update-age">${formatAge(r.ageHours)}</span>
|
||||
<dees-button
|
||||
.type=${'default'}
|
||||
.status=${'success'}
|
||||
.text=${'Upgrade'}
|
||||
.disabled=${this.loading}
|
||||
@click=${() => this.upgradeToVersion(r.version)}
|
||||
></dees-button>
|
||||
</div>
|
||||
`)
|
||||
}
|
||||
</dees-panel>
|
||||
|
||||
<!-- Message -->
|
||||
${this.message ? html`
|
||||
<div class="message-bar ${this.messageError ? 'error' : 'success'}">${this.message}</div>
|
||||
` : ''}
|
||||
</div>
|
||||
`;
|
||||
}
|
||||
|
||||
private async checkForUpdates(): Promise<void> {
|
||||
this.loading = true;
|
||||
this.message = '';
|
||||
try {
|
||||
const response = await fetch('/api/updates/check', { method: 'POST' });
|
||||
const result = await response.json();
|
||||
this.updateInfo = result;
|
||||
this.dispatchEvent(new CustomEvent('updates-checked', { detail: result }));
|
||||
} catch (error) {
|
||||
console.error('Failed to check updates:', error);
|
||||
this.message = `Failed: ${error}`;
|
||||
this.messageError = true;
|
||||
} finally {
|
||||
this.loading = false;
|
||||
}
|
||||
}
|
||||
|
||||
private async upgradeToVersion(version: string): Promise<void> {
|
||||
if (!confirm(`Upgrade to v${version}?`)) return;
|
||||
|
||||
this.loading = true;
|
||||
this.message = '';
|
||||
try {
|
||||
const response = await fetch('/api/upgrade', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ version }),
|
||||
});
|
||||
const result = await response.json();
|
||||
if (result.success) {
|
||||
this.message = result.message;
|
||||
this.messageError = false;
|
||||
this.dispatchEvent(new CustomEvent('upgrade-started', { detail: result }));
|
||||
} else {
|
||||
this.message = `Failed: ${result.message}`;
|
||||
this.messageError = true;
|
||||
}
|
||||
} catch (error) {
|
||||
this.message = `Error: ${error}`;
|
||||
this.messageError = true;
|
||||
} finally {
|
||||
this.loading = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
15
ecoos_daemon/ts_web/index.ts
Normal file
@@ -0,0 +1,15 @@
|
||||
/**
|
||||
* EcoOS Daemon UI Entry Point
|
||||
* Bundles all components for the daemon UI
|
||||
*/
|
||||
|
||||
// Import all components to ensure they're registered
|
||||
import './elements/ecoos-app.js';
|
||||
import './elements/ecoos-overview.js';
|
||||
import './elements/ecoos-devices.js';
|
||||
import './elements/ecoos-displays.js';
|
||||
import './elements/ecoos-updates.js';
|
||||
import './elements/ecoos-logs.js';
|
||||
|
||||
// Export the main app component
|
||||
export { EcoosApp } from './elements/ecoos-app.js';
|
||||
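The HTML shell earlier in this changeset mounts <ecoos-app> and loads the bundle via a module script pointing at /app.js; this entry point is what such a bundle would be built from. The build step itself is not shown in the diff, so the following is only an assumed esbuild-based sketch; the output path and options are guesses.

// build.ts, illustrative bundling script, not included in this changeset.
import * as esbuild from 'esbuild';

await esbuild.build({
  entryPoints: ['ecoos_daemon/ts_web/index.ts'],
  bundle: true,
  format: 'esm',
  minify: true,
  sourcemap: true,
  outfile: 'ecoos_daemon/dist_web/app.js', // assumed path the daemon serves as /app.js
});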
440
ecoos_daemon/ts_web/styles/shared.ts
Normal file
@@ -0,0 +1,440 @@
|
||||
/**
|
||||
* EcoOS UI Design System
|
||||
* Based on dees-catalog design patterns
|
||||
*/
|
||||
|
||||
import { css } from '@design.estate/dees-element';
|
||||
|
||||
export const sharedStyles = css`
|
||||
:host {
|
||||
/* Colors - dees-catalog theme (HSL) */
|
||||
--bg: hsl(0 0% 3.9%);
|
||||
--bg-elevated: hsl(0 0% 7.8%);
|
||||
--bg-hover: hsl(0 0% 14.9%);
|
||||
--border: hsl(0 0% 14.9%);
|
||||
--border-hover: hsl(0 0% 20.9%);
|
||||
--text: hsl(0 0% 95%);
|
||||
--text-secondary: hsl(215 20.2% 55.1%);
|
||||
--text-tertiary: hsl(215 20.2% 45%);
|
||||
|
||||
/* Semantic colors */
|
||||
--accent: hsl(217.2 91.2% 59.8%);
|
||||
--success: hsl(142.1 76.2% 36.3%);
|
||||
--warning: hsl(45.4 93.4% 47.5%);
|
||||
--error: hsl(0 84.2% 60.2%);
|
||||
|
||||
/* Typography scale */
|
||||
--text-xs: 11px;
|
||||
--text-sm: 12px;
|
||||
--text-base: 13px;
|
||||
--text-lg: 15px;
|
||||
--text-xl: 18px;
|
||||
--text-2xl: 24px;
|
||||
|
||||
display: block;
|
||||
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
|
||||
font-size: var(--text-base);
|
||||
color: var(--text);
|
||||
line-height: 1.5;
|
||||
-webkit-font-smoothing: antialiased;
|
||||
}
|
||||
|
||||
/* Monospace utility */
|
||||
.mono {
|
||||
font-family: 'SF Mono', 'Fira Code', 'Consolas', monospace;
|
||||
}
|
||||
|
||||
/* Section - lightweight container */
|
||||
.section {
|
||||
padding: 12px 0;
|
||||
border-bottom: 1px solid var(--border);
|
||||
}
|
||||
|
||||
.section:last-child {
|
||||
border-bottom: none;
|
||||
}
|
||||
|
||||
.section-title {
|
||||
font-size: var(--text-xs);
|
||||
font-weight: 500;
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 0.05em;
|
||||
color: var(--text-tertiary);
|
||||
margin-bottom: 8px;
|
||||
}
|
||||
|
||||
/* Card - minimal styling */
|
||||
.card {
|
||||
background: var(--bg-elevated);
|
||||
border: 1px solid var(--border);
|
||||
border-radius: 6px;
|
||||
padding: 12px;
|
||||
}
|
||||
|
||||
/* Table styling */
|
||||
.table {
|
||||
width: 100%;
|
||||
border-collapse: collapse;
|
||||
font-size: var(--text-sm);
|
||||
}
|
||||
|
||||
.table th {
|
||||
text-align: left;
|
||||
font-size: var(--text-xs);
|
||||
font-weight: 500;
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 0.03em;
|
||||
color: var(--text-tertiary);
|
||||
padding: 6px 8px;
|
||||
border-bottom: 1px solid var(--border);
|
||||
}
|
||||
|
||||
.table td {
|
||||
padding: 8px;
|
||||
border-bottom: 1px solid var(--border);
|
||||
vertical-align: middle;
|
||||
}
|
||||
|
||||
.table tr:last-child td {
|
||||
border-bottom: none;
|
||||
}
|
||||
|
||||
.table tr:hover td {
|
||||
background: var(--bg-hover);
|
||||
}
|
||||
|
||||
.table .mono {
|
||||
font-size: var(--text-xs);
|
||||
color: var(--text-secondary);
|
||||
}
|
||||
|
||||
/* Status dot */
|
||||
.dot {
|
||||
display: inline-block;
|
||||
width: 6px;
|
||||
height: 6px;
|
||||
border-radius: 50%;
|
||||
background: var(--text-tertiary);
|
||||
}
|
||||
|
||||
.dot.success { background: var(--success); }
|
||||
.dot.warning { background: var(--warning); }
|
||||
.dot.error { background: var(--error); }
|
||||
.dot.accent { background: var(--accent); }
|
||||
|
||||
.dot.pulse {
|
||||
animation: pulse 2s ease-in-out infinite;
|
||||
}
|
||||
|
||||
@keyframes pulse {
|
||||
0%, 100% { opacity: 1; }
|
||||
50% { opacity: 0.4; }
|
||||
}
|
||||
|
||||
/* Status with dot and text */
|
||||
.status {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
gap: 6px;
|
||||
font-size: var(--text-sm);
|
||||
}
|
||||
|
||||
.status.success { color: var(--success); }
|
||||
.status.warning { color: var(--warning); }
|
||||
.status.error { color: var(--error); }
|
||||
|
||||
/* Progress bar - thin */
|
||||
.progress {
|
||||
height: 3px;
|
||||
background: var(--border);
|
||||
border-radius: 1.5px;
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
.progress-bar {
|
||||
height: 100%;
|
||||
background: var(--accent);
|
||||
transition: width 300ms ease;
|
||||
}
|
||||
|
||||
.progress-bar.success { background: var(--success); }
|
||||
.progress-bar.warning { background: var(--warning); }
|
||||
.progress-bar.error { background: var(--error); }
|
||||
|
||||
/* Badge - compact */
|
||||
.badge {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
font-size: var(--text-xs);
|
||||
font-weight: 500;
|
||||
padding: 2px 6px;
|
||||
border-radius: 3px;
|
||||
background: var(--border);
|
||||
color: var(--text-secondary);
|
||||
}
|
||||
|
||||
.badge.primary {
|
||||
background: var(--accent);
|
||||
color: #fff;
|
||||
}
|
||||
|
||||
.badge.success {
|
||||
background: rgba(12, 206, 107, 0.15);
|
||||
color: var(--success);
|
||||
}
|
||||
|
||||
.badge.warning {
|
||||
background: rgba(245, 166, 35, 0.15);
|
||||
color: var(--warning);
|
||||
}
|
||||
|
||||
.badge.error {
|
||||
background: rgba(238, 0, 0, 0.15);
|
||||
color: var(--error);
|
||||
}
|
||||
|
||||
/* Data row - key value pair */
|
||||
.data-row {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
padding: 6px 0;
|
||||
}
|
||||
|
||||
.data-row + .data-row {
|
||||
border-top: 1px solid var(--border);
|
||||
}
|
||||
|
||||
.data-label {
|
||||
font-size: var(--text-sm);
|
||||
color: var(--text-secondary);
|
||||
}
|
||||
|
||||
.data-value {
|
||||
font-family: 'SF Mono', monospace;
|
||||
font-size: var(--text-sm);
|
||||
color: var(--text);
|
||||
}
|
||||
|
||||
/* Stat - large value display */
|
||||
.stat {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
}
|
||||
|
||||
.stat-label {
|
||||
font-size: var(--text-xs);
|
||||
color: var(--text-tertiary);
|
||||
margin-bottom: 2px;
|
||||
}
|
||||
|
||||
.stat-value {
|
||||
font-size: var(--text-2xl);
|
||||
font-weight: 600;
|
||||
font-family: 'SF Mono', monospace;
|
||||
letter-spacing: -0.02em;
|
||||
color: var(--text);
|
||||
}
|
||||
|
||||
.stat-value.sm {
|
||||
font-size: var(--text-lg);
|
||||
}
|
||||
|
||||
/* Grid layouts */
|
||||
.grid {
|
||||
display: grid;
|
||||
gap: 12px;
|
||||
}
|
||||
|
||||
.grid-2 {
|
||||
grid-template-columns: repeat(2, 1fr);
|
||||
}
|
||||
|
||||
.grid-3 {
|
||||
grid-template-columns: repeat(3, 1fr);
|
||||
}
|
||||
|
||||
.grid-auto {
|
||||
grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
|
||||
}
|
||||
|
||||
/* Flex utilities */
|
||||
.flex {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
.flex-between {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
}
|
||||
|
||||
.gap-4 { gap: 4px; }
|
||||
.gap-6 { gap: 6px; }
|
||||
.gap-8 { gap: 8px; }
|
||||
.gap-12 { gap: 12px; }
|
||||
|
||||
/* Tabs - underline style */
|
||||
.tabs {
|
||||
display: flex;
|
||||
gap: 0;
|
||||
border-bottom: 1px solid var(--border);
|
||||
}
|
||||
|
||||
.tab {
|
||||
padding: 8px 12px;
|
||||
font-size: var(--text-sm);
|
||||
font-weight: 500;
|
||||
color: var(--text-tertiary);
|
||||
cursor: pointer;
|
||||
border-bottom: 2px solid transparent;
|
||||
margin-bottom: -1px;
|
||||
transition: color 150ms ease;
|
||||
}
|
||||
|
||||
.tab:hover {
|
||||
color: var(--text);
|
||||
}
|
||||
|
||||
.tab.active {
|
||||
color: var(--text);
|
||||
border-bottom-color: var(--text);
|
||||
}
|
||||
|
||||
/* Empty state */
|
||||
.empty {
|
||||
padding: 24px;
|
||||
text-align: center;
|
||||
color: var(--text-tertiary);
|
||||
font-size: var(--text-sm);
|
||||
}
|
||||
|
||||
/* Actions row */
|
||||
.actions {
|
||||
display: flex;
|
||||
gap: 8px;
|
||||
margin-top: 12px;
|
||||
}
|
||||
|
||||
/* Collapsible details */
|
||||
details summary {
|
||||
cursor: pointer;
|
||||
padding: 8px 0;
|
||||
font-size: var(--text-sm);
|
||||
color: var(--text-secondary);
|
||||
list-style: none;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 6px;
|
||||
}
|
||||
|
||||
details summary::-webkit-details-marker {
|
||||
display: none;
|
||||
}
|
||||
|
||||
details summary::before {
|
||||
content: '▶';
|
||||
font-size: 8px;
|
||||
transition: transform 150ms ease;
|
||||
}
|
||||
|
||||
details[open] summary::before {
|
||||
transform: rotate(90deg);
|
||||
}
|
||||
|
||||
details summary:hover {
|
||||
color: var(--text);
|
||||
}
|
||||
|
||||
.details-content {
|
||||
padding: 8px 0 8px 14px;
|
||||
}
|
||||
|
||||
/* Alert/Banner - slim */
|
||||
.banner {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 8px;
|
||||
padding: 8px 12px;
|
||||
font-size: var(--text-sm);
|
||||
border-radius: 4px;
|
||||
background: rgba(0, 112, 243, 0.1);
|
||||
border: 1px solid rgba(0, 112, 243, 0.2);
|
||||
color: var(--text);
|
||||
}
|
||||
|
||||
.banner.success {
|
||||
background: rgba(12, 206, 107, 0.1);
|
||||
border-color: rgba(12, 206, 107, 0.2);
|
||||
}
|
||||
|
||||
.banner.warning {
|
||||
background: rgba(245, 166, 35, 0.1);
|
||||
border-color: rgba(245, 166, 35, 0.2);
|
||||
}
|
||||
|
||||
.banner.error {
|
||||
background: rgba(238, 0, 0, 0.1);
|
||||
border-color: rgba(238, 0, 0, 0.2);
|
||||
}
|
||||
|
||||
/* Scrollbar - minimal */
|
||||
::-webkit-scrollbar {
|
||||
width: 6px;
|
||||
height: 6px;
|
||||
}
|
||||
|
||||
::-webkit-scrollbar-track {
|
||||
background: transparent;
|
||||
}
|
||||
|
||||
::-webkit-scrollbar-thumb {
|
||||
background: var(--border);
|
||||
border-radius: 3px;
|
||||
}
|
||||
|
||||
::-webkit-scrollbar-thumb:hover {
|
||||
background: var(--border-hover);
|
||||
}
|
||||
|
||||
/* Text utilities */
|
||||
.text-xs { font-size: var(--text-xs); }
|
||||
.text-sm { font-size: var(--text-sm); }
|
||||
.text-base { font-size: var(--text-base); }
|
||||
.text-lg { font-size: var(--text-lg); }
|
||||
.text-secondary { color: var(--text-secondary); }
|
||||
.text-tertiary { color: var(--text-tertiary); }
|
||||
`;
|
||||
|
||||
/**
|
||||
* Format bytes to human readable string
|
||||
*/
|
||||
export function formatBytes(bytes: number): string {
|
||||
if (bytes === 0) return '0 B';
|
||||
const k = 1024;
|
||||
const sizes = ['B', 'KB', 'MB', 'GB', 'TB'];
|
||||
const i = Math.floor(Math.log(bytes) / Math.log(k));
|
||||
return parseFloat((bytes / Math.pow(k, i)).toFixed(1)) + ' ' + sizes[i];
|
||||
}
|
||||
|
||||
/**
|
||||
* Format uptime seconds to human readable string
|
||||
*/
|
||||
export function formatUptime(seconds: number): string {
|
||||
const days = Math.floor(seconds / 86400);
|
||||
const hours = Math.floor((seconds % 86400) / 3600);
|
||||
const mins = Math.floor((seconds % 3600) / 60);
|
||||
if (days > 0) return `${days}d ${hours}h ${mins}m`;
|
||||
if (hours > 0) return `${hours}h ${mins}m`;
|
||||
return `${mins}m`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Format age in hours to human readable string
|
||||
*/
|
||||
export function formatAge(hours: number): string {
|
||||
if (hours < 1) return `${Math.round(hours * 60)}m ago`;
|
||||
if (hours < 24) return `${Math.round(hours)}h ago`;
|
||||
return `${Math.round(hours / 24)}d ago`;
|
||||
}
|
||||
455
ecoos_daemon/vdagent/eco-vdagent.py
Normal file
@@ -0,0 +1,455 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
EcoOS Wayland Display Agent (eco-vdagent)
|
||||
|
||||
A Wayland-native replacement for spice-vdagent that uses swaymsg/wlr-output-management
|
||||
instead of xrandr to configure displays.
|
||||
|
||||
Listens on the SPICE virtio-serial port for VD_AGENT_MONITORS_CONFIG messages
|
||||
and applies the configuration to Sway outputs.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import struct
|
||||
import subprocess
|
||||
import json
|
||||
import time
|
||||
import signal
|
||||
import logging
|
||||
from pathlib import Path
|
||||
|
||||
# Configure logging
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format='%(asctime)s - eco-vdagent - %(levelname)s - %(message)s'
|
||||
)
|
||||
log = logging.getLogger('eco-vdagent')
|
||||
|
||||
# SPICE VDAgent Protocol Constants
|
||||
VD_AGENT_PROTOCOL = 1
|
||||
|
||||
# Message types
|
||||
VD_AGENT_MOUSE_STATE = 1
|
||||
VD_AGENT_MONITORS_CONFIG = 2
|
||||
VD_AGENT_REPLY = 3
|
||||
VD_AGENT_CLIPBOARD = 4
|
||||
VD_AGENT_DISPLAY_CONFIG = 5
|
||||
VD_AGENT_ANNOUNCE_CAPABILITIES = 6
|
||||
VD_AGENT_CLIPBOARD_GRAB = 7
|
||||
VD_AGENT_CLIPBOARD_REQUEST = 8
|
||||
VD_AGENT_CLIPBOARD_RELEASE = 9
|
||||
VD_AGENT_FILE_XFER_START = 10
|
||||
VD_AGENT_FILE_XFER_STATUS = 11
|
||||
VD_AGENT_FILE_XFER_DATA = 12
|
||||
VD_AGENT_CLIENT_DISCONNECTED = 13
|
||||
VD_AGENT_MAX_CLIPBOARD = 14
|
||||
VD_AGENT_AUDIO_VOLUME_SYNC = 15
|
||||
VD_AGENT_GRAPHICS_DEVICE_INFO = 16
|
||||
|
||||
# Reply error codes
|
||||
VD_AGENT_SUCCESS = 1
|
||||
VD_AGENT_ERROR = 2
|
||||
|
||||
# Capability bits
|
||||
VD_AGENT_CAP_MOUSE_STATE = 0
|
||||
VD_AGENT_CAP_MONITORS_CONFIG = 1
|
||||
VD_AGENT_CAP_REPLY = 2
|
||||
VD_AGENT_CAP_CLIPBOARD = 3
|
||||
VD_AGENT_CAP_DISPLAY_CONFIG = 4
|
||||
VD_AGENT_CAP_CLIPBOARD_BY_DEMAND = 5
|
||||
VD_AGENT_CAP_CLIPBOARD_SELECTION = 6
|
||||
VD_AGENT_CAP_SPARSE_MONITORS_CONFIG = 7
|
||||
VD_AGENT_CAP_GUEST_LINEEND_LF = 8
|
||||
VD_AGENT_CAP_GUEST_LINEEND_CRLF = 9
|
||||
VD_AGENT_CAP_MAX_CLIPBOARD = 10
|
||||
VD_AGENT_CAP_AUDIO_VOLUME_SYNC = 11
|
||||
VD_AGENT_CAP_MONITORS_CONFIG_POSITION = 12
|
||||
VD_AGENT_CAP_FILE_XFER_DISABLED = 13
|
||||
VD_AGENT_CAP_FILE_XFER_DETAILED_ERRORS = 14
|
||||
VD_AGENT_CAP_GRAPHICS_DEVICE_INFO = 15
|
||||
VD_AGENT_CAP_CLIPBOARD_NO_RELEASE_ON_REGRAB = 16
|
||||
VD_AGENT_CAP_CLIPBOARD_GRAB_SERIAL = 17
|
||||
|
||||
# Virtio serial port path
|
||||
VIRTIO_PORT = '/dev/virtio-ports/com.redhat.spice.0'
|
||||
|
||||
# VDI Chunk header: port(4) + size(4) = 8 bytes
|
||||
VDI_CHUNK_HEADER_SIZE = 8
|
||||
VDI_CHUNK_HEADER_FMT = '<II' # port, size
|
||||
|
||||
# VDI Port constants
|
||||
VDP_CLIENT_PORT = 1
|
||||
VDP_SERVER_PORT = 2
|
||||
|
||||
# VDAgentMessage header: protocol(4) + type(4) + opaque(8) + size(4) = 20 bytes
|
||||
VDAGENT_MSG_HEADER_SIZE = 20
|
||||
VDAGENT_MSG_HEADER_FMT = '<IIQI' # little-endian: uint32, uint32, uint64, uint32
|
||||
|
||||
# VDAgentMonitorsConfig header: num_of_monitors(4) + flags(4) = 8 bytes
|
||||
MONITORS_CONFIG_HEADER_SIZE = 8
|
||||
MONITORS_CONFIG_HEADER_FMT = '<II'
|
||||
|
||||
# VDAgentMonConfig: height(4) + width(4) + depth(4) + x(4) + y(4) = 20 bytes
|
||||
MON_CONFIG_SIZE = 20
|
||||
MON_CONFIG_FMT = '<IIIii' # height, width, depth, x, y (x,y are signed)
|
||||
|
||||
|
||||
class EcoVDAgent:
|
||||
def __init__(self):
|
||||
self.port_fd = None
|
||||
self.running = True
|
||||
self.sway_socket = None
|
||||
|
||||
def find_sway_socket(self):
|
||||
"""Find the Sway IPC socket"""
|
||||
# Check environment first
|
||||
if 'SWAYSOCK' in os.environ:
|
||||
return os.environ['SWAYSOCK']
|
||||
|
||||
# Search common locations
|
||||
runtime_dir = os.environ.get('XDG_RUNTIME_DIR', '/run/user/1000')
|
||||
|
||||
# Try to find sway socket - check fixed path first, then glob patterns
|
||||
import glob
|
||||
|
||||
# Check for fixed socket path first (set by eco-daemon)
|
||||
fixed_socket = f'{runtime_dir}/sway-ipc.sock'
|
||||
if os.path.exists(fixed_socket):
|
||||
return fixed_socket
|
||||
|
||||
# Fall back to glob patterns for standard Sway socket naming
|
||||
for pattern in [f'{runtime_dir}/sway-ipc.*.sock', '/run/user/*/sway-ipc.*.sock']:
|
||||
sockets = glob.glob(pattern)
|
||||
if sockets:
|
||||
return sockets[0]
|
||||
|
||||
return None
|
||||
|
||||
def run_swaymsg(self, *args):
|
||||
"""Run swaymsg command"""
|
||||
cmd = ['swaymsg']
|
||||
if self.sway_socket:
|
||||
cmd.extend(['-s', self.sway_socket])
|
||||
cmd.extend(args)
|
||||
|
||||
try:
|
||||
result = subprocess.run(cmd, capture_output=True, text=True, timeout=5)
|
||||
if result.returncode != 0:
|
||||
log.warning(f"swaymsg failed: {result.stderr}")
|
||||
return result.returncode == 0, result.stdout
|
||||
except Exception as e:
|
||||
log.error(f"Failed to run swaymsg: {e}")
|
||||
return False, ""
|
||||
|
||||
def get_outputs(self):
|
||||
"""Get current Sway outputs"""
|
||||
success, output = self.run_swaymsg('-t', 'get_outputs', '-r')
|
||||
if success:
|
||||
try:
|
||||
return json.loads(output)
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
return []
|
||||
|
||||
def configure_output(self, name, width, height, x, y, enable=True):
|
||||
"""Configure a Sway output"""
|
||||
if enable:
|
||||
# Try to enable and position the output
|
||||
# First, try setting mode
|
||||
mode_cmd = f'output {name} mode {width}x{height} position {x} {y} enable'
|
||||
success, _ = self.run_swaymsg(mode_cmd)
|
||||
if not success:
|
||||
# Try without explicit mode (use preferred)
|
||||
pos_cmd = f'output {name} position {x} {y} enable'
|
||||
success, _ = self.run_swaymsg(pos_cmd)
|
||||
return success
|
||||
else:
|
||||
return self.run_swaymsg(f'output {name} disable')[0]
|
||||
|
||||
def apply_monitors_config(self, monitors):
|
||||
"""Apply monitor configuration to Sway outputs"""
|
||||
log.info(f"Applying configuration for {len(monitors)} monitors")
|
||||
|
||||
# Get current outputs
|
||||
outputs = self.get_outputs()
|
||||
output_names = [o.get('name') for o in outputs]
|
||||
log.info(f"Available outputs: {output_names}")
|
||||
|
||||
# Sort monitors by x position to match with outputs
|
||||
monitors_sorted = sorted(enumerate(monitors), key=lambda m: m[1]['x'])
|
||||
|
||||
# Match monitors to outputs
|
||||
for i, (mon_idx, mon) in enumerate(monitors_sorted):
|
||||
if i < len(output_names):
|
||||
name = output_names[i]
|
||||
log.info(f"Configuring {name}: {mon['width']}x{mon['height']} at ({mon['x']}, {mon['y']})")
|
||||
self.configure_output(
|
||||
name,
|
||||
mon['width'],
|
||||
mon['height'],
|
||||
mon['x'],
|
||||
mon['y'],
|
||||
enable=True
|
||||
)
|
||||
else:
|
||||
log.warning(f"No output available for monitor {mon_idx}")
|
||||
|
||||
# Disable extra outputs
|
||||
for i in range(len(monitors), len(output_names)):
|
||||
name = output_names[i]
|
||||
log.info(f"Disabling unused output: {name}")
|
||||
self.configure_output(name, 0, 0, 0, 0, enable=False)
|
||||
|
||||
def parse_monitors_config(self, data):
|
||||
"""Parse VD_AGENT_MONITORS_CONFIG message"""
|
||||
if len(data) < MONITORS_CONFIG_HEADER_SIZE:
|
||||
log.error("Monitors config data too short")
|
||||
return None
|
||||
|
||||
num_monitors, flags = struct.unpack(MONITORS_CONFIG_HEADER_FMT, data[:MONITORS_CONFIG_HEADER_SIZE])
|
||||
log.info(f"Monitors config: {num_monitors} monitors, flags={flags}")
|
||||
|
||||
monitors = []
|
||||
offset = MONITORS_CONFIG_HEADER_SIZE
|
||||
|
||||
for i in range(num_monitors):
|
||||
if offset + MON_CONFIG_SIZE > len(data):
|
||||
log.error(f"Truncated monitor config at index {i}")
|
||||
break
|
||||
|
||||
height, width, depth, x, y = struct.unpack(
|
||||
MON_CONFIG_FMT,
|
||||
data[offset:offset + MON_CONFIG_SIZE]
|
||||
)
|
||||
|
||||
monitors.append({
|
||||
'width': width,
|
||||
'height': height,
|
||||
'depth': depth,
|
||||
'x': x,
|
||||
'y': y
|
||||
})
|
||||
log.info(f" Monitor {i}: {width}x{height}+{x}+{y} depth={depth}")
|
||||
offset += MON_CONFIG_SIZE
|
||||
|
||||
return monitors
|
||||
|
||||
def send_reply(self, msg_type, error_code):
|
||||
"""Send VD_AGENT_REPLY message"""
|
||||
# Reply data: type(4) + error(4) = 8 bytes
|
||||
reply_data = struct.pack('<II', msg_type, error_code)
|
||||
|
||||
if self.send_message(VD_AGENT_REPLY, reply_data):
|
||||
log.debug(f"Sent reply for type {msg_type}: {'success' if error_code == VD_AGENT_SUCCESS else 'error'}")
|
||||
else:
|
||||
log.error(f"Failed to send reply for type {msg_type}")
|
||||
|
||||
def send_message(self, msg_type, data):
|
||||
"""Send a VDAgent message with proper chunk header"""
|
||||
if not self.port_fd:
|
||||
return False
|
||||
|
||||
# Build VDAgentMessage header
|
||||
msg_header = struct.pack(
|
||||
VDAGENT_MSG_HEADER_FMT,
|
||||
VD_AGENT_PROTOCOL,
|
||||
msg_type,
|
||||
0, # opaque
|
||||
len(data)
|
||||
)
|
||||
|
||||
# Full message = header + data
|
||||
full_msg = msg_header + data
|
||||
|
||||
# Build VDI chunk header (port=SERVER, size=message size)
|
||||
chunk_header = struct.pack(
|
||||
VDI_CHUNK_HEADER_FMT,
|
||||
VDP_SERVER_PORT,
|
||||
len(full_msg)
|
||||
)
|
||||
|
||||
# Retry writes with EAGAIN handling (non-blocking fd)
|
||||
message = chunk_header + full_msg
|
||||
retries = 10
|
||||
while retries > 0:
|
||||
try:
|
||||
os.write(self.port_fd, message)
|
||||
return True
|
||||
except OSError as e:
|
||||
if e.errno == 11: # EAGAIN - resource temporarily unavailable
|
||||
retries -= 1
|
||||
time.sleep(0.1)
|
||||
continue
|
||||
log.error(f"Failed to send message type {msg_type}: {e}")
|
||||
return False
|
||||
log.error(f"Failed to send message type {msg_type}: EAGAIN after retries")
|
||||
return False
|
||||
|
||||
def announce_capabilities(self):
|
||||
"""Send VD_AGENT_ANNOUNCE_CAPABILITIES to register with SPICE server"""
|
||||
# Build capability bits - we support monitors config
|
||||
caps = 0
|
||||
caps |= (1 << VD_AGENT_CAP_MONITORS_CONFIG)
|
||||
caps |= (1 << VD_AGENT_CAP_REPLY)
|
||||
caps |= (1 << VD_AGENT_CAP_SPARSE_MONITORS_CONFIG)
|
||||
caps |= (1 << VD_AGENT_CAP_MONITORS_CONFIG_POSITION)
|
||||
|
||||
# VDAgentAnnounceCapabilities: request(4) + caps(4) = 8 bytes
|
||||
# request=1 means we want the server to send us its capabilities
|
||||
announce_data = struct.pack('<II', 1, caps)
|
||||
|
||||
if self.send_message(VD_AGENT_ANNOUNCE_CAPABILITIES, announce_data):
|
||||
log.info("Announced capabilities to SPICE server")
|
||||
else:
|
||||
log.error("Failed to announce capabilities")
|
||||
|
||||
def handle_message(self, msg_type, data):
|
||||
"""Handle a VDAgent message"""
|
||||
if msg_type == VD_AGENT_MONITORS_CONFIG:
|
||||
log.info("Received VD_AGENT_MONITORS_CONFIG")
|
||||
monitors = self.parse_monitors_config(data)
|
||||
if monitors:
|
||||
self.apply_monitors_config(monitors)
|
||||
self.send_reply(VD_AGENT_MONITORS_CONFIG, VD_AGENT_SUCCESS)
|
||||
else:
|
||||
self.send_reply(VD_AGENT_MONITORS_CONFIG, VD_AGENT_ERROR)
|
||||
|
||||
elif msg_type == VD_AGENT_ANNOUNCE_CAPABILITIES:
|
||||
log.info("Received VD_AGENT_ANNOUNCE_CAPABILITIES")
|
||||
# We could respond with our capabilities here
|
||||
# For now, just acknowledge
|
||||
|
||||
elif msg_type == VD_AGENT_DISPLAY_CONFIG:
|
||||
log.info("Received VD_AGENT_DISPLAY_CONFIG")
|
||||
# Display config for disabling client display changes
|
||||
|
||||
elif msg_type == VD_AGENT_CLIENT_DISCONNECTED:
|
||||
log.info("Client disconnected")
|
||||
|
||||
else:
|
||||
log.debug(f"Unhandled message type: {msg_type}")
|
||||
|
||||
def read_message(self):
|
||||
"""Read a single VDAgent message from the port (with chunk header)"""
|
||||
# Read VDI chunk header first
|
||||
try:
|
||||
chunk_header_data = os.read(self.port_fd, VDI_CHUNK_HEADER_SIZE)
|
||||
except OSError as e:
|
||||
if e.errno == 11: # EAGAIN
|
||||
return None
|
||||
raise
|
||||
|
||||
if len(chunk_header_data) < VDI_CHUNK_HEADER_SIZE:
|
||||
if len(chunk_header_data) == 0:
|
||||
return None
|
||||
log.warning(f"Short chunk header read: {len(chunk_header_data)} bytes")
|
||||
return None
|
||||
|
||||
port, chunk_size = struct.unpack(VDI_CHUNK_HEADER_FMT, chunk_header_data)
|
||||
log.debug(f"Chunk header: port={port}, size={chunk_size}")
|
||||
|
||||
if chunk_size < VDAGENT_MSG_HEADER_SIZE:
|
||||
log.warning(f"Chunk size too small: {chunk_size}")
|
||||
return None
|
||||
|
||||
# Read VDAgent message header
|
||||
try:
|
||||
header_data = os.read(self.port_fd, VDAGENT_MSG_HEADER_SIZE)
|
||||
except OSError as e:
|
||||
if e.errno == 11: # EAGAIN
|
||||
return None
|
||||
raise
|
||||
|
||||
if len(header_data) < VDAGENT_MSG_HEADER_SIZE:
|
||||
log.warning(f"Short message header read: {len(header_data)} bytes")
|
||||
return None
|
||||
|
||||
protocol, msg_type, opaque, size = struct.unpack(VDAGENT_MSG_HEADER_FMT, header_data)
|
||||
|
||||
if protocol != VD_AGENT_PROTOCOL:
|
||||
log.warning(f"Unknown protocol: {protocol}")
|
||||
return None
|
||||
|
||||
# Read message data
|
||||
data = b''
|
||||
while len(data) < size:
|
||||
try:
|
||||
chunk = os.read(self.port_fd, size - len(data))
|
||||
if not chunk:
|
||||
break
|
||||
data += chunk
|
||||
except OSError as e:
|
||||
if e.errno == 11: # EAGAIN
|
||||
time.sleep(0.01)
|
||||
continue
|
||||
raise
|
||||
|
||||
return msg_type, data
|
||||
|
||||
def signal_handler(self, signum, frame):
|
||||
"""Handle shutdown signals"""
|
||||
log.info(f"Received signal {signum}, shutting down...")
|
||||
self.running = False
|
||||
|
||||
def run(self):
|
||||
"""Main loop"""
|
||||
# Set up signal handlers
|
||||
signal.signal(signal.SIGTERM, self.signal_handler)
|
||||
signal.signal(signal.SIGINT, self.signal_handler)
|
||||
|
||||
# Find Sway socket
|
||||
self.sway_socket = self.find_sway_socket()
|
||||
if self.sway_socket:
|
||||
log.info(f"Using Sway socket: {self.sway_socket}")
|
||||
else:
|
||||
log.warning("No Sway socket found, will retry...")
|
||||
|
||||
# Wait for virtio port
|
||||
log.info(f"Waiting for virtio port: {VIRTIO_PORT}")
|
||||
while self.running and not Path(VIRTIO_PORT).exists():
|
||||
time.sleep(1)
|
||||
|
||||
if not self.running:
|
||||
return
|
||||
|
||||
log.info("Opening virtio port...")
|
||||
try:
|
||||
self.port_fd = os.open(VIRTIO_PORT, os.O_RDWR | os.O_NONBLOCK)
|
||||
except OSError as e:
|
||||
log.error(f"Failed to open virtio port: {e}")
|
||||
return
|
||||
|
||||
log.info("eco-vdagent started, announcing capabilities...")
|
||||
|
||||
# Announce our capabilities to the SPICE server
|
||||
self.announce_capabilities()
|
||||
|
||||
log.info("Listening for SPICE agent messages...")
|
||||
|
||||
# Main loop
|
||||
while self.running:
|
||||
try:
|
||||
# Try to find Sway socket if not found yet
|
||||
if not self.sway_socket:
|
||||
self.sway_socket = self.find_sway_socket()
|
||||
|
||||
result = self.read_message()
|
||||
if result:
|
||||
msg_type, data = result
|
||||
self.handle_message(msg_type, data)
|
||||
else:
|
||||
time.sleep(0.1)
|
||||
except Exception as e:
|
||||
log.error(f"Error in main loop: {e}")
|
||||
time.sleep(1)
|
||||
|
||||
if self.port_fd:
|
||||
os.close(self.port_fd)
|
||||
|
||||
log.info("eco-vdagent stopped")
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
agent = EcoVDAgent()
|
||||
agent.run()
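
Editor's note: the following is an illustrative sketch, not part of this commit. It shows how a VD_AGENT_MONITORS_CONFIG message is framed on the virtio-serial port, using the struct layouts defined in eco-vdagent.py above; the monitor geometry values are hypothetical.

import struct

VDI_CHUNK_HEADER_FMT = '<II'        # port, size
VDAGENT_MSG_HEADER_FMT = '<IIQI'    # protocol, type, opaque, size
MONITORS_CONFIG_HEADER_FMT = '<II'  # num_of_monitors, flags
MON_CONFIG_FMT = '<IIIii'           # height, width, depth, x, y

VD_AGENT_PROTOCOL = 1
VD_AGENT_MONITORS_CONFIG = 2
VDP_CLIENT_PORT = 1

# Two side-by-side monitors (hypothetical geometry): (height, width, depth, x, y)
monitors = [(1080, 1920, 32, 0, 0), (1080, 1920, 32, 1920, 0)]

# Innermost layer: VDAgentMonitorsConfig header, then one VDAgentMonConfig per monitor
payload = struct.pack(MONITORS_CONFIG_HEADER_FMT, len(monitors), 0)
for height, width, depth, x, y in monitors:
    payload += struct.pack(MON_CONFIG_FMT, height, width, depth, x, y)

# Middle layer: 20-byte VDAgentMessage header (protocol, type, opaque, size)
msg = struct.pack(VDAGENT_MSG_HEADER_FMT, VD_AGENT_PROTOCOL,
                  VD_AGENT_MONITORS_CONFIG, 0, len(payload)) + payload

# Outermost layer: 8-byte VDI chunk header (port, size) written to the virtio port
wire = struct.pack(VDI_CHUNK_HEADER_FMT, VDP_CLIENT_PORT, len(msg)) + msg

# read_message() strips the chunk and message headers in this order, and
# parse_monitors_config() then decodes the payload back into width/height/x/y dicts.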
|
||||
@@ -1,33 +1,52 @@
|
||||
# EcoOS ISO Builder
|
||||
# Build from eco_os directory:
|
||||
# docker build -t ecoos-builder -f isobuild/Dockerfile .
|
||||
# docker build --build-arg TARGET_ARCH=amd64 -t ecoos-builder -f isobuild/Dockerfile .
|
||||
# docker run --privileged -v $(pwd)/isobuild/output:/output ecoos-builder
|
||||
#
|
||||
# Supported architectures: amd64, arm64, rpi
|
||||
|
||||
FROM ubuntu:24.04
|
||||
|
||||
ARG TARGET_ARCH=amd64
|
||||
ENV TARGET_ARCH=${TARGET_ARCH}
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
# Install build dependencies
|
||||
# Install common build dependencies
|
||||
RUN apt-get update && apt-get install -y \
|
||||
live-build \
|
||||
debootstrap \
|
||||
xorriso \
|
||||
squashfs-tools \
|
||||
grub-efi-amd64-bin \
|
||||
grub-efi-amd64-signed \
|
||||
grub-pc-bin \
|
||||
shim-signed \
|
||||
mtools \
|
||||
dosfstools \
|
||||
syslinux-utils \
|
||||
syslinux \
|
||||
syslinux-common \
|
||||
isolinux \
|
||||
curl \
|
||||
unzip \
|
||||
git \
|
||||
&& rm -rf /var/lib/apt/lists/* \
|
||||
&& ln -sf /usr/bin/isohybrid /usr/local/bin/isohybrid 2>/dev/null || true
|
||||
parted \
|
||||
fdisk \
|
||||
e2fsprogs \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install architecture-specific packages
|
||||
RUN apt-get update && \
|
||||
if [ "$TARGET_ARCH" = "amd64" ]; then \
|
||||
apt-get install -y \
|
||||
grub-efi-amd64-bin \
|
||||
grub-efi-amd64-signed \
|
||||
grub-pc-bin \
|
||||
shim-signed \
|
||||
syslinux-utils \
|
||||
syslinux \
|
||||
syslinux-common \
|
||||
isolinux; \
|
||||
elif [ "$TARGET_ARCH" = "arm64" ]; then \
|
||||
apt-get install -y \
|
||||
grub-efi-arm64-bin \
|
||||
grub-efi-arm64-signed; \
|
||||
elif [ "$TARGET_ARCH" = "rpi" ]; then \
|
||||
apt-get install -y \
|
||||
grub-efi-arm64-bin; \
|
||||
fi && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install Deno
|
||||
RUN curl -fsSL https://deno.land/install.sh | DENO_INSTALL=/usr/local sh
|
||||
@@ -45,16 +64,29 @@ COPY isobuild/config/hooks/ /build/hooks/
|
||||
# Copy daemon source (for bundling)
|
||||
COPY ecoos_daemon/ /daemon/
|
||||
|
||||
# Bundle the daemon
|
||||
RUN cd /daemon && deno compile --allow-all --output /build/daemon-bundle/eco-daemon mod.ts
|
||||
# Bundle the daemon - cross-compile for target architecture
|
||||
RUN cd /daemon && \
|
||||
if [ "$TARGET_ARCH" = "amd64" ]; then \
|
||||
deno compile --allow-all --target x86_64-unknown-linux-gnu --output /build/daemon-bundle/eco-daemon mod.ts; \
|
||||
else \
|
||||
deno compile --allow-all --target aarch64-unknown-linux-gnu --output /build/daemon-bundle/eco-daemon mod.ts; \
|
||||
fi
|
||||
|
||||
# Download Chromium during Docker build (network works here, not in chroot hooks)
|
||||
RUN echo "Downloading Chromium from official snapshots..." && \
|
||||
# Note: ARM64 Chromium snapshots may be less reliable, fallback to known version
|
||||
RUN echo "Downloading Chromium for $TARGET_ARCH..." && \
|
||||
cd /tmp && \
|
||||
LATEST=$(curl -fsSL "https://www.googleapis.com/download/storage/v1/b/chromium-browser-snapshots/o/Linux_x64%2FLAST_CHANGE?alt=media" 2>/dev/null || echo "1368529") && \
|
||||
echo "Using Chromium build: $LATEST" && \
|
||||
curl -fsSL "https://www.googleapis.com/download/storage/v1/b/chromium-browser-snapshots/o/Linux_x64%2F${LATEST}%2Fchrome-linux.zip?alt=media" -o chromium.zip || \
|
||||
curl -fsSL "https://www.googleapis.com/download/storage/v1/b/chromium-browser-snapshots/o/Linux_x64%2F1368529%2Fchrome-linux.zip?alt=media" -o chromium.zip && \
|
||||
if [ "$TARGET_ARCH" = "amd64" ]; then \
|
||||
PLATFORM="Linux_x64"; \
|
||||
FALLBACK_VERSION="1368529"; \
|
||||
else \
|
||||
PLATFORM="Linux_ARM64"; \
|
||||
FALLBACK_VERSION="1368529"; \
|
||||
fi && \
|
||||
LATEST=$(curl -fsSL "https://www.googleapis.com/download/storage/v1/b/chromium-browser-snapshots/o/${PLATFORM}%2FLAST_CHANGE?alt=media" 2>/dev/null || echo "$FALLBACK_VERSION") && \
|
||||
echo "Using Chromium build: $LATEST for platform $PLATFORM" && \
|
||||
curl -fsSL "https://www.googleapis.com/download/storage/v1/b/chromium-browser-snapshots/o/${PLATFORM}%2F${LATEST}%2Fchrome-linux.zip?alt=media" -o chromium.zip || \
|
||||
curl -fsSL "https://www.googleapis.com/download/storage/v1/b/chromium-browser-snapshots/o/${PLATFORM}%2F${FALLBACK_VERSION}%2Fchrome-linux.zip?alt=media" -o chromium.zip && \
|
||||
mkdir -p /build/chromium && \
|
||||
unzip -q chromium.zip -d /tmp && \
|
||||
mv /tmp/chrome-linux/* /build/chromium/ && \
|
||||
@@ -71,21 +103,45 @@ RUN echo '#!/bin/sh' > /usr/local/bin/isohybrid && \
|
||||
echo 'exit 0' >> /usr/local/bin/isohybrid && \
|
||||
chmod +x /usr/local/bin/isohybrid
|
||||
|
||||
# Build script
|
||||
# Build script - parameterized for architecture
|
||||
COPY <<'EOF' /build/docker-build.sh
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
export PATH="/usr/local/bin:/usr/bin:/usr/sbin:/bin:/sbin:$PATH"
|
||||
|
||||
# Get architecture from environment (passed from docker run -e)
|
||||
TARGET_ARCH="${TARGET_ARCH:-amd64}"
|
||||
|
||||
echo "=== EcoOS ISO Builder (Docker) ==="
|
||||
echo "Target architecture: $TARGET_ARCH"
|
||||
|
||||
cd /build
|
||||
|
||||
# Initialize live-build - UEFI only (no syslinux/BIOS)
|
||||
# Using German mirror for faster/more stable downloads
|
||||
# Determine live-build architecture and image format
|
||||
case "$TARGET_ARCH" in
|
||||
amd64)
|
||||
LB_ARCH="amd64"
|
||||
IMAGE_FORMAT="iso-hybrid"
|
||||
BOOTLOADER_OPT="--bootloader grub-efi"
|
||||
;;
|
||||
arm64)
|
||||
LB_ARCH="arm64"
|
||||
IMAGE_FORMAT="iso-hybrid"
|
||||
BOOTLOADER_OPT="--bootloader grub-efi"
|
||||
;;
|
||||
rpi)
|
||||
LB_ARCH="arm64"
|
||||
IMAGE_FORMAT="hdd"
|
||||
BOOTLOADER_OPT="" # RPi uses native bootloader
|
||||
;;
|
||||
esac
|
||||
|
||||
echo "Live-build arch: $LB_ARCH, format: $IMAGE_FORMAT"
|
||||
|
||||
# Initialize live-build
|
||||
lb config \
|
||||
--architectures amd64 \
|
||||
--architectures $LB_ARCH \
|
||||
--distribution noble \
|
||||
--archive-areas "main restricted universe multiverse" \
|
||||
--mirror-bootstrap "http://ftp.halifax.rwth-aachen.de/ubuntu/" \
|
||||
@@ -93,16 +149,33 @@ lb config \
|
||||
--mirror-chroot-security "http://ftp.halifax.rwth-aachen.de/ubuntu/" \
|
||||
--mirror-binary "http://ftp.halifax.rwth-aachen.de/ubuntu/" \
|
||||
--mirror-binary-security "http://ftp.halifax.rwth-aachen.de/ubuntu/" \
|
||||
--binary-images iso-hybrid \
|
||||
--binary-images $IMAGE_FORMAT \
|
||||
--debian-installer false \
|
||||
--memtest none \
|
||||
--bootloader grub-efi \
|
||||
$BOOTLOADER_OPT \
|
||||
--iso-application "EcoOS" \
|
||||
--iso-publisher "EcoBridge" \
|
||||
--iso-volume "EcoOS"
|
||||
|
||||
# Copy package lists
|
||||
cp /build/config/live-build/package-lists/*.list.chroot config/package-lists/
|
||||
# Copy common package lists (excluding architecture-specific ones)
|
||||
for f in /build/config/live-build/package-lists/*.list.chroot; do
|
||||
filename=$(basename "$f")
|
||||
# Skip architecture-specific files (base-amd64, base-arm64, base-rpi)
|
||||
case "$filename" in
|
||||
base-amd64.list.chroot|base-arm64.list.chroot|base-rpi.list.chroot)
|
||||
echo "Skipping arch-specific list: $filename"
|
||||
;;
|
||||
*)
|
||||
cp "$f" config/package-lists/
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Append architecture-specific packages to base.list.chroot
|
||||
if [ -f "/build/config/live-build/package-lists/base-${TARGET_ARCH}.list.chroot" ]; then
|
||||
echo "Adding architecture-specific packages for $TARGET_ARCH..."
|
||||
cat "/build/config/live-build/package-lists/base-${TARGET_ARCH}.list.chroot" >> config/package-lists/base.list.chroot
|
||||
fi
|
||||
|
||||
# Prepare includes.chroot
|
||||
mkdir -p config/includes.chroot/opt/eco/bin
|
||||
@@ -154,26 +227,28 @@ mkdir -p config/includes.binary/autoinstall
|
||||
cp /build/config/autoinstall/user-data config/includes.binary/autoinstall/
|
||||
touch config/includes.binary/autoinstall/meta-data
|
||||
|
||||
# Prepare EFI boot files in includes.binary
|
||||
echo "Preparing EFI boot structure..."
|
||||
mkdir -p config/includes.binary/EFI/BOOT
|
||||
mkdir -p config/includes.binary/boot/grub
|
||||
# Architecture-specific EFI/boot setup
|
||||
if [ "$TARGET_ARCH" = "amd64" ]; then
|
||||
# AMD64 EFI boot setup
|
||||
echo "Preparing AMD64 EFI boot structure..."
|
||||
mkdir -p config/includes.binary/EFI/BOOT
|
||||
mkdir -p config/includes.binary/boot/grub
|
||||
|
||||
# Copy signed EFI files from host (installed in Docker image)
|
||||
cp /usr/lib/shim/shimx64.efi.signed.latest config/includes.binary/EFI/BOOT/BOOTX64.EFI || \
|
||||
cp /usr/lib/shim/shimx64.efi.signed config/includes.binary/EFI/BOOT/BOOTX64.EFI || \
|
||||
cp /usr/lib/shim/shimx64.efi config/includes.binary/EFI/BOOT/BOOTX64.EFI || true
|
||||
# Copy signed EFI files from host (installed in Docker image)
|
||||
cp /usr/lib/shim/shimx64.efi.signed.latest config/includes.binary/EFI/BOOT/BOOTX64.EFI || \
|
||||
cp /usr/lib/shim/shimx64.efi.signed config/includes.binary/EFI/BOOT/BOOTX64.EFI || \
|
||||
cp /usr/lib/shim/shimx64.efi config/includes.binary/EFI/BOOT/BOOTX64.EFI || true
|
||||
|
||||
cp /usr/lib/grub/x86_64-efi-signed/grubx64.efi.signed config/includes.binary/EFI/BOOT/grubx64.efi || \
|
||||
cp /usr/lib/grub/x86_64-efi/grubx64.efi config/includes.binary/EFI/BOOT/grubx64.efi || true
|
||||
cp /usr/lib/grub/x86_64-efi-signed/grubx64.efi.signed config/includes.binary/EFI/BOOT/grubx64.efi || \
|
||||
cp /usr/lib/grub/x86_64-efi/grubx64.efi config/includes.binary/EFI/BOOT/grubx64.efi || true
|
||||
|
||||
# Also provide mmx64.efi for some UEFI implementations
|
||||
if [ -f config/includes.binary/EFI/BOOT/grubx64.efi ]; then
|
||||
cp config/includes.binary/EFI/BOOT/grubx64.efi config/includes.binary/EFI/BOOT/mmx64.efi
|
||||
fi
|
||||
# Also provide mmx64.efi for some UEFI implementations
|
||||
if [ -f config/includes.binary/EFI/BOOT/grubx64.efi ]; then
|
||||
cp config/includes.binary/EFI/BOOT/grubx64.efi config/includes.binary/EFI/BOOT/mmx64.efi
|
||||
fi
|
||||
|
||||
# Create grub.cfg for live boot with installer option
|
||||
cat > config/includes.binary/boot/grub/grub.cfg << 'GRUBCFG'
|
||||
# Create grub.cfg for live boot with installer option
|
||||
cat > config/includes.binary/boot/grub/grub.cfg << 'GRUBCFG'
|
||||
set default=0
|
||||
set timeout=10
|
||||
|
||||
@@ -198,82 +273,174 @@ menuentry "EcoOS Live (Safe Mode)" {
|
||||
}
|
||||
GRUBCFG
|
||||
|
||||
# Also put grub.cfg in EFI/BOOT for fallback
|
||||
cp config/includes.binary/boot/grub/grub.cfg config/includes.binary/EFI/BOOT/grub.cfg
|
||||
# Also put grub.cfg in EFI/BOOT for fallback
|
||||
cp config/includes.binary/boot/grub/grub.cfg config/includes.binary/EFI/BOOT/grub.cfg
|
||||
|
||||
# Build ISO - use individual lb stages to control the process
|
||||
lb bootstrap
|
||||
lb chroot
|
||||
elif [ "$TARGET_ARCH" = "arm64" ]; then
|
||||
# ARM64 EFI boot setup
|
||||
echo "Preparing ARM64 EFI boot structure..."
|
||||
mkdir -p config/includes.binary/EFI/BOOT
|
||||
mkdir -p config/includes.binary/boot/grub
|
||||
|
||||
# Try lb binary, but continue even if isohybrid fails
|
||||
lb binary || {
|
||||
echo "lb binary had errors, checking if ISO was created anyway..."
|
||||
if ls /build/*.iso 2>/dev/null; then
|
||||
echo "ISO exists despite errors, continuing..."
|
||||
else
|
||||
echo "No ISO found, build truly failed"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
# Copy ARM64 GRUB EFI
|
||||
cp /usr/lib/grub/arm64-efi-signed/grubaa64.efi.signed config/includes.binary/EFI/BOOT/BOOTAA64.EFI || \
|
||||
cp /usr/lib/grub/arm64-efi/grubaa64.efi config/includes.binary/EFI/BOOT/BOOTAA64.EFI || true
|
||||
|
||||
# Check if EFI was created properly
|
||||
echo "Checking binary directory for EFI..."
|
||||
ls -la binary/EFI/BOOT/ 2>/dev/null || echo "EFI/BOOT not found in binary dir"
|
||||
|
||||
# Find the ISO file
|
||||
echo "Searching for ISO file..."
|
||||
find /build -name "*.iso" -type f 2>/dev/null
|
||||
ls -la /build/*.iso 2>/dev/null || true
|
||||
|
||||
ISO_FILE=$(find /build -name "*.iso" -type f 2>/dev/null | head -1)
|
||||
if [ -z "$ISO_FILE" ]; then
|
||||
echo "ERROR: No ISO file found in build directory"
|
||||
echo "Listing /build contents:"
|
||||
ls -la /build/
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Found ISO: $ISO_FILE"
|
||||
|
||||
# Always create proper EFI boot image and rebuild ISO
|
||||
echo "Creating UEFI-bootable ISO..."
|
||||
|
||||
# Extract ISO contents
|
||||
mkdir -p /tmp/iso_extract
|
||||
xorriso -osirrox on -indev "$ISO_FILE" -extract / /tmp/iso_extract
|
||||
|
||||
# Find the actual kernel and initrd names
|
||||
VMLINUZ=$(ls /tmp/iso_extract/casper/vmlinuz* 2>/dev/null | head -1 | xargs basename)
|
||||
INITRD=$(ls /tmp/iso_extract/casper/initrd* 2>/dev/null | head -1 | xargs basename)
|
||||
|
||||
echo "Found kernel: $VMLINUZ, initrd: $INITRD"
|
||||
|
||||
# Ensure EFI structure exists with proper files
|
||||
mkdir -p /tmp/iso_extract/EFI/BOOT
|
||||
mkdir -p /tmp/iso_extract/boot/grub
|
||||
|
||||
# Copy EFI files from host
|
||||
cp /usr/lib/shim/shimx64.efi.signed.latest /tmp/iso_extract/EFI/BOOT/BOOTX64.EFI 2>/dev/null || \
|
||||
cp /usr/lib/shim/shimx64.efi.signed /tmp/iso_extract/EFI/BOOT/BOOTX64.EFI 2>/dev/null || \
|
||||
cp /usr/lib/shim/shimx64.efi /tmp/iso_extract/EFI/BOOT/BOOTX64.EFI 2>/dev/null || true
|
||||
|
||||
cp /usr/lib/grub/x86_64-efi-signed/grubx64.efi.signed /tmp/iso_extract/EFI/BOOT/grubx64.efi 2>/dev/null || \
|
||||
cp /usr/lib/grub/x86_64-efi/grubx64.efi /tmp/iso_extract/EFI/BOOT/grubx64.efi 2>/dev/null || true
|
||||
|
||||
# Copy mmx64.efi for secure boot compatibility
|
||||
if [ -f /tmp/iso_extract/EFI/BOOT/grubx64.efi ]; then
|
||||
cp /tmp/iso_extract/EFI/BOOT/grubx64.efi /tmp/iso_extract/EFI/BOOT/mmx64.efi
|
||||
fi
|
||||
|
||||
# Create grub.cfg with correct filenames and installer option
|
||||
cat > /tmp/iso_extract/boot/grub/grub.cfg << GRUBCFG2
|
||||
# Create grub.cfg for ARM64
|
||||
cat > config/includes.binary/boot/grub/grub.cfg << 'GRUBCFG'
|
||||
set default=0
|
||||
set timeout=10
|
||||
|
||||
insmod part_gpt
|
||||
insmod fat
|
||||
insmod efi_gop
|
||||
insmod efi_uga
|
||||
|
||||
menuentry "Install EcoOS (auto-selects in 10s)" {
|
||||
linux /casper/vmlinuz boot=casper noprompt quiet splash ecoos_install=1 ---
|
||||
initrd /casper/initrd
|
||||
}
|
||||
|
||||
menuentry "EcoOS Live (Try without installing)" {
|
||||
linux /casper/vmlinuz boot=casper noprompt quiet splash ---
|
||||
initrd /casper/initrd
|
||||
}
|
||||
|
||||
menuentry "EcoOS Live (Safe Mode)" {
|
||||
linux /casper/vmlinuz boot=casper noprompt nomodeset ---
|
||||
initrd /casper/initrd
|
||||
}
|
||||
GRUBCFG
|
||||
|
||||
cp config/includes.binary/boot/grub/grub.cfg config/includes.binary/EFI/BOOT/grub.cfg
|
||||
|
||||
elif [ "$TARGET_ARCH" = "rpi" ]; then
|
||||
# Raspberry Pi boot setup (native bootloader, no GRUB)
|
||||
echo "Preparing Raspberry Pi boot structure..."
|
||||
mkdir -p config/includes.binary/boot
|
||||
|
||||
# Create config.txt for Raspberry Pi
|
||||
cat > config/includes.binary/boot/config.txt << 'PICFG'
|
||||
# EcoOS Raspberry Pi Configuration
|
||||
# Supports Pi 3, 4, and 5
|
||||
|
||||
# Enable 64-bit mode
|
||||
arm_64bit=1
|
||||
|
||||
# Kernel and initrd
|
||||
kernel=vmlinuz
|
||||
initramfs initrd.img followkernel
|
||||
|
||||
# Enable serial console for debugging
|
||||
enable_uart=1
|
||||
|
||||
# GPU/display settings
|
||||
dtoverlay=vc4-kms-v3d
|
||||
gpu_mem=256
|
||||
|
||||
# USB and power settings (Pi 4/5)
|
||||
max_usb_current=1
|
||||
|
||||
# Audio
|
||||
dtparam=audio=on
|
||||
|
||||
# Camera/display interfaces
|
||||
camera_auto_detect=1
|
||||
display_auto_detect=1
|
||||
|
||||
# Pi 5 specific (ignored on older models)
|
||||
[pi5]
|
||||
dtoverlay=dwc2,dr_mode=host
|
||||
PICFG
|
||||
|
||||
# Create cmdline.txt
|
||||
cat > config/includes.binary/boot/cmdline.txt << 'CMDLINE'
|
||||
console=serial0,115200 console=tty1 root=LABEL=EcoOS rootfstype=ext4 fsck.repair=yes rootwait quiet splash
|
||||
CMDLINE
|
||||
fi
|
||||
|
||||
# Build - use individual lb stages to control the process
|
||||
echo "Running lb bootstrap..."
|
||||
lb bootstrap
|
||||
|
||||
echo "Running lb chroot..."
|
||||
lb chroot
|
||||
|
||||
# Try lb binary, but continue even if isohybrid fails
|
||||
echo "Running lb binary..."
|
||||
lb binary || {
|
||||
echo "lb binary had errors, checking if output was created anyway..."
|
||||
if ls /build/*.iso 2>/dev/null || ls /build/*.img 2>/dev/null; then
|
||||
echo "Output exists despite errors, continuing..."
|
||||
else
|
||||
echo "No output found, build truly failed"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Post-processing based on architecture
|
||||
if [ "$TARGET_ARCH" = "amd64" ] || [ "$TARGET_ARCH" = "arm64" ]; then
|
||||
# Find the ISO file
|
||||
echo "Searching for ISO file..."
|
||||
find /build -name "*.iso" -type f 2>/dev/null
|
||||
ls -la /build/*.iso 2>/dev/null || true
|
||||
|
||||
ISO_FILE=$(find /build -name "*.iso" -type f 2>/dev/null | head -1)
|
||||
if [ -z "$ISO_FILE" ]; then
|
||||
echo "ERROR: No ISO file found in build directory"
|
||||
ls -la /build/
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Found ISO: $ISO_FILE"
|
||||
|
||||
# Rebuild ISO with proper EFI boot support
|
||||
echo "Creating UEFI-bootable ISO..."
|
||||
|
||||
# Extract ISO contents
|
||||
mkdir -p /tmp/iso_extract
|
||||
xorriso -osirrox on -indev "$ISO_FILE" -extract / /tmp/iso_extract
|
||||
|
||||
# Find the actual kernel and initrd names
|
||||
VMLINUZ=$(ls /tmp/iso_extract/casper/vmlinuz* 2>/dev/null | head -1 | xargs basename)
|
||||
INITRD=$(ls /tmp/iso_extract/casper/initrd* 2>/dev/null | head -1 | xargs basename)
|
||||
|
||||
echo "Found kernel: $VMLINUZ, initrd: $INITRD"
|
||||
|
||||
# Ensure EFI structure exists with proper files
|
||||
mkdir -p /tmp/iso_extract/EFI/BOOT
|
||||
mkdir -p /tmp/iso_extract/boot/grub
|
||||
|
||||
if [ "$TARGET_ARCH" = "amd64" ]; then
|
||||
# Copy AMD64 EFI files
|
||||
cp /usr/lib/shim/shimx64.efi.signed.latest /tmp/iso_extract/EFI/BOOT/BOOTX64.EFI 2>/dev/null || \
|
||||
cp /usr/lib/shim/shimx64.efi.signed /tmp/iso_extract/EFI/BOOT/BOOTX64.EFI 2>/dev/null || \
|
||||
cp /usr/lib/shim/shimx64.efi /tmp/iso_extract/EFI/BOOT/BOOTX64.EFI 2>/dev/null || true
|
||||
|
||||
cp /usr/lib/grub/x86_64-efi-signed/grubx64.efi.signed /tmp/iso_extract/EFI/BOOT/grubx64.efi 2>/dev/null || \
|
||||
cp /usr/lib/grub/x86_64-efi/grubx64.efi /tmp/iso_extract/EFI/BOOT/grubx64.efi 2>/dev/null || true
|
||||
|
||||
if [ -f /tmp/iso_extract/EFI/BOOT/grubx64.efi ]; then
|
||||
cp /tmp/iso_extract/EFI/BOOT/grubx64.efi /tmp/iso_extract/EFI/BOOT/mmx64.efi
|
||||
fi
|
||||
|
||||
EFI_BOOT_FILE="BOOTX64.EFI"
|
||||
else
|
||||
# Copy ARM64 EFI files
|
||||
cp /usr/lib/grub/arm64-efi-signed/grubaa64.efi.signed /tmp/iso_extract/EFI/BOOT/BOOTAA64.EFI 2>/dev/null || \
|
||||
cp /usr/lib/grub/arm64-efi/grubaa64.efi /tmp/iso_extract/EFI/BOOT/BOOTAA64.EFI 2>/dev/null || true
|
||||
|
||||
EFI_BOOT_FILE="BOOTAA64.EFI"
|
||||
fi
|
||||
|
||||
# Update grub.cfg with correct filenames
|
||||
cat > /tmp/iso_extract/boot/grub/grub.cfg << GRUBCFG2
|
||||
set default=0
|
||||
set timeout=10
|
||||
|
||||
insmod part_gpt
|
||||
insmod fat
|
||||
insmod efi_gop
|
||||
$([ "$TARGET_ARCH" = "amd64" ] && echo "insmod efi_uga")
|
||||
|
||||
menuentry "Install EcoOS (auto-selects in 10s)" {
|
||||
linux /casper/${VMLINUZ} boot=casper noprompt quiet splash ecoos_install=1 ---
|
||||
@@ -291,54 +458,86 @@ menuentry "EcoOS Live (Safe Mode)" {
|
||||
}
|
||||
GRUBCFG2
|
||||
|
||||
cp /tmp/iso_extract/boot/grub/grub.cfg /tmp/iso_extract/EFI/BOOT/grub.cfg
|
||||
cp /tmp/iso_extract/boot/grub/grub.cfg /tmp/iso_extract/EFI/BOOT/grub.cfg
|
||||
|
||||
# Create EFI boot image (FAT filesystem for UEFI El Torito boot)
|
||||
echo "Creating EFI boot image..."
|
||||
dd if=/dev/zero of=/tmp/efi.img bs=1M count=10
|
||||
mkfs.fat -F 12 /tmp/efi.img
|
||||
mmd -i /tmp/efi.img ::/EFI
|
||||
mmd -i /tmp/efi.img ::/EFI/BOOT
|
||||
mcopy -i /tmp/efi.img /tmp/iso_extract/EFI/BOOT/BOOTX64.EFI ::/EFI/BOOT/
|
||||
mcopy -i /tmp/efi.img /tmp/iso_extract/EFI/BOOT/grubx64.efi ::/EFI/BOOT/ 2>/dev/null || true
|
||||
mcopy -i /tmp/efi.img /tmp/iso_extract/EFI/BOOT/mmx64.efi ::/EFI/BOOT/ 2>/dev/null || true
|
||||
mcopy -i /tmp/efi.img /tmp/iso_extract/EFI/BOOT/grub.cfg ::/EFI/BOOT/
|
||||
# Create EFI boot image (FAT filesystem for UEFI El Torito boot)
|
||||
echo "Creating EFI boot image..."
|
||||
dd if=/dev/zero of=/tmp/efi.img bs=1M count=10
|
||||
mkfs.fat -F 12 /tmp/efi.img
|
||||
mmd -i /tmp/efi.img ::/EFI
|
||||
mmd -i /tmp/efi.img ::/EFI/BOOT
|
||||
mcopy -i /tmp/efi.img /tmp/iso_extract/EFI/BOOT/$EFI_BOOT_FILE ::/EFI/BOOT/
|
||||
if [ "$TARGET_ARCH" = "amd64" ]; then
|
||||
mcopy -i /tmp/efi.img /tmp/iso_extract/EFI/BOOT/grubx64.efi ::/EFI/BOOT/ 2>/dev/null || true
|
||||
mcopy -i /tmp/efi.img /tmp/iso_extract/EFI/BOOT/mmx64.efi ::/EFI/BOOT/ 2>/dev/null || true
|
||||
fi
|
||||
mcopy -i /tmp/efi.img /tmp/iso_extract/EFI/BOOT/grub.cfg ::/EFI/BOOT/
|
||||
|
||||
# Rebuild ISO with EFI boot support (UEFI-only, no BIOS boot)
|
||||
echo "Rebuilding ISO with UEFI boot support..."
|
||||
xorriso -as mkisofs \
|
||||
-r -V "EcoOS" \
|
||||
-o /tmp/ecoos-efi.iso \
|
||||
-J -joliet-long \
|
||||
-eltorito-alt-boot \
|
||||
-e --interval:appended_partition_2:all:: \
|
||||
-no-emul-boot -isohybrid-gpt-basdat \
|
||||
-append_partition 2 0xef /tmp/efi.img \
|
||||
/tmp/iso_extract
|
||||
# Rebuild ISO with EFI boot support
|
||||
echo "Rebuilding ISO with UEFI boot support..."
|
||||
xorriso -as mkisofs \
|
||||
-r -V "EcoOS" \
|
||||
-o /tmp/ecoos-efi.iso \
|
||||
-J -joliet-long \
|
||||
-eltorito-alt-boot \
|
||||
-e --interval:appended_partition_2:all:: \
|
||||
-no-emul-boot -isohybrid-gpt-basdat \
|
||||
-append_partition 2 0xef /tmp/efi.img \
|
||||
/tmp/iso_extract
|
||||
|
||||
if [ -f /tmp/ecoos-efi.iso ]; then
|
||||
ISO_FILE=/tmp/ecoos-efi.iso
|
||||
echo "Created UEFI-bootable ISO: $ISO_FILE"
|
||||
else
|
||||
echo "ERROR: Failed to create EFI ISO"
|
||||
exit 1
|
||||
if [ -f /tmp/ecoos-efi.iso ]; then
|
||||
ISO_FILE=/tmp/ecoos-efi.iso
|
||||
echo "Created UEFI-bootable ISO: $ISO_FILE"
|
||||
else
|
||||
echo "ERROR: Failed to create EFI ISO"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rm -rf /tmp/iso_extract
|
||||
|
||||
# Determine output filename
|
||||
if [ "$TARGET_ARCH" = "amd64" ]; then
|
||||
OUTPUT_NAME="ecoos.iso"
|
||||
else
|
||||
OUTPUT_NAME="ecoos-arm64.iso"
|
||||
fi
|
||||
|
||||
# Copy to output
|
||||
mkdir -p /output
|
||||
cp "$ISO_FILE" /output/$OUTPUT_NAME
|
||||
|
||||
echo ""
|
||||
echo "=== Final ISO EFI check ==="
|
||||
xorriso -indev /output/$OUTPUT_NAME -find / -maxdepth 2 -type d 2>/dev/null || true
|
||||
|
||||
echo ""
|
||||
echo "=== Build Complete ==="
|
||||
echo "ISO: /output/$OUTPUT_NAME"
|
||||
ls -lh /output/$OUTPUT_NAME
|
||||
|
||||
elif [ "$TARGET_ARCH" = "rpi" ]; then
|
||||
# Raspberry Pi image creation
|
||||
echo "Creating Raspberry Pi bootable image..."
|
||||
|
||||
# Find the live-build output
|
||||
HDD_FILE=$(find /build -name "*.img" -type f 2>/dev/null | head -1)
|
||||
SQUASHFS_FILE=$(find /build -name "filesystem.squashfs" -type f 2>/dev/null | head -1)
|
||||
|
||||
if [ -z "$SQUASHFS_FILE" ]; then
|
||||
echo "Looking for squashfs in chroot..."
|
||||
SQUASHFS_FILE=$(find /build/chroot -name "filesystem.squashfs" -type f 2>/dev/null | head -1)
|
||||
fi
|
||||
|
||||
echo "Found squashfs: $SQUASHFS_FILE"
|
||||
|
||||
# Create RPi image using the helper script
|
||||
/build/scripts/create-rpi-image.sh "$SQUASHFS_FILE" /output/ecoos-rpi.img
|
||||
|
||||
echo ""
|
||||
echo "=== Build Complete ==="
|
||||
echo "Image: /output/ecoos-rpi.img"
|
||||
ls -lh /output/ecoos-rpi.img
|
||||
fi
|
||||
|
||||
rm -rf /tmp/iso_extract
|
||||
|
||||
# Copy to output
|
||||
mkdir -p /output
|
||||
cp "$ISO_FILE" /output/ecoos.iso
|
||||
|
||||
# Final verification
|
||||
echo ""
|
||||
echo "=== Final ISO EFI check ==="
|
||||
xorriso -indev /output/ecoos.iso -find / -maxdepth 2 -type d 2>/dev/null || true
|
||||
|
||||
echo ""
|
||||
echo "=== Build Complete ==="
|
||||
echo "ISO: /output/ecoos.iso"
|
||||
ls -lh /output/ecoos.iso
|
||||
EOF
|
||||
|
||||
RUN chmod +x /build/docker-build.sh
|
||||
|
||||
@@ -26,4 +26,7 @@ systemctl enable ssh.service || true
|
||||
echo "Enabling debug service..."
|
||||
systemctl enable debug-network.service || true
|
||||
|
||||
echo "Enabling eco-vdagent service (Wayland display agent for VMs)..."
|
||||
systemctl enable eco-vdagent.service || true
|
||||
|
||||
echo "Services enabled."
|
||||
|
||||
@@ -0,0 +1,16 @@
|
||||
[Unit]
Description=EcoOS Wayland Display Agent
Documentation=https://ecobridge.xyz
After=seatd.service
Wants=seatd.service
ConditionVirtualization=vm

[Service]
Type=simple
ExecStart=/opt/eco/bin/eco-vdagent
Restart=always
RestartSec=5
Environment=XDG_RUNTIME_DIR=/run/user/1000

[Install]
WantedBy=multi-user.target
@@ -0,0 +1 @@
/etc/systemd/system/eco-vdagent.service
Binary file not shown.
455
isobuild/config/includes.chroot/opt/eco/bin/eco-vdagent
Executable file
@@ -0,0 +1,455 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
EcoOS Wayland Display Agent (eco-vdagent)
|
||||
|
||||
A Wayland-native replacement for spice-vdagent that uses swaymsg/wlr-output-management
|
||||
instead of xrandr to configure displays.
|
||||
|
||||
Listens on the SPICE virtio-serial port for VD_AGENT_MONITORS_CONFIG messages
|
||||
and applies the configuration to Sway outputs.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import struct
|
||||
import subprocess
|
||||
import json
|
||||
import time
|
||||
import signal
|
||||
import logging
|
||||
from pathlib import Path
|
||||
|
||||
# Configure logging
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format='%(asctime)s - eco-vdagent - %(levelname)s - %(message)s'
|
||||
)
|
||||
log = logging.getLogger('eco-vdagent')
|
||||
|
||||
# SPICE VDAgent Protocol Constants
|
||||
VD_AGENT_PROTOCOL = 1
|
||||
|
||||
# Message types
|
||||
VD_AGENT_MOUSE_STATE = 1
|
||||
VD_AGENT_MONITORS_CONFIG = 2
|
||||
VD_AGENT_REPLY = 3
|
||||
VD_AGENT_CLIPBOARD = 4
|
||||
VD_AGENT_DISPLAY_CONFIG = 5
|
||||
VD_AGENT_ANNOUNCE_CAPABILITIES = 6
|
||||
VD_AGENT_CLIPBOARD_GRAB = 7
|
||||
VD_AGENT_CLIPBOARD_REQUEST = 8
|
||||
VD_AGENT_CLIPBOARD_RELEASE = 9
|
||||
VD_AGENT_FILE_XFER_START = 10
|
||||
VD_AGENT_FILE_XFER_STATUS = 11
|
||||
VD_AGENT_FILE_XFER_DATA = 12
|
||||
VD_AGENT_CLIENT_DISCONNECTED = 13
|
||||
VD_AGENT_MAX_CLIPBOARD = 14
|
||||
VD_AGENT_AUDIO_VOLUME_SYNC = 15
|
||||
VD_AGENT_GRAPHICS_DEVICE_INFO = 16
|
||||
|
||||
# Reply error codes
|
||||
VD_AGENT_SUCCESS = 1
|
||||
VD_AGENT_ERROR = 2
|
||||
|
||||
# Capability bits
|
||||
VD_AGENT_CAP_MOUSE_STATE = 0
|
||||
VD_AGENT_CAP_MONITORS_CONFIG = 1
|
||||
VD_AGENT_CAP_REPLY = 2
|
||||
VD_AGENT_CAP_CLIPBOARD = 3
|
||||
VD_AGENT_CAP_DISPLAY_CONFIG = 4
|
||||
VD_AGENT_CAP_CLIPBOARD_BY_DEMAND = 5
|
||||
VD_AGENT_CAP_CLIPBOARD_SELECTION = 6
|
||||
VD_AGENT_CAP_SPARSE_MONITORS_CONFIG = 7
|
||||
VD_AGENT_CAP_GUEST_LINEEND_LF = 8
|
||||
VD_AGENT_CAP_GUEST_LINEEND_CRLF = 9
|
||||
VD_AGENT_CAP_MAX_CLIPBOARD = 10
|
||||
VD_AGENT_CAP_AUDIO_VOLUME_SYNC = 11
|
||||
VD_AGENT_CAP_MONITORS_CONFIG_POSITION = 12
|
||||
VD_AGENT_CAP_FILE_XFER_DISABLED = 13
|
||||
VD_AGENT_CAP_FILE_XFER_DETAILED_ERRORS = 14
|
||||
VD_AGENT_CAP_GRAPHICS_DEVICE_INFO = 15
|
||||
VD_AGENT_CAP_CLIPBOARD_NO_RELEASE_ON_REGRAB = 16
|
||||
VD_AGENT_CAP_CLIPBOARD_GRAB_SERIAL = 17
|
||||
|
||||
# Virtio serial port path
|
||||
VIRTIO_PORT = '/dev/virtio-ports/com.redhat.spice.0'
|
||||
|
||||
# VDI Chunk header: port(4) + size(4) = 8 bytes
|
||||
VDI_CHUNK_HEADER_SIZE = 8
|
||||
VDI_CHUNK_HEADER_FMT = '<II' # port, size
|
||||
|
||||
# VDI Port constants
|
||||
VDP_CLIENT_PORT = 1
|
||||
VDP_SERVER_PORT = 2
|
||||
|
||||
# VDAgentMessage header: protocol(4) + type(4) + opaque(8) + size(4) = 20 bytes
|
||||
VDAGENT_MSG_HEADER_SIZE = 20
|
||||
VDAGENT_MSG_HEADER_FMT = '<IIQI' # little-endian: uint32, uint32, uint64, uint32
|
||||
|
||||
# VDAgentMonitorsConfig header: num_of_monitors(4) + flags(4) = 8 bytes
|
||||
MONITORS_CONFIG_HEADER_SIZE = 8
|
||||
MONITORS_CONFIG_HEADER_FMT = '<II'
|
||||
|
||||
# VDAgentMonConfig: height(4) + width(4) + depth(4) + x(4) + y(4) = 20 bytes
|
||||
MON_CONFIG_SIZE = 20
|
||||
MON_CONFIG_FMT = '<IIIii' # height, width, depth, x, y (x,y are signed)
|
||||
|
||||
|
||||
class EcoVDAgent:
|
||||
def __init__(self):
|
||||
self.port_fd = None
|
||||
self.running = True
|
||||
self.sway_socket = None
|
||||
|
||||
def find_sway_socket(self):
|
||||
"""Find the Sway IPC socket"""
|
||||
# Check environment first
|
||||
if 'SWAYSOCK' in os.environ:
|
||||
return os.environ['SWAYSOCK']
|
||||
|
||||
# Search common locations
|
||||
runtime_dir = os.environ.get('XDG_RUNTIME_DIR', '/run/user/1000')
|
||||
|
||||
# Try to find sway socket - check fixed path first, then glob patterns
|
||||
import glob
|
||||
|
||||
# Check for fixed socket path first (set by eco-daemon)
|
||||
fixed_socket = f'{runtime_dir}/sway-ipc.sock'
|
||||
if os.path.exists(fixed_socket):
|
||||
return fixed_socket
|
||||
|
||||
# Fall back to glob patterns for standard Sway socket naming
|
||||
for pattern in [f'{runtime_dir}/sway-ipc.*.sock', '/run/user/*/sway-ipc.*.sock']:
|
||||
sockets = glob.glob(pattern)
|
||||
if sockets:
|
||||
return sockets[0]
|
||||
|
||||
return None
|
||||
|
||||
def run_swaymsg(self, *args):
|
||||
"""Run swaymsg command"""
|
||||
cmd = ['swaymsg']
|
||||
if self.sway_socket:
|
||||
cmd.extend(['-s', self.sway_socket])
|
||||
cmd.extend(args)
|
||||
|
||||
try:
|
||||
result = subprocess.run(cmd, capture_output=True, text=True, timeout=5)
|
||||
if result.returncode != 0:
|
||||
log.warning(f"swaymsg failed: {result.stderr}")
|
||||
return result.returncode == 0, result.stdout
|
||||
except Exception as e:
|
||||
log.error(f"Failed to run swaymsg: {e}")
|
||||
return False, ""
|
||||
|
||||
def get_outputs(self):
|
||||
"""Get current Sway outputs"""
|
||||
success, output = self.run_swaymsg('-t', 'get_outputs', '-r')
|
||||
if success:
|
||||
try:
|
||||
return json.loads(output)
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
return []
|
||||
|
||||
def configure_output(self, name, width, height, x, y, enable=True):
|
||||
"""Configure a Sway output"""
|
||||
if enable:
|
||||
# Try to enable and position the output
|
||||
# First, try setting mode
|
||||
mode_cmd = f'output {name} mode {width}x{height} position {x} {y} enable'
|
||||
success, _ = self.run_swaymsg(mode_cmd)
|
||||
if not success:
|
||||
# Try without explicit mode (use preferred)
|
||||
pos_cmd = f'output {name} position {x} {y} enable'
|
||||
success, _ = self.run_swaymsg(pos_cmd)
|
||||
return success
|
||||
else:
|
||||
return self.run_swaymsg(f'output {name} disable')[0]
|
||||
|
||||
def apply_monitors_config(self, monitors):
|
||||
"""Apply monitor configuration to Sway outputs"""
|
||||
log.info(f"Applying configuration for {len(monitors)} monitors")
|
||||
|
||||
# Get current outputs
|
||||
outputs = self.get_outputs()
|
||||
output_names = [o.get('name') for o in outputs]
|
||||
log.info(f"Available outputs: {output_names}")
|
||||
|
||||
# Sort monitors by x position to match with outputs
|
||||
monitors_sorted = sorted(enumerate(monitors), key=lambda m: m[1]['x'])
|
||||
|
||||
# Match monitors to outputs
|
||||
for i, (mon_idx, mon) in enumerate(monitors_sorted):
|
||||
if i < len(output_names):
|
||||
name = output_names[i]
|
||||
log.info(f"Configuring {name}: {mon['width']}x{mon['height']} at ({mon['x']}, {mon['y']})")
|
||||
self.configure_output(
|
||||
name,
|
||||
mon['width'],
|
||||
mon['height'],
|
||||
mon['x'],
|
||||
mon['y'],
|
||||
enable=True
|
||||
)
|
||||
else:
|
||||
log.warning(f"No output available for monitor {mon_idx}")
|
||||
|
||||
# Disable extra outputs
|
||||
for i in range(len(monitors), len(output_names)):
|
||||
name = output_names[i]
|
||||
log.info(f"Disabling unused output: {name}")
|
||||
self.configure_output(name, 0, 0, 0, 0, enable=False)
|
||||
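    # Example (hypothetical output names): a client requesting two 1920x1080
    # displays side by side ends up issuing commands equivalent to
    #   swaymsg output Virtual-1 mode 1920x1080 position 0 0 enable
    #   swaymsg output Virtual-2 mode 1920x1080 position 1920 0 enable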
|
||||
def parse_monitors_config(self, data):
|
||||
"""Parse VD_AGENT_MONITORS_CONFIG message"""
|
||||
if len(data) < MONITORS_CONFIG_HEADER_SIZE:
|
||||
log.error("Monitors config data too short")
|
||||
return None
|
||||
|
||||
num_monitors, flags = struct.unpack(MONITORS_CONFIG_HEADER_FMT, data[:MONITORS_CONFIG_HEADER_SIZE])
|
||||
log.info(f"Monitors config: {num_monitors} monitors, flags={flags}")
|
||||
|
||||
monitors = []
|
||||
offset = MONITORS_CONFIG_HEADER_SIZE
|
||||
|
||||
for i in range(num_monitors):
|
||||
if offset + MON_CONFIG_SIZE > len(data):
|
||||
log.error(f"Truncated monitor config at index {i}")
|
||||
break
|
||||
|
||||
height, width, depth, x, y = struct.unpack(
|
||||
MON_CONFIG_FMT,
|
||||
data[offset:offset + MON_CONFIG_SIZE]
|
||||
)
|
||||
|
||||
monitors.append({
|
||||
'width': width,
|
||||
'height': height,
|
||||
'depth': depth,
|
||||
'x': x,
|
||||
'y': y
|
||||
})
|
||||
log.info(f" Monitor {i}: {width}x{height}+{x}+{y} depth={depth}")
|
||||
offset += MON_CONFIG_SIZE
|
||||
|
||||
return monitors
|
||||
|
||||
def send_reply(self, msg_type, error_code):
|
||||
"""Send VD_AGENT_REPLY message"""
|
||||
# Reply data: type(4) + error(4) = 8 bytes
|
||||
reply_data = struct.pack('<II', msg_type, error_code)
|
||||
|
||||
if self.send_message(VD_AGENT_REPLY, reply_data):
|
||||
log.debug(f"Sent reply for type {msg_type}: {'success' if error_code == VD_AGENT_SUCCESS else 'error'}")
|
||||
else:
|
||||
log.error(f"Failed to send reply for type {msg_type}")
|
||||
|
||||
def send_message(self, msg_type, data):
|
||||
"""Send a VDAgent message with proper chunk header"""
|
||||
if not self.port_fd:
|
||||
return False
|
||||
|
||||
# Build VDAgentMessage header
|
||||
msg_header = struct.pack(
|
||||
VDAGENT_MSG_HEADER_FMT,
|
||||
VD_AGENT_PROTOCOL,
|
||||
msg_type,
|
||||
0, # opaque
|
||||
len(data)
|
||||
)
|
||||
|
||||
# Full message = header + data
|
||||
full_msg = msg_header + data
|
||||
|
||||
# Build VDI chunk header (port=SERVER, size=message size)
|
||||
chunk_header = struct.pack(
|
||||
VDI_CHUNK_HEADER_FMT,
|
||||
VDP_SERVER_PORT,
|
||||
len(full_msg)
|
||||
)
|
||||
|
||||
# Retry writes with EAGAIN handling (non-blocking fd)
|
||||
message = chunk_header + full_msg
|
||||
retries = 10
|
||||
while retries > 0:
|
||||
try:
|
||||
os.write(self.port_fd, message)
|
||||
return True
|
||||
except OSError as e:
|
||||
if e.errno == 11: # EAGAIN - resource temporarily unavailable
|
||||
retries -= 1
|
||||
time.sleep(0.1)
|
||||
continue
|
||||
log.error(f"Failed to send message type {msg_type}: {e}")
|
||||
return False
|
||||
log.error(f"Failed to send message type {msg_type}: EAGAIN after retries")
|
||||
return False
|
||||
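    # Wire layout produced by send_message (sketch; assumes the VDI chunk
    # header defined earlier is the usual SPICE port(4) + size(4) pair):
    #   [chunk header][VDAgentMessage header, 20 bytes][payload, len(data) bytes]
    # The chunk's size field covers the message header plus the payload.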
|
||||
def announce_capabilities(self):
|
||||
"""Send VD_AGENT_ANNOUNCE_CAPABILITIES to register with SPICE server"""
|
||||
# Build capability bits - we support monitors config
|
||||
caps = 0
|
||||
caps |= (1 << VD_AGENT_CAP_MONITORS_CONFIG)
|
||||
caps |= (1 << VD_AGENT_CAP_REPLY)
|
||||
caps |= (1 << VD_AGENT_CAP_SPARSE_MONITORS_CONFIG)
|
||||
caps |= (1 << VD_AGENT_CAP_MONITORS_CONFIG_POSITION)
|
||||
|
||||
# VDAgentAnnounceCapabilities: request(4) + caps(4) = 8 bytes
|
||||
# request=1 means we want the server to send us its capabilities
|
||||
announce_data = struct.pack('<II', 1, caps)
|
||||
|
||||
if self.send_message(VD_AGENT_ANNOUNCE_CAPABILITIES, announce_data):
|
||||
log.info("Announced capabilities to SPICE server")
|
||||
else:
|
||||
log.error("Failed to announce capabilities")
|
||||
|
||||
def handle_message(self, msg_type, data):
|
||||
"""Handle a VDAgent message"""
|
||||
if msg_type == VD_AGENT_MONITORS_CONFIG:
|
||||
log.info("Received VD_AGENT_MONITORS_CONFIG")
|
||||
monitors = self.parse_monitors_config(data)
|
||||
if monitors:
|
||||
self.apply_monitors_config(monitors)
|
||||
self.send_reply(VD_AGENT_MONITORS_CONFIG, VD_AGENT_SUCCESS)
|
||||
else:
|
||||
self.send_reply(VD_AGENT_MONITORS_CONFIG, VD_AGENT_ERROR)
|
||||
|
||||
elif msg_type == VD_AGENT_ANNOUNCE_CAPABILITIES:
|
||||
log.info("Received VD_AGENT_ANNOUNCE_CAPABILITIES")
|
||||
# We could respond with our capabilities here
|
||||
# For now, just acknowledge
|
||||
|
||||
elif msg_type == VD_AGENT_DISPLAY_CONFIG:
|
||||
log.info("Received VD_AGENT_DISPLAY_CONFIG")
|
||||
# Display config for disabling client display changes
|
||||
|
||||
elif msg_type == VD_AGENT_CLIENT_DISCONNECTED:
|
||||
log.info("Client disconnected")
|
||||
|
||||
else:
|
||||
log.debug(f"Unhandled message type: {msg_type}")
|
||||
|
||||
def read_message(self):
|
||||
"""Read a single VDAgent message from the port (with chunk header)"""
|
||||
# Read VDI chunk header first
|
||||
try:
|
||||
chunk_header_data = os.read(self.port_fd, VDI_CHUNK_HEADER_SIZE)
|
||||
except OSError as e:
|
||||
if e.errno == 11: # EAGAIN
|
||||
return None
|
||||
raise
|
||||
|
||||
if len(chunk_header_data) < VDI_CHUNK_HEADER_SIZE:
|
||||
if len(chunk_header_data) == 0:
|
||||
return None
|
||||
log.warning(f"Short chunk header read: {len(chunk_header_data)} bytes")
|
||||
return None
|
||||
|
||||
port, chunk_size = struct.unpack(VDI_CHUNK_HEADER_FMT, chunk_header_data)
|
||||
log.debug(f"Chunk header: port={port}, size={chunk_size}")
|
||||
|
||||
if chunk_size < VDAGENT_MSG_HEADER_SIZE:
|
||||
log.warning(f"Chunk size too small: {chunk_size}")
|
||||
return None
|
||||
|
||||
# Read VDAgent message header
|
||||
try:
|
||||
header_data = os.read(self.port_fd, VDAGENT_MSG_HEADER_SIZE)
|
||||
except OSError as e:
|
||||
if e.errno == 11: # EAGAIN
|
||||
return None
|
||||
raise
|
||||
|
||||
if len(header_data) < VDAGENT_MSG_HEADER_SIZE:
|
||||
log.warning(f"Short message header read: {len(header_data)} bytes")
|
||||
return None
|
||||
|
||||
protocol, msg_type, opaque, size = struct.unpack(VDAGENT_MSG_HEADER_FMT, header_data)
|
||||
|
||||
if protocol != VD_AGENT_PROTOCOL:
|
||||
log.warning(f"Unknown protocol: {protocol}")
|
||||
return None
|
||||
|
||||
# Read message data
|
||||
data = b''
|
||||
while len(data) < size:
|
||||
try:
|
||||
chunk = os.read(self.port_fd, size - len(data))
|
||||
if not chunk:
|
||||
break
|
||||
data += chunk
|
||||
except OSError as e:
|
||||
if e.errno == 11: # EAGAIN
|
||||
time.sleep(0.01)
|
||||
continue
|
||||
raise
|
||||
|
||||
return msg_type, data
|
||||
|
||||
def signal_handler(self, signum, frame):
|
||||
"""Handle shutdown signals"""
|
||||
log.info(f"Received signal {signum}, shutting down...")
|
||||
self.running = False
|
||||
|
||||
def run(self):
|
||||
"""Main loop"""
|
||||
# Set up signal handlers
|
||||
signal.signal(signal.SIGTERM, self.signal_handler)
|
||||
signal.signal(signal.SIGINT, self.signal_handler)
|
||||
|
||||
# Find Sway socket
|
||||
self.sway_socket = self.find_sway_socket()
|
||||
if self.sway_socket:
|
||||
log.info(f"Using Sway socket: {self.sway_socket}")
|
||||
else:
|
||||
log.warning("No Sway socket found, will retry...")
|
||||
|
||||
# Wait for virtio port
|
||||
log.info(f"Waiting for virtio port: {VIRTIO_PORT}")
|
||||
while self.running and not Path(VIRTIO_PORT).exists():
|
||||
time.sleep(1)
|
||||
|
||||
if not self.running:
|
||||
return
|
||||
|
||||
log.info("Opening virtio port...")
|
||||
try:
|
||||
self.port_fd = os.open(VIRTIO_PORT, os.O_RDWR | os.O_NONBLOCK)
|
||||
except OSError as e:
|
||||
log.error(f"Failed to open virtio port: {e}")
|
||||
return
|
||||
|
||||
log.info("eco-vdagent started, announcing capabilities...")
|
||||
|
||||
# Announce our capabilities to the SPICE server
|
||||
self.announce_capabilities()
|
||||
|
||||
log.info("Listening for SPICE agent messages...")
|
||||
|
||||
# Main loop
|
||||
while self.running:
|
||||
try:
|
||||
# Try to find Sway socket if not found yet
|
||||
if not self.sway_socket:
|
||||
self.sway_socket = self.find_sway_socket()
|
||||
|
||||
result = self.read_message()
|
||||
if result:
|
||||
msg_type, data = result
|
||||
self.handle_message(msg_type, data)
|
||||
else:
|
||||
time.sleep(0.1)
|
||||
except Exception as e:
|
||||
log.error(f"Error in main loop: {e}")
|
||||
time.sleep(1)
|
||||
|
||||
if self.port_fd:
|
||||
os.close(self.port_fd)
|
||||
|
||||
log.info("eco-vdagent stopped")
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
agent = EcoVDAgent()
|
||||
agent.run()
|
||||
@@ -1,7 +1,8 @@
#!/bin/bash
#
# EcoOS Installer
# Installs EcoOS from live USB to disk
# Installs EcoOS from live USB/SD to disk
# Supports: x86_64 (UEFI), ARM64 (UEFI), Raspberry Pi (native boot)
#

set -e
|
||||
@@ -12,6 +13,45 @@ HOSTNAME="ecoos"
|
||||
USERNAME="ecouser"
|
||||
SQUASHFS_PATH="/run/live/medium/live/filesystem.squashfs"
|
||||
|
||||
# Detect architecture
|
||||
detect_architecture() {
|
||||
local arch=$(uname -m)
|
||||
local is_rpi="no"
|
||||
|
||||
# Check if running on Raspberry Pi
|
||||
if [ -f /sys/firmware/devicetree/base/model ]; then
|
||||
local model=$(cat /sys/firmware/devicetree/base/model 2>/dev/null || echo "")
|
||||
if [[ "$model" == *"Raspberry Pi"* ]]; then
|
||||
is_rpi="yes"
|
||||
fi
|
||||
fi
|
||||
|
||||
case "$arch" in
|
||||
x86_64)
|
||||
ARCH_TYPE="amd64"
|
||||
BOOT_TYPE="uefi"
|
||||
;;
|
||||
aarch64)
|
||||
if [ "$is_rpi" = "yes" ]; then
|
||||
ARCH_TYPE="rpi"
|
||||
BOOT_TYPE="rpi"
|
||||
else
|
||||
ARCH_TYPE="arm64"
|
||||
BOOT_TYPE="uefi"
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
ARCH_TYPE="unknown"
|
||||
BOOT_TYPE="unknown"
|
||||
;;
|
||||
esac
|
||||
|
||||
export ARCH_TYPE BOOT_TYPE
|
||||
}
|
||||
|
||||
# Call architecture detection early
|
||||
detect_architecture
|
||||
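# Illustrative outcomes: a Raspberry Pi running a 64-bit kernel reports
# "aarch64" plus a devicetree model containing "Raspberry Pi", giving
# ARCH_TYPE=rpi BOOT_TYPE=rpi; a generic ARM64 UEFI machine gives
# ARCH_TYPE=arm64 BOOT_TYPE=uefi; x86_64 gives ARCH_TYPE=amd64 BOOT_TYPE=uefi.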
|
||||
# Colors
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
@@ -191,43 +231,66 @@ select_disk() {
|
||||
partition_disk() {
|
||||
local disk=$1
|
||||
|
||||
log "Partitioning $disk..."
|
||||
log "Partitioning $disk for $ARCH_TYPE ($BOOT_TYPE boot)..."
|
||||
|
||||
# Wipe existing partition table
|
||||
wipefs -a "$disk" >/dev/null 2>&1 || true
|
||||
|
||||
# Create GPT partition table
|
||||
parted -s "$disk" mklabel gpt
|
||||
if [ "$BOOT_TYPE" = "rpi" ]; then
|
||||
# Raspberry Pi uses MBR partition table
|
||||
log "Creating MBR partition table for Raspberry Pi..."
|
||||
parted -s "$disk" mklabel msdos
|
||||
|
||||
# Create EFI partition (512MB)
|
||||
parted -s "$disk" mkpart ESP fat32 1MiB 513MiB
|
||||
parted -s "$disk" set 1 esp on
|
||||
# Create boot partition (FAT32, 256MB)
|
||||
parted -s "$disk" mkpart primary fat32 1MiB 257MiB
|
||||
parted -s "$disk" set 1 boot on
|
||||
|
||||
# Create root partition (rest of disk)
|
||||
parted -s "$disk" mkpart root ext4 513MiB 100%
|
||||
# Create root partition (rest of disk)
|
||||
parted -s "$disk" mkpart primary ext4 257MiB 100%
|
||||
else
|
||||
# x86_64 and generic ARM64 use GPT with EFI
|
||||
log "Creating GPT partition table with EFI..."
|
||||
parted -s "$disk" mklabel gpt
|
||||
|
||||
# Create EFI partition (512MB)
|
||||
parted -s "$disk" mkpart ESP fat32 1MiB 513MiB
|
||||
parted -s "$disk" set 1 esp on
|
||||
|
||||
# Create root partition (rest of disk)
|
||||
parted -s "$disk" mkpart root ext4 513MiB 100%
|
||||
fi
|
||||
|
||||
# Wait for partitions to appear
|
||||
sleep 2
|
||||
partprobe "$disk"
|
||||
sleep 1
|
||||
|
||||
# Determine partition names (nvme vs sd)
|
||||
if [[ "$disk" == *"nvme"* ]]; then
|
||||
EFI_PART="${disk}p1"
|
||||
# Determine partition names (nvme vs sd vs mmcblk)
|
||||
if [[ "$disk" == *"nvme"* ]] || [[ "$disk" == *"mmcblk"* ]]; then
|
||||
BOOT_PART="${disk}p1"
|
||||
ROOT_PART="${disk}p2"
|
||||
else
|
||||
EFI_PART="${disk}1"
|
||||
BOOT_PART="${disk}1"
|
||||
ROOT_PART="${disk}2"
|
||||
fi
|
||||
|
||||
log "Created partitions: EFI=$EFI_PART, Root=$ROOT_PART"
|
||||
# For UEFI systems, BOOT_PART is the EFI partition
|
||||
if [ "$BOOT_TYPE" = "uefi" ]; then
|
||||
EFI_PART="$BOOT_PART"
|
||||
fi
|
||||
|
||||
log "Created partitions: Boot=$BOOT_PART, Root=$ROOT_PART"
|
||||
}
|
||||
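# Resulting layout (illustrative, assuming /dev/sda):
#   UEFI: sda1 = 512MB FAT32 ESP (mounted at /boot/efi), sda2 = ext4 root (/)
#   RPi:  sda1 = 256MB FAT32 boot (mounted at /boot),     sda2 = ext4 root (/)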
|
||||
# Format partitions
|
||||
format_partitions() {
|
||||
log "Formatting partitions..."
|
||||
|
||||
mkfs.fat -F 32 -n "EFI" "$EFI_PART"
|
||||
if [ "$BOOT_TYPE" = "rpi" ]; then
|
||||
mkfs.fat -F 32 -n "boot" "$BOOT_PART"
|
||||
else
|
||||
mkfs.fat -F 32 -n "EFI" "$EFI_PART"
|
||||
fi
|
||||
mkfs.ext4 -F -L "EcoOS" "$ROOT_PART"
|
||||
|
||||
log "Partitions formatted"
|
||||
@@ -240,8 +303,15 @@ mount_partitions() {
|
||||
mkdir -p /mnt/target
|
||||
mount "$ROOT_PART" /mnt/target
|
||||
|
||||
mkdir -p /mnt/target/boot/efi
|
||||
mount "$EFI_PART" /mnt/target/boot/efi
|
||||
if [ "$BOOT_TYPE" = "rpi" ]; then
|
||||
# Raspberry Pi mounts boot at /boot
|
||||
mkdir -p /mnt/target/boot
|
||||
mount "$BOOT_PART" /mnt/target/boot
|
||||
else
|
||||
# UEFI systems mount EFI at /boot/efi
|
||||
mkdir -p /mnt/target/boot/efi
|
||||
mount "$EFI_PART" /mnt/target/boot/efi
|
||||
fi
|
||||
|
||||
log "Partitions mounted at /mnt/target"
|
||||
}
|
||||
@@ -292,14 +362,22 @@ configure_system() {
|
||||
|
||||
# Get UUIDs
|
||||
local root_uuid=$(blkid -s UUID -o value "$ROOT_PART")
|
||||
local efi_uuid=$(blkid -s UUID -o value "$EFI_PART")
|
||||
local boot_uuid=$(blkid -s UUID -o value "$BOOT_PART")
|
||||
|
||||
# Create fstab
|
||||
cat > /mnt/target/etc/fstab << EOF
|
||||
# Create fstab based on boot type
|
||||
if [ "$BOOT_TYPE" = "rpi" ]; then
|
||||
cat > /mnt/target/etc/fstab << EOF
|
||||
# EcoOS fstab - Raspberry Pi
|
||||
UUID=$root_uuid / ext4 defaults,noatime 0 1
|
||||
UUID=$boot_uuid /boot vfat defaults 0 2
|
||||
EOF
|
||||
else
|
||||
cat > /mnt/target/etc/fstab << EOF
|
||||
# EcoOS fstab
|
||||
UUID=$root_uuid / ext4 defaults,noatime 0 1
|
||||
UUID=$efi_uuid /boot/efi vfat umask=0077 0 1
|
||||
UUID=$boot_uuid /boot/efi vfat umask=0077 0 1
|
||||
EOF
|
||||
fi
|
||||
|
||||
# Set hostname
|
||||
echo "$HOSTNAME" > /mnt/target/etc/hostname
|
||||
@@ -406,9 +484,89 @@ NETEOF
|
||||
log "System configured"
|
||||
}
|
||||
|
||||
# Configure Raspberry Pi boot files
|
||||
configure_rpi_boot() {
|
||||
log "Configuring Raspberry Pi boot files..."
|
||||
|
||||
local root_uuid=$(blkid -s UUID -o value "$ROOT_PART")
|
||||
|
||||
# Copy kernel and initrd to boot partition
|
||||
local kernel=$(ls /mnt/target/boot/vmlinuz-* 2>/dev/null | sort -V | tail -1)
|
||||
local initrd=$(ls /mnt/target/boot/initrd.img-* 2>/dev/null | sort -V | tail -1)
|
||||
|
||||
if [ -n "$kernel" ]; then
|
||||
cp "$kernel" /mnt/target/boot/vmlinuz
|
||||
log "Copied kernel: $(basename $kernel)"
|
||||
fi
|
||||
|
||||
if [ -n "$initrd" ]; then
|
||||
cp "$initrd" /mnt/target/boot/initrd.img
|
||||
log "Copied initrd: $(basename $initrd)"
|
||||
fi
|
||||
|
||||
# Copy device tree blobs
|
||||
local dtb_dir=$(ls -d /mnt/target/usr/lib/linux-image-*-raspi 2>/dev/null | tail -1)
|
||||
if [ -d "$dtb_dir/broadcom" ]; then
|
||||
cp "$dtb_dir/broadcom"/*.dtb /mnt/target/boot/ 2>/dev/null || true
|
||||
log "Copied device tree blobs"
|
||||
fi
|
||||
if [ -d "$dtb_dir/overlays" ]; then
|
||||
mkdir -p /mnt/target/boot/overlays
|
||||
cp -r "$dtb_dir/overlays"/* /mnt/target/boot/overlays/ 2>/dev/null || true
|
||||
log "Copied device tree overlays"
|
||||
fi
|
||||
|
||||
# Copy Pi firmware files
|
||||
if [ -d /mnt/target/usr/lib/raspi-firmware ]; then
|
||||
cp /mnt/target/usr/lib/raspi-firmware/*.bin /mnt/target/boot/ 2>/dev/null || true
|
||||
cp /mnt/target/usr/lib/raspi-firmware/*.elf /mnt/target/boot/ 2>/dev/null || true
|
||||
cp /mnt/target/usr/lib/raspi-firmware/*.dat /mnt/target/boot/ 2>/dev/null || true
|
||||
log "Copied Raspberry Pi firmware"
|
||||
fi
|
||||
|
||||
# Create config.txt
|
||||
cat > /mnt/target/boot/config.txt << 'EOF'
|
||||
# EcoOS Raspberry Pi Configuration
|
||||
# Supports Pi 3, 4, and 5
|
||||
|
||||
# Enable 64-bit mode
|
||||
arm_64bit=1
|
||||
|
||||
# Kernel and initrd
|
||||
kernel=vmlinuz
|
||||
initramfs initrd.img followkernel
|
||||
|
||||
# Enable serial console for debugging
|
||||
enable_uart=1
|
||||
|
||||
# GPU/display settings
|
||||
dtoverlay=vc4-kms-v3d
|
||||
gpu_mem=256
|
||||
|
||||
# USB and power settings (Pi 4/5)
|
||||
max_usb_current=1
|
||||
|
||||
# Audio
|
||||
dtparam=audio=on
|
||||
|
||||
# Camera/display interfaces
|
||||
camera_auto_detect=1
|
||||
display_auto_detect=1
|
||||
|
||||
# Pi 5 specific (ignored on older models)
|
||||
[pi5]
|
||||
dtoverlay=dwc2,dr_mode=host
|
||||
EOF
|
||||
|
||||
# Create cmdline.txt with root UUID
|
||||
echo "console=serial0,115200 console=tty1 root=UUID=$root_uuid rootfstype=ext4 fsck.repair=yes rootwait quiet splash" > /mnt/target/boot/cmdline.txt
|
||||
|
||||
log "Raspberry Pi boot configured"
|
||||
}
|
||||
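# After configure_rpi_boot the boot partition holds roughly (illustrative):
# vmlinuz, initrd.img, *.dtb, overlays/, the firmware *.bin/*.elf/*.dat files,
# config.txt and cmdline.txt, i.e. everything the Pi firmware needs to boot
# without GRUB.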
|
||||
# Install bootloader
|
||||
install_bootloader() {
|
||||
log "Installing GRUB bootloader..."
|
||||
log "Installing bootloader for $ARCH_TYPE ($BOOT_TYPE)..."
|
||||
|
||||
# Mount necessary filesystems for chroot
|
||||
mount --bind /dev /mnt/target/dev
|
||||
@@ -417,22 +575,7 @@ install_bootloader() {
|
||||
mount --bind /sys /mnt/target/sys
|
||||
mount --bind /run /mnt/target/run
|
||||
|
||||
# Fix GRUB default config - remove casper/live boot parameters and add serial console
|
||||
if [ -f /mnt/target/etc/default/grub ]; then
|
||||
# Remove any boot=casper or live-related parameters
|
||||
sed -i 's/boot=casper//g' /mnt/target/etc/default/grub
|
||||
# Update GRUB_CMDLINE_LINUX_DEFAULT with serial console for debugging
|
||||
sed -i 's/^GRUB_CMDLINE_LINUX_DEFAULT=.*/GRUB_CMDLINE_LINUX_DEFAULT="console=tty1 console=ttyS0,115200n8"/' /mnt/target/etc/default/grub
|
||||
# If line doesn't exist, add it
|
||||
if ! grep -q "GRUB_CMDLINE_LINUX_DEFAULT" /mnt/target/etc/default/grub; then
|
||||
echo 'GRUB_CMDLINE_LINUX_DEFAULT="console=tty1 console=ttyS0,115200n8"' >> /mnt/target/etc/default/grub
|
||||
fi
|
||||
# Enable serial terminal in GRUB
|
||||
echo 'GRUB_TERMINAL="console serial"' >> /mnt/target/etc/default/grub
|
||||
echo 'GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1"' >> /mnt/target/etc/default/grub
|
||||
fi
|
||||
|
||||
# Disable casper-related services
|
||||
# Disable casper-related services (common to all architectures)
|
||||
log "Disabling live boot services..."
|
||||
chroot /mnt/target systemctl disable casper.service 2>/dev/null || true
|
||||
chroot /mnt/target systemctl disable casper-md5check.service 2>/dev/null || true
|
||||
@@ -454,11 +597,38 @@ install_bootloader() {
|
||||
# Ensure proper boot target
|
||||
chroot /mnt/target systemctl set-default multi-user.target 2>/dev/null || true
|
||||
|
||||
# Install GRUB
|
||||
chroot /mnt/target grub-install --target=x86_64-efi --efi-directory=/boot/efi --bootloader-id=EcoOS --recheck
|
||||
if [ "$BOOT_TYPE" = "rpi" ]; then
|
||||
# Raspberry Pi uses native bootloader (no GRUB)
|
||||
configure_rpi_boot
|
||||
else
|
||||
# UEFI systems use GRUB
|
||||
log "Installing GRUB bootloader..."
|
||||
|
||||
# Generate GRUB config
|
||||
chroot /mnt/target update-grub
|
||||
# Fix GRUB default config - remove casper/live boot parameters and add serial console
|
||||
if [ -f /mnt/target/etc/default/grub ]; then
|
||||
# Remove any boot=casper or live-related parameters
|
||||
sed -i 's/boot=casper//g' /mnt/target/etc/default/grub
|
||||
# Update GRUB_CMDLINE_LINUX_DEFAULT with serial console for debugging
|
||||
sed -i 's/^GRUB_CMDLINE_LINUX_DEFAULT=.*/GRUB_CMDLINE_LINUX_DEFAULT="console=tty1 console=ttyS0,115200n8"/' /mnt/target/etc/default/grub
|
||||
# If line doesn't exist, add it
|
||||
if ! grep -q "GRUB_CMDLINE_LINUX_DEFAULT" /mnt/target/etc/default/grub; then
|
||||
echo 'GRUB_CMDLINE_LINUX_DEFAULT="console=tty1 console=ttyS0,115200n8"' >> /mnt/target/etc/default/grub
|
||||
fi
|
||||
# Enable serial terminal in GRUB
|
||||
echo 'GRUB_TERMINAL="console serial"' >> /mnt/target/etc/default/grub
|
||||
echo 'GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1"' >> /mnt/target/etc/default/grub
|
||||
fi
|
||||
|
||||
# Install GRUB based on architecture
|
||||
if [ "$ARCH_TYPE" = "amd64" ]; then
|
||||
chroot /mnt/target grub-install --target=x86_64-efi --efi-directory=/boot/efi --bootloader-id=EcoOS --recheck
|
||||
elif [ "$ARCH_TYPE" = "arm64" ]; then
|
||||
chroot /mnt/target grub-install --target=arm64-efi --efi-directory=/boot/efi --bootloader-id=EcoOS --recheck
|
||||
fi
|
||||
|
||||
# Generate GRUB config
|
||||
chroot /mnt/target update-grub
|
||||
fi
|
||||
|
||||
# Cleanup mounts (use lazy unmount for stubborn mounts, reverse order)
|
||||
sync
|
||||
@@ -478,8 +648,12 @@ cleanup_and_reboot() {
|
||||
# Sync disks
|
||||
sync
|
||||
|
||||
# Unmount
|
||||
umount /mnt/target/boot/efi
|
||||
# Unmount based on boot type
|
||||
if [ "$BOOT_TYPE" = "rpi" ]; then
|
||||
umount /mnt/target/boot
|
||||
else
|
||||
umount /mnt/target/boot/efi
|
||||
fi
|
||||
umount /mnt/target
|
||||
|
||||
echo ""
|
||||
|
||||
@@ -0,0 +1,10 @@
# AMD64-specific packages
# These are appended to base.list.chroot for amd64 builds

# EFI bootloader (required for UEFI boot on x86_64)
grub-efi-amd64
grub-efi-amd64-signed
shim-signed

# x86_64 kernel (same as generic, included for clarity)
# linux-image-generic is already in base.list.chroot
@@ -0,0 +1,9 @@
# ARM64-specific packages
# These are appended to base.list.chroot for arm64 builds

# EFI bootloader (required for UEFI boot on ARM64)
grub-efi-arm64
grub-efi-arm64-signed

# ARM64 kernel (generic works for most ARM64 UEFI systems)
# linux-image-generic is already in base.list.chroot
@@ -0,0 +1,18 @@
# Raspberry Pi specific packages
# These are appended to base.list.chroot for rpi builds

# Raspberry Pi kernel (optimized for Pi hardware)
linux-image-raspi
linux-modules-extra-raspi

# Raspberry Pi firmware and utilities
linux-firmware-raspi
raspi-firmware
libraspberrypi-bin
libraspberrypi0

# Pi-specific hardware support
pi-bluetooth
rpi-eeprom

# Note: No GRUB packages - Pi uses native bootloader (config.txt + start.elf)
@@ -1,4 +1,7 @@
# EcoOS Base Packages
# Common packages for all architectures
# Architecture-specific packages are in base-{amd64,arm64,rpi}.list.chroot

# System essentials
linux-image-generic
linux-headers-generic
@@ -8,10 +11,8 @@ network-manager
openssh-server
sudo

# EFI bootloader (required for UEFI boot)
grub-efi-amd64
grub-efi-amd64-signed
shim-signed
# Note: EFI bootloader packages are architecture-specific
# See base-amd64.list.chroot, base-arm64.list.chroot, base-rpi.list.chroot

# Sway + Wayland
sway
@@ -36,6 +37,7 @@ vim
nano
tmux
jq
python3

# System utilities
pciutils

@@ -37,3 +37,4 @@ bluez-tools
# Virtualization support
qemu-guest-agent
open-vm-tools
# Note: Using eco-vdagent (Wayland-native) instead of spice-vdagent (X11-only)
234
isobuild/scripts/create-rpi-image.sh
Executable file
@@ -0,0 +1,234 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Create Raspberry Pi bootable image from live-build output
|
||||
# This script creates a proper Pi-bootable image with:
|
||||
# - Partition 1: FAT32 boot partition (256MB) with Pi firmware
|
||||
# - Partition 2: ext4 root filesystem
|
||||
#
|
||||
# Usage: ./create-rpi-image.sh <squashfs_or_chroot_path> <output_image>
|
||||
#
|
||||
|
||||
set -e
|
||||
|
||||
INPUT_PATH="$1"
|
||||
OUTPUT_IMG="$2"
|
||||
IMG_SIZE="${3:-8G}"
|
||||
|
||||
if [ -z "$INPUT_PATH" ] || [ -z "$OUTPUT_IMG" ]; then
|
||||
echo "Usage: $0 <squashfs_or_chroot_path> <output_image> [size]"
|
||||
echo ""
|
||||
echo "Arguments:"
|
||||
echo " squashfs_or_chroot_path Path to filesystem.squashfs or chroot directory"
|
||||
echo " output_image Output .img file path"
|
||||
echo " size Image size (default: 8G)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "=== Creating Raspberry Pi Image ==="
|
||||
echo "Input: $INPUT_PATH"
|
||||
echo "Output: $OUTPUT_IMG"
|
||||
echo "Size: $IMG_SIZE"
|
||||
|
||||
# Create empty image
|
||||
echo "Creating empty image..."
|
||||
truncate -s $IMG_SIZE "$OUTPUT_IMG"
|
||||
|
||||
# Create partition table (MBR for Pi compatibility)
|
||||
echo "Creating partition table..."
|
||||
parted -s "$OUTPUT_IMG" mklabel msdos
|
||||
parted -s "$OUTPUT_IMG" mkpart primary fat32 1MiB 257MiB
|
||||
parted -s "$OUTPUT_IMG" mkpart primary ext4 257MiB 100%
|
||||
parted -s "$OUTPUT_IMG" set 1 boot on
|
||||
|
||||
# Setup loop device
|
||||
echo "Setting up loop device..."
|
||||
LOOP_DEV=$(losetup --find --show --partscan "$OUTPUT_IMG")
|
||||
echo "Loop device: $LOOP_DEV"
|
||||
|
||||
# Wait for partitions to appear
|
||||
sleep 2
|
||||
|
||||
BOOT_PART="${LOOP_DEV}p1"
|
||||
ROOT_PART="${LOOP_DEV}p2"
|
||||
|
||||
# Verify partitions exist
|
||||
if [ ! -b "$BOOT_PART" ] || [ ! -b "$ROOT_PART" ]; then
|
||||
echo "ERROR: Partitions not found. Trying partx..."
|
||||
partx -a "$LOOP_DEV" 2>/dev/null || true
|
||||
sleep 2
|
||||
fi
|
||||
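# Optional sanity check (illustrative): "lsblk $LOOP_DEV" should now list
# ${LOOP_DEV}p1 (256M boot partition) and ${LOOP_DEV}p2 (rest of the image).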
|
||||
echo "Boot partition: $BOOT_PART"
|
||||
echo "Root partition: $ROOT_PART"
|
||||
|
||||
# Format partitions
|
||||
echo "Formatting partitions..."
|
||||
mkfs.vfat -F 32 -n "boot" "$BOOT_PART"
|
||||
mkfs.ext4 -L "EcoOS" "$ROOT_PART"
|
||||
|
||||
# Create mount points
|
||||
BOOT_MNT=$(mktemp -d)
|
||||
ROOT_MNT=$(mktemp -d)
|
||||
|
||||
# Mount partitions
|
||||
echo "Mounting partitions..."
|
||||
mount "$BOOT_PART" "$BOOT_MNT"
|
||||
mount "$ROOT_PART" "$ROOT_MNT"
|
||||
|
||||
# Extract or copy rootfs
|
||||
echo "Copying root filesystem..."
|
||||
if [ -f "$INPUT_PATH" ] && file "$INPUT_PATH" | grep -q "Squashfs"; then
|
||||
# It's a squashfs file - extract it
|
||||
echo "Extracting squashfs..."
|
||||
unsquashfs -f -d "$ROOT_MNT" "$INPUT_PATH"
|
||||
elif [ -d "$INPUT_PATH" ]; then
|
||||
# It's a directory (chroot) - copy it
|
||||
echo "Copying chroot directory..."
|
||||
cp -a "$INPUT_PATH"/* "$ROOT_MNT"/
|
||||
else
|
||||
echo "ERROR: Input path is neither a squashfs file nor a directory"
|
||||
umount "$BOOT_MNT" "$ROOT_MNT"
|
||||
losetup -d "$LOOP_DEV"
|
||||
rm -rf "$BOOT_MNT" "$ROOT_MNT"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Copy kernel and initrd to boot partition
|
||||
echo "Setting up boot partition..."
|
||||
|
||||
# Find kernel and initrd
|
||||
KERNEL=$(ls "$ROOT_MNT"/boot/vmlinuz-* 2>/dev/null | sort -V | tail -1)
|
||||
INITRD=$(ls "$ROOT_MNT"/boot/initrd.img-* 2>/dev/null | sort -V | tail -1)
|
||||
|
||||
if [ -n "$KERNEL" ]; then
|
||||
cp "$KERNEL" "$BOOT_MNT/vmlinuz"
|
||||
echo "Copied kernel: $(basename $KERNEL)"
|
||||
fi
|
||||
|
||||
if [ -n "$INITRD" ]; then
|
||||
cp "$INITRD" "$BOOT_MNT/initrd.img"
|
||||
echo "Copied initrd: $(basename $INITRD)"
|
||||
fi
|
||||
|
||||
# Copy device tree blobs if present
|
||||
if [ -d "$ROOT_MNT/usr/lib/linux-image-"*"-raspi" ]; then
|
||||
DTB_DIR=$(ls -d "$ROOT_MNT/usr/lib/linux-image-"*"-raspi" 2>/dev/null | tail -1)
|
||||
if [ -d "$DTB_DIR/broadcom" ]; then
|
||||
cp -r "$DTB_DIR/broadcom"/*.dtb "$BOOT_MNT/" 2>/dev/null || true
|
||||
echo "Copied device tree blobs"
|
||||
fi
|
||||
if [ -d "$DTB_DIR/overlays" ]; then
|
||||
mkdir -p "$BOOT_MNT/overlays"
|
||||
cp -r "$DTB_DIR/overlays"/* "$BOOT_MNT/overlays/" 2>/dev/null || true
|
||||
echo "Copied device tree overlays"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Copy Pi firmware files
|
||||
echo "Copying Raspberry Pi firmware..."
|
||||
if [ -d "$ROOT_MNT/usr/lib/raspi-firmware" ]; then
|
||||
cp "$ROOT_MNT/usr/lib/raspi-firmware"/*.bin "$BOOT_MNT/" 2>/dev/null || true
|
||||
cp "$ROOT_MNT/usr/lib/raspi-firmware"/*.elf "$BOOT_MNT/" 2>/dev/null || true
|
||||
cp "$ROOT_MNT/usr/lib/raspi-firmware"/*.dat "$BOOT_MNT/" 2>/dev/null || true
|
||||
echo "Copied firmware files from raspi-firmware"
|
||||
elif [ -d "$ROOT_MNT/boot/firmware" ]; then
|
||||
cp "$ROOT_MNT/boot/firmware"/*.bin "$BOOT_MNT/" 2>/dev/null || true
|
||||
cp "$ROOT_MNT/boot/firmware"/*.elf "$BOOT_MNT/" 2>/dev/null || true
|
||||
cp "$ROOT_MNT/boot/firmware"/*.dat "$BOOT_MNT/" 2>/dev/null || true
|
||||
echo "Copied firmware files from /boot/firmware"
|
||||
fi
|
||||
|
||||
# Create config.txt if not present
|
||||
if [ ! -f "$BOOT_MNT/config.txt" ]; then
|
||||
echo "Creating config.txt..."
|
||||
cat > "$BOOT_MNT/config.txt" << 'EOF'
|
||||
# EcoOS Raspberry Pi Configuration
|
||||
# Supports Pi 3, 4, and 5
|
||||
|
||||
# Enable 64-bit mode
|
||||
arm_64bit=1
|
||||
|
||||
# Kernel and initrd
|
||||
kernel=vmlinuz
|
||||
initramfs initrd.img followkernel
|
||||
|
||||
# Enable serial console for debugging
|
||||
enable_uart=1
|
||||
|
||||
# GPU/display settings
|
||||
dtoverlay=vc4-kms-v3d
|
||||
gpu_mem=256
|
||||
|
||||
# USB and power settings (Pi 4/5)
|
||||
max_usb_current=1
|
||||
|
||||
# Audio
|
||||
dtparam=audio=on
|
||||
|
||||
# Camera/display interfaces
|
||||
camera_auto_detect=1
|
||||
display_auto_detect=1
|
||||
|
||||
# Pi 5 specific (ignored on older models)
|
||||
[pi5]
|
||||
dtoverlay=dwc2,dr_mode=host
|
||||
EOF
|
||||
fi
|
||||
|
||||
# Create cmdline.txt if not present
|
||||
if [ ! -f "$BOOT_MNT/cmdline.txt" ]; then
|
||||
echo "Creating cmdline.txt..."
|
||||
# Get the UUID of the root partition
|
||||
ROOT_UUID=$(blkid -s UUID -o value "$ROOT_PART")
|
||||
if [ -n "$ROOT_UUID" ]; then
|
||||
echo "console=serial0,115200 console=tty1 root=UUID=$ROOT_UUID rootfstype=ext4 fsck.repair=yes rootwait quiet splash" > "$BOOT_MNT/cmdline.txt"
|
||||
else
|
||||
echo "console=serial0,115200 console=tty1 root=LABEL=EcoOS rootfstype=ext4 fsck.repair=yes rootwait quiet splash" > "$BOOT_MNT/cmdline.txt"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Update fstab in the root filesystem
|
||||
echo "Updating /etc/fstab..."
|
||||
BOOT_UUID=$(blkid -s UUID -o value "$BOOT_PART")
|
||||
ROOT_UUID=$(blkid -s UUID -o value "$ROOT_PART")
|
||||
|
||||
cat > "$ROOT_MNT/etc/fstab" << EOF
|
||||
# EcoOS fstab - Raspberry Pi
|
||||
# <file system> <mount point> <type> <options> <dump> <pass>
|
||||
|
||||
# Root filesystem
|
||||
UUID=$ROOT_UUID / ext4 defaults,noatime 0 1
|
||||
|
||||
# Boot partition
|
||||
UUID=$BOOT_UUID /boot vfat defaults 0 2
|
||||
|
||||
# Swap (if needed)
|
||||
# /swapfile none swap sw 0 0
|
||||
EOF
|
||||
|
||||
# Create symlink for boot files in rootfs
|
||||
mkdir -p "$ROOT_MNT/boot"
|
||||
echo "Boot partition will be mounted at /boot"
|
||||
|
||||
# Set hostname
|
||||
echo "ecoos-rpi" > "$ROOT_MNT/etc/hostname"
|
||||
|
||||
# Cleanup
|
||||
echo "Cleaning up..."
|
||||
sync
|
||||
umount "$BOOT_MNT"
|
||||
umount "$ROOT_MNT"
|
||||
losetup -d "$LOOP_DEV"
|
||||
rm -rf "$BOOT_MNT" "$ROOT_MNT"
|
||||
|
||||
# Final size
|
||||
FINAL_SIZE=$(ls -lh "$OUTPUT_IMG" | awk '{print $5}')
|
||||
echo ""
|
||||
echo "=== Raspberry Pi Image Created ==="
|
||||
echo "Output: $OUTPUT_IMG"
|
||||
echo "Size: $FINAL_SIZE"
|
||||
echo ""
|
||||
echo "To flash to SD card:"
|
||||
echo " sudo dd if=$OUTPUT_IMG of=/dev/sdX bs=4M status=progress"
|
||||
echo ""
|
||||
echo "Or use Raspberry Pi Imager for a safer flash."
|
||||
@@ -3,6 +3,8 @@
|
||||
# Build EcoOS ISO using Docker
|
||||
# This avoids needing to install live-build on the host
|
||||
#
|
||||
# Usage: ./docker-build.sh [--arch=amd64|arm64|rpi]
|
||||
#
|
||||
|
||||
set -e
|
||||
|
||||
@@ -10,25 +12,110 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
ISOBUILD_DIR="$(dirname "$SCRIPT_DIR")"
|
||||
ECO_OS_DIR="$(dirname "$ISOBUILD_DIR")"
|
||||
|
||||
# Default architecture
|
||||
TARGET_ARCH="amd64"
|
||||
|
||||
# Parse arguments
|
||||
for arg in "$@"; do
|
||||
case $arg in
|
||||
--arch=*)
|
||||
TARGET_ARCH="${arg#*=}"
|
||||
;;
|
||||
-h|--help)
|
||||
echo "Usage: $0 [--arch=amd64|arm64|rpi]"
|
||||
echo ""
|
||||
echo "Architectures:"
|
||||
echo " amd64 - x86_64 with UEFI/GRUB boot (default)"
|
||||
echo " arm64 - Generic ARM64 with UEFI/GRUB boot"
|
||||
echo " rpi - Raspberry Pi 3/4/5 with native bootloader"
|
||||
exit 0
|
||||
;;
|
||||
*)
|
||||
echo "Unknown argument: $arg"
|
||||
echo "Use --help for usage information"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Validate architecture
|
||||
case "$TARGET_ARCH" in
|
||||
amd64|arm64|rpi)
|
||||
;;
|
||||
*)
|
||||
echo "ERROR: Invalid architecture '$TARGET_ARCH'"
|
||||
echo "Valid options: amd64, arm64, rpi"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
# Determine output filename based on architecture
|
||||
case "$TARGET_ARCH" in
|
||||
amd64)
|
||||
OUTPUT_FILE="ecoos.iso"
|
||||
;;
|
||||
arm64)
|
||||
OUTPUT_FILE="ecoos-arm64.iso"
|
||||
;;
|
||||
rpi)
|
||||
OUTPUT_FILE="ecoos-rpi.img"
|
||||
;;
|
||||
esac
|
||||
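# Typical invocations (illustrative):
#   ./docker-build.sh               -> output/ecoos.iso        (amd64, default)
#   ./docker-build.sh --arch=arm64  -> output/ecoos-arm64.iso
#   ./docker-build.sh --arch=rpi    -> output/ecoos-rpi.img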
|
||||
echo "=== EcoOS ISO Builder (Docker) ==="
|
||||
echo "Target architecture: $TARGET_ARCH"
|
||||
echo "Output file: $OUTPUT_FILE"
|
||||
echo ""
|
||||
|
||||
cd "$ECO_OS_DIR"
|
||||
|
||||
# Build the Docker image
|
||||
echo "[1/2] Building Docker image..."
|
||||
docker build -t ecoos-builder -f isobuild/Dockerfile .
|
||||
# Build the Docker image with architecture argument
|
||||
echo "[1/2] Building Docker image for $TARGET_ARCH..."
|
||||
|
||||
# For ARM builds on x86 hosts, we need to use buildx with platform emulation
|
||||
if [ "$TARGET_ARCH" = "arm64" ] || [ "$TARGET_ARCH" = "rpi" ]; then
|
||||
HOST_ARCH=$(uname -m)
|
||||
if [ "$HOST_ARCH" = "x86_64" ]; then
|
||||
echo "Cross-building ARM on x86_64 host - using Docker buildx with QEMU emulation"
|
||||
echo "Note: This requires QEMU binfmt. If this fails, run:"
|
||||
echo " docker run --privileged --rm tonistiigi/binfmt --install all"
|
||||
echo ""
|
||||
|
||||
# Ensure buildx is available and create builder if needed
|
||||
docker buildx inspect ecoos-builder >/dev/null 2>&1 || \
|
||||
docker buildx create --name ecoos-builder --use
|
||||
|
||||
docker buildx build \
|
||||
--platform linux/arm64 \
|
||||
--build-arg TARGET_ARCH="$TARGET_ARCH" \
|
||||
--load \
|
||||
-t ecoos-builder-$TARGET_ARCH \
|
||||
-f isobuild/Dockerfile .
|
||||
else
|
||||
# Running on ARM host, use regular build
|
||||
docker build \
|
||||
--build-arg TARGET_ARCH="$TARGET_ARCH" \
|
||||
-t ecoos-builder-$TARGET_ARCH \
|
||||
-f isobuild/Dockerfile .
|
||||
fi
|
||||
else
|
||||
docker build \
|
||||
--build-arg TARGET_ARCH="$TARGET_ARCH" \
|
||||
-t ecoos-builder-$TARGET_ARCH \
|
||||
-f isobuild/Dockerfile .
|
||||
fi
|
||||
|
||||
# Run the build
|
||||
echo ""
|
||||
echo "[2/2] Building ISO (this may take 15-30 minutes)..."
|
||||
echo "[2/2] Building image (this may take 15-30 minutes)..."
|
||||
mkdir -p "$ISOBUILD_DIR/output"
|
||||
|
||||
docker run --rm \
|
||||
--privileged \
|
||||
-e TARGET_ARCH="$TARGET_ARCH" \
|
||||
-v "$ISOBUILD_DIR/output:/output" \
|
||||
ecoos-builder
|
||||
ecoos-builder-$TARGET_ARCH
|
||||
|
||||
echo ""
|
||||
echo "=== Build Complete ==="
|
||||
echo "ISO: $ISOBUILD_DIR/output/ecoos.iso"
|
||||
echo "Output: $ISOBUILD_DIR/output/$OUTPUT_FILE"
|
||||
|
||||
326
isotest/enable-displays.py
Executable file
@@ -0,0 +1,326 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Enable multiple displays on a SPICE VM by sending monitor configuration.
|
||||
Retries until the SPICE agent in the guest is connected.
|
||||
"""
|
||||
|
||||
import gi
|
||||
import sys
|
||||
import time
|
||||
import socket
|
||||
import re
|
||||
|
||||
gi.require_version('SpiceClientGLib', '2.0')
|
||||
from gi.repository import SpiceClientGLib, GLib
|
||||
|
||||
# Channel types (from spice-protocol)
|
||||
CHANNEL_MAIN = 1
|
||||
CHANNEL_DISPLAY = 2
|
||||
|
||||
def log(msg):
|
||||
"""Print with flush for immediate output when backgrounded"""
|
||||
print(msg, flush=True)
|
||||
|
||||
|
||||
def wait_for_port(host, port, timeout=60):
|
||||
"""Wait for a TCP port to be available"""
|
||||
start = time.time()
|
||||
while time.time() - start < timeout:
|
||||
try:
|
||||
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
sock.settimeout(1)
|
||||
result = sock.connect_ex((host, port))
|
||||
sock.close()
|
||||
if result == 0:
|
||||
return True
|
||||
except:
|
||||
pass
|
||||
time.sleep(0.5)
|
||||
return False
|
||||
|
||||
|
||||
def parse_spice_uri(uri):
|
||||
"""Parse spice://host:port URI"""
|
||||
match = re.match(r'spice://([^:]+):(\d+)', uri)
|
||||
if match:
|
||||
return match.group(1), int(match.group(2))
|
||||
return 'localhost', 5930
|
||||
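# Quick sanity check (illustrative):
#   parse_spice_uri('spice://localhost:5930') -> ('localhost', 5930)
# Anything that does not match the pattern falls back to ('localhost', 5930).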
|
||||
class SpiceDisplayEnabler:
|
||||
def __init__(self, uri, num_displays=3, width=1920, height=1080, timeout=60):
|
||||
self.uri = uri
|
||||
self.num_displays = num_displays
|
||||
self.width = width
|
||||
self.height = height
|
||||
self.timeout = timeout
|
||||
self.session = None
|
||||
self.main_channel = None
|
||||
self.display_channels = []
|
||||
self.loop = GLib.MainLoop()
|
||||
self.configured = False
|
||||
self.agent_connected = False
|
||||
self.config_sent = False
|
||||
self.config_retries = 0
|
||||
self.max_retries = 3
|
||||
self.stabilization_scheduled = False
|
||||
self.connection_retries = 0
|
||||
self.max_connection_retries = 30 # Try reconnecting for up to 5 minutes
|
||||
self.agent_check_count = 0
|
||||
self.configure_count = 0 # Track how many times we've configured (for reboots)
|
||||
|
||||
def on_channel_new(self, session, channel):
|
||||
"""Handle new channel creation"""
|
||||
channel_type = channel.get_property('channel-type')
|
||||
channel_id = channel.get_property('channel-id')
|
||||
|
||||
if channel_type == CHANNEL_MAIN:
|
||||
log(f"Main channel received (id={channel_id})")
|
||||
self.main_channel = channel
|
||||
channel.connect_after('channel-event', self.on_channel_event)
|
||||
# Check agent status periodically
|
||||
GLib.timeout_add(500, self.check_agent_and_configure)
|
||||
elif channel_type == CHANNEL_DISPLAY:
|
||||
log(f"Display channel received (id={channel_id})")
|
||||
self.display_channels.append((channel_id, channel))
|
||||
|
||||
def on_channel_event(self, channel, event):
|
||||
"""Handle channel events"""
|
||||
log(f"Channel event: {event}")
|
||||
if event == SpiceClientGLib.ChannelEvent.OPENED:
|
||||
# Start checking for agent
|
||||
GLib.timeout_add(100, self.check_agent_and_configure)
|
||||
|
||||
def check_agent_and_configure(self):
|
||||
"""Check if agent is connected and configure if ready"""
|
||||
if self.stabilization_scheduled:
|
||||
return True # Keep checking but don't act yet
|
||||
|
||||
if not self.main_channel:
|
||||
return True # Keep checking
|
||||
|
||||
was_connected = self.agent_connected
|
||||
self.agent_connected = self.main_channel.get_property('agent-connected')
|
||||
self.agent_check_count += 1
|
||||
|
||||
# Detect agent disconnect (VM reboot)
|
||||
if was_connected and not self.agent_connected:
|
||||
log(f"Agent disconnected (VM may be rebooting)...")
|
||||
self.configured = False
|
||||
self.config_sent = False
|
||||
self.config_retries = 0
|
||||
|
||||
# Log every 10 checks (5 seconds)
|
||||
if self.agent_check_count % 10 == 0:
|
||||
status = "connected" if self.agent_connected else "waiting"
|
||||
log(f"Agent {status} (check #{self.agent_check_count}, configured={self.configure_count}x)")
|
||||
|
||||
if self.agent_connected and not self.config_sent and not self.stabilization_scheduled:
|
||||
log(f"Agent connected! Waiting 2s for stabilization...")
|
||||
self.stabilization_scheduled = True
|
||||
# Wait 2 seconds for agent to fully initialize before configuring
|
||||
GLib.timeout_add(2000, self.configure_monitors)
|
||||
|
||||
return True # Always keep checking for reboots
|
||||
|
||||
def configure_monitors(self):
|
||||
"""Configure multiple monitors via SPICE protocol"""
|
||||
if self.configured:
|
||||
return False # Already done
|
||||
|
||||
if not self.main_channel:
|
||||
log("No main channel!")
|
||||
return False
|
||||
|
||||
self.config_retries += 1
|
||||
attempt_str = f" (attempt {self.config_retries}/{self.max_retries})" if self.config_retries > 1 else ""
|
||||
log(f"Configuring {self.num_displays} displays{attempt_str}...")
|
||||
|
||||
# Enable and configure each display
|
||||
for i in range(self.num_displays):
|
||||
x = i * self.width # Position displays side by side
|
||||
y = 0
|
||||
|
||||
try:
|
||||
self.main_channel.update_display_enabled(i, True, False)
|
||||
self.main_channel.update_display(i, x, y, self.width, self.height, False)
|
||||
except Exception as e:
|
||||
log(f" Error setting display {i}: {e}")
|
||||
|
||||
# Send the configuration
|
||||
try:
|
||||
self.main_channel.send_monitor_config()
|
||||
self.config_sent = True
|
||||
log(f"Sent config for {self.num_displays} displays at {self.width}x{self.height}")
|
||||
except Exception as e:
|
||||
log(f"Error sending config: {e}")
|
||||
|
||||
# Schedule verification/retry after 3 seconds
|
||||
GLib.timeout_add(3000, self.verify_and_retry)
|
||||
return False # Don't repeat this timeout
|
||||
|
||||
def verify_and_retry(self):
|
||||
"""Verify configuration was applied, retry if needed"""
|
||||
if self.configured:
|
||||
return False # Already done
|
||||
|
||||
# Check if displays are actually enabled by re-checking agent state
|
||||
if not self.main_channel:
|
||||
log("Lost main channel during verification")
|
||||
self.quit()
|
||||
return False
|
||||
|
||||
# The SPICE protocol doesn't provide a direct way to verify display config
|
||||
# was applied. We assume success if we sent config and agent is still connected.
|
||||
agent_still_connected = self.main_channel.get_property('agent-connected')
|
||||
|
||||
if agent_still_connected and self.config_sent:
|
||||
# Mark as configured and send again for good measure
|
||||
if self.config_retries < self.max_retries:
|
||||
log(f"Sending config again to ensure it takes effect...")
|
||||
self.config_sent = False # Allow retry
|
||||
self.configure_monitors()
|
||||
else:
|
||||
# We've tried enough, assume success
|
||||
self.configured = True
|
||||
self.configure_count += 1
|
||||
self.stabilization_scheduled = False # Allow reconfiguration after reboot
|
||||
log(f"Configuration complete (configured {self.configure_count}x total)")
|
||||
# Don't quit - keep running to handle VM reboots
|
||||
elif not agent_still_connected:
|
||||
log("Agent disconnected during verification - will retry when reconnected")
|
||||
self.config_sent = False
|
||||
self.config_retries = 0
|
||||
self.stabilization_scheduled = False
|
||||
# Don't quit - agent will reconnect after reboot
|
||||
else:
|
||||
# Config not sent but agent connected - try again
|
||||
if self.config_retries < self.max_retries:
|
||||
log(f"Config not sent, retrying...")
|
||||
self.configure_monitors()
|
||||
else:
|
||||
log(f"Failed after {self.config_retries} attempts")
|
||||
self.quit()
|
||||
|
||||
return False # Don't repeat this timeout
|
||||
|
||||
def quit(self):
|
||||
self.loop.quit()
|
||||
return False
|
||||
|
||||
def on_timeout(self):
|
||||
"""Handle overall timeout"""
|
||||
if not self.configured:
|
||||
log(f"Timeout after {self.timeout}s - agent not connected (checks={self.agent_check_count})")
|
||||
self.quit()
|
||||
return False
|
||||
|
||||
def check_connection_health(self):
|
||||
"""Check if connection is healthy, reconnect if needed"""
|
||||
log(f"Health check: configured={self.configure_count}x, main_channel={self.main_channel is not None}, agent={self.agent_connected}")
|
||||
|
||||
# Don't stop checking - we need to handle reboots
|
||||
if self.stabilization_scheduled:
|
||||
return True # Keep checking but don't reconnect during stabilization
|
||||
|
||||
# If we don't have a main channel after 10 seconds, reconnect
|
||||
if not self.main_channel:
|
||||
self.connection_retries += 1
|
||||
if self.connection_retries > self.max_connection_retries:
|
||||
log(f"Giving up after {self.connection_retries} connection attempts")
|
||||
return False
|
||||
|
||||
log(f"No main channel received, reconnecting (attempt {self.connection_retries})...")
|
||||
self.reconnect()
|
||||
return True # Keep checking
|
||||
|
||||
return True # Keep checking connection health
|
||||
|
||||
def reconnect(self):
|
||||
"""Disconnect and reconnect to SPICE"""
|
||||
if self.session:
|
||||
try:
|
||||
self.session.disconnect()
|
||||
except:
|
||||
pass
|
||||
|
||||
# Reset state for new connection
|
||||
self.main_channel = None
|
||||
self.display_channels = []
|
||||
|
||||
# Create new session
|
||||
self.session = SpiceClientGLib.Session()
|
||||
self.session.set_property('uri', self.uri)
|
||||
self.session.connect_after('channel-new', self.on_channel_new)
|
||||
|
||||
if not self.session.connect():
|
||||
log(" Reconnection failed, will retry...")
|
||||
|
||||
def run(self):
|
||||
log(f"Connecting to {self.uri}...")
|
||||
log(f"Waiting up to {self.timeout}s for agent...")
|
||||
|
||||
# Wait for SPICE port to be available before connecting
|
||||
host, port = parse_spice_uri(self.uri)
|
||||
log(f"Waiting for SPICE server at {host}:{port}...")
|
||||
if not wait_for_port(host, port, timeout=60):
|
||||
log(f"SPICE server not available after 60s")
|
||||
return False
|
||||
log(f"SPICE port {port} is open, connecting...")
|
||||
|
||||
# Give SPICE server a moment to fully initialize after port opens
|
||||
time.sleep(1)
|
||||
|
||||
self.session = SpiceClientGLib.Session()
|
||||
self.session.set_property('uri', self.uri)
|
||||
self.session.connect_after('channel-new', self.on_channel_new)
|
||||
|
||||
if not self.session.connect():
|
||||
log("Initial connection failed, will retry...")
|
||||
|
||||
# Check connection health every 10 seconds
|
||||
GLib.timeout_add(10000, self.check_connection_health)
|
||||
|
||||
# Set overall timeout
|
||||
GLib.timeout_add(self.timeout * 1000, self.on_timeout)
|
||||
|
||||
log("Entering main loop...")
|
||||
self.loop.run()
|
||||
log("Main loop exited")
|
||||
|
||||
if self.configured:
|
||||
log(f"Success: {self.num_displays} displays enabled")
|
||||
else:
|
||||
log("Failed: Could not enable displays")
|
||||
|
||||
return self.configured
|
||||
|
||||
|
||||
def main():
|
||||
import argparse
|
||||
parser = argparse.ArgumentParser(description='Enable SPICE VM displays')
|
||||
parser.add_argument('uri', nargs='?', default='spice://localhost:5930',
|
||||
help='SPICE URI (default: spice://localhost:5930)')
|
||||
parser.add_argument('num_displays', nargs='?', type=int, default=3,
|
||||
help='Number of displays to enable (default: 3)')
|
||||
parser.add_argument('--timeout', '-t', type=int, default=60,
|
||||
help='Timeout in seconds (default: 60)')
|
||||
parser.add_argument('--width', '-W', type=int, default=1920,
|
||||
help='Display width (default: 1920)')
|
||||
parser.add_argument('--height', '-H', type=int, default=1080,
|
||||
help='Display height (default: 1080)')
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
enabler = SpiceDisplayEnabler(
|
||||
args.uri,
|
||||
args.num_displays,
|
||||
args.width,
|
||||
args.height,
|
||||
args.timeout
|
||||
)
|
||||
success = enabler.run()
|
||||
sys.exit(0 if success else 1)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
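# Example invocation (illustrative), matching the argparse defaults above:
#   ./enable-displays.py spice://localhost:5930 3 --width 1920 --height 1080
# The script stays in the GLib main loop after a successful configuration so
# it can re-apply the layout when the guest agent reconnects after a reboot.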
@@ -2,27 +2,104 @@
|
||||
set -e
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
|
||||
# Parse arguments
|
||||
AUTO_MODE=false
|
||||
TARGET_ARCH="amd64"
|
||||
|
||||
for arg in "$@"; do
|
||||
case $arg in
|
||||
--auto)
|
||||
AUTO_MODE=true
|
||||
shift
|
||||
;;
|
||||
--arch=*)
|
||||
TARGET_ARCH="${arg#*=}"
|
||||
shift
|
||||
;;
|
||||
-h|--help)
|
||||
echo "Usage: $0 [--arch=amd64|arm64|rpi] [--auto]"
|
||||
echo ""
|
||||
echo "Options:"
|
||||
echo " --arch=ARCH Target architecture (default: amd64)"
|
||||
echo " amd64 - x86_64 with UEFI/OVMF"
|
||||
echo " arm64 - Generic ARM64 with UEFI"
|
||||
echo " rpi - Raspberry Pi 3 emulation"
|
||||
echo " --auto Run in automatic mode for CI/testing"
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Validate architecture
|
||||
case "$TARGET_ARCH" in
|
||||
amd64|arm64|rpi)
|
||||
;;
|
||||
*)
|
||||
echo "ERROR: Invalid architecture '$TARGET_ARCH'"
|
||||
echo "Valid options: amd64, arm64, rpi"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
PROJECT_ROOT="$SCRIPT_DIR/.."
|
||||
VM_DIR="$PROJECT_ROOT/.nogit/vm"
|
||||
ISO_PATH="$PROJECT_ROOT/.nogit/iso/ecoos.iso"
|
||||
DISK_PATH="$VM_DIR/test-disk.qcow2"
|
||||
MONITOR_SOCK="$VM_DIR/qemu-monitor.sock"
|
||||
SERIAL_SOCK="$VM_DIR/serial.sock"
|
||||
SERIAL_LOG="$VM_DIR/serial.log"
|
||||
PID_FILE="$VM_DIR/qemu.pid"
|
||||
|
||||
# Architecture-specific settings
|
||||
case "$TARGET_ARCH" in
|
||||
amd64)
|
||||
ISO_PATH="$PROJECT_ROOT/.nogit/iso/ecoos.iso"
|
||||
DISK_PATH="$VM_DIR/test-disk.qcow2"
|
||||
QEMU_CMD="qemu-system-x86_64"
|
||||
QEMU_MACHINE=""
|
||||
QEMU_BIOS="-bios /usr/share/qemu/OVMF.fd"
|
||||
KVM_CHECK="/dev/kvm"
|
||||
DISK_IF="virtio"
|
||||
;;
|
||||
arm64)
|
||||
ISO_PATH="$PROJECT_ROOT/.nogit/iso/ecoos-arm64.iso"
|
||||
DISK_PATH="$VM_DIR/test-disk-arm64.qcow2"
|
||||
QEMU_CMD="qemu-system-aarch64"
|
||||
QEMU_MACHINE="-M virt -cpu cortex-a72"
|
||||
QEMU_BIOS="-bios /usr/share/qemu-efi-aarch64/QEMU_EFI.fd"
|
||||
KVM_CHECK="" # ARM KVM only works on ARM hosts
|
||||
DISK_IF="virtio"
|
||||
;;
|
||||
rpi)
|
||||
IMG_PATH="$PROJECT_ROOT/.nogit/iso/ecoos-rpi.img"
|
||||
DISK_PATH="" # RPi uses the image directly
|
||||
QEMU_CMD="qemu-system-aarch64"
|
||||
QEMU_MACHINE="-M raspi3b -cpu cortex-a53"
|
||||
QEMU_BIOS="" # RPi uses direct kernel boot
|
||||
KVM_CHECK=""
|
||||
DISK_IF="sd"
|
||||
;;
|
||||
esac
|
||||
|
||||
# Create VM directory if not exists
|
||||
mkdir -p "$VM_DIR"
|
||||
|
||||
# Check if ISO exists
|
||||
if [ ! -f "$ISO_PATH" ]; then
|
||||
echo "ERROR: ISO not found at $ISO_PATH"
|
||||
echo "Run 'pnpm run build' first to create the ISO"
|
||||
exit 1
|
||||
# Check if image exists
|
||||
if [ "$TARGET_ARCH" = "rpi" ]; then
|
||||
if [ ! -f "$IMG_PATH" ]; then
|
||||
echo "ERROR: RPi image not found at $IMG_PATH"
|
||||
echo "Run 'pnpm run build:rpi' first to create the image"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
if [ ! -f "$ISO_PATH" ]; then
|
||||
echo "ERROR: ISO not found at $ISO_PATH"
|
||||
echo "Run 'pnpm run build:$TARGET_ARCH' first to create the ISO"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Create test disk if not exists
|
||||
if [ ! -f "$DISK_PATH" ]; then
|
||||
# Create test disk if not exists (not needed for RPi)
|
||||
if [ -n "$DISK_PATH" ] && [ ! -f "$DISK_PATH" ]; then
|
||||
echo "Creating test disk (20GB)..."
|
||||
qemu-img create -f qcow2 "$DISK_PATH" 20G
|
||||
fi
|
||||
@@ -37,44 +114,279 @@ if [ -f "$PID_FILE" ]; then
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "Starting QEMU with EcoOS ISO..."
|
||||
echo "Starting QEMU with EcoOS for $TARGET_ARCH..."
|
||||
|
||||
# Check if KVM is available
|
||||
# Check if KVM is available (only for amd64 on x86 hosts)
|
||||
KVM_OPTS=""
|
||||
if [ -e /dev/kvm ] && [ -r /dev/kvm ] && [ -w /dev/kvm ]; then
|
||||
if [ -n "$KVM_CHECK" ] && [ -e "$KVM_CHECK" ] && [ -r "$KVM_CHECK" ] && [ -w "$KVM_CHECK" ]; then
|
||||
KVM_OPTS="-enable-kvm -cpu host"
|
||||
echo "Using KVM acceleration"
|
||||
else
|
||||
echo "KVM not available, using software emulation (slower)"
|
||||
if [ "$TARGET_ARCH" = "amd64" ]; then
|
||||
echo "KVM not available, using software emulation (slower)"
|
||||
else
|
||||
echo "Running ARM emulation on x86 host (slower)"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Start QEMU with VirtIO-GPU (VirGL OpenGL acceleration) and serial console
|
||||
# Cleanup function
|
||||
cleanup() {
|
||||
echo ""
|
||||
echo "Shutting down..."
|
||||
if [ -n "$SCREENSHOT_LOOP_PID" ] && kill -0 "$SCREENSHOT_LOOP_PID" 2>/dev/null; then
|
||||
kill "$SCREENSHOT_LOOP_PID" 2>/dev/null || true
|
||||
fi
|
||||
if [ -n "$ENABLE_PID" ] && kill -0 "$ENABLE_PID" 2>/dev/null; then
|
||||
kill "$ENABLE_PID" 2>/dev/null || true
|
||||
fi
|
||||
if [ -n "$VIEWER_PID" ] && kill -0 "$VIEWER_PID" 2>/dev/null; then
|
||||
kill "$VIEWER_PID" 2>/dev/null || true
|
||||
fi
|
||||
if [ -n "$TWM_PID" ] && kill -0 "$TWM_PID" 2>/dev/null; then
|
||||
kill "$TWM_PID" 2>/dev/null || true
|
||||
fi
|
||||
if [ -n "$XORG_PID" ] && kill -0 "$XORG_PID" 2>/dev/null; then
|
||||
kill "$XORG_PID" 2>/dev/null || true
|
||||
fi
|
||||
if [ -f "$PID_FILE" ]; then
|
||||
PID=$(cat "$PID_FILE")
|
||||
if kill -0 "$PID" 2>/dev/null; then
|
||||
kill "$PID" 2>/dev/null || true
|
||||
fi
|
||||
rm -f "$PID_FILE"
|
||||
fi
|
||||
echo "Done"
|
||||
}
|
||||
trap cleanup EXIT INT TERM
|
||||
|
||||
# Start QEMU based on architecture
|
||||
> "$SERIAL_LOG" # Clear old log
|
||||
qemu-system-x86_64 \
|
||||
$KVM_OPTS \
|
||||
-m 4G \
|
||||
-smp 4 \
|
||||
-bios /usr/share/qemu/OVMF.fd \
|
||||
-drive file="$ISO_PATH",media=cdrom \
|
||||
-drive file="$DISK_PATH",format=qcow2,if=virtio \
|
||||
-device virtio-vga \
|
||||
-display none \
|
||||
-spice port=5930,disable-ticketing=on \
|
||||
-serial unix:"$SERIAL_SOCK",server,nowait \
|
||||
-monitor unix:"$MONITOR_SOCK",server,nowait \
|
||||
-nic user,model=virtio-net-pci,hostfwd=tcp::3006-:3006,hostfwd=tcp::2222-:22 \
|
||||
-pidfile "$PID_FILE" &
|
||||
|
||||
if [ "$TARGET_ARCH" = "amd64" ]; then
|
||||
# AMD64 with multi-display support
|
||||
$QEMU_CMD \
|
||||
$KVM_OPTS \
|
||||
-m 4G \
|
||||
-smp 4 \
|
||||
$QEMU_BIOS \
|
||||
-drive file="$ISO_PATH",media=cdrom \
|
||||
-drive file="$DISK_PATH",format=qcow2,if=virtio \
|
||||
-device qxl-vga,id=video0,ram_size=67108864,vram_size=67108864,vgamem_mb=64 \
|
||||
-device qxl,id=video1,ram_size=67108864,vram_size=67108864,vgamem_mb=64 \
|
||||
-device qxl,id=video2,ram_size=67108864,vram_size=67108864,vgamem_mb=64 \
|
||||
-display none \
|
||||
-spice port=5930,disable-ticketing=on \
|
||||
-device virtio-serial-pci \
|
||||
-chardev spicevmc,id=vdagent,name=vdagent \
|
||||
-device virtserialport,chardev=vdagent,name=com.redhat.spice.0 \
|
||||
-serial unix:"$SERIAL_SOCK",server,nowait \
|
||||
-monitor unix:"$MONITOR_SOCK",server,nowait \
|
||||
-nic user,model=virtio-net-pci,hostfwd=tcp::3006-:3006,hostfwd=tcp::2222-:22 \
|
||||
-pidfile "$PID_FILE" &
|
||||
|
||||
elif [ "$TARGET_ARCH" = "arm64" ]; then
|
||||
# ARM64 with UEFI
|
||||
$QEMU_CMD \
|
||||
$QEMU_MACHINE \
|
||||
-m 4G \
|
||||
-smp 4 \
|
||||
$QEMU_BIOS \
|
||||
-drive file="$ISO_PATH",media=cdrom,if=none,id=cdrom \
|
||||
-device virtio-blk-device,drive=cdrom \
|
||||
-drive file="$DISK_PATH",format=qcow2,if=none,id=hd0 \
|
||||
-device virtio-blk-device,drive=hd0 \
|
||||
-device virtio-gpu-pci \
|
||||
-display none \
|
||||
-serial unix:"$SERIAL_SOCK",server,nowait \
|
||||
-monitor unix:"$MONITOR_SOCK",server,nowait \
|
||||
-device virtio-net-device,netdev=net0 \
|
||||
-netdev user,id=net0,hostfwd=tcp::3006-:3006,hostfwd=tcp::2222-:22 \
|
||||
-pidfile "$PID_FILE" &
|
||||
|
||||
elif [ "$TARGET_ARCH" = "rpi" ]; then
|
||||
# Raspberry Pi 3B emulation
|
||||
# Note: raspi3b machine has limited support, uses direct kernel boot
|
||||
echo "NOTE: Raspberry Pi emulation is limited."
|
||||
echo " For full testing, use real hardware."
|
||||
|
||||
# Extract kernel and initrd from image for direct boot
|
||||
TEMP_MNT=$(mktemp -d)
|
||||
LOOP_DEV=$(sudo losetup --find --show --partscan "$IMG_PATH")
|
||||
sudo mount "${LOOP_DEV}p1" "$TEMP_MNT"
|
||||
|
||||
KERNEL="$TEMP_MNT/vmlinuz"
|
||||
INITRD="$TEMP_MNT/initrd.img"
|
||||
DTB="$TEMP_MNT/bcm2710-rpi-3-b.dtb"
|
||||
|
||||
if [ ! -f "$KERNEL" ]; then
|
||||
echo "ERROR: Kernel not found in RPi image"
|
||||
sudo umount "$TEMP_MNT"
|
||||
sudo losetup -d "$LOOP_DEV"
|
||||
rm -rf "$TEMP_MNT"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Copy kernel/initrd to temp location for QEMU
|
||||
cp "$KERNEL" "$VM_DIR/rpi-kernel"
|
||||
cp "$INITRD" "$VM_DIR/rpi-initrd" 2>/dev/null || true
|
||||
cp "$DTB" "$VM_DIR/rpi-dtb" 2>/dev/null || true
|
||||
|
||||
sudo umount "$TEMP_MNT"
|
||||
sudo losetup -d "$LOOP_DEV"
|
||||
rm -rf "$TEMP_MNT"
|
||||
|
||||
$QEMU_CMD \
|
||||
$QEMU_MACHINE \
|
||||
-m 1G \
|
||||
-kernel "$VM_DIR/rpi-kernel" \
|
||||
-initrd "$VM_DIR/rpi-initrd" \
|
||||
-dtb "$VM_DIR/rpi-dtb" \
|
||||
-append "console=ttyAMA0,115200 root=LABEL=EcoOS rootfstype=ext4 rootwait" \
|
||||
-drive file="$IMG_PATH",format=raw,if=sd \
|
||||
-serial unix:"$SERIAL_SOCK",server,nowait \
|
||||
-display none \
|
||||
-pidfile "$PID_FILE" &
|
||||
fi
|
||||
|
||||
QEMU_PID=$!

echo ""
sleep 1
echo "=== EcoOS Test VM Started ==="
echo "PID: $(cat $PID_FILE 2>/dev/null || echo 'running')"
echo "SPICE: spicy -h localhost -p 5930"
echo "Serial Log: $SERIAL_LOG"
echo "Management UI: http://localhost:3006"
echo "=== EcoOS Test VM Started ($TARGET_ARCH) ==="
echo "QEMU PID: $QEMU_PID"
if [ "$TARGET_ARCH" != "rpi" ]; then
  echo "Management UI: http://localhost:3006"
fi
echo ""
echo "Commands:"
echo "  pnpm run test:screenshot  - Take screenshot"
echo "  pnpm run test:stop        - Stop VM"
echo "  tail -f $SERIAL_LOG       - Watch serial console"
echo "  socat - UNIX-CONNECT:$SERIAL_SOCK  - Interactive serial"

# AMD64-specific display setup
if [ "$TARGET_ARCH" = "amd64" ]; then
  # Wait for QEMU to start and SPICE to be ready
  echo "Waiting for SPICE server..."
  sleep 3

  # Check if remote-viewer is available
  if ! command -v remote-viewer &> /dev/null; then
    echo "WARNING: remote-viewer not installed"
    echo "Install with: sudo apt install virt-viewer"
    echo ""
    echo "Running without display viewer. Press Ctrl-C to stop."
    wait $QEMU_PID
    exit 0
  fi

  # Set up virt-viewer settings for multi-display
  VIRT_VIEWER_CONFIG_DIR="${XDG_CONFIG_HOME:-$HOME/.config}/virt-viewer"
  mkdir -p "$VIRT_VIEWER_CONFIG_DIR"
  if [ -f "$SCRIPT_DIR/virt-viewer-settings" ]; then
    cp "$SCRIPT_DIR/virt-viewer-settings" "$VIRT_VIEWER_CONFIG_DIR/settings"
    echo "Configured virt-viewer for 3 displays"
  fi

  # Detect DISPLAY if not set
  if [ -z "$DISPLAY" ]; then
    if [ -S /tmp/.X11-unix/X0 ]; then
      export DISPLAY=:0
    elif [ -S /tmp/.X11-unix/X1 ]; then
      export DISPLAY=:1
    fi
  fi

  # Detect WAYLAND_DISPLAY if not set
  if [ -z "$WAYLAND_DISPLAY" ] && [ -z "$DISPLAY" ]; then
    if [ -S "$XDG_RUNTIME_DIR/wayland-0" ]; then
      export WAYLAND_DISPLAY=wayland-0
    elif [ -S "/run/user/$(id -u)/wayland-0" ]; then
      export XDG_RUNTIME_DIR="/run/user/$(id -u)"
      export WAYLAND_DISPLAY=wayland-0
    fi
  fi

  # Launch remote-viewer
  if [ -z "$DISPLAY" ] && [ -z "$WAYLAND_DISPLAY" ]; then
    echo "No display found, starting headless X server with 3 virtual monitors..."

    XDISPLAY=99
    while [ -S "/tmp/.X11-unix/X$XDISPLAY" ]; do
      XDISPLAY=$((XDISPLAY + 1))
    done

    XORG_CONFIG="$SCRIPT_DIR/xorg-dummy.conf"
    Xorg :$XDISPLAY -config "$XORG_CONFIG" -noreset +extension GLX +extension RANDR +extension RENDER &
    XORG_PID=$!
    sleep 2

    export DISPLAY=:$XDISPLAY

    xrandr --newmode "1920x1080" 173.00 1920 2048 2248 2576 1080 1083 1088 1120 -hsync +vsync 2>/dev/null || true
    xrandr --addmode DUMMY1 "1920x1080" 2>/dev/null || true
    xrandr --addmode DUMMY2 "1920x1080" 2>/dev/null || true
    xrandr --output DUMMY0 --mode 1920x1080 --pos 0x0 --primary
    xrandr --output DUMMY1 --mode 1920x1080 --pos 1920x0 2>/dev/null || true
    xrandr --output DUMMY2 --mode 1920x1080 --pos 3840x0 2>/dev/null || true

    echo "Headless X server started on :$XDISPLAY"

    remote-viewer --full-screen spice://localhost:5930 &
    VIEWER_PID=$!
    echo "remote-viewer running headlessly (PID: $VIEWER_PID)"
  else
    echo "Launching remote-viewer with fullscreen for multi-display..."
    remote-viewer --full-screen spice://localhost:5930 &
    VIEWER_PID=$!
  fi
  echo ""

  # Enable all 3 displays via SPICE protocol
  if [ -f "$SCRIPT_DIR/enable-displays.py" ]; then
    echo "Enabling displays (waiting for SPICE agent, up to 5 minutes)..."
    python3 "$SCRIPT_DIR/enable-displays.py" --timeout 300 2>&1 &
    ENABLE_PID=$!
  fi

  # Start screenshot loop in background
  echo "Starting screenshot loop..."
  (while true; do "$SCRIPT_DIR/screenshot.sh" 2>/dev/null; sleep 5; done) &
  SCREENSHOT_LOOP_PID=$!
fi

echo "Tips:"
|
||||
echo " - socat - UNIX-CONNECT:.nogit/vm/serial.sock - Serial console (login: ecouser/ecouser)"
|
||||
if [ "$TARGET_ARCH" != "rpi" ]; then
|
||||
echo " - http://localhost:3006 - Management UI"
|
||||
fi
|
||||
echo ""
|
||||
|
||||
if [ "$AUTO_MODE" = true ] && [ "$TARGET_ARCH" = "amd64" ]; then
|
||||
echo "=== Auto mode: waiting for display setup ==="
|
||||
|
||||
if [ -n "$ENABLE_PID" ]; then
|
||||
wait $ENABLE_PID
|
||||
ENABLE_EXIT=$?
|
||||
if [ $ENABLE_EXIT -ne 0 ]; then
|
||||
echo "FAIL: Could not enable displays (exit code: $ENABLE_EXIT)"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "Taking screenshot..."
|
||||
"$SCRIPT_DIR/screenshot.sh"
|
||||
|
||||
SCREENSHOT="$PROJECT_ROOT/.nogit/screenshots/latest.png"
|
||||
if [ -f "$SCREENSHOT" ]; then
|
||||
WIDTH=$(identify -format "%w" "$SCREENSHOT" 2>/dev/null || echo "0")
|
||||
if [ "$WIDTH" -ge 5760 ]; then
|
||||
echo "SUCCESS: Multi-display test passed (width: ${WIDTH}px)"
|
||||
exit 0
|
||||
else
|
||||
echo "FAIL: Screenshot width is ${WIDTH}px, expected >= 5760px"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo "FAIL: Screenshot not found"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo "=== Press Ctrl-C to stop ==="
|
||||
echo ""
|
||||
wait $QEMU_PID 2>/dev/null || true
|
||||
fi
|
||||
|
||||
@@ -4,6 +4,7 @@ SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
PROJECT_ROOT="$SCRIPT_DIR/.."
VM_DIR="$PROJECT_ROOT/.nogit/vm"
SCREENSHOT_DIR="$PROJECT_ROOT/.nogit/screenshots"
TIMESTAMPED_DIR="$SCREENSHOT_DIR/timestamped"
MONITOR_SOCK="$VM_DIR/qemu-monitor.sock"
TIMESTAMP=$(date +%Y%m%d-%H%M%S)

@@ -15,35 +16,38 @@ if [ ! -S "$MONITOR_SOCK" ]; then
fi

mkdir -p "$SCREENSHOT_DIR"
PPM_FILE="$SCREENSHOT_DIR/ecoos-$TIMESTAMP.ppm"
PNG_FILE="$SCREENSHOT_DIR/ecoos-$TIMESTAMP.png"
LATEST_FILE="$SCREENSHOT_DIR/latest.png"
mkdir -p "$TIMESTAMPED_DIR"

echo "Taking screenshot..."
echo "screendump $PPM_FILE" | socat - UNIX-CONNECT:"$MONITOR_SOCK"
sleep 1

# Check if PPM was created
PPM_FILE="$SCREENSHOT_DIR/temp.ppm"
LATEST_FILE="$SCREENSHOT_DIR/latest.png"
TIMESTAMPED_FILE="$TIMESTAMPED_DIR/ecoos-$TIMESTAMP.png"

# Take screenshot (virtio-vga captures all outputs in one framebuffer)
echo "screendump $PPM_FILE" | socat - UNIX-CONNECT:"$MONITOR_SOCK" > /dev/null 2>&1
sleep 0.5

if [ ! -f "$PPM_FILE" ]; then
  echo "ERROR: Screenshot failed"
  echo "ERROR: Failed to capture screenshot"
  exit 1
fi

# Convert to PNG if imagemagick is available
# Convert to PNG
if command -v convert &> /dev/null; then
  convert "$PPM_FILE" "$PNG_FILE"
  convert "$PPM_FILE" "$LATEST_FILE"
  cp "$LATEST_FILE" "$TIMESTAMPED_FILE"
  rm "$PPM_FILE"

  # Copy to latest.png
  cp "$PNG_FILE" "$LATEST_FILE"

  echo "Screenshot saved: $PNG_FILE"
  echo "Also saved as: $LATEST_FILE"
  echo "Screenshot saved: $LATEST_FILE"
  echo "Timestamped copy: $TIMESTAMPED_FILE"
else
  echo "Screenshot saved: $PPM_FILE"
  echo "(Install imagemagick to auto-convert to PNG)"
  mv "$PPM_FILE" "$SCREENSHOT_DIR/latest.ppm"
  cp "$SCREENSHOT_DIR/latest.ppm" "$TIMESTAMPED_DIR/ecoos-$TIMESTAMP.ppm"
  echo "Screenshot saved: $SCREENSHOT_DIR/latest.ppm"
  echo "(Install ImageMagick for PNG conversion)"
fi

# Keep only last 20 screenshots (excluding latest.png)
cd "$SCREENSHOT_DIR"
ls -t ecoos-*.png 2>/dev/null | tail -n +21 | xargs -r rm -f
# Keep only last 50 timestamped screenshots
cd "$TIMESTAMPED_DIR"
ls -t ecoos-*.png 2>/dev/null | tail -n +51 | xargs -r rm -f
ls -t ecoos-*.ppm 2>/dev/null | tail -n +51 | xargs -r rm -f

5
isotest/virt-viewer-settings
Normal file
@@ -0,0 +1,5 @@
[virt-viewer]
share-clipboard=true

[fallback]
monitor-mapping=1:1;2:2;3:3
41
isotest/xorg-dummy.conf
Normal file
@@ -0,0 +1,41 @@
# Xorg configuration for 3 virtual monitors using dummy driver with RandR
# Used for headless multi-display testing with SPICE/remote-viewer

Section "ServerFlags"
    Option "DontVTSwitch" "true"
    Option "AllowMouseOpenFail" "true"
    Option "PciForceNone" "true"
    Option "AutoEnableDevices" "false"
    Option "AutoAddDevices" "false"
EndSection

Section "Device"
    Identifier "dummy"
    Driver "dummy"
    VideoRam 768000
EndSection

Section "Monitor"
    Identifier "Monitor0"
    HorizSync 28.0-80.0
    VertRefresh 48.0-75.0
    # 1920x1080 @ 60Hz (CVT) modeline
    Modeline "1920x1080" 173.00 1920 2048 2248 2576 1080 1083 1088 1120 -hsync +vsync
EndSection

Section "Screen"
    Identifier "Screen0"
    Device "dummy"
    Monitor "Monitor0"
    DefaultDepth 24
    SubSection "Display"
        Depth 24
        Modes "1920x1080"
        Virtual 5760 1080
    EndSubSection
EndSection

Section "ServerLayout"
    Identifier "Layout0"
    Screen 0 "Screen0" 0 0
EndSection
18
package.json
@@ -1,18 +1,26 @@
{
  "name": "@ecobridge/eco-os",
  "version": "0.2.0",
  "version": "0.7.0",
  "private": true,
  "scripts": {
    "build": "npm version patch --no-git-tag-version && node -e \"const v=require('./package.json').version; require('fs').writeFileSync('ecoos_daemon/ts/version.ts', 'export const VERSION = \\\"'+v+'\\\";\\n');\" && pnpm run daemon:bundle && cp ecoos_daemon/bundle/eco-daemon isobuild/config/includes.chroot/opt/eco/bin/ && mkdir -p .nogit/iso && docker build --no-cache -t ecoos-builder -f isobuild/Dockerfile . && docker run --rm --privileged -v $(pwd)/.nogit/iso:/output ecoos-builder",
    "build": "pnpm run build:amd64",
    "build:prepare": "[ -z \"$CI\" ] && npm version patch --no-git-tag-version || true && node -e \"const v=require('./package.json').version; require('fs').writeFileSync('ecoos_daemon/ts/version.ts', 'export const VERSION = \\\"'+v+'\\\";\\n');\" && pnpm run daemon:ui",
    "build:amd64": "pnpm run build:prepare && mkdir -p .nogit/iso && ./isobuild/scripts/docker-build.sh --arch=amd64 && cp isobuild/output/ecoos.iso .nogit/iso/",
    "build:arm64": "pnpm run build:prepare && mkdir -p .nogit/iso && ./isobuild/scripts/docker-build.sh --arch=arm64 && cp isobuild/output/ecoos-arm64.iso .nogit/iso/",
    "build:rpi": "pnpm run build:prepare && mkdir -p .nogit/iso && ./isobuild/scripts/docker-build.sh --arch=rpi && cp isobuild/output/ecoos-rpi.img .nogit/iso/",
    "daemon:dev": "cd ecoos_daemon && deno run --allow-all --watch mod.ts",
    "daemon:start": "cd ecoos_daemon && deno run --allow-all mod.ts",
    "daemon:typecheck": "cd ecoos_daemon && deno check mod.ts",
    "daemon:bundle": "cd ecoos_daemon && deno compile --allow-all --output bundle/eco-daemon mod.ts",
    "daemon:ui": "cd ecoos_daemon && pnpm run build",
    "daemon:bundle": "cd ecoos_daemon && pnpm run build && deno compile --allow-all --output bundle/eco-daemon mod.ts",
    "test": "pnpm run test:clean && cd isotest && ./run-test.sh",
    "test:arm64": "pnpm run test:clean && cd isotest && ./run-test.sh --arch=arm64",
    "test:rpi": "pnpm run test:clean && cd isotest && ./run-test.sh --arch=rpi",
    "test:screenshot": "cd isotest && ./screenshot.sh",
    "test:screenshot:loop": "while true; do pnpm run test:screenshot; sleep 5; done",
    "test:stop": "cd isotest && ./stop.sh",
    "test:clean": "pnpm run test:stop && rm -rf .nogit/vm/*.qcow2 .nogit/screenshots/*",
    "clean": "rm -rf .nogit/iso/*.iso && pnpm run test:clean"
  }
    "clean": "rm -rf .nogit/iso/*.iso .nogit/iso/*.img && pnpm run test:clean"
  },
  "dependencies": {}
}

9
pnpm-lock.yaml
generated
Normal file
@@ -0,0 +1,9 @@
lockfileVersion: '9.0'

settings:
  autoInstallPeers: true
  excludeLinksFromLockfile: false

importers:

  .: {}
268
readme.md
Normal file
@@ -0,0 +1,268 @@
# 🌍 EcoOS

> **A purpose-built, minimal Linux distribution for kiosk and digital signage deployments.**

EcoOS is a streamlined operating system that boots directly into a full-screen Chromium browser, managed by a powerful daemon with a built-in web UI. Perfect for digital signage, interactive kiosks, info displays, and any scenario where you need a locked-down, browser-based interface.

## Issue Reporting and Security

For reporting bugs, issues, or security vulnerabilities, please visit [community.foss.global/](https://community.foss.global/). This is the central community hub for all issue reporting. Developers who sign and comply with our contribution agreement and go through identification can also get a [code.foss.global/](https://code.foss.global/) account to submit Pull Requests directly.

---

## ✨ Features

- **🚀 Zero-Config Boot** — Boots straight into a Wayland-based kiosk browser
- **🖥️ Sway Compositor** — Modern, tiling Wayland compositor with automatic fallback modes
- **🌐 Chromium Kiosk** — Full-screen browser in locked-down kiosk mode
- **🎛️ Management UI** — Real-time system monitoring and control via web interface on port 3006
- **🔄 Auto-Updates** — Daemon self-updates with smart stability checking
- **📊 System Monitoring** — CPU, memory, disk, network, GPU, and audio device stats
- **📝 Live Logs** — System journal and daemon logs accessible from the UI
- **🔌 Hardware Support** — Detection and display of input devices, speakers, and microphones
- **⚡ Rapid Recovery** — Auto-restart of crashed services within seconds

---

## 🏗️ Architecture

```
┌─────────────────────────────────────────────────────┐
│                      EcoOS ISO                       │
├─────────────────────────────────────────────────────┤
│  ┌───────────────────────────────────────────────┐  │
│  │             eco-daemon (systemd)              │  │
│  │  ┌─────────┐ ┌─────────┐ ┌─────────────────┐  │  │
│  │  │ Process │ │ System  │ │     Updater     │  │  │
│  │  │ Manager │ │  Info   │ │ (auto-upgrade)  │  │  │
│  │  └────┬────┘ └────┬────┘ └────────┬────────┘  │  │
│  │       │           │               │           │  │
│  │       ▼           ▼               ▼           │  │
│  │  ┌─────────────────────────────────────────┐  │  │
│  │  │         UI Server (:3006)               │  │  │
│  │  │   REST API │ WebSocket │ Dashboard      │  │  │
│  │  └─────────────────────────────────────────┘  │  │
│  └───────────────────────────────────────────────┘  │
│                          │                          │
│                          ▼                          │
│  ┌──────────────────┐   ┌─────────────────────────┐ │
│  │  Sway Compositor │───│  Chromium (kiosk mode)  │ │
│  │    (Wayland)     │   │   → localhost:3006      │ │
│  └──────────────────┘   └─────────────────────────┘ │
├─────────────────────────────────────────────────────┤
│  Ubuntu 24.04 Base │ systemd │ seatd │ pipewire     │
└─────────────────────────────────────────────────────┘
```

---

## 🛠️ Development

### Prerequisites

- **Docker** (for ISO building)
- **pnpm** (package manager)
- **Deno** v2.x (for daemon development)
- **QEMU** (for testing)

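To sanity-check the toolchain before a build, something like the following works (assuming the usual binary names for these tools):

```bash
# Check that the required tools are on PATH (assumed default binary names)
for tool in docker pnpm deno qemu-system-x86_64; do
  command -v "$tool" >/dev/null 2>&1 && echo "ok:      $tool" || echo "missing: $tool"
done
```
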
### Project Structure

```
eco_os/
├── ecoos_daemon/              # Daemon source (Deno/TypeScript)
│   ├── mod.ts                 # Entry point
│   └── ts/
│       ├── daemon/            # Core daemon logic
│       │   ├── index.ts            # EcoDaemon class
│       │   ├── process-manager.ts  # Sway/Chromium management
│       │   ├── system-info.ts      # Hardware detection
│       │   └── updater.ts          # Auto-update system
│       ├── ui/                # Web UI server
│       └── utils/             # Utilities
├── isobuild/                  # ISO build configuration
│   ├── Dockerfile             # Build container
│   ├── config/                # live-build config
│   └── scripts/               # Build scripts
├── isotest/                   # QEMU test scripts
└── .nogit/                    # Generated artifacts (not in git)
    ├── iso/                   # Built ISO
    ├── vm/                    # QEMU files
    └── screenshots/           # VM screenshots
```

### Commands

```bash
# Build the full ISO (auto-rebuilds daemon first)
pnpm run build

# Test ISO in QEMU virtual machine
pnpm run test

# Take screenshot of running VM
pnpm run test:screenshot

# Stop the QEMU VM
pnpm run test:stop

# Clean all build artifacts
pnpm run clean

# Daemon development (watch mode)
pnpm run daemon:dev

# Bundle daemon to standalone binary
pnpm run daemon:bundle

# Type-check daemon code
pnpm run daemon:typecheck
```

---

## 🖥️ Management UI

The daemon exposes a management interface at `http://localhost:3006` (or the device's IP on port 3006).

### Dashboard Features

| Panel | Description |
|-------|-------------|
| **Services** | Status of Sway compositor and Chromium browser |
| **CPU** | Model, core count, real-time usage |
| **Memory** | Used/total with visual progress bar |
| **Network** | Interface names and IP addresses |
| **Disks** | Mount points, usage, and capacity |
| **System** | Hostname, uptime, GPU info |
| **Controls** | Restart browser, reboot system buttons |
| **Updates** | Version info, available updates, upgrade controls |
| **Input Devices** | Keyboards, mice, touchscreens |
| **Audio** | Detected speakers and microphones |
| **Logs** | Daemon logs and system journal viewer |

### API Endpoints

| Endpoint | Method | Description |
|----------|--------|-------------|
| `/api/status` | GET | Full system status |
| `/api/logs` | GET | Daemon logs |
| `/api/reboot` | POST | Reboot the system |
| `/api/restart-chromium` | POST | Restart the kiosk browser |
| `/api/updates` | GET | Update information |
| `/api/updates/check` | POST | Check for new updates |
| `/api/upgrade` | POST | Upgrade to specific version |
| `/ws` | WebSocket | Real-time status updates |

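For quick manual checks, the endpoints above can be hit with `curl` from any machine that can reach the device. Only the routes themselves are taken from the table; the response bodies are whatever the daemon returns:

```bash
# Full system status as JSON
curl -s http://localhost:3006/api/status

# Restart the kiosk browser without rebooting
curl -s -X POST http://localhost:3006/api/restart-chromium

# Real-time updates over /ws need a WebSocket client, e.g. websocat (third-party tool)
websocat ws://localhost:3006/ws
```
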
---

## 🔄 Update System

EcoOS features a smart auto-update mechanism:

1. **Hourly Checks** — Daemon polls for new releases every hour
2. **Stability Period** — New releases wait 24 hours before auto-upgrade (prevents deploying unstable releases)
3. **Seamless Upgrade** — Downloads new daemon binary, replaces, and restarts service
4. **Manual Override** — Force immediate upgrade via UI or API
5. **Version Tracking** — UI auto-reloads when daemon version changes

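A manual override from the shell might look like this. The `/api/updates/check` route is taken from the API table above; the JSON body for `/api/upgrade` is an assumption, so check the daemon source for the exact field name:

```bash
# Ask the daemon to check for a new release immediately
curl -s -X POST http://localhost:3006/api/updates/check

# Force an upgrade to a specific version (body shape assumed; verify against the daemon)
curl -s -X POST http://localhost:3006/api/upgrade \
  -H 'Content-Type: application/json' \
  -d '{"version": "0.7.0"}'
```
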
---

## 🧪 Testing

### QEMU Virtual Machine

```bash
# Start VM (creates disk, boots ISO)
pnpm run test

# Take screenshots to monitor progress
pnpm run test:screenshot

# Screenshot loop (every 5 seconds)
pnpm run test:screenshot:loop

# Stop VM
pnpm run test:stop

# Clean and restart fresh
pnpm run test:clean && pnpm run test
```

### Serial Console

For debugging without graphics:

```bash
socat - UNIX-CONNECT:.nogit/vm/serial.sock
# Login: ecouser / ecouser
# Root: sudo -i
```

---

## 📦 Release Assets

Each release includes:

| File | Description |
|------|-------------|
| `ecoos-vX.X.X.iso` | Full bootable ISO image (~2GB) |
| `eco-daemon-vX.X.X` | Standalone daemon binary for in-place upgrades |
| `SHA256SUMS.txt` | Checksums for verification |

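A typical way to verify downloads (standard `sha256sum` usage; assumes the checksum file lists the asset names shown above):

```bash
# Verify all listed assets in the current directory
sha256sum -c SHA256SUMS.txt

# Or verify just the ISO
grep '\.iso$' SHA256SUMS.txt | sha256sum -c -
```
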
---

## 🔧 Boot Menu Options

1. **Install EcoOS** *(default, auto-selects in 10s)* — Full installation to disk
2. **EcoOS Live** — Try without installing (runs from RAM)
3. **EcoOS Live (Safe Mode)** — Minimal boot for troubleshooting

---

## ⚙️ Technical Details

### Daemon

- **Runtime**: Deno (compiled to standalone binary)
- **Process Management**: Spawns and monitors Sway + Chromium
- **Backend Fallback**: Tries DRM first, falls back to headless/pixman
- **Auto-Recovery**: Restarts crashed services within 5 seconds
- **Logging**: Integrates with systemd journal

### Kiosk Browser

- **Browser**: Chromium (official snapshots, not snap)
- **Flags**: `--ozone-platform=wayland --kiosk --no-first-run --disable-infobars`
- **Default URL**: `http://localhost:3006` (management UI)

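Putting these pieces together, the kiosk launch is roughly equivalent to the sketch below. The real invocation lives in the daemon's process manager; this only combines the documented flags and default URL:

```bash
# Minimal sketch of the kiosk launch (assumes a `chromium` binary on PATH inside the image)
chromium \
  --ozone-platform=wayland \
  --kiosk \
  --no-first-run \
  --disable-infobars \
  http://localhost:3006
```
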
### System Stack

- **Base**: Ubuntu 24.04 LTS
- **Init**: systemd
- **Display**: Sway (Wayland compositor)
- **Seat Manager**: seatd
- **Audio**: PipeWire

---

## License and Legal Information

This repository contains open-source code licensed under the MIT License. A copy of the license can be found in the [LICENSE](./LICENSE) file.

**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.

### Trademarks

This project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH or third parties, and are not included within the scope of the MIT license granted herein.

Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines or the guidelines of the respective third-party owners, and any usage must be approved in writing. Third-party trademarks used herein are the property of their respective owners and used only in a descriptive manner, e.g. for an implementation of an API or similar.

### Company Information

Task Venture Capital GmbH
Registered at District Court Bremen HRB 35230 HB, Germany

For any legal inquiries or further information, please contact us via email at hello@task.vc.

By using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.