Compare commits

...

153 Commits

SHA1 Message Date
242677404b v1.24.1
Some checks failed
Release / build-and-release (push) Failing after 24s
2026-03-24 20:08:25 +00:00
8c6159c596 fix(repo): migrate smart build config to .smartconfig.json and tidy repository metadata 2026-03-24 20:08:25 +00:00
c210507951 v1.24.0
All checks were successful
Release / build-and-release (push) Successful in 3m6s
2026-03-24 19:54:56 +00:00
0799efadae feat(backup): add containerarchive-backed backup storage, restore, download, and pruning support 2026-03-24 19:54:56 +00:00
22a7e76645 v1.23.0
All checks were successful
Release / build-and-release (push) Successful in 3m24s
2026-03-21 19:36:25 +00:00
22f34e7de5 feat(appstore): add remote app store templates with service upgrades and Redis/MariaDB platform support 2026-03-21 19:36:25 +00:00
c0f9f979c7 v1.22.2
All checks were successful
Release / build-and-release (push) Successful in 2m59s
2026-03-18 09:48:12 +00:00
6e5743c837 fix(web-ui): stabilize app store service creation flow and add Ghost sqlite defaults 2026-03-18 09:48:12 +00:00
829f7e47f1 v1.22.1
Some checks failed
Publish to npm / npm-publish (push) Failing after 8s
CI / Type Check & Lint (push) Failing after 29s
CI / Build Test (Current Platform) (push) Successful in 1m2s
CI / Build All Platforms (push) Successful in 2m19s
Release / build-and-release (push) Successful in 3m45s
2026-03-18 02:44:14 +00:00
a36af5c3d5 fix(repo): no changes to commit 2026-03-18 02:44:14 +00:00
3c99ee5f83 v1.22.0
Some checks failed
Publish to npm / npm-publish (push) Failing after 7s
CI / Type Check & Lint (push) Failing after 29s
CI / Build Test (Current Platform) (push) Successful in 1m1s
CI / Build All Platforms (push) Successful in 2m11s
Release / build-and-release (push) Successful in 3m56s
2026-03-18 02:37:39 +00:00
2faa416895 feat(web-appstore): add an App Store view for quick service deployment from curated templates 2026-03-18 02:37:39 +00:00
acbf448c6f v1.21.0
Some checks failed
Publish to npm / npm-publish (push) Failing after 8s
CI / Type Check & Lint (push) Failing after 27s
CI / Build Test (Current Platform) (push) Successful in 1m2s
CI / Build All Platforms (push) Successful in 2m13s
Release / build-and-release (push) Successful in 3m34s
2026-03-18 02:22:45 +00:00
5c48ae4156 feat(opsserver): add container workspace API and backend execution environment for services 2026-03-18 02:22:45 +00:00
3108408133 v1.20.0
Some checks failed
Publish to npm / npm-publish (push) Failing after 7s
CI / Type Check & Lint (push) Failing after 39s
CI / Build Test (Current Platform) (push) Successful in 1m7s
CI / Build All Platforms (push) Successful in 2m52s
Release / build-and-release (push) Successful in 6m2s
2026-03-17 23:39:25 +00:00
6defdb4431 feat(ops-dashboard): stream user service logs to the ops dashboard and resolve service containers for Docker log streaming 2026-03-17 23:39:24 +00:00
f63be883ce v1.19.12
Some checks failed
Publish to npm / npm-publish (push) Failing after 10s
CI / Build All Platforms (push) Failing after 13m15s
CI / Build Test (Current Platform) (push) Failing after 13m17s
CI / Type Check & Lint (push) Failing after 13m19s
Release / build-and-release (push) Successful in 6m6s
2026-03-17 12:52:34 +00:00
87844bbb8e fix(repo): no changes to commit 2026-03-17 12:52:34 +00:00
02b7cda2be v1.19.11
Some checks failed
Publish to npm / npm-publish (push) Failing after 9s
CI / Type Check & Lint (push) Failing after 28s
CI / Build Test (Current Platform) (push) Successful in 1m0s
CI / Build All Platforms (push) Successful in 1m57s
Release / build-and-release (push) Successful in 5m51s
2026-03-17 10:40:40 +00:00
3b8f95e8e1 fix(repo): no changes to commit 2026-03-17 10:40:40 +00:00
ee774e3f41 v1.19.10
Some checks failed
Publish to npm / npm-publish (push) Failing after 10s
CI / Type Check & Lint (push) Failing after 38s
CI / Build Test (Current Platform) (push) Successful in 1m6s
CI / Build All Platforms (push) Successful in 2m17s
Release / build-and-release (push) Successful in 7m46s
2026-03-17 02:09:50 +00:00
6d93dfa459 fix(repo): no changes to commit 2026-03-17 02:09:50 +00:00
ac394cfafc v1.19.9
Some checks failed
Publish to npm / npm-publish (push) Failing after 8s
CI / Type Check & Lint (push) Failing after 29s
CI / Build Test (Current Platform) (push) Successful in 1m2s
CI / Build All Platforms (push) Successful in 2m0s
Release / build-and-release (push) Successful in 2m51s
2026-03-17 01:53:38 +00:00
97e9f232fa fix(repo): no changes to commit 2026-03-17 01:53:38 +00:00
3dcb6a38e5 v1.19.8
Some checks failed
CI / Build All Platforms (push) Failing after 9s
Publish to npm / npm-publish (push) Failing after 8s
CI / Type Check & Lint (push) Failing after 28s
CI / Build Test (Current Platform) (push) Successful in 59s
Release / build-and-release (push) Failing after 1m24s
2026-03-17 01:37:18 +00:00
ca33970e9a fix(repo): no changes to commit 2026-03-17 01:37:18 +00:00
cd34b98a25 v1.19.7
Some checks failed
Publish to npm / npm-publish (push) Failing after 8s
CI / Type Check & Lint (push) Failing after 33s
CI / Build Test (Current Platform) (push) Successful in 1m7s
CI / Build All Platforms (push) Successful in 1m52s
Release / build-and-release (push) Successful in 3m50s
2026-03-17 01:03:14 +00:00
a089e5bedb fix(repo): no changes to commit 2026-03-17 01:03:14 +00:00
9786ff62f0 v1.19.6
Some checks failed
Publish to npm / npm-publish (push) Failing after 9s
CI / Type Check & Lint (push) Failing after 31s
CI / Build Test (Current Platform) (push) Successful in 1m6s
CI / Build All Platforms (push) Successful in 2m15s
Release / build-and-release (push) Failing after 3m30s
2026-03-17 00:45:56 +00:00
4a5abc4a0a fix(repository): no changes to commit 2026-03-17 00:45:55 +00:00
893a532758 v1.19.5
Some checks failed
Publish to npm / npm-publish (push) Failing after 10s
Release / build-and-release (push) Failing after 3m34s
2026-03-17 00:45:37 +00:00
7ea286c0a9 fix(repo): no changes to commit 2026-03-17 00:45:37 +00:00
f94f47e313 v1.19.4
Some checks failed
Publish to npm / npm-publish (push) Failing after 8s
CI / Type Check & Lint (push) Failing after 32s
CI / Build Test (Current Platform) (push) Successful in 1m7s
CI / Build All Platforms (push) Successful in 2m8s
Release / build-and-release (push) Successful in 3m38s
2026-03-17 00:01:08 +00:00
b1a46f8757 fix(repository): no changes to commit 2026-03-17 00:01:08 +00:00
56c71226e5 v1.19.3
Some checks failed
CI / Build All Platforms (push) Failing after 3s
Publish to npm / npm-publish (push) Failing after 8s
CI / Type Check & Lint (push) Failing after 36s
CI / Build Test (Current Platform) (push) Successful in 1m3s
Release / build-and-release (push) Successful in 2m48s
2026-03-16 20:06:11 +00:00
f53109a01e fix(repo): no changes to commit 2026-03-16 20:06:11 +00:00
bcb2473cc5 v1.19.2
Some checks failed
Publish to npm / npm-publish (push) Failing after 10s
CI / Type Check & Lint (push) Failing after 34s
CI / Build Test (Current Platform) (push) Successful in 1m10s
CI / Build All Platforms (push) Successful in 2m34s
Release / build-and-release (push) Failing after 4m2s
2026-03-16 20:00:10 +00:00
689dcf295b fix(docs): remove outdated UI screenshot assets from project documentation 2026-03-16 20:00:10 +00:00
c1e14e9fc7 v1.19.1
Some checks failed
CI / Type Check & Lint (push) Failing after 30s
CI / Build Test (Current Platform) (push) Successful in 1m3s
CI / Build All Platforms (push) Successful in 2m5s
2026-03-16 19:55:27 +00:00
d5fd57e2c3 fix(dashboard): add updated dashboard screenshots for refresh and resource usage states 2026-03-16 19:55:27 +00:00
079e6a64a9 fix(dashboard): add aggregated resource usage stats to the dashboard
Some checks failed
Publish to npm / npm-publish (push) Failing after 4s
CI / Type Check & Lint (push) Failing after 29s
CI / Build Test (Current Platform) (push) Successful in 1m1s
Release / build-and-release (push) Failing after 1m43s
CI / Build All Platforms (push) Successful in 2m8s
- Aggregate CPU, memory, and network stats across all running containers
- Extend ISystemStatus.docker with resource usage fields
- Fix getContainerStats Swarm service ID fallback
- Wire dashboard resource usage card to real backend data
2026-03-16 16:47:05 +00:00
a04cf053db v1.19.0
Some checks failed
Publish to npm / npm-publish (push) Failing after 11s
CI / Type Check & Lint (push) Failing after 30s
CI / Build Test (Current Platform) (push) Successful in 58s
CI / Build All Platforms (push) Successful in 2m21s
Release / build-and-release (push) Successful in 4m11s
2026-03-16 16:19:39 +00:00
ec0e377ccb feat(opsserver,web): add real-time platform service log streaming to the dashboard 2026-03-16 16:19:39 +00:00
3b3d0433cb fix(platform-services): fix detail view navigation and log display
Some checks failed
CI / Build All Platforms (push) Failing after 4s
Publish to npm / npm-publish (push) Failing after 8s
CI / Type Check & Lint (push) Failing after 29s
CI / Build Test (Current Platform) (push) Successful in 1m6s
Release / build-and-release (push) Successful in 3m21s
- Add back button for returning to services list
- Fix DOM lifecycle when switching between platform services
- Fix timestamp format for dees-chart-log compatibility
- Clear previous stats/logs state before fetching new data
2026-03-16 14:48:46 +00:00
5f876449ca v1.18.4
Some checks failed
CI / Build All Platforms (push) Failing after 6s
Publish to npm / npm-publish (push) Failing after 8s
CI / Type Check & Lint (push) Failing after 24s
CI / Build Test (Current Platform) (push) Successful in 54s
Release / build-and-release (push) Successful in 2m19s
2026-03-16 14:35:45 +00:00
8e781c7f9d fix(repo): no changes to commit 2026-03-16 14:35:45 +00:00
a3eefbe92c v1.18.3
Some checks failed
Publish to npm / npm-publish (push) Failing after 9s
CI / Type Check & Lint (push) Failing after 29s
CI / Build Test (Current Platform) (push) Successful in 1m1s
CI / Build All Platforms (push) Successful in 2m3s
Release / build-and-release (push) Successful in 3m2s
2026-03-16 14:22:37 +00:00
41679427c6 fix(deps): bump @serve.zone/catalog to ^2.6.1 2026-03-16 14:22:37 +00:00
c420a30341 v1.18.2
Some checks failed
Publish to npm / npm-publish (push) Failing after 9s
CI / Type Check & Lint (push) Failing after 28s
CI / Build Test (Current Platform) (push) Successful in 59s
CI / Build All Platforms (push) Successful in 2m0s
Release / build-and-release (push) Successful in 3m41s
2026-03-16 14:14:55 +00:00
fe109f0953 fix(repo): no changes to commit 2026-03-16 14:14:55 +00:00
012dce63b1 v1.18.1
Some checks failed
Publish to npm / npm-publish (push) Failing after 10s
Release / build-and-release (push) Successful in 4m0s
2026-03-16 14:14:34 +00:00
54780482c7 fix(repo): no changes to commit 2026-03-16 14:14:34 +00:00
7ab0fb3c1f v1.18.0
Some checks failed
Publish to npm / npm-publish (push) Failing after 9s
CI / Type Check & Lint (push) Failing after 27s
CI / Build Test (Current Platform) (push) Successful in 58s
CI / Build All Platforms (push) Successful in 1m52s
Release / build-and-release (push) Successful in 2m58s
2026-03-16 13:51:43 +00:00
713fda2a86 feat(platform-services): add platform service log retrieval and display in the services UI 2026-03-16 13:51:43 +00:00
ec32c19300 v1.17.4
Some checks failed
CI / Type Check & Lint (push) Failing after 30s
Publish to npm / npm-publish (push) Failing after 24s
CI / Build Test (Current Platform) (push) Successful in 1m1s
CI / Build All Platforms (push) Successful in 2m12s
Release / build-and-release (push) Successful in 4m0s
2026-03-16 13:26:56 +00:00
7d1d91157c fix(docs): add hello world running screenshot for documentation 2026-03-16 13:26:56 +00:00
b69c96c240 v1.17.3
Some checks failed
CI / Build Test (Current Platform) (push) Failing after 6s
Publish to npm / npm-publish (push) Failing after 8s
CI / Type Check & Lint (push) Failing after 32s
CI / Build All Platforms (push) Successful in 2m4s
Release / build-and-release (push) Successful in 3m13s
2026-03-16 13:05:47 +00:00
9ee8851d03 fix(mongodb): downgrade the MongoDB service image to 4.4 and use the legacy mongo shell for container operations 2026-03-16 13:05:47 +00:00
7f6031f31a v1.17.2
Some checks failed
CI / Type Check & Lint (push) Failing after 29s
Publish to npm / npm-publish (push) Failing after 25s
CI / Build Test (Current Platform) (push) Successful in 1m4s
CI / Build All Platforms (push) Successful in 2m5s
Release / build-and-release (push) Successful in 4m12s
2026-03-16 12:45:44 +00:00
6f1b8469e0 fix(platform-services): provision ClickHouse, MinIO, and MongoDB resources via docker exec instead of host port access 2026-03-16 12:45:44 +00:00
cd06c74cc3 v1.17.1
Some checks failed
Publish to npm / npm-publish (push) Failing after 10s
CI / Type Check & Lint (push) Failing after 41s
CI / Build Test (Current Platform) (push) Successful in 1m12s
Release / build-and-release (push) Failing after 1m54s
CI / Build All Platforms (push) Successful in 2m17s
2026-03-16 12:40:39 +00:00
d3acc720ca fix(repo): no changes to commit 2026-03-16 12:40:39 +00:00
1b6de75097 v1.17.0
Some checks failed
Publish to npm / npm-publish (push) Failing after 10s
CI / Type Check & Lint (push) Failing after 29s
CI / Build Test (Current Platform) (push) Successful in 56s
Release / build-and-release (push) Failing after 37s
CI / Build All Platforms (push) Successful in 2m7s
2026-03-16 12:36:02 +00:00
497f8f59a7 feat(web/services): add deploy service action to the services view 2026-03-16 12:36:02 +00:00
0c7d65e4ad v1.16.0
Some checks failed
Publish to npm / npm-publish (push) Failing after 10s
CI / Type Check & Lint (push) Failing after 28s
CI / Build Test (Current Platform) (push) Successful in 52s
CI / Build All Platforms (push) Successful in 1m50s
Release / build-and-release (push) Successful in 2m49s
2026-03-16 11:45:56 +00:00
3f2cd074ce feat(services): add platform service navigation and stats in the services UI 2026-03-16 11:45:56 +00:00
59ed7233bd v1.15.3
Some checks failed
CI / Build All Platforms (push) Failing after 3s
Publish to npm / npm-publish (push) Failing after 8s
CI / Type Check & Lint (push) Failing after 25s
CI / Build Test (Current Platform) (push) Successful in 54s
Release / build-and-release (push) Successful in 2m35s
2026-03-16 11:07:00 +00:00
01e3ba16c4 fix(install): refresh systemd service configuration before restarting previously running installations 2026-03-16 11:07:00 +00:00
f5c1d5fcda v1.15.2
Some checks failed
Publish to npm / npm-publish (push) Failing after 8s
CI / Type Check & Lint (push) Failing after 32s
CI / Build Test (Current Platform) (push) Successful in 1m7s
CI / Build All Platforms (push) Successful in 1m58s
Release / build-and-release (push) Successful in 3m9s
2026-03-16 10:58:08 +00:00
45b0971f2f fix(systemd): set HOME and DENO_DIR for the systemd service environment 2026-03-16 10:58:08 +00:00
178f440d7e v1.15.1
Some checks failed
CI / Type Check & Lint (push) Failing after 30s
Publish to npm / npm-publish (push) Failing after 29s
CI / Build Test (Current Platform) (push) Successful in 1m0s
CI / Build All Platforms (push) Successful in 2m8s
Release / build-and-release (push) Successful in 2m53s
2026-03-16 10:23:05 +00:00
7fff15a90c fix(systemd): move Docker installation and swarm initialization to systemd enable flow 2026-03-16 10:23:05 +00:00
69e23f667e v1.15.0
Some checks failed
CI / Build All Platforms (push) Failing after 7s
Publish to npm / npm-publish (push) Failing after 8s
CI / Type Check & Lint (push) Failing after 26s
CI / Build Test (Current Platform) (push) Successful in 58s
Release / build-and-release (push) Successful in 2m58s
2026-03-16 10:02:59 +00:00
a2bf4df7c2 feat(systemd): replace smartdaemon-based service management with native systemd commands 2026-03-16 10:02:59 +00:00
9e0a0b5a89 v1.14.10
Some checks failed
Publish to npm / npm-publish (push) Failing after 9s
CI / Type Check & Lint (push) Failing after 32s
CI / Build Test (Current Platform) (push) Successful in 59s
CI / Build All Platforms (push) Successful in 2m1s
Release / build-and-release (push) Successful in 2m42s
2026-03-16 08:40:48 +00:00
3a227bd838 fix(services): stop auto-update monitoring during shutdown 2026-03-16 08:40:48 +00:00
f5a7fccfc2 v1.14.9
Some checks failed
CI / Type Check & Lint (push) Failing after 25s
Publish to npm / npm-publish (push) Failing after 32s
CI / Build Test (Current Platform) (push) Successful in 57s
CI / Build All Platforms (push) Successful in 2m20s
Release / build-and-release (push) Successful in 3m50s
2026-03-16 08:25:32 +00:00
a30d2029a5 fix(repo): no changes to commit 2026-03-16 08:25:32 +00:00
88727dd47d v1.14.8
Some checks failed
Publish to npm / npm-publish (push) Failing after 9s
CI / Type Check & Lint (push) Failing after 45s
CI / Build Test (Current Platform) (push) Successful in 1m19s
CI / Build All Platforms (push) Successful in 2m46s
Release / build-and-release (push) Successful in 5m20s
2026-03-16 03:06:23 +00:00
9a5ed2220e fix(repo): no changes to commit 2026-03-16 03:06:23 +00:00
decd39e7c4 v1.14.7
Some checks failed
CI / Build All Platforms (push) Has been cancelled
CI / Type Check & Lint (push) Has been cancelled
CI / Build Test (Current Platform) (push) Has been cancelled
Publish to npm / npm-publish (push) Failing after 8s
Release / build-and-release (push) Successful in 5m34s
2026-03-16 03:06:17 +00:00
ad2e228208 fix(repo): no changes to commit 2026-03-16 03:06:17 +00:00
cf06019d79 v1.14.6
Some checks failed
CI / Build Test (Current Platform) (push) Has been cancelled
CI / Type Check & Lint (push) Has been cancelled
CI / Build All Platforms (push) Has been cancelled
Publish to npm / npm-publish (push) Failing after 6s
Release / build-and-release (push) Successful in 5m30s
2026-03-16 03:06:08 +00:00
cf44b0047d fix(project): no changes to commit 2026-03-16 03:06:08 +00:00
260b5364e6 v1.14.5
Some checks failed
CI / Build All Platforms (push) Failing after 6s
Publish to npm / npm-publish (push) Failing after 8s
CI / Type Check & Lint (push) Failing after 24s
CI / Build Test (Current Platform) (push) Successful in 51s
Release / build-and-release (push) Successful in 3m30s
2026-03-16 03:04:57 +00:00
51c1962042 fix(onebox): move Docker auto-install and swarm initialization into Onebox startup flow 2026-03-16 03:04:57 +00:00
d3b78054ad v1.14.4
Some checks failed
CI / Build Test (Current Platform) (push) Failing after 5s
Publish to npm / npm-publish (push) Failing after 7s
CI / Type Check & Lint (push) Failing after 30s
CI / Build All Platforms (push) Successful in 1m57s
Release / build-and-release (push) Successful in 3m15s
2026-03-16 02:37:59 +00:00
d2ae35f0ce fix(repo): no changes to commit 2026-03-16 02:37:59 +00:00
a605477663 v1.14.3
Some checks failed
Publish to npm / npm-publish (push) Failing after 7s
CI / Type Check & Lint (push) Failing after 29s
CI / Build Test (Current Platform) (push) Successful in 52s
CI / Build All Platforms (push) Successful in 1m54s
Release / build-and-release (push) Successful in 3m14s
2026-03-16 02:17:20 +00:00
ba98086548 fix(repo): no changes to commit 2026-03-16 02:17:20 +00:00
0b3c22556b v1.14.2
Some checks failed
CI / Build All Platforms (push) Failing after 6s
Publish to npm / npm-publish (push) Failing after 7s
CI / Type Check & Lint (push) Failing after 23s
CI / Build Test (Current Platform) (push) Successful in 51s
Release / build-and-release (push) Has been cancelled
2026-03-16 02:11:41 +00:00
069e6e6c8f fix(repo): no changes to commit 2026-03-16 02:11:41 +00:00
10598520d8 v1.14.1
Some checks failed
Publish to npm / npm-publish (push) Failing after 9s
CI / Type Check & Lint (push) Failing after 28s
CI / Build Test (Current Platform) (push) Successful in 57s
CI / Build All Platforms (push) Successful in 2m6s
Release / build-and-release (push) Successful in 3m32s
2026-03-16 01:33:07 +00:00
075b7946b1 fix(repo): no changes to commit 2026-03-16 01:33:07 +00:00
f47fca3304 v1.14.0
Some checks failed
Publish to npm / npm-publish (push) Failing after 7s
CI / Type Check & Lint (push) Failing after 31s
CI / Build Test (Current Platform) (push) Successful in 1m2s
CI / Build All Platforms (push) Successful in 2m5s
Release / build-and-release (push) Successful in 4m57s
2026-03-16 01:19:58 +00:00
575e010a6b feat(daemon): auto-install Docker and initialize Swarm during daemon service setup 2026-03-16 01:19:58 +00:00
60a5dc4663 v1.13.17
Some checks failed
CI / Type Check & Lint (push) Failing after 39s
Publish to npm / npm-publish (push) Failing after 54s
CI / Build Test (Current Platform) (push) Successful in 1m49s
CI / Build All Platforms (push) Successful in 3m18s
Release / build-and-release (push) Successful in 4m38s
2026-03-16 01:10:23 +00:00
36d80b1e27 fix(ci): remove forced container image pulling from Gitea workflow jobs 2026-03-16 01:10:23 +00:00
465cf0ee72 v1.13.16
Some checks failed
CI / Type Check & Lint (push) Failing after 0s
CI / Build Test (Current Platform) (push) Failing after 0s
CI / Build All Platforms (push) Failing after 0s
Publish to npm / npm-publish (push) Failing after 0s
Release / build-and-release (push) Failing after 2m33s
2026-03-16 00:59:27 +00:00
bd5cd5c0cb fix(ci): refresh workflow container images on every run and bump @apiclient.xyz/docker to ^5.1.1 2026-03-16 00:59:27 +00:00
b622565e34 v1.13.15
Some checks failed
CI / Build Test (Current Platform) (push) Failing after 5s
CI / Type Check & Lint (push) Failing after 30s
Publish to npm / npm-publish (push) Failing after 35s
CI / Build All Platforms (push) Successful in 2m25s
Release / build-and-release (push) Successful in 3m43s
2026-03-15 21:13:56 +00:00
56376121ab fix(repo): no changes to commit 2026-03-15 21:13:56 +00:00
e3359d1235 v1.13.14
Some checks failed
CI / Type Check & Lint (push) Failing after 2s
CI / Build Test (Current Platform) (push) Failing after 2s
CI / Build All Platforms (push) Failing after 2s
Publish to npm / npm-publish (push) Failing after 2s
Release / build-and-release (push) Successful in 5m55s
2026-03-15 20:56:22 +00:00
f1eeec6922 fix(repo): no changes to commit 2026-03-15 20:56:22 +00:00
69362bb529 v1.13.13
Some checks failed
CI / Build All Platforms (push) Failing after 6s
Publish to npm / npm-publish (push) Failing after 8s
CI / Type Check & Lint (push) Failing after 32s
CI / Build Test (Current Platform) (push) Failing after 50s
Release / build-and-release (push) Failing after 4s
2026-03-15 20:49:49 +00:00
857fcc50ba fix(repo): no changes to commit 2026-03-15 20:49:49 +00:00
5d0df006eb v1.13.12
Some checks failed
Publish to npm / npm-publish (push) Failing after 9s
CI / Type Check & Lint (push) Failing after 30s
CI / Build Test (Current Platform) (push) Failing after 59s
Release / build-and-release (push) Failing after 1m10s
CI / Build All Platforms (push) Successful in 1m49s
2026-03-15 19:27:13 +00:00
e6256502ce fix(ci): run pnpm install with --ignore-scripts in CI and release workflows 2026-03-15 19:27:13 +00:00
d5dc141171 v1.13.11
Some checks failed
Publish to npm / npm-publish (push) Failing after 9s
CI / Build Test (Current Platform) (push) Failing after 22s
CI / Build All Platforms (push) Failing after 20s
CI / Type Check & Lint (push) Failing after 30s
Release / build-and-release (push) Failing after 15s
2026-03-15 19:11:46 +00:00
2538f5ae2c fix(project): no changes to commit 2026-03-15 19:11:46 +00:00
4613193dcc v1.13.10
Some checks failed
Publish to npm / npm-publish (push) Failing after 12s
CI / Type Check & Lint (push) Failing after 40s
CI / Build All Platforms (push) Failing after 1m28s
CI / Build Test (Current Platform) (push) Failing after 1m31s
Release / build-and-release (push) Failing after 1m21s
2026-03-15 18:39:16 +00:00
848b3afe54 fix(deps): bump @git.zone/tsdeno to ^1.2.0 2026-03-15 18:39:16 +00:00
dd86bae942 v1.13.9
Some checks failed
Publish to npm / npm-publish (push) Failing after 8s
CI / Build Test (Current Platform) (push) Failing after 14s
CI / Build All Platforms (push) Failing after 18s
Release / build-and-release (push) Failing after 19s
CI / Type Check & Lint (push) Failing after 30s
2026-03-15 18:32:32 +00:00
4691c61544 fix(repo): no changes to commit 2026-03-15 18:32:32 +00:00
dfb2d3b340 v1.13.8
Some checks failed
Publish to npm / npm-publish (push) Failing after 9s
CI / Build Test (Current Platform) (push) Failing after 15s
CI / Build All Platforms (push) Failing after 16s
CI / Type Check & Lint (push) Failing after 38s
Release / build-and-release (push) Failing after 31s
2026-03-15 18:12:48 +00:00
6a19ab05e3 fix(repo): no changes to commit 2026-03-15 18:12:48 +00:00
7b718da7a2 v1.13.7
Some checks failed
CI / Build All Platforms (push) Failing after 13m21s
CI / Build Test (Current Platform) (push) Failing after 13m23s
CI / Type Check & Lint (push) Failing after 13m25s
Release / build-and-release (push) Failing after 15s
Publish to npm / npm-publish (push) Failing after 6s
2026-03-15 16:38:24 +00:00
ebaf545418 fix(repo): no changes to commit 2026-03-15 16:38:24 +00:00
2cdfdaed55 v1.13.6
Some checks failed
CI / Build All Platforms (push) Has been cancelled
CI / Build Test (Current Platform) (push) Has been cancelled
Publish to npm / npm-publish (push) Has been cancelled
Release / build-and-release (push) Has been cancelled
CI / Type Check & Lint (push) Failing after 53m53s
2026-03-15 15:49:42 +00:00
2216804652 fix(ci): correct workflow container image registry path 2026-03-15 15:49:42 +00:00
1b177037f5 v1.13.5
Some checks failed
CI / Type Check & Lint (push) Failing after 1s
CI / Build Test (Current Platform) (push) Failing after 1s
CI / Build All Platforms (push) Failing after 1s
Publish to npm / npm-publish (push) Failing after 1s
Release / build-and-release (push) Failing after 1s
2026-03-15 15:47:21 +00:00
9d6590927c fix(workflows): switch Gitea workflow containers from ht-docker-dbase to ht-docker-node 2026-03-15 15:47:21 +00:00
eaf401200c v1.13.4
Some checks failed
CI / Type Check & Lint (push) Failing after 2s
CI / Build Test (Current Platform) (push) Failing after 1s
CI / Build All Platforms (push) Failing after 1s
Publish to npm / npm-publish (push) Failing after 1s
Release / build-and-release (push) Failing after 1s
2026-03-15 15:44:54 +00:00
e97a4d53ae fix(ci): run workflows in the shared build container and enable corepack for pnpm installs 2026-03-15 15:44:54 +00:00
ca2b3b25a5 v1.13.3
Some checks failed
Publish to npm / npm-publish (push) Failing after 7s
CI / Build Test (Current Platform) (push) Failing after 12s
CI / Build All Platforms (push) Failing after 13s
Release / build-and-release (push) Failing after 13s
CI / Type Check & Lint (push) Failing after 24s
2026-03-15 15:41:37 +00:00
19703de50d fix(build): replace custom Deno compile scripts with tsdeno-based binary builds in CI and release workflows 2026-03-15 15:41:37 +00:00
bcab4f274e v1.13.2
Some checks failed
CI / Type Check & Lint (push) Failing after 9s
CI / Build Test (Current Platform) (push) Failing after 9s
Release / build-and-release (push) Failing after 9s
Publish to npm / npm-publish (push) Failing after 1m54s
CI / Build All Platforms (push) Successful in 3m37s
2026-03-15 13:37:03 +00:00
64e947735f fix(scripts): install production dependencies before compiling binaries and exclude local node_modules from builds 2026-03-15 13:37:03 +00:00
1e05c08002 v1.13.1
Some checks failed
CI / Type Check & Lint (push) Failing after 8s
CI / Build Test (Current Platform) (push) Failing after 8s
CI / Build All Platforms (push) Failing after 9s
Publish to npm / npm-publish (push) Failing after 9s
Release / build-and-release (push) Failing after 9s
2026-03-15 13:31:26 +00:00
167df321f9 fix(deno): remove nodeModulesDir from Deno configuration 2026-03-15 13:31:25 +00:00
49998c4c32 add migration
Some checks failed
CI / Type Check & Lint (push) Failing after 36s
CI / Build Test (Current Platform) (push) Failing after 1m8s
CI / Build All Platforms (push) Successful in 8m29s
2026-03-15 12:45:13 +00:00
8045ec38df v1.13.0
Some checks failed
CI / Build Test (Current Platform) (push) Failing after 1m0s
CI / Type Check & Lint (push) Failing after 1m10s
Release / build-and-release (push) Failing after 4m38s
Publish to npm / npm-publish (push) Failing after 5m34s
CI / Build All Platforms (push) Successful in 10m5s
2026-03-15 12:24:48 +00:00
793fb18b43 feat(install): improve installer with version selection, service restart handling, and upgrade documentation 2026-03-15 12:24:48 +00:00
09534fd899 v1.12.1
Some checks failed
CI / Type Check & Lint (push) Failing after 45s
CI / Build Test (Current Platform) (push) Failing after 1m24s
CI / Build All Platforms (push) Failing after 3m27s
Publish to npm / npm-publish (push) Failing after 3m21s
Release / build-and-release (push) Failing after 4m45s
2026-03-15 12:07:15 +00:00
5f3783a5e9 fix(package.json): update package metadata 2026-03-15 12:07:15 +00:00
92555c5a5e v1.12.0
Some checks failed
Publish to npm / npm-publish (push) Failing after 4m33s
Release / build-and-release (push) Failing after 4m49s
2026-03-15 12:06:55 +00:00
ddc7fa4bee feat(cli,release): add self-upgrade command and automate CI, release, and npm publishing workflows 2026-03-15 12:06:55 +00:00
eceb5d99c8 v1.11.0 2026-03-03 11:57:41 +00:00
0631b7731f feat(services): map backend service data to UI components, add stats & logs parsing, fetch service stats, and fix logs request param 2026-03-03 11:57:41 +00:00
4c485cdc0a v1.10.3 2026-03-02 07:26:38 +00:00
0f0da0f2ef fix(bin): make bin/onebox-wrapper.js executable 2026-03-02 07:26:38 +00:00
88367f70eb v1.10.2 2026-03-02 07:18:54 +00:00
bfcfef79da fix(build): update build/watch configuration, switch to esbuild bundler and tswatch, and bump catalog and tooling dependencies 2026-03-02 07:18:54 +00:00
d95270613b v1.10.1 2026-02-24 20:31:08 +00:00
14f6746833 fix(package.json): update package metadata 2026-02-24 20:31:07 +00:00
fe8ca00337 v1.10.0 2026-02-24 18:15:44 +00:00
ba05cc84fe feat(opsserver): introduce OpsServer (TypedRequest API) and new lightweight web UI; replace legacy Angular UI and add typed interfaces 2026-02-24 18:15:44 +00:00
84c47cd7f5 v1.9.2 2025-12-03 22:10:56 +00:00
9365f20f6d fix(ui): Add VS Code configs for the UI workspace and normalize dark theme CSS variables 2025-12-03 22:10:56 +00:00
bc2ed4b03a v1.9.1 2025-11-27 22:35:35 +00:00
e4dd4cce0a fix(ui): Correct import success toast and add VS Code launch/tasks recommendations for the UI 2025-11-27 22:35:35 +00:00
34c90e21db v1.9.0 2025-11-27 22:29:36 +00:00
ea7bb1395f feat(backups): Add backup import API and improve backup download/import flow in UI 2025-11-27 22:29:36 +00:00
196 changed files with 53757 additions and 22072 deletions


@@ -1,140 +0,0 @@
# Onebox Development Notes
## ⚠️ CRITICAL DEVELOPMENT RULES ⚠️
### NEVER GUESS - ALWAYS READ THE ACTUAL CODE
**ALWAYS look at the dependency's actual code. Don't start guessing.**
Run `pnpm run watch` when you start working so the UI gets recompiled and the server restarts automatically on file changes.
When working with any dependency:
1. **READ the actual source code** in `node_modules/` or check the package documentation
2. **CHECK the exact API** - don't assume based on similar libraries
3. **VERIFY method names, return types, and property structures** before using them
4. **TEST with the actual implementation** - APIs change between versions
Common mistakes to avoid - and what to do instead:
- ❌ Assuming API structure based on similar libraries
- ❌ Guessing method names or property paths
- ❌ Using outdated documentation without checking current version
- ✅ Read the actual TypeScript definitions in node_modules
- ✅ Check the package's README and changelog
- ✅ Test the actual behavior before implementing
## Architecture Changes
### Reverse Proxy Implementation
- **Replaced Nginx** with native Deno reverse proxy (`ts/classes/reverseproxy.ts`)
- Features:
  - HTTP/HTTPS dual servers (ports 80/443)
  - TLS/SSL certificate management with hot-reload
  - WebSocket bidirectional proxying
  - Dynamic routing from database
  - SNI (Server Name Indication) support
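Routing is populated dynamically from the database, so a quick way to sanity-check host-based routing from the server itself is to pin a configured domain to the local proxy with curl (the hostname below is a placeholder; use one that actually exists in the routes table):

```bash
# HTTPS: resolve the route's domain to the local proxy and inspect the response headers
curl -kI --resolve app.example.com:443:127.0.0.1 https://app.example.com/

# HTTP (port 80): the same check via an explicit Host header
curl -I -H "Host: app.example.com" http://127.0.0.1/
```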
### Code Organization
- Removed "onebox." prefix from all TypeScript files
- Organized into subfolders:
  - `ts/classes/` - All class implementations
  - `ts/` - Root level utilities (logging, types, plugins, cli, info)
### WebSocket Real-time Communication
- **Backend**: WebSocket endpoint at `/api/ws` (`ts/classes/httpserver.ts:96-174`)
  - Connection management with client Set tracking
  - Broadcast methods: `broadcast()`, `broadcastServiceUpdate()`, `broadcastServiceStatus()`
  - Integrated with service lifecycle (start/stop/restart actions)
  - Status monitoring loop broadcasts changes automatically
- **Frontend**: Angular WebSocket service (`ui/src/app/core/services/websocket.service.ts`)
  - Auto-connects on app initialization
  - Exponential backoff reconnection (max 5 attempts)
  - RxJS Observable-based message streaming
  - Components subscribe to real-time updates
- **Message Types**:
  - `connected` - Initial connection confirmation
  - `service_update` - Service lifecycle changes (action: created/updated/deleted/started/stopped)
  - `service_status` - Real-time status changes from monitoring loop
  - `system_status` - System-wide updates
- **Testing**: Use `.nogit/test-ws-updates.ts` to monitor WebSocket messages
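For a quick look at the live message stream without the test script, a one-off client can be run from the shell (a sketch: assumes the dev server on port 3000 and that `/api/ws` is reachable without extra auth; otherwise use `.nogit/test-ws-updates.ts`):

```bash
# Print every broadcast message from the Onebox WebSocket endpoint; stop with Ctrl+C
deno eval '
  const ws = new WebSocket("ws://localhost:3000/api/ws");
  ws.onopen = () => console.log("connected to /api/ws");
  ws.onmessage = (event) => console.log(event.data);
  ws.onclose = () => console.log("connection closed");
'
```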
### Docker Configuration
- **System Docker**: Uses root Docker at `/var/run/docker.sock` (NOT rootless)
- **Swarm Mode**: Enabled for service orchestration
- **API Access**: Interact with Docker via direct API calls to the socket
  - ❌ DO NOT switch Docker CLI contexts
  - ✅ Use curl/HTTP requests to `/var/run/docker.sock`
- **Network**: Overlay network `onebox-network` with `Attachable: true`
- **Services vs Containers**: All workloads run as Swarm services (not standalone containers)
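For example, listing Swarm services or checking Swarm state works with plain HTTP over the socket (standard Docker Engine API paths; `jq` is only used here for readability):

```bash
# Confirm Swarm mode is active
curl -s --unix-socket /var/run/docker.sock http://localhost/info | jq '.Swarm.LocalNodeState'

# List Swarm services (how Onebox workloads run)
curl -s --unix-socket /var/run/docker.sock http://localhost/services | jq '.[].Spec.Name'

# List running containers
curl -s --unix-socket /var/run/docker.sock http://localhost/containers/json | jq '.[].Names'
```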
## Debugging Tips
### Backend Logs
Use the background bash task to check server logs:
```bash
# Check for specific patterns (e.g., Login attempts)
BashOutput tool with filter: "Login|error|Error"
# Check all recent output
BashOutput tool without filter
```
The dev server runs with `--watch` so it auto-restarts on file changes.
### Frontend Testing
Use Playwright for UI testing:
```typescript
// Navigate to app
mcp__playwright__browser_navigate({ url: "http://localhost:3000" })
// Fill login form
mcp__playwright__browser_fill_form({
  fields: [
    { name: "Username", type: "textbox", ref: "...", value: "admin" },
    { name: "Password", type: "textbox", ref: "...", value: "admin" }
  ]
})
// Click button
mcp__playwright__browser_click({ element: "Sign in button", ref: "..." })
// Check console errors
// Playwright automatically shows console messages in results
```
### Common Issues
#### Login Issue (Fixed)
**Problem**: `admin/admin` credentials returned "Invalid credentials"
**Root Cause**: `rowToUser()` function in database.ts was accessing rows as arrays `row[2]` instead of objects `row.password_hash`. The @db/sqlite library returns rows as objects with snake_case column names.
**Fix**: Updated `rowToUser()` to support both access patterns:
```typescript
private rowToUser(row: any): IUser {
  return {
    passwordHash: String(row.password_hash || row[2]),
    // ... other fields
  };
}
```
**Location**: `ts/classes/database.ts:506-515`
## Default Credentials
- Username: `admin`
- Password: `admin`
- ⚠️ Change immediately after first login!
## Development Server
```bash
# Main server (port 3000)
deno task dev
# Check server status
curl http://localhost:3000/api/status
```
## API Endpoints
- `POST /api/auth/login` - Login (returns JWT-like token)
- `GET /api/status` - System status (requires auth)
- `GET /api/services` - List services (requires auth)
- See `ts/classes/httpserver.ts` for full API
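A minimal smoke test of the auth flow from the shell can look like this (a sketch: the login body field names and the `Bearer` header scheme are assumptions - check `ts/classes/httpserver.ts` for the authoritative request/response shapes):

```bash
# Log in with the default credentials and capture the returned token
TOKEN=$(curl -s -X POST http://localhost:3000/api/auth/login \
  -H "Content-Type: application/json" \
  -d '{"username":"admin","password":"admin"}' | jq -r '.token')

# Call authenticated endpoints with the token
curl -s -H "Authorization: Bearer $TOKEN" http://localhost:3000/api/status | jq .
curl -s -H "Authorization: Bearer $TOKEN" http://localhost:3000/api/services | jq .
```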


@@ -0,0 +1,37 @@
## Onebox {{VERSION}}
Pre-compiled binaries for multiple platforms.
### Installation
#### Option 1: Via npm (recommended)
```bash
npm install -g @serve.zone/onebox
```
#### Option 2: Via installer script
```bash
curl -sSL https://code.foss.global/serve.zone/onebox/raw/branch/main/install.sh | sudo bash
```
#### Option 3: Direct binary download
Download the appropriate binary for your platform from the assets below and make it executable.
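For example, on a Linux x86_64 host (a sketch; copy the exact asset URL from the release page if it differs):

```bash
# Download the binary, make it executable, and put it on the PATH
curl -L -o onebox \
  https://code.foss.global/serve.zone/onebox/releases/download/{{VERSION}}/onebox-linux-x64
chmod +x onebox
sudo mv onebox /usr/local/bin/onebox
```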
### Supported Platforms
- Linux x86_64 (x64)
- Linux ARM64 (aarch64)
- macOS x86_64 (Intel)
- macOS ARM64 (Apple Silicon)
- Windows x86_64
### Checksums
SHA256 checksums are provided in `SHA256SUMS.txt` for binary verification.
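With a downloaded binary and `SHA256SUMS.txt` in the same directory, verification looks like:

```bash
# Verify only the files you actually downloaded
sha256sum -c SHA256SUMS.txt --ignore-missing
```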
### npm Package
The npm package includes automatic binary detection and installation for your platform.


@@ -0,0 +1,211 @@
name: Release
on:
  push:
    tags:
      - 'v*'
jobs:
  build-and-release:
    runs-on: ubuntu-latest
    container:
      image: code.foss.global/host.today/ht-docker-node:latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Set up Deno
        uses: denoland/setup-deno@v1
        with:
          deno-version: v2.x
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '22'
      - name: Enable corepack
        run: corepack enable
      - name: Install dependencies
        run: pnpm install --ignore-scripts
      - name: Get version from tag
        id: version
        run: |
          VERSION=${GITHUB_REF#refs/tags/}
          echo "version=$VERSION" >> $GITHUB_OUTPUT
          echo "version_number=${VERSION#v}" >> $GITHUB_OUTPUT
          echo "Building version: $VERSION"
      - name: Verify deno.json version matches tag
        run: |
          DENO_VERSION=$(grep -o '"version": "[^"]*"' deno.json | cut -d'"' -f4)
          TAG_VERSION="${{ steps.version.outputs.version_number }}"
          echo "deno.json version: $DENO_VERSION"
          echo "Tag version: $TAG_VERSION"
          if [ "$DENO_VERSION" != "$TAG_VERSION" ]; then
            echo "ERROR: Version mismatch!"
            echo "deno.json has version $DENO_VERSION but tag is $TAG_VERSION"
            exit 1
          fi
      - name: Compile binaries for all platforms
        run: mkdir -p dist/binaries && npx tsdeno compile
      - name: Generate SHA256 checksums
        run: |
          cd dist/binaries
          sha256sum * > SHA256SUMS.txt
          cat SHA256SUMS.txt
          cd ../..
      - name: Extract changelog for this version
        id: changelog
        run: |
          VERSION="${{ steps.version.outputs.version }}"
          # Check if CHANGELOG.md exists
          if [ ! -f CHANGELOG.md ] && [ ! -f changelog.md ]; then
            echo "No changelog found, using default release notes"
            cat > /tmp/release_notes.md << EOF
          ## Onebox $VERSION
          Pre-compiled binaries for multiple platforms.
          ### Installation
          Use the installation script:
          \`\`\`bash
          curl -sSL https://code.foss.global/serve.zone/onebox/raw/branch/main/install.sh | sudo bash
          \`\`\`
          Or download the binary for your platform and make it executable.
          ### Supported Platforms
          - Linux x86_64 (x64)
          - Linux ARM64 (aarch64)
          - macOS x86_64 (Intel)
          - macOS ARM64 (Apple Silicon)
          - Windows x86_64
          ### Checksums
          SHA256 checksums are provided in SHA256SUMS.txt
          EOF
          else
            CHANGELOG_FILE=$([ -f CHANGELOG.md ] && echo "CHANGELOG.md" || echo "changelog.md")
            awk "/## \[$VERSION\]/,/## \[/" "$CHANGELOG_FILE" | sed '$d' > /tmp/release_notes.md || cat > /tmp/release_notes.md << EOF
          ## Onebox $VERSION
          See changelog.md for full details.
          ### Installation
          Use the installation script:
          \`\`\`bash
          curl -sSL https://code.foss.global/serve.zone/onebox/raw/branch/main/install.sh | sudo bash
          \`\`\`
          EOF
          fi
          echo "Release notes:"
          cat /tmp/release_notes.md
      - name: Delete existing release if it exists
        run: |
          VERSION="${{ steps.version.outputs.version }}"
          echo "Checking for existing release $VERSION..."
          # Try to get existing release by tag
          EXISTING_RELEASE_ID=$(curl -s \
            -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
            "https://code.foss.global/api/v1/repos/serve.zone/onebox/releases/tags/$VERSION" \
            | jq -r '.id // empty')
          if [ -n "$EXISTING_RELEASE_ID" ]; then
            echo "Found existing release (ID: $EXISTING_RELEASE_ID), deleting..."
            curl -X DELETE -s \
              -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
              "https://code.foss.global/api/v1/repos/serve.zone/onebox/releases/$EXISTING_RELEASE_ID"
            echo "Existing release deleted"
            sleep 2
          else
            echo "No existing release found, proceeding with creation"
          fi
      - name: Create Gitea Release
        run: |
          VERSION="${{ steps.version.outputs.version }}"
          RELEASE_NOTES=$(cat /tmp/release_notes.md)
          # Create the release
          echo "Creating release for $VERSION..."
          RELEASE_ID=$(curl -X POST -s \
            -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
            -H "Content-Type: application/json" \
            "https://code.foss.global/api/v1/repos/serve.zone/onebox/releases" \
            -d "{
              \"tag_name\": \"$VERSION\",
              \"name\": \"Onebox $VERSION\",
              \"body\": $(jq -Rs . /tmp/release_notes.md),
              \"draft\": false,
              \"prerelease\": false
            }" | jq -r '.id')
          echo "Release created with ID: $RELEASE_ID"
          # Upload binaries as release assets
          for binary in dist/binaries/*; do
            filename=$(basename "$binary")
            echo "Uploading $filename..."
            curl -X POST -s \
              -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
              -H "Content-Type: application/octet-stream" \
              --data-binary "@$binary" \
              "https://code.foss.global/api/v1/repos/serve.zone/onebox/releases/$RELEASE_ID/assets?name=$filename"
          done
          echo "All assets uploaded successfully"
      - name: Clean up old releases
        run: |
          echo "Cleaning up old releases (keeping only last 3)..."
          # Fetch all releases sorted by creation date
          RELEASES=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
            "https://code.foss.global/api/v1/repos/serve.zone/onebox/releases" | \
            jq -r 'sort_by(.created_at) | reverse | .[3:] | .[].id')
          # Delete old releases
          if [ -n "$RELEASES" ]; then
            echo "Found releases to delete:"
            for release_id in $RELEASES; do
              echo " Deleting release ID: $release_id"
              curl -X DELETE -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
                "https://code.foss.global/api/v1/repos/serve.zone/onebox/releases/$release_id"
            done
            echo "Old releases deleted successfully"
          else
            echo "No old releases to delete (less than 4 releases total)"
          fi
          echo ""
      - name: Release Summary
        run: |
          echo "================================================"
          echo " Release ${{ steps.version.outputs.version }} Complete!"
          echo "================================================"
          echo ""
          echo "Binaries published:"
          ls -lh dist/binaries/
          echo ""
          echo "Release URL:"
          echo "https://code.foss.global/serve.zone/onebox/releases/tag/${{ steps.version.outputs.version }}"
          echo ""
          echo "Installation command:"
          echo "curl -sSL https://code.foss.global/serve.zone/onebox/raw/branch/main/install.sh | sudo bash"
          echo ""
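Given the tag trigger and the version check above, cutting a release comes down to keeping `deno.json` and the tag in sync (a sketch; the version number is an example):

```bash
# 1. Bump "version" in deno.json, then commit it
git add deno.json && git commit -m "v1.24.2"

# 2. Tag with the matching v-prefixed version and push branch and tag
git tag v1.24.2
git push origin main --tags
```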

.gitignore

@@ -1,3 +1,30 @@
.nogit/
# artifacts
coverage/
public/
# installs
node_modules/
# caches
.yarn/
.cache/
.rpt2_cache
# builds
dist/
dist_*/
# rust
rust/target/
dist_rust/
# AI
.claude/
.serena/
#------# custom
# Deno
.deno/
deno.lock
@@ -50,4 +77,4 @@ logs/
*.log
.playwright-mcp
./dist/

.smartconfig.json

@@ -0,0 +1,79 @@
{
  "@git.zone/tsbundle": {
    "bundles": [
      {
        "from": "./ts_web/index.ts",
        "to": "./ts_bundled/bundle.ts",
        "outputMode": "base64ts",
        "bundler": "esbuild",
        "production": true,
        "includeFiles": [
          {
            "from": "./html/index.html",
            "to": "index.html"
          }
        ]
      }
    ]
  },
  "@git.zone/tsdeno": {
    "compileTargets": [
      {
        "name": "onebox-linux-x64",
        "entryPoint": "mod.ts",
        "outDir": "dist/binaries",
        "target": "x86_64-unknown-linux-gnu",
        "permissions": ["--allow-all"],
        "noCheck": true
      },
      {
        "name": "onebox-linux-arm64",
        "entryPoint": "mod.ts",
        "outDir": "dist/binaries",
        "target": "aarch64-unknown-linux-gnu",
        "permissions": ["--allow-all"],
        "noCheck": true
      }
    ]
  },
  "@git.zone/tswatch": {
    "bundles": [
      {
        "from": "./ts_web/index.ts",
        "to": "./ts_bundled/bundle.ts",
        "outputMode": "base64ts",
        "bundler": "esbuild",
        "production": true,
        "watchPatterns": ["./ts_web/**/*", "./html/**/*"],
        "includeFiles": [
          {
            "from": "./html/index.html",
            "to": "index.html"
          }
        ]
      }
    ],
    "watchers": [
      {
        "name": "backend",
        "watch": ["./ts/**/*", "./ts_interfaces/**/*", "./ts_bundled/**/*"],
        "command": "deno run --allow-all --unstable-ffi mod.ts server --ephemeral --monitor",
        "restart": true,
        "debounce": 500,
        "runOnStart": true
      }
    ]
  },
  "@git.zone/cli": {
    "projectType": "denoSaaS",
    "module": {
      "githost": "code.foss.global",
      "gitscope": "serve.zone",
      "gitrepo": "onebox",
      "description": "Self-hosted container platform with automatic SSL and DNS - a mini Heroku for single servers",
      "npmPackagename": "@serve.zone/onebox",
      "license": "MIT"
    }
  },
  "@ship.zone/szci": {}
}
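The tools configured here map onto the commands used elsewhere in this repository: the `@git.zone/tswatch` section presumably backs the `pnpm run watch` development loop mentioned in the development notes, and the `@git.zone/tsdeno` compile targets are what the release workflow's `npx tsdeno compile` step builds:

```bash
# Rebuild the web bundle and restart the backend on file changes (tswatch config above)
pnpm run watch

# Compile the configured binaries into dist/binaries (tsdeno compileTargets above)
mkdir -p dist/binaries && npx tsdeno compile
```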


@@ -1,6 +1,504 @@
# Changelog
## 2026-03-24 - 1.24.1 - fix(repo)
migrate smart build config to .smartconfig.json and tidy repository metadata
- Rename npmextra.json to .smartconfig.json and extend it with CLI project metadata for the repository.
- Mark the package as private and add an empty pnpm overrides block in package.json.
- Expand .gitignore to cover common build artifacts, caches, install directories, and local tooling folders.
- Reformat changelog and README files for cleaner spacing and Markdown table alignment without changing documented behavior.
## 2026-03-24 - 1.24.0 - feat(backup)
add containerarchive-backed backup storage, restore, download, and pruning support
- add database support for archive snapshot IDs and stored size tracking for backups
- initialize and close the backup archive during onebox lifecycle startup and shutdown
- allow backup download and restore flows to work with archive snapshots as well as legacy file-based backups
- schedule daily archive pruning based on the most generous configured retention policy
- replace smarts3 with smartstorage for registry-backed S3-compatible storage
## 2026-03-21 - 1.23.0 - feat(appstore)
add remote app store templates with service upgrades and Redis/MariaDB platform support
- introduces an App Store manager, API handlers, shared request types, and web UI flow for browsing remote templates and deploying services from template metadata
- tracks app template id and version on services, adds upgrade discovery and migration-based service upgrades, and includes a database migration for template version columns
- adds Redis and MariaDB platform service providers with provisioning plus backup and restore support, and exposes their requirements through service creation and app template config
## 2026-03-18 - 1.22.2 - fix(web-ui)
stabilize app store service creation flow and add Ghost sqlite defaults
- Defers App Store navigation to the services view to avoid destroying the current view during the deploy event handler.
- Processes pending app templates after services view updates so the create flow opens reliably.
- Adds default Ghost environment variables for sqlite3 and the database file path in the App Store template.
- Removes obsolete Gitea CI and npm publish workflow definitions.
## 2026-03-18 - 1.22.1 - fix(repo)
no changes to commit
## 2026-03-18 - 1.22.0 - feat(web-appstore)
add an App Store view for quick service deployment from curated templates
- adds a new App Store tab to the web UI with curated Docker app templates
- passes selected app templates through UI state into the services view for quick deployment
- supports quick deploy creation with prefilled image, port, environment variables, and optional platform service flags
- updates @serve.zone/catalog to ^2.8.0 to support the new app store view
## 2026-03-18 - 1.21.0 - feat(opsserver)
add container workspace API and backend execution environment for services
- introduces typed workspace handlers for reading, writing, listing, creating, removing, and executing commands inside service containers
- adds frontend backend-execution environment integration so the service view can open a workspace against a selected service
- extends Docker exec lookup to resolve Swarm service container IDs when a direct container ID is unavailable
## 2026-03-17 - 1.20.0 - feat(ops-dashboard)
stream user service logs to the ops dashboard and resolve service containers for Docker log streaming
- add typed socket support for pushing live user service log entries to the web app
- extend platform log streaming to include running user services with separate dashboard handlers
- fall back from direct container lookup to service-to-container resolution when streaming Docker logs
- update log parsing to preserve timestamps and infer log levels for service log entries
- bump @serve.zone/catalog to ^2.7.0
## 2026-03-17 - 1.19.12 - fix(repo)
no changes to commit
## 2026-03-17 - 1.19.11 - fix(repo)
no changes to commit
## 2026-03-17 - 1.19.10 - fix(repo)
no changes to commit
## 2026-03-17 - 1.19.9 - fix(repo)
no changes to commit
## 2026-03-17 - 1.19.8 - fix(repo)
no changes to commit
## 2026-03-17 - 1.19.7 - fix(repo)
no changes to commit
## 2026-03-17 - 1.19.6 - fix(repository)
no changes to commit
## 2026-03-17 - 1.19.5 - fix(repo)
no changes to commit
## 2026-03-17 - 1.19.4 - fix(repository)
no changes to commit
## 2026-03-16 - 1.19.3 - fix(repo)
no changes to commit
## 2026-03-16 - 1.19.2 - fix(docs)
remove outdated UI screenshot assets from project documentation
- Deletes multiple PNG screenshots that documented previous dashboard, service form, and hello-world states.
- Reduces repository clutter by removing obsolete image assets no longer needed in docs.
## 2026-03-16 - 1.19.1 - fix(dashboard)
add updated dashboard screenshots for refresh and resource usage states
- Adds new dashboard screenshots covering post-refresh, resource usage, and populated data views.
- Updates visual assets to document current dashboard behavior and UI states.
## 2026-03-16 - 1.19.1 - fix(dashboard)
add aggregated resource usage stats to the dashboard
- Aggregate CPU, memory, and network stats across all running user and platform service containers in getSystemStatus
- Extend ISystemStatus.docker interface with cpuUsage, memoryUsage, memoryTotal, networkIn, networkOut fields
- Fix getContainerStats to properly handle Swarm service IDs by catching exceptions and falling back to label-based container lookup
- Wire dashboard resource usage card to display real aggregated data from the backend
## 2026-03-16 - 1.19.0 - feat(opsserver,web)
add real-time platform service log streaming to the dashboard
- stream running platform service container logs from the ops server to connected dashboard clients via TypedSocket
- parse Docker log timestamps and levels for both pushed and fetched platform service log entries
- enhance the platform service detail view with mapped statuses and predefined host, port, version, and config metadata
- add the typedsocket dependency and update the catalog package for dashboard support
## 2026-03-16 - 1.18.5 - fix(platform-services)
fix platform service detail view navigation and log display
- Add back button to platform service detail view for returning to services list
- Fix DOM lifecycle when switching between platform services (destroy and recreate dees-chart-log)
- Fix timestamp format for log entries to use ISO 8601 for dees-chart-log compatibility
- Clear previous stats/logs state before fetching new platform service data
## 2026-03-16 - 1.18.4 - fix(repo)
no changes to commit
## 2026-03-16 - 1.18.3 - fix(deps)
bump @serve.zone/catalog to ^2.6.1
- Updates the @serve.zone/catalog runtime dependency from ^2.6.0 to ^2.6.1.
## 2026-03-16 - 1.18.2 - fix(repo)
no changes to commit
## 2026-03-16 - 1.18.1 - fix(repo)
no changes to commit
## 2026-03-16 - 1.18.0 - feat(platform-services)
add platform service log retrieval and display in the services UI
- add typed request support in the ops server to fetch Docker logs for platform service containers
- store fetched platform service logs in web app state and load them when opening platform service details
- render platform service logs in the services detail view and add sidebar icons for main navigation tabs
## 2026-03-16 - 1.17.4 - fix(docs)
add hello world running screenshot for documentation
- Adds a new PNG asset showing the application in a running hello world state.
- Supports project documentation or README usage without changing runtime behavior.
## 2026-03-16 - 1.17.3 - fix(mongodb)
downgrade the MongoDB service image to 4.4 and use the legacy mongo shell for container operations
- changes the default MongoDB container image from mongo:7 to mongo:4.4
- replaces mongosh with mongo for health checks, provisioning, and deprovisioning inside the container
## 2026-03-16 - 1.17.2 - fix(platform-services)
provision ClickHouse, MinIO, and MongoDB resources via docker exec instead of host port access
- switch ClickHouse provisioning and teardown to in-container client commands to avoid host port mapping issues
- replace MinIO host-side S3 API calls with in-container mc commands for bucket creation and removal
- run MongoDB provisioning and deprovisioning through mongosh inside the container and improve docker exec failure reporting
## 2026-03-16 - 1.17.1 - fix(repo)
no changes to commit
## 2026-03-16 - 1.17.0 - feat(web/services)
add deploy service action to the services view
- Adds a prominent "Deploy Service" button to the services page header.
- Routes users into the create service view directly from the services listing.
- Includes a new service creation form screenshot asset for the updated interface.
## 2026-03-16 - 1.16.0 - feat(services)
add platform service navigation and stats in the services UI
- add platform service stats state and fetch action
- show platform services in the services list and open a platform detail view
- enable dashboard clicks to jump directly to the selected platform service
- refresh platform service stats after start and restart actions
- bump @serve.zone/catalog to ^2.6.0 for the new platform service UI components
## 2026-03-16 - 1.15.3 - fix(install)
refresh systemd service configuration before restarting previously running installations
- Re-enable the systemd service during updates so unit file changes are applied before restart
- Add a log message indicating the service configuration is being refreshed
## 2026-03-16 - 1.15.2 - fix(systemd)
set HOME and DENO_DIR for the systemd service environment
- Adds HOME=/root to the generated onebox systemd unit
- Adds DENO_DIR=/root/.cache/deno so Deno cache paths are available when running as a service
## 2026-03-16 - 1.15.1 - fix(systemd)
move Docker installation and swarm initialization to systemd enable flow
- Ensures Docker is installed before writing and enabling the systemd unit that depends on docker.service.
- Removes Docker auto-installation from Onebox initialization so setup happens in the service management path.
## 2026-03-16 - 1.15.0 - feat(systemd)
replace smartdaemon-based service management with native systemd commands
- adds a dedicated OneboxSystemd manager for enabling, disabling, starting, stopping, checking status, and following logs
- introduces a new `onebox systemd` CLI command set and updates install and help output to use it
- removes the smartdaemon dependency and related service management code
## 2026-03-16 - 1.14.10 - fix(services)
stop auto-update monitoring during shutdown
- Track the auto-update polling interval in the services manager
- Clear the auto-update interval when Onebox shuts down to prevent background checks after shutdown
## 2026-03-16 - 1.14.9 - fix(repo)
no changes to commit
## 2026-03-16 - 1.14.8 - fix(repo)
no changes to commit
## 2026-03-16 - 1.14.7 - fix(repo)
no changes to commit
## 2026-03-16 - 1.14.6 - fix(project)
no changes to commit
## 2026-03-16 - 1.14.5 - fix(onebox)
move Docker auto-install and swarm initialization into Onebox startup flow
- removes Docker setup from daemon service installation
- ensures Docker is installed before Docker initialization during Onebox startup
- preserves automatic Docker Swarm initialization on fresh servers
## 2026-03-16 - 1.14.4 - fix(repo)
no changes to commit
## 2026-03-16 - 1.14.3 - fix(repo)
no changes to commit
## 2026-03-16 - 1.14.2 - fix(repo)
no changes to commit
## 2026-03-16 - 1.14.1 - fix(repo)
no changes to commit
## 2026-03-16 - 1.14.0 - feat(daemon)
auto-install Docker and initialize Swarm during daemon service setup
- Adds a Docker availability check before installing the Onebox daemon service
- Installs Docker automatically when it is missing using the standard installation script
- Attempts to initialize Docker Swarm after installation and handles already-initialized environments gracefully
## 2026-03-16 - 1.13.17 - fix(ci)
remove forced container image pulling from Gitea workflow jobs
- Drops the `--pull always` container option from CI, npm publish, and release workflows.
- Keeps workflow container images unchanged while avoiding forced pulls on every job run.
## 2026-03-16 - 1.13.16 - fix(ci)
refresh workflow container images on every run and bump @apiclient.xyz/docker to ^5.1.1
- add --pull always to CI, release, and npm publish workflow containers to avoid stale images
- update @apiclient.xyz/docker from ^5.1.0 to ^5.1.1 in deno.json
## 2026-03-15 - 1.13.15 - fix(repo)
no changes to commit
## 2026-03-15 - 1.13.14 - fix(repo)
no changes to commit
## 2026-03-15 - 1.13.13 - fix(repo)
no changes to commit
## 2026-03-15 - 1.13.12 - fix(ci)
run pnpm install with --ignore-scripts in CI and release workflows
- Update CI workflow dependency installation steps to skip lifecycle scripts during builds.
- Apply the same install change to the release workflow for consistent automation behavior.
## 2026-03-15 - 1.13.11 - fix(project)
no changes to commit
## 2026-03-15 - 1.13.10 - fix(deps)
bump @git.zone/tsdeno to ^1.2.0
- Updates the tsdeno development dependency from ^1.1.1 to ^1.2.0.
## 2026-03-15 - 1.13.9 - fix(repo)
no changes to commit
## 2026-03-15 - 1.13.8 - fix(repo)
no changes to commit
## 2026-03-15 - 1.13.7 - fix(repo)
no changes to commit
## 2026-03-15 - 1.13.6 - fix(ci)
correct workflow container image registry path
- Update Gitea CI, release, and npm publish workflows to use the corrected ht-docker-node image path
- Align all workflow container references from hosttoday to host.today to prevent pipeline image resolution issues
## 2026-03-15 - 1.13.5 - fix(workflows)
switch Gitea workflow containers from ht-docker-dbase to ht-docker-node
- Updates the CI, release, and npm publish workflows to use the Node-focused container image consistently.
- Aligns workflow runtime images with the project's Node and Deno build and publish steps.
## 2026-03-15 - 1.13.4 - fix(ci)
run workflows in the shared build container and enable corepack for pnpm installs
- adds the ht-docker-dbase container image to CI, release, and npm publish workflows
- enables corepack before pnpm install in build and release jobs to ensure package manager availability
## 2026-03-15 - 1.13.3 - fix(build)
replace custom Deno compile scripts with tsdeno-based binary builds in CI and release workflows
- adds @git.zone/tsdeno as a dev dependency and configures compile targets in npmextra.json
- updates CI and release workflows to install Node.js dependencies before running tsdeno compile
- removes the legacy scripts/compile-all.sh script and points the compile task to tsdeno compile
## 2026-03-15 - 1.13.2 - fix(scripts)
install production dependencies before compiling binaries and exclude local node_modules from builds
- Adds a dependency installation step using the application entrypoint before cross-platform compilation
- Updates all deno compile targets to use --node-modules-dir=none to avoid bundling local node_modules
## 2026-03-15 - 1.13.1 - fix(deno)
remove nodeModulesDir from Deno configuration
- Drops the explicit nodeModulesDir setting from deno.json.
- Keeps the package version unchanged at 1.13.0 while simplifying runtime configuration.
## 2026-03-15 - 1.13.0 - feat(install)
improve installer with version selection, service restart handling, and upgrade documentation
- Adds installer command-line options for help, specific version selection, and custom install directory.
- Fetches the latest release from the Gitea API when no version is provided and installs the matching platform binary.
- Preserves Onebox data directories, stops and restarts the systemd service during updates, and refreshes installation instructions in the README including upgrade usage.
## 2026-03-15 - 1.12.1 - fix(package.json)
update package metadata
- Single metadata-only file changed (+1, -1)
- No source code or runtime behavior modified; safe patch release
## 2026-03-15 - 1.12.0 - feat(cli,release)
add self-upgrade command and automate CI, release, and npm publishing workflows
- adds a new `onebox upgrade` CLI command that checks the latest release and reinstalls the current binary via the installer script
- introduces Gitea CI workflows for type checks, build verification, multi-platform binary compilation, release creation, and npm publishing
- adds a reusable release template describing installation options, supported platforms, and checksum availability
## 2026-03-03 - 1.11.0 - feat(services)
map backend service data to UI components, add stats & logs parsing, fetch service stats, and fix logs request param
- Fix: rename service logs request property from 'lines' to 'tail' when calling typedRequest
- Add data transformation helpers: formatBytes, parseImageString, mapStatus, toServiceDetail, toServiceStats, parseLogs
- Transform service list and detail props to match @serve.zone/catalog component interfaces (map status, image, repo/tag, timestamps, registry)
- Dispatch fetchServiceStatsAction on service click and surface transformed stats with default values to avoid nulls
- Parse and normalize logs into timestamp/message pairs for the detail view
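Two of the helpers named above are simple enough to sketch; these are illustrative versions, not the code shipped in the UI:

```typescript
// Human-readable byte formatting for the stats view.
function formatBytes(bytes: number): string {
  if (bytes <= 0) return '0 B';
  const units = ['B', 'KB', 'MB', 'GB', 'TB'];
  const i = Math.min(Math.floor(Math.log(bytes) / Math.log(1024)), units.length - 1);
  return `${(bytes / 1024 ** i).toFixed(1)} ${units[i]}`;
}

// Normalize raw log output into timestamp/message pairs, assuming each line
// starts with an RFC3339 timestamp followed by a space (Docker --timestamps style).
function parseLogs(raw: string): Array<{ timestamp: string; message: string }> {
  return raw
    .split('\n')
    .filter((line) => line.trim().length > 0)
    .map((line) => {
      const idx = line.indexOf(' ');
      return idx > 0
        ? { timestamp: line.slice(0, idx), message: line.slice(idx + 1) }
        : { timestamp: '', message: line };
    });
}
```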
## 2026-03-02 - 1.10.3 - fix(bin)
make bin/onebox-wrapper.js executable
- Metadata-only change: file mode updated for bin/onebox-wrapper.js to include the executable bit
- No source or behavior changes to the code
## 2026-03-02 - 1.10.2 - fix(build)
update build/watch configuration, switch to esbuild bundler and tswatch, and bump catalog and tooling dependencies
- Switch watch script to 'tswatch' (replaced previous concurrently command invoking deno + tswatch).
- npmextra.json: set bundler to 'esbuild', enable production mode, include html/index.html in the bundle, and extend watchPatterns to include `./html/**/*`.
- Backend watcher: expanded watch globs and changed command to include --unstable-ffi and runtime flags (--ephemeral --monitor); restart and debounce kept.
- Bump runtime deps: @design.estate/dees-catalog -> ^3.43.3, @serve.zone/catalog -> ^2.5.0.
- Bump devDependencies: @git.zone/tsbundle -> ^2.9.0, @git.zone/tswatch -> ^3.2.0.
## 2026-02-24 - 1.10.1 - fix(package.json)
update package metadata
- Single metadata-only file changed (+1 -1)
- No source code or runtime behavior modified; safe patch release
- Current package version is 1.10.0; recommend patch bump to 1.10.1
## 2026-02-24 - 1.10.0 - feat(opsserver)
introduce OpsServer (TypedRequest API) and new lightweight web UI; replace legacy Angular UI and add typed interfaces
- Add OpsServer (ts/opsserver) with TypedRequest handlers for admin, services, platform, dns, domains, registry, network, backups, schedules, settings and logs.
- Integrate typedrequest/typedserver and smartjwt/smartguard plugins (ts/plugins.ts) and add comprehensive ts_interfaces for requests and data shapes.
- Replace legacy HTTP server usage with OpsServer throughout daemon, Onebox class and CLI (ts/classes/daemon.ts, ts/classes/onebox.ts, ts/cli.ts).
- Implement log streaming via VirtualStream and support for downloading/restoring backups and registry token management within handlers.
- Introduce new web UI built with dees-element web components under ts_web (ob-app-shell and views) and bundle/watch tooling (npmextra.json, tsbundle/tswatch integration).
- Update package.json: add build/watch scripts, tsbundle/tswatch dev deps and new runtime dependencies for typedrequest and catalog components.
- Remove large Angular-based ui application and related services/components in ui/ (major cleanup of Angular code and assets).
- Note: This adds many new endpoints and internal API changes (TypedRequest-based); consumers of the old UI/HTTP endpoints should migrate to the new OpsServer TypedRequest API and web components.
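To give a feel for the TypedRequest style (without reproducing the project's actual interfaces, which live in ts_interfaces and may be shaped differently), a request contract pairs a method name with typed request and response payloads, and handlers are written against those payloads:

```typescript
// Hedged sketch of a typed request/response contract and a handler against it.
interface IRequest_GetServices {
  method: 'getServices';
  request: { jwt: string };
  response: { services: Array<{ name: string; image: string; status: string }> };
}

type Handler<T extends { request: unknown; response: unknown }> =
  (req: T['request']) => Promise<T['response']>;

const getServicesHandler: Handler<IRequest_GetServices> = async (_req) => {
  // a real handler would consult the services manager and map DB rows here
  return { services: [] };
};
```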
## 2025-12-03 - 1.9.2 - fix(ui)
Add VS Code configs for the UI workspace and normalize dark theme CSS variables
- Add VS Code workspace files under ui/.vscode:
  - extensions.json: recommend the Angular language support extension
  - launch.json: Chrome launch configurations for 'ng serve' and 'ng test' (preLaunchTask hooks)
  - tasks.json: npm 'start' and 'test' tasks with a background TypeScript problem matcher to improve dev workflow
- Update ui/src/styles.css dark theme variables to use neutral black/gray HSL values for background, foreground, cards, popovers, accents, borders, inputs and ring to improve contrast and consistency
## 2025-11-27 - 1.9.1 - fix(ui)
Correct import success toast and add VS Code launch/tasks recommendations for the UI
- Fix backup import success toast in backups-tab.component to reference response.data.service.name (previously response.data.serviceName), preventing incorrect service name display.
- Add VS Code workspace settings for the UI: extensions recommendation, launch configurations for 'ng serve' and 'ng test', and npm tasks for start/test to simplify local development and debugging.
## 2025-11-27 - 1.9.0 - feat(backups)
Add backup import API and improve backup download/import flow in UI
- Backend: add /api/backups/import endpoint to accept multipart file uploads or JSON with a URL and import backups (saves temp file, validates .tar.enc, calls backupManager.restoreBackup in import mode).
- Backend: server-side import handler downloads remote backup URLs, stores temporary file, invokes restore/import logic and cleans up temp files.
- Frontend: add downloadBackup, importBackupFromFile and importBackupFromUrl methods to ApiService; trigger browser download using Blob and object URL with Authorization header.
- Frontend: replace raw download link in service detail UI with a Download button that calls downloadBackup and shows success/error toasts.
- Dev: add VS Code launch, tasks and recommended extensions for the ui workspace to simplify local development.
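On the client side, the described flows boil down to a multipart POST for imports and a Blob-based download with an Authorization header; a sketch (the import path comes from the entry above, while the download path, auth scheme, and response shape are assumptions):

```typescript
// Browser-side sketch of backup import and download against the HTTP API.
async function importBackupFromFile(file: File, token: string): Promise<unknown> {
  const form = new FormData();
  form.append('file', file);
  const res = await fetch('/api/backups/import', {
    method: 'POST',
    headers: { Authorization: `Bearer ${token}` },
    body: form,
  });
  if (!res.ok) throw new Error(`Import failed with status ${res.status}`);
  return res.json();
}

async function downloadBackup(backupId: string, token: string): Promise<void> {
  const res = await fetch(`/api/backups/${backupId}/download`, { // assumed path
    headers: { Authorization: `Bearer ${token}` },
  });
  if (!res.ok) throw new Error(`Download failed with status ${res.status}`);
  const url = URL.createObjectURL(await res.blob());
  const a = document.createElement('a');
  a.href = url;
  a.download = `${backupId}.tar.enc`;
  a.click();
  URL.revokeObjectURL(url);
}
```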
## 2025-11-27 - 1.8.0 - feat(backup)
Add backup scheduling system with GFS retention, API and UI integration
- Introduce backup scheduling subsystem (BackupScheduler) and integrate it into Onebox lifecycle (init & shutdown)
@@ -13,6 +511,7 @@ Add backup scheduling system with GFS retention, API and UI integration
- Type and repository updates across codebase to support schedule-aware backups, schedule CRUD, and retention enforcement
## 2025-11-27 - 1.7.0 - feat(backup)
Add backup system: BackupManager, DB schema, API endpoints and UI support
Introduce a complete service backup/restore subsystem with encrypted archives, database records and REST endpoints. Implements BackupManager with export/import for service config, platform resources (MongoDB, MinIO, ClickHouse), and Docker images; adds BackupRepository and migrations for backups table and include_image_in_backup; integrates backup flows into the HTTP API and the UI client; exposes backup password management and restore modes (restore/import/clone). Wire BackupManager into Onebox initialization.
@@ -25,6 +524,7 @@ Introduce a complete service backup/restore subsystem with encrypted archives, d
- Integrate BackupManager into Onebox core (initialized in Onebox constructor) and wire HTTP handlers to use the new manager; add DB repository export/import glue so backups are stored and referenced by ID.
## 2025-11-27 - 1.6.0 - feat(ui.dashboard)
Add Resource Usage card to dashboard and make dashboard cards full-height; add VSCode launch/tasks/config
- Introduce ResourceUsageCardComponent and include it as a full-width row in the dashboard layout.
@@ -33,6 +533,7 @@ Add Resource Usage card to dashboard and make dashboard cards full-height; add V
- Add VSCode workspace configuration: recommended Angular extension, launch configurations for ng serve/ng test, and npm tasks to run/start the UI in development.
## 2025-11-27 - 1.5.0 - feat(network)
Add traffic stats endpoint and dashboard UI; enhance platform services and certificate health reporting
- Add /api/network/traffic-stats GET endpoint to the HTTP API with an optional minutes query parameter (validated, 1-60).
@@ -44,26 +545,29 @@ Add traffic stats endpoint and dashboard UI; enhance platform services and certi
- Add VSCode workspace launch/tasks recommendations for the UI development environment.
## 2025-11-26 - 1.4.0 - feat(platform-services)
Add ClickHouse platform service support and improve related healthchecks and tooling
- Add ClickHouse as a first-class platform service: register provider, provision/cleanup support and env var injection
- Expose ClickHouse endpoints in the HTTP API routing (list/get/start/stop/stats) and map default port (8123)
- Enable services to request ClickHouse as a platform requirement (enableClickHouse / platformRequirements) during deploy/provision flows
- Fix ClickHouse container health check to use absolute wget path (/usr/bin/wget) for more reliable in-container checks
- Add VS Code workspace launch/tasks/extensions configs for the UI (ui/.vscode/*) to improve local dev experience
## 2025-11-26 - 1.3.0 - feat(platform-services)
Add ClickHouse platform service support (provider, types, provisioning, UI and port mappings)
- Introduce ClickHouse as a first-class platform service: added ClickHouseProvider and registered it in PlatformServicesManager
- Support provisioning ClickHouse resources for user services and storing encrypted credentials in platform_resources
- Add ClickHouse to core types (TPlatformServiceType, IPlatformRequirements, IServiceDeployOptions) and service DB handling so services can request ClickHouse
- Inject ClickHouse-related environment variables into deployed services (CLICKHOUSE_* mappings) when provisioning resources
- Expose ClickHouse default port (8123) in platform port mappings / network targets
- UI: add checkbox and description for enabling ClickHouse during service creation; form now submits enableClickHouse
- Add VS Code recommendations and launch/tasks for the UI development workflow
## 2025-11-26 - 1.2.1 - fix(platform-services/minio)
Improve MinIO provider: reuse existing data and credentials, use host-bound port for provisioning, and safer provisioning/deprovisioning
- MinIO provider now detects existing data directory and will reuse stored admin credentials when available instead of regenerating them.
@@ -74,15 +578,17 @@ Improve MinIO provider: reuse existing data and credentials, use host-bound port
- Added VSCode workspace files (extensions, launch, tasks) for the ui project to improve developer experience.
## 2025-11-26 - 1.2.0 - feat(ui)
Sync UI tab state with URL and update routes/links
- Add VSCode workspace recommendations, launch and tasks configs for the UI (ui/.vscode/*)
- Update Angular routes to support tab URL segments and default redirects for services, network and registries
- Change service detail route to use explicit 'detail/:name' path and update links accordingly
- Make ServicesList, Registries and Network components read tab from route params and navigate on tab changes; add ngOnDestroy to unsubscribe
- Update Domain detail template link to point to the new services detail route
## 2025-11-26 - 1.1.0 - feat(platform-services)
Add platform service log streaming, improve health checks and provisioning robustness
- Add WebSocket log streaming support for platform services (backend + UI) to stream MinIO/MongoDB/Caddy logs in real time
@@ -102,6 +608,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
### Added
- Initial project structure
- Core architecture classes
- Docker container management
@@ -120,4 +627,5 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [1.0.0] - TBD
### Added
- First stable release

View File

@@ -1,12 +1,11 @@
 {
   "name": "@serve.zone/onebox",
-  "version": "1.8.0",
+  "version": "1.24.1",
   "exports": "./mod.ts",
-  "nodeModulesDir": "auto",
   "tasks": {
     "test": "deno test --allow-all test/",
     "test:watch": "deno test --allow-all --watch test/",
-    "compile": "bash scripts/compile-all.sh",
+    "compile": "tsdeno compile",
     "dev": "pnpm run watch"
   },
   "imports": {
@@ -16,13 +15,19 @@
     "@std/assert": "jsr:@std/assert@^1.0.15",
     "@std/encoding": "jsr:@std/encoding@^1.0.10",
     "@db/sqlite": "jsr:@db/sqlite@0.12.0",
-    "@push.rocks/smartdaemon": "npm:@push.rocks/smartdaemon@^2.1.0",
-    "@apiclient.xyz/docker": "npm:@apiclient.xyz/docker@^5.1.0",
+    "@apiclient.xyz/docker": "npm:@apiclient.xyz/docker@^5.1.1",
     "@apiclient.xyz/cloudflare": "npm:@apiclient.xyz/cloudflare@6.4.3",
     "@push.rocks/smartacme": "npm:@push.rocks/smartacme@^8.0.0",
     "@push.rocks/smartregistry": "npm:@push.rocks/smartregistry@^2.2.0",
-    "@push.rocks/smarts3": "npm:@push.rocks/smarts3@^5.1.0",
-    "@push.rocks/taskbuffer": "npm:@push.rocks/taskbuffer@^3.1.0"
+    "@push.rocks/smartstorage": "npm:@push.rocks/smartstorage@^6.3.0",
+    "@push.rocks/taskbuffer": "npm:@push.rocks/taskbuffer@^3.1.0",
+    "@api.global/typedrequest-interfaces": "npm:@api.global/typedrequest-interfaces@^3.0.19",
+    "@api.global/typedrequest": "npm:@api.global/typedrequest@^3.2.6",
+    "@api.global/typedserver": "npm:@api.global/typedserver@^8.3.1",
+    "@push.rocks/smartguard": "npm:@push.rocks/smartguard@^3.1.0",
+    "@push.rocks/smartjwt": "npm:@push.rocks/smartjwt@^2.2.1",
+    "@api.global/typedsocket": "npm:@api.global/typedsocket@^4.1.2",
+    "@serve.zone/containerarchive": "npm:@serve.zone/containerarchive@^0.1.3"
   },
   "compilerOptions": {
     "lib": [

36196
dist_serve/bundle.js Normal file

File diff suppressed because one or more lines are too long

33
dist_serve/index.html Normal file
View File

@@ -0,0 +1,33 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta
name="viewport"
content="user-scalable=0, initial-scale=1, maximum-scale=1, minimum-scale=1, width=device-width, height=device-height"
/>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="theme-color" content="#000000" />
<title>Onebox</title>
<link rel="preconnect" href="https://assetbroker.lossless.one/" crossorigin>
<link rel="stylesheet" href="https://assetbroker.lossless.one/fonts/fonts.css">
<style>
html {
-ms-text-size-adjust: 100%;
-webkit-text-size-adjust: 100%;
}
body {
position: relative;
background: #000;
margin: 0px;
}
</style>
</head>
<body>
<noscript>
<p style="color: #fff; text-align: center; margin-top: 100px;">
JavaScript is required to run the Onebox dashboard.
</p>
</noscript>
</body>
<script defer type="module" src="/bundle.js"></script>
</html>

33
html/index.html Normal file
View File

@@ -0,0 +1,33 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta
name="viewport"
content="user-scalable=0, initial-scale=1, maximum-scale=1, minimum-scale=1, width=device-width, height=device-height"
/>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="theme-color" content="#000000" />
<title>Onebox</title>
<link rel="preconnect" href="https://assetbroker.lossless.one/" crossorigin>
<link rel="stylesheet" href="https://assetbroker.lossless.one/fonts/fonts.css">
<style>
html {
-ms-text-size-adjust: 100%;
-webkit-text-size-adjust: 100%;
}
body {
position: relative;
background: #000;
margin: 0px;
}
</style>
</head>
<body>
<noscript>
<p style="color: #fff; text-align: center; margin-top: 100px;">
JavaScript is required to run the Onebox dashboard.
</p>
</noscript>
</body>
<script defer type="module" src="/bundle.js"></script>
</html>

View File

@@ -1,192 +1,310 @@
#!/bin/bash #!/bin/bash
# Onebox Installer Script
# Downloads and installs pre-compiled Onebox binary from Gitea releases
# #
# Onebox installer script # Usage:
# Direct piped installation (recommended):
# curl -sSL https://code.foss.global/serve.zone/onebox/raw/branch/main/install.sh | sudo bash
# #
# With version specification:
# curl -sSL https://code.foss.global/serve.zone/onebox/raw/branch/main/install.sh | sudo bash -s -- --version v1.11.0
#
# Options:
# -h, --help Show this help message
# --version VERSION Install specific version (e.g., v1.11.0)
# --install-dir DIR Installation directory (default: /opt/onebox)
set -e set -e
# Configuration # Default values
REPO_URL="https://code.foss.global/serve.zone/onebox" SHOW_HELP=0
SPECIFIED_VERSION=""
INSTALL_DIR="/opt/onebox" INSTALL_DIR="/opt/onebox"
BIN_LINK="/usr/local/bin/onebox" GITEA_BASE_URL="https://code.foss.global"
GITEA_REPO="serve.zone/onebox"
SERVICE_NAME="onebox"
# Colors # Parse command line arguments
RED='\033[0;31m' while [[ $# -gt 0 ]]; do
GREEN='\033[0;32m' case $1 in
YELLOW='\033[1;33m' -h|--help)
NC='\033[0m' # No Color SHOW_HELP=1
shift
;;
--version)
SPECIFIED_VERSION="$2"
shift 2
;;
--install-dir)
INSTALL_DIR="$2"
shift 2
;;
*)
echo "Unknown option: $1"
echo "Use -h or --help for usage information"
exit 1
;;
esac
done
# Functions if [ $SHOW_HELP -eq 1 ]; then
error() { echo "Onebox Installer Script"
echo -e "${RED}Error: $1${NC}" >&2 echo "Downloads and installs pre-compiled Onebox binary"
exit 1 echo ""
} echo "Usage: $0 [options]"
echo ""
info() { echo "Options:"
echo -e "${GREEN}$1${NC}" echo " -h, --help Show this help message"
} echo " --version VERSION Install specific version (e.g., v1.11.0)"
echo " --install-dir DIR Installation directory (default: /opt/onebox)"
warn() { echo ""
echo -e "${YELLOW}$1${NC}" echo "Examples:"
} echo " # Install latest version"
echo " curl -sSL https://code.foss.global/serve.zone/onebox/raw/branch/main/install.sh | sudo bash"
# Detect platform and architecture echo ""
detect_platform() { echo " # Install specific version"
OS=$(uname -s | tr '[:upper:]' '[:lower:]') echo " curl -sSL https://code.foss.global/serve.zone/onebox/raw/branch/main/install.sh | sudo bash -s -- --version v1.11.0"
ARCH=$(uname -m) exit 0
fi
case "$OS" in
linux)
PLATFORM="linux"
;;
darwin)
PLATFORM="macos"
;;
*)
error "Unsupported operating system: $OS"
;;
esac
case "$ARCH" in
x86_64|amd64)
ARCH="x64"
;;
aarch64|arm64)
ARCH="arm64"
;;
*)
error "Unsupported architecture: $ARCH"
;;
esac
BINARY_NAME="onebox-${PLATFORM}-${ARCH}"
}
# Get latest version from Gitea API
get_latest_version() {
info "Fetching latest version..."
VERSION=$(curl -s "${REPO_URL}/releases" | grep -o '"tag_name":"v[^"]*' | head -1 | cut -d'"' -f4 | cut -c2-)
if [ -z "$VERSION" ]; then
warn "Could not fetch latest version, using 'main' branch"
VERSION="main"
else
info "Latest version: v${VERSION}"
fi
}
# Check if running as root # Check if running as root
check_root() { if [ "$EUID" -ne 0 ]; then
if [ "$EUID" -ne 0 ]; then echo "Please run as root (sudo bash install.sh or pipe to sudo bash)"
error "This script must be run as root (use sudo)" exit 1
fi fi
# Helper function to detect OS and architecture
detect_platform() {
local os=$(uname -s)
local arch=$(uname -m)
# Map OS
case "$os" in
Linux)
os_name="linux"
;;
Darwin)
os_name="macos"
;;
MINGW*|MSYS*|CYGWIN*)
os_name="windows"
;;
*)
echo "Error: Unsupported operating system: $os"
echo "Supported: Linux, macOS, Windows"
exit 1
;;
esac
# Map architecture
case "$arch" in
x86_64|amd64)
arch_name="x64"
;;
aarch64|arm64)
arch_name="arm64"
;;
*)
echo "Error: Unsupported architecture: $arch"
echo "Supported: x86_64/amd64 (x64), aarch64/arm64 (arm64)"
exit 1
;;
esac
# Construct binary name
if [ "$os_name" = "windows" ]; then
echo "onebox-${os_name}-${arch_name}.exe"
else
echo "onebox-${os_name}-${arch_name}"
fi
} }
# Get latest release version from Gitea API
get_latest_version() {
echo "Fetching latest release version from Gitea..." >&2
local api_url="${GITEA_BASE_URL}/api/v1/repos/${GITEA_REPO}/releases/latest"
local response=$(curl -sSL "$api_url" 2>/dev/null)
if [ $? -ne 0 ] || [ -z "$response" ]; then
echo "Error: Failed to fetch latest release information from Gitea API" >&2
echo "URL: $api_url" >&2
exit 1
fi
# Extract tag_name from JSON response
local version=$(echo "$response" | grep -o '"tag_name":"[^"]*"' | cut -d'"' -f4)
if [ -z "$version" ]; then
echo "Error: Could not determine latest version from API response" >&2
exit 1
fi
echo "$version"
}
# Main installation process
echo "================================================"
echo " Onebox Installation Script"
echo "================================================"
echo ""
# Detect platform
BINARY_NAME=$(detect_platform)
echo "Detected platform: $BINARY_NAME"
echo ""
# Determine version to install
if [ -n "$SPECIFIED_VERSION" ]; then
VERSION="$SPECIFIED_VERSION"
echo "Installing specified version: $VERSION"
else
VERSION=$(get_latest_version)
echo "Installing latest version: $VERSION"
fi
echo ""
# Construct download URL
DOWNLOAD_URL="${GITEA_BASE_URL}/${GITEA_REPO}/releases/download/${VERSION}/${BINARY_NAME}"
echo "Download URL: $DOWNLOAD_URL"
echo ""
# Check if service is running and stop it
SERVICE_WAS_RUNNING=0
if systemctl is-enabled --quiet "$SERVICE_NAME" 2>/dev/null || systemctl is-active --quiet "$SERVICE_NAME" 2>/dev/null; then
SERVICE_WAS_RUNNING=1
if systemctl is-active --quiet "$SERVICE_NAME" 2>/dev/null; then
echo "Stopping Onebox service..."
systemctl stop "$SERVICE_NAME"
fi
fi
# Clean installation directory - ensure only binary exists
if [ -d "$INSTALL_DIR" ]; then
echo "Cleaning installation directory: $INSTALL_DIR"
rm -rf "$INSTALL_DIR"
fi
# Create fresh installation directory
echo "Creating installation directory: $INSTALL_DIR"
mkdir -p "$INSTALL_DIR"
# Download binary # Download binary
download_binary() { echo "Downloading Onebox binary..."
info "Downloading Onebox ${VERSION} for ${PLATFORM}-${ARCH}..." TEMP_FILE="$INSTALL_DIR/onebox.download"
curl -sSL "$DOWNLOAD_URL" -o "$TEMP_FILE"
# Create temp directory if [ $? -ne 0 ]; then
TMP_DIR=$(mktemp -d) echo "Error: Failed to download binary from $DOWNLOAD_URL"
TMP_FILE="${TMP_DIR}/${BINARY_NAME}" echo ""
echo "Please check:"
echo " 1. Your internet connection"
echo " 2. The specified version exists: ${GITEA_BASE_URL}/${GITEA_REPO}/releases"
echo " 3. The platform binary is available for this release"
rm -f "$TEMP_FILE"
exit 1
fi
# Try release download first # Check if download was successful (file exists and not empty)
if [ "$VERSION" != "main" ]; then if [ ! -s "$TEMP_FILE" ]; then
DOWNLOAD_URL="${REPO_URL}/releases/download/v${VERSION}/${BINARY_NAME}" echo "Error: Downloaded file is empty or does not exist"
else rm -f "$TEMP_FILE"
DOWNLOAD_URL="${REPO_URL}/raw/branch/main/dist/binaries/${BINARY_NAME}" exit 1
fi fi
if ! curl -L -f -o "$TMP_FILE" "$DOWNLOAD_URL"; then # Move to final location
error "Failed to download binary from $DOWNLOAD_URL" BINARY_PATH="$INSTALL_DIR/onebox"
fi mv "$TEMP_FILE" "$BINARY_PATH"
# Verify download if [ $? -ne 0 ] || [ ! -f "$BINARY_PATH" ]; then
if [ ! -f "$TMP_FILE" ] || [ ! -s "$TMP_FILE" ]; then echo "Error: Failed to move binary to $BINARY_PATH"
error "Downloaded file is empty or missing" rm -f "$TEMP_FILE" 2>/dev/null
fi exit 1
fi
info "✓ Download complete" # Make executable
} chmod +x "$BINARY_PATH"
# Install binary if [ $? -ne 0 ]; then
install_binary() { echo "Error: Failed to make binary executable"
info "Installing Onebox to ${INSTALL_DIR}..." exit 1
fi
# Create install directory echo "Binary installed successfully to: $BINARY_PATH"
mkdir -p "$INSTALL_DIR" echo ""
# Copy binary # Check if /usr/local/bin is in PATH
cp "$TMP_FILE" "${INSTALL_DIR}/onebox" if [[ ":$PATH:" == *":/usr/local/bin:"* ]]; then
chmod +x "${INSTALL_DIR}/onebox" BIN_DIR="/usr/local/bin"
else
BIN_DIR="/usr/bin"
fi
# Create symlink # Create symlink for global access
ln -sf "${INSTALL_DIR}/onebox" "$BIN_LINK" ln -sf "$BINARY_PATH" "$BIN_DIR/onebox"
echo "Symlink created: $BIN_DIR/onebox -> $BINARY_PATH"
echo ""
# Cleanup temp files # Create data directories
rm -rf "$TMP_DIR" mkdir -p /var/lib/onebox
mkdir -p /var/www/certbot
info "✓ Installation complete" # Re-enable and restart service if it was previously running (refreshes unit file)
} if [ $SERVICE_WAS_RUNNING -eq 1 ]; then
echo "Refreshing systemd service..."
onebox systemd enable
echo "Restarting Onebox service..."
systemctl restart "$SERVICE_NAME"
echo "Service restarted successfully."
echo ""
fi
# Initialize database and config echo "================================================"
initialize() { echo " Onebox Installation Complete!"
info "Initializing Onebox..." echo "================================================"
echo ""
echo "Installation details:"
echo " Binary location: $BINARY_PATH"
echo " Symlink location: $BIN_DIR/onebox"
echo " Version: $VERSION"
echo ""
# Create data directory # Check if database exists (indicates existing installation)
mkdir -p /var/lib/onebox if [ -f "/var/lib/onebox/onebox.db" ]; then
echo "Data directory: /var/lib/onebox (preserved)"
# Create certbot directory for ACME challenges echo ""
mkdir -p /var/www/certbot echo "Your existing data has been preserved."
if [ $SERVICE_WAS_RUNNING -eq 1 ]; then
info "✓ Initialization complete" echo "The service has been restarted with your current settings."
} else
echo "Start the service with: onebox systemd start"
# Print success message fi
print_success() { else
echo "" echo "Get started:"
info "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" echo ""
info " Onebox installed successfully!" echo " onebox --version"
info "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" echo " onebox --help"
echo "" echo ""
echo "Next steps:" echo " 1. Configure Cloudflare (optional):"
echo "" echo " onebox config set cloudflareAPIKey <key>"
echo "1. Configure Cloudflare (optional):" echo " onebox config set cloudflareEmail <email>"
echo " onebox config set cloudflareAPIKey <key>" echo " onebox config set cloudflareZoneID <zone-id>"
echo " onebox config set cloudflareEmail <email>" echo " onebox config set serverIP <your-server-ip>"
echo " onebox config set cloudflareZoneID <zone-id>" echo ""
echo " onebox config set serverIP <your-server-ip>" echo " 2. Configure ACME email:"
echo "" echo " onebox config set acmeEmail <your@email.com>"
echo "2. Configure ACME email:" echo ""
echo " onebox config set acmeEmail <your@email.com>" echo " 3. Enable systemd service:"
echo "" echo " onebox systemd enable"
echo "3. Install daemon:" echo ""
echo " onebox daemon install" echo " 4. Start service:"
echo "" echo " onebox systemd start"
echo "4. Start daemon:" echo ""
echo " onebox daemon start" echo " 5. Deploy your first service:"
echo "" echo " onebox service add myapp --image nginx:latest --domain app.example.com"
echo "5. Deploy your first service:" echo ""
echo " onebox service add myapp --image nginx:latest --domain app.example.com" echo " Web UI: http://localhost:3000"
echo "" echo " Default credentials: admin / admin"
echo "Web UI: http://localhost:3000" fi
echo "Default credentials: admin / admin" echo ""
echo ""
}
# Main installation flow
main() {
info "Onebox Installer"
echo ""
check_root
detect_platform
get_latest_version
download_binary
install_binary
initialize
print_success
}
# Run main function
main

View File

@@ -1,6 +1,6 @@
 {
   "name": "@serve.zone/onebox",
-  "version": "1.8.0",
+  "version": "1.24.1",
   "description": "Self-hosted container platform with automatic SSL and DNS - a mini Heroku for single servers",
   "main": "mod.ts",
   "type": "module",
@@ -9,7 +9,9 @@
   },
   "scripts": {
     "postinstall": "node scripts/install-binary.js",
-    "watch": "concurrently --kill-others --names \"BACKEND,UI\" --prefix-colors \"cyan,magenta\" \"deno run --allow-all --unstable-ffi --watch mod.ts server --ephemeral --monitor\" \"cd ui && pnpm run watch\""
+    "watch": "tswatch",
+    "build": "tsbundle",
+    "bundle": "tsbundle"
   },
   "keywords": [
     "docker",
@@ -51,8 +53,20 @@
     "arm64"
   ],
   "packageManager": "pnpm@10.18.1+sha512.77a884a165cbba2d8d1c19e3b4880eee6d2fcabd0d879121e282196b80042351d5eb3ca0935fa599da1dc51265cc68816ad2bddd2a2de5ea9fdf92adbec7cd34",
-  "dependencies": {},
+  "dependencies": {
+    "@api.global/typedrequest-interfaces": "^3.0.19",
+    "@api.global/typedsocket": "^4.1.2",
+    "@design.estate/dees-catalog": "^3.43.3",
+    "@design.estate/dees-element": "^2.1.6",
+    "@serve.zone/catalog": "^2.9.0"
+  },
   "devDependencies": {
-    "concurrently": "^9.1.2"
+    "@git.zone/tsbundle": "^2.9.0",
+    "@git.zone/tsdeno": "^1.2.0",
+    "@git.zone/tswatch": "^3.2.0"
+  },
+  "private": true,
+  "pnpm": {
+    "overrides": {}
   }
 }

5182
pnpm-lock.yaml generated

File diff suppressed because it is too large

View File

@@ -3,6 +3,7 @@
## SSL Certificate Storage (November 2025)
SSL certificates are now stored directly in the SQLite database as PEM content instead of file paths:
- `ISslCertificate` and `ICertificate` interfaces use `certPem`, `keyPem`, `fullchainPem` properties
- Database migration 8 converted the `certificates` table schema
- No filesystem storage for certificates - everything in DB
@@ -16,6 +17,7 @@ SSL certificates are now stored directly in the SQLite database as PEM content i
The database layer has been refactored into a repository pattern:
**Directory Structure:**
```
ts/database/
├── index.ts # Main OneboxDatabase class (composes repositories, handles migrations)
@@ -32,10 +34,12 @@ ts/database/
```
**Import paths:**
- Main: `import { OneboxDatabase } from './database/index.ts'`
- Legacy (deprecated): `import { OneboxDatabase } from './classes/database.ts'` (re-exports from new location)
**API Compatibility:**
- The `OneboxDatabase` class maintains the same public API
- All methods delegate to the appropriate repository
- No breaking changes for existing code
@@ -49,6 +53,7 @@ Migration 8 converted certificate storage from file paths to PEM content.
The reverse proxy uses **Caddy** running as a Docker Swarm service for production-grade reverse proxying with native SNI support, HTTP/2, HTTP/3, and WebSocket handling.
**Architecture:**
- Caddy runs as Docker Swarm service (`onebox-caddy`) on the overlay network
- No binary download required - uses `caddy:2-alpine` Docker image
- Configuration pushed dynamically via Caddy Admin API (port 2019)
@@ -57,10 +62,12 @@ The reverse proxy uses **Caddy** running as a Docker Swarm service for productio
- Services reached by Docker service name (e.g., `onebox-hello-world:80`)
**Key files:**
- `ts/classes/caddy.ts` - CaddyManager class for Docker service and Admin API
- `ts/classes/reverseproxy.ts` - Delegates to CaddyManager
**Certificate workflow:**
1. `CertRequirementManager` creates requirements for domains
2. Daemon processes requirements via `certmanager.ts`
3. Certificates stored in database (PEM content)
@@ -68,16 +75,19 @@ The reverse proxy uses **Caddy** running as a Docker Swarm service for productio
5. Caddy serves TLS with the loaded certificates (no volume mounts needed)
**Docker Service Configuration:**
- Service name: `onebox-caddy`
- Image: `caddy:2-alpine`
- Network: `onebox-network` (overlay, attachable)
- Startup: Writes initial config with `admin.listen: 0.0.0.0:2019` for host access
**Port Mapping:**
- Dev mode: HTTP on 8080, HTTPS on 8443, Admin on 2019
- Production: HTTP on 80, HTTPS on 443, Admin on 2019
- All ports use `PublishMode: 'host'` for direct binding
**Log Receiver:**
- Caddy sends access logs to `tcp/172.17.0.1:9999` (Docker bridge gateway)
- `CaddyLogReceiver` on host receives and processes logs

137
readme.md
View File

@@ -22,6 +22,7 @@ For reporting bugs, issues, or security vulnerabilities, please visit [community
## Features ✨
### Core Platform
- 🐳 **Docker Swarm Management** - Deploy, scale, and orchestrate services with Swarm mode
- 🌐 **Caddy Reverse Proxy** - Production-grade proxy running as Docker service with SNI, HTTP/2, HTTP/3
- 🔒 **Automatic SSL Certificates** - Let's Encrypt integration with hot-reload and renewal monitoring
@@ -30,6 +31,7 @@ For reporting bugs, issues, or security vulnerabilities, please visit [community
- 🔄 **Real-time WebSocket Updates** - Live service status, logs, and system events
### Monitoring & Management
- 📊 **Metrics Collection** - Historical CPU, memory, and network stats (every 60s)
- 📝 **Centralized Logging** - Container logs with streaming and retention policies
- 🎨 **Angular Web UI** - Modern, responsive interface with real-time updates
@@ -37,6 +39,7 @@ For reporting bugs, issues, or security vulnerabilities, please visit [community
- 💾 **SQLite Database** - Embedded, zero-configuration storage
### Developer Experience
- 🚀 **Auto-update on Push** - Push to registry and services update automatically
- 🔐 **Private Registry Support** - Use Docker Hub, Gitea, or custom registries
- 🔄 **Systemd Integration** - Run as a daemon with auto-restart
@@ -47,10 +50,11 @@ For reporting bugs, issues, or security vulnerabilities, please visit [community
### Installation
```bash
-# Download the latest release for your platform
-curl -sSL https://code.foss.global/serve.zone/onebox/releases/latest/download/onebox-linux-x64 -o onebox
-chmod +x onebox
-sudo mv onebox /usr/local/bin/
+# One-line install (recommended)
+curl -sSL https://code.foss.global/serve.zone/onebox/raw/branch/main/install.sh | sudo bash
+
+# Install a specific version
+curl -sSL https://code.foss.global/serve.zone/onebox/raw/branch/main/install.sh | sudo bash -s -- --version v1.11.0

# Or install from npm
pnpm install -g @serve.zone/onebox
@@ -74,6 +78,7 @@ onebox service add myapp \
Open `http://localhost:3000` in your browser.
**Default credentials:**
- Username: `admin`
- Password: `admin`
@@ -129,15 +134,15 @@ Onebox is built with modern technologies for performance and developer experienc
### Core Components
| Component | Description |
| ----------------------- | --------------------------------------------------------------------- |
| **Deno Runtime** | Modern TypeScript with built-in security |
| **Caddy Reverse Proxy** | Docker Swarm service with HTTP/2, HTTP/3, SNI, and WebSocket support |
| **Docker Swarm** | Container orchestration (all workloads run as services) |
| **SQLite Database** | Configuration, metrics, and user data |
| **WebSocket Server** | Real-time bidirectional communication |
| **Let's Encrypt** | Automatic SSL certificate management |
| **Cloudflare API** | DNS record automation |
## CLI Reference 📖
@@ -242,6 +247,13 @@ onebox config set cloudflareZoneID your-zone-id
onebox status
```
### Upgrade
```bash
# Upgrade to the latest version (requires root)
sudo onebox upgrade
```
## Configuration 🔧
### System Requirements
@@ -254,11 +266,11 @@ onebox status
### Data Locations
| Data | Location |
| -------------------- | ------------------------------ |
| **Database** | `./onebox.db` (or custom path) |
| **SSL Certificates** | Managed by CertManager |
| **Registry Data** | `./.nogit/registry-data` |
### Environment Variables
@@ -347,62 +359,69 @@ onebox/
The HTTP server exposes a comprehensive REST API:
#### Authentication
| Method | Endpoint | Description |
| ------ | ----------------- | ----------------------------------- |
| `POST` | `/api/auth/login` | User authentication (returns token) |
#### Services
| Method | Endpoint | Description |
| -------- | --------------------------------- | ------------------------- |
| `GET` | `/api/services` | List all services |
| `POST` | `/api/services` | Create/deploy service |
| `GET` | `/api/services/:name` | Get service details |
| `PUT` | `/api/services/:name` | Update service |
| `DELETE` | `/api/services/:name` | Delete service |
| `POST` | `/api/services/:name/start` | Start service |
| `POST` | `/api/services/:name/stop` | Stop service |
| `POST` | `/api/services/:name/restart` | Restart service |
| `GET` | `/api/services/:name/logs` | Get service logs |
| `WS` | `/api/services/:name/logs/stream` | Stream logs via WebSocket |
#### SSL Certificates
| Method | Endpoint | Description |
| ------ | ------------------------ | ----------------------- |
| `GET` | `/api/ssl/list` | List all certificates |
| `GET` | `/api/ssl/:domain` | Get certificate details |
| `POST` | `/api/ssl/obtain` | Request new certificate |
| `POST` | `/api/ssl/:domain/renew` | Force renew certificate |
#### Domains
| Method | Endpoint | Description |
| ------ | ---------------------- | ---------------------------- |
| `GET` | `/api/domains` | List all domains |
| `GET` | `/api/domains/:domain` | Get domain details |
| `POST` | `/api/domains/sync` | Sync domains from Cloudflare |
#### DNS Records
| Method | Endpoint | Description |
| -------- | ------------------ | ------------------------ |
| `GET` | `/api/dns` | List DNS records |
| `POST` | `/api/dns` | Create DNS record |
| `DELETE` | `/api/dns/:domain` | Delete DNS record |
| `POST` | `/api/dns/sync` | Sync DNS from Cloudflare |
#### Registry
| Method | Endpoint | Description |
| -------- | ----------------------------- | ----------------------------- |
| `GET` | `/api/registry/tags/:service` | Get registry tags for service |
| `GET` | `/api/registry/tokens` | List registry tokens |
| `POST` | `/api/registry/tokens` | Create registry token |
| `DELETE` | `/api/registry/tokens/:id` | Delete registry token |
#### System
| Method | Endpoint | Description |
| ------ | --------------- | ------------------------------- |
| `GET` | `/api/status` | System status |
| `GET` | `/api/settings` | Get settings |
| `PUT` | `/api/settings` | Update settings |
| `WS` | `/api/ws` | WebSocket for real-time updates |
### WebSocket Messages

View File

@@ -1,56 +0,0 @@
#!/bin/bash
#
# Compile Onebox for all platforms
#
set -e
VERSION=$(grep '"version"' deno.json | cut -d'"' -f4)
echo "Compiling Onebox v${VERSION} for all platforms..."
# Create dist directory
mkdir -p dist/binaries
# Compile for each platform
echo "Compiling for Linux x64..."
deno compile --allow-all --no-check \
--output "dist/binaries/onebox-linux-x64" \
--target x86_64-unknown-linux-gnu \
mod.ts
echo "Compiling for Linux ARM64..."
deno compile --allow-all --no-check \
--output "dist/binaries/onebox-linux-arm64" \
--target aarch64-unknown-linux-gnu \
mod.ts
echo "Compiling for macOS x64..."
deno compile --allow-all --no-check \
--output "dist/binaries/onebox-macos-x64" \
--target x86_64-apple-darwin \
mod.ts
echo "Compiling for macOS ARM64..."
deno compile --allow-all --no-check \
--output "dist/binaries/onebox-macos-arm64" \
--target aarch64-apple-darwin \
mod.ts
echo "Compiling for Windows x64..."
deno compile --allow-all --no-check \
--output "dist/binaries/onebox-windows-x64.exe" \
--target x86_64-pc-windows-msvc \
mod.ts
echo ""
echo "✓ Compilation complete!"
echo ""
echo "Binaries:"
ls -lh dist/binaries/
echo ""
echo "Next steps:"
echo "1. Test binaries on their respective platforms"
echo "2. Create git tag: git tag v${VERSION}"
echo "3. Push tag: git push origin v${VERSION}"
echo "4. Upload binaries to Gitea release"
echo "5. Publish to npm: pnpm publish"

View File

@@ -3,6 +3,6 @@
 */
 export const commitinfo = {
   name: '@serve.zone/onebox',
-  version: '1.8.0',
+  version: '1.24.1',
   description: 'Self-hosted container platform with automatic SSL and DNS - a mini Heroku for single servers'
 }

View File

@@ -0,0 +1,73 @@
/**
* App Store type definitions
*/
export interface ICatalog {
schemaVersion: number;
updatedAt: string;
apps: ICatalogApp[];
}
export interface ICatalogApp {
id: string;
name: string;
description: string;
category: string;
iconName?: string;
iconUrl?: string;
latestVersion: string;
tags?: string[];
}
export interface IAppMeta {
id: string;
name: string;
description: string;
category: string;
iconName?: string;
latestVersion: string;
versions: string[];
maintainer?: string;
links?: Record<string, string>;
}
export interface IAppVersionConfig {
image: string;
port: number;
envVars?: Array<{ key: string; value: string; description: string; required?: boolean }>;
volumes?: string[];
platformRequirements?: {
mongodb?: boolean;
s3?: boolean;
clickhouse?: boolean;
redis?: boolean;
mariadb?: boolean;
};
minOneboxVersion?: string;
}
export interface IMigrationContext {
service: {
name: string;
image: string;
envVars: Record<string, string>;
port: number;
};
fromVersion: string;
toVersion: string;
}
export interface IMigrationResult {
success: boolean;
envVars?: Record<string, string>;
image?: string;
warnings: string[];
}
export interface IUpgradeableService {
serviceName: string;
appTemplateId: string;
currentVersion: string;
latestVersion: string;
hasMigration: boolean;
}

335
ts/classes/appstore.ts Normal file
View File

@@ -0,0 +1,335 @@
/**
* App Store Manager
* Fetches, caches, and serves app templates from the remote appstore-apptemplates repo.
* The remote repo is the single source of truth — no fallback catalog.
*/
import type {
ICatalog,
ICatalogApp,
IAppMeta,
IAppVersionConfig,
IMigrationContext,
IMigrationResult,
IUpgradeableService,
} from './appstore-types.ts';
import { logger } from '../logging.ts';
import { getErrorMessage } from '../utils/error.ts';
import type { Onebox } from './onebox.ts';
import type { IService } from '../types.ts';
export class AppStoreManager {
private oneboxRef: Onebox;
private catalogCache: ICatalog | null = null;
private lastFetchTime = 0;
private readonly repoBaseUrl = 'https://code.foss.global/serve.zone/appstore-apptemplates/raw/branch/main';
private readonly cacheTtlMs = 5 * 60 * 1000; // 5 minutes
constructor(oneboxRef: Onebox) {
this.oneboxRef = oneboxRef;
}
async init(): Promise<void> {
try {
await this.getCatalog();
logger.info(`App Store initialized with ${this.catalogCache?.apps.length || 0} templates`);
} catch (error) {
logger.warn(`App Store initialization failed: ${getErrorMessage(error)}`);
logger.warn('App Store will retry on next request');
}
}
/**
* Get the catalog (cached, refreshes after TTL)
*/
async getCatalog(): Promise<ICatalog> {
const now = Date.now();
if (this.catalogCache && (now - this.lastFetchTime) < this.cacheTtlMs) {
return this.catalogCache;
}
try {
const catalog = await this.fetchJson('catalog.json') as ICatalog;
if (catalog && catalog.apps && Array.isArray(catalog.apps)) {
this.catalogCache = catalog;
this.lastFetchTime = now;
return catalog;
}
throw new Error('Invalid catalog format');
} catch (error) {
logger.warn(`Failed to fetch remote catalog: ${getErrorMessage(error)}`);
// Return cached if available, otherwise return empty catalog
if (this.catalogCache) {
return this.catalogCache;
}
return { schemaVersion: 1, updatedAt: '', apps: [] };
}
}
/**
* Get the catalog apps list (convenience method for the API)
*/
async getApps(): Promise<ICatalogApp[]> {
const catalog = await this.getCatalog();
return catalog.apps;
}
/**
* Fetch app metadata (versions list, etc.)
*/
async getAppMeta(appId: string): Promise<IAppMeta> {
try {
return await this.fetchJson(`apps/${appId}/app.json`) as IAppMeta;
} catch (error) {
throw new Error(`Failed to fetch metadata for app '${appId}': ${getErrorMessage(error)}`);
}
}
/**
* Fetch full config for an app version
*/
async getAppVersionConfig(appId: string, version: string): Promise<IAppVersionConfig> {
try {
return await this.fetchJson(`apps/${appId}/versions/${version}/config.json`) as IAppVersionConfig;
} catch (error) {
throw new Error(`Failed to fetch config for ${appId}@${version}: ${getErrorMessage(error)}`);
}
}
/**
* Compare deployed services against catalog to find those with available upgrades
*/
async getUpgradeableServices(): Promise<IUpgradeableService[]> {
const catalog = await this.getCatalog();
const services = this.oneboxRef.database.getAllServices();
const upgradeable: IUpgradeableService[] = [];
for (const service of services) {
if (!service.appTemplateId || !service.appTemplateVersion) continue;
const catalogApp = catalog.apps.find(a => a.id === service.appTemplateId);
if (!catalogApp) continue;
if (catalogApp.latestVersion !== service.appTemplateVersion) {
// Check if a migration script exists
const hasMigration = await this.hasMigrationScript(
service.appTemplateId,
service.appTemplateVersion,
catalogApp.latestVersion,
);
upgradeable.push({
serviceName: service.name,
appTemplateId: service.appTemplateId,
currentVersion: service.appTemplateVersion,
latestVersion: catalogApp.latestVersion,
hasMigration,
});
}
}
return upgradeable;
}
/**
* Check if a migration script exists for a specific version transition
*/
async hasMigrationScript(appId: string, fromVersion: string, toVersion: string): Promise<boolean> {
try {
const scriptPath = `apps/${appId}/versions/${toVersion}/migrate-from-${fromVersion}.ts`;
await this.fetchText(scriptPath);
return true;
} catch {
return false;
}
}
/**
* Execute a migration in a sandboxed Deno child process
*/
async executeMigration(service: IService, fromVersion: string, toVersion: string): Promise<IMigrationResult> {
const appId = service.appTemplateId;
if (!appId) {
throw new Error('Service has no appTemplateId');
}
// Fetch the migration script
const scriptPath = `apps/${appId}/versions/${toVersion}/migrate-from-${fromVersion}.ts`;
let scriptContent: string;
try {
scriptContent = await this.fetchText(scriptPath);
} catch {
// No migration script — do a simple config-based upgrade
logger.info(`No migration script for ${appId} ${fromVersion} -> ${toVersion}, using config-only upgrade`);
const config = await this.getAppVersionConfig(appId, toVersion);
return {
success: true,
image: config.image,
envVars: undefined, // Keep existing env vars
warnings: [],
};
}
// Write to temp file
const tempFile = `/tmp/onebox-migration-${crypto.randomUUID()}.ts`;
await Deno.writeTextFile(tempFile, scriptContent);
try {
// Prepare context
const context: IMigrationContext = {
service: {
name: service.name,
image: service.image,
envVars: service.envVars,
port: service.port,
},
fromVersion,
toVersion,
};
// Execute in sandboxed Deno child process
const cmd = new Deno.Command('deno', {
args: ['run', '--allow-env', '--allow-net=none', '--allow-read=none', '--allow-write=none', tempFile],
stdin: 'piped',
stdout: 'piped',
stderr: 'piped',
});
const child = cmd.spawn();
// Write context to stdin
const writer = child.stdin.getWriter();
await writer.write(new TextEncoder().encode(JSON.stringify(context)));
await writer.close();
// Read result
const output = await child.output();
const exitCode = output.code;
const stdout = new TextDecoder().decode(output.stdout);
const stderr = new TextDecoder().decode(output.stderr);
if (exitCode !== 0) {
logger.error(`Migration script failed (exit ${exitCode}): ${stderr.substring(0, 500)}`);
return {
success: false,
warnings: [`Migration script failed: ${stderr.substring(0, 200)}`],
};
}
// Parse result from stdout
try {
const result = JSON.parse(stdout) as IMigrationResult;
result.success = true;
return result;
} catch {
logger.error(`Failed to parse migration output: ${stdout.substring(0, 200)}`);
return {
success: false,
warnings: ['Migration script produced invalid output'],
};
}
} finally {
// Cleanup temp file
try {
await Deno.remove(tempFile);
} catch {
// Ignore cleanup errors
}
}
}
/**
* Apply an upgrade: update image, env vars, recreate container
*/
async applyUpgrade(
serviceName: string,
migrationResult: IMigrationResult,
newVersion: string,
): Promise<IService> {
const service = this.oneboxRef.database.getServiceByName(serviceName);
if (!service) {
throw new Error(`Service not found: ${serviceName}`);
}
// Stop the existing container
if (service.containerID && service.status === 'running') {
await this.oneboxRef.services.stopService(serviceName);
}
// Update service record
const updates: Partial<IService> = {
appTemplateVersion: newVersion,
};
if (migrationResult.image) {
updates.image = migrationResult.image;
}
if (migrationResult.envVars) {
// Merge: migration result provides base, user overrides preserved
const mergedEnvVars = { ...migrationResult.envVars };
// Keep any user-set env vars that aren't in the migration result
for (const [key, value] of Object.entries(service.envVars)) {
if (!(key in mergedEnvVars)) {
mergedEnvVars[key] = value;
}
}
updates.envVars = mergedEnvVars;
}
this.oneboxRef.database.updateService(service.id!, updates);
// Pull new image if changed
const newImage = migrationResult.image || service.image;
if (migrationResult.image && migrationResult.image !== service.image) {
await this.oneboxRef.docker.pullImage(newImage);
}
// Recreate and start container
const updatedService = this.oneboxRef.database.getServiceByName(serviceName)!;
// Remove old container
if (service.containerID) {
try {
await this.oneboxRef.docker.removeContainer(service.containerID, true);
} catch {
// Container might already be gone
}
}
// Create new container
const containerID = await this.oneboxRef.docker.createContainer(updatedService);
this.oneboxRef.database.updateService(service.id!, { containerID, status: 'starting' });
// Start container
await this.oneboxRef.docker.startContainer(containerID);
this.oneboxRef.database.updateService(service.id!, { status: 'running' });
logger.success(`Service '${serviceName}' upgraded to template version ${newVersion}`);
return this.oneboxRef.database.getServiceByName(serviceName)!;
}
/**
* Fetch JSON from the remote repo
*/
private async fetchJson(path: string): Promise<unknown> {
const url = `${this.repoBaseUrl}/${path}`;
const response = await fetch(url);
if (!response.ok) {
throw new Error(`HTTP ${response.status} for ${url}`);
}
return response.json();
}
/**
* Fetch text from the remote repo
*/
private async fetchText(path: string): Promise<string> {
const url = `${this.repoBaseUrl}/${path}`;
const response = await fetch(url);
if (!response.ok) {
throw new Error(`HTTP ${response.status} for ${url}`);
}
return response.text();
}
}
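
Reading executeMigration() above, the contract for a migration script is: receive an IMigrationContext as JSON on stdin, print an IMigrationResult as JSON to stdout, and exit 0 on success. A hypothetical migrate-from-1.0.0.ts following that contract might look like this; the env var rename and target image are invented for illustration:

// Hypothetical migration script: stdin carries IMigrationContext as JSON,
// stdout must carry IMigrationResult as JSON. It runs under the restrictive
// permission flags shown in executeMigration() above.
const context = JSON.parse(await new Response(Deno.stdin.readable).text());

// Illustrative transformation: rename an env var between template versions.
const envVars: Record<string, string> = { ...context.service.envVars };
if (envVars.OLD_DB_URL && !envVars.DATABASE_URL) {
  envVars.DATABASE_URL = envVars.OLD_DB_URL;
  delete envVars.OLD_DB_URL;
}

console.log(JSON.stringify({
  success: true, // overwritten by executeMigration() on a clean exit anyway
  envVars,
  image: 'example/app:2.0.0', // hypothetical image for the target version
  warnings: [],
}));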

File diff suppressed because it is too large

View File

@@ -59,6 +59,15 @@ export class BackupScheduler {
       await this.registerTask(schedule);
     }

+    // Add periodic archive prune task (runs daily at 3 AM)
+    const pruneTask = new plugins.taskbuffer.Task({
+      name: 'backup-archive-prune',
+      taskFunction: async () => {
+        await this.pruneArchive();
+      },
+    });
+    this.taskManager.addAndScheduleTask(pruneTask, '0 3 * * *');
+
     // Start the task manager (activates cron scheduling)
     await this.taskManager.start();

@@ -436,9 +445,11 @@
       if (!toKeep.has(backup.id!)) {
         try {
           await this.oneboxRef.backupManager.deleteBackup(backup.id!);
-          logger.info(`Deleted backup ${backup.filename} (retention policy)`);
+          const backupRef = backup.snapshotId || backup.filename;
+          logger.info(`Deleted backup ${backupRef} (retention policy)`);
         } catch (error) {
-          logger.warn(`Failed to delete old backup ${backup.filename}: ${getErrorMessage(error)}`);
+          const backupRef = backup.snapshotId || backup.filename;
+          logger.warn(`Failed to delete old backup ${backupRef}: ${getErrorMessage(error)}`);
         }
       }
     }

@@ -647,4 +658,48 @@
   private getRetentionDescription(retention: IRetentionPolicy): string {
     return `H:${retention.hourly} D:${retention.daily} W:${retention.weekly} M:${retention.monthly}`;
   }
+
+  /**
+   * Prune the containerarchive repository to reclaim storage.
+   * Uses the most generous retention policy across all schedules.
+   */
+  private async pruneArchive(): Promise<void> {
+    const archive = this.oneboxRef.backupManager.archive;
+    if (!archive) return;
+
+    try {
+      // Compute the most generous retention across all schedules
+      const schedules = this.oneboxRef.database.getAllBackupSchedules();
+
+      // Default minimums if no schedules exist
+      let maxDays = 7;
+      let maxWeeks = 4;
+      let maxMonths = 12;
+      for (const schedule of schedules) {
+        if (schedule.retention.daily > maxDays) maxDays = schedule.retention.daily;
+        if (schedule.retention.weekly > maxWeeks) maxWeeks = schedule.retention.weekly;
+        if (schedule.retention.monthly > maxMonths) maxMonths = schedule.retention.monthly;
+      }
+
+      const result = await archive.prune(
+        {
+          keepDays: maxDays,
+          keepWeeks: maxWeeks,
+          keepMonths: maxMonths,
+        },
+        false, // not dry run
+      );
+
+      if (result.removedSnapshots > 0 || result.freedBytes > 0) {
+        const freedMB = Math.round(result.freedBytes / (1024 * 1024) * 10) / 10;
+        logger.info(
+          `Archive prune: removed ${result.removedSnapshots} snapshot(s), ` +
+          `${result.removedPacks} pack(s), freed ${freedMB} MB`
+        );
+      }
+    } catch (error) {
+      logger.warn(`Archive prune failed: ${getErrorMessage(error)}`);
+    }
+  }
 }
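
The prune call above takes a dry-run flag as its second argument, so the same retention math can be previewed without deleting anything. A minimal sketch, assuming access to an initialized Onebox instance (here called onebox) whose backup archive is available:

// Preview what pruneArchive() would reclaim, using its default retention floor.
const archive = onebox.backupManager.archive;
if (archive) {
  const preview = await archive.prune(
    { keepDays: 7, keepWeeks: 4, keepMonths: 12 },
    true, // dry run: report only, remove nothing
  );
  console.log(
    `Would remove ${preview.removedSnapshots} snapshot(s) and ` +
    `free ${(preview.freedBytes / (1024 * 1024)).toFixed(1)} MB`,
  );
}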

View File

@@ -4,9 +4,7 @@
  * Handles background monitoring, metrics collection, and automatic tasks
  */
-import * as plugins from '../plugins.ts';
 import { logger } from '../logging.ts';
-import { projectInfo } from '../info.ts';
 import { getErrorMessage } from '../utils/error.ts';
 import type { Onebox } from './onebox.ts';

@@ -18,7 +16,6 @@ const FALLBACK_PID_FILE = `${FALLBACK_PID_DIR}/onebox.pid`;
 export class OneboxDaemon {
   private oneboxRef: Onebox;
-  private smartdaemon: plugins.smartdaemon.SmartDaemon | null = null;
   private running = false;
   private monitoringInterval: number | null = null;
   private statsInterval: number | null = null;

@@ -46,68 +43,6 @@ export class OneboxDaemon {
     }
   }

-  /**
-   * Install systemd service
-   */
-  async installService(): Promise<void> {
-    try {
-      logger.info('Installing Onebox daemon service...');
-
-      // Initialize smartdaemon if needed
-      if (!this.smartdaemon) {
-        this.smartdaemon = new plugins.smartdaemon.SmartDaemon();
-      }
-
-      // Get installation directory
-      const execPath = Deno.execPath();
-
-      const service = await this.smartdaemon.addService({
-        name: 'onebox',
-        version: projectInfo.version,
-        command: `${execPath} run --allow-all ${Deno.cwd()}/mod.ts daemon start`,
-        description: 'Onebox - Self-hosted container platform',
-        workingDir: Deno.cwd(),
-      });
-
-      await service.save();
-      await service.enable();
-
-      logger.success('Onebox daemon service installed');
-      logger.info('Start with: sudo systemctl start smartdaemon_onebox');
-    } catch (error) {
-      logger.error(`Failed to install daemon service: ${getErrorMessage(error)}`);
-      throw error;
-    }
-  }
-
-  /**
-   * Uninstall systemd service
-   */
-  async uninstallService(): Promise<void> {
-    try {
-      logger.info('Uninstalling Onebox daemon service...');
-
-      // Initialize smartdaemon if needed
-      if (!this.smartdaemon) {
-        this.smartdaemon = new plugins.smartdaemon.SmartDaemon();
-      }
-
-      const services = await this.smartdaemon.systemdManager.getServices();
-      const service = services.find(s => s.name === 'onebox');
-      if (service) {
-        await service.stop();
-        await service.disable();
-        await service.delete();
-      }
-
-      logger.success('Onebox daemon service uninstalled');
-    } catch (error) {
-      logger.error(`Failed to uninstall daemon service: ${getErrorMessage(error)}`);
-      throw error;
-    }
-  }
-
   /**
    * Start daemon mode (background monitoring)
    */

@@ -131,9 +66,9 @@
     // Start monitoring loop
     this.startMonitoring();

-    // Start HTTP server
+    // Start OpsServer (serves new UI + TypedRequest API)
     const httpPort = parseInt(this.oneboxRef.database.getSetting('httpPort') || '3000', 10);
-    await this.oneboxRef.httpServer.start(httpPort);
+    await this.oneboxRef.opsServer.start(httpPort);

     logger.success('Onebox daemon started');
     logger.info(`Web UI available at http://localhost:${httpPort}`);

@@ -163,8 +98,8 @@
     // Stop monitoring
     this.stopMonitoring();

-    // Stop HTTP server
-    await this.oneboxRef.httpServer.stop();
+    // Stop OpsServer
+    await this.oneboxRef.opsServer.stop();

     // Remove PID file
     await this.removePidFile();

@@ -280,31 +215,12 @@
   }

   /**
-   * Broadcast stats to WebSocket clients (real-time updates)
+   * Broadcast stats (placeholder for future WebSocket integration via OpsServer)
    */
   private async broadcastStats(): Promise<void> {
-    try {
-      const services = this.oneboxRef.services.listServices();
-      const runningServices = services.filter(s => s.status === 'running' && s.containerID);
-      logger.info(`Broadcasting stats for ${runningServices.length} running services`);
-
-      for (const service of runningServices) {
-        try {
-          const stats = await this.oneboxRef.docker.getContainerStats(service.containerID!);
-          if (stats) {
-            logger.info(`Broadcasting stats for ${service.name}: CPU=${stats.cpuPercent.toFixed(1)}%, Mem=${Math.round(stats.memoryUsed / 1024 / 1024)}MB`);
-            this.oneboxRef.httpServer.broadcastStatsUpdate(service.name, stats);
-          } else {
-            logger.warn(`No stats returned for ${service.name} (containerID: ${service.containerID})`);
-          }
-        } catch (error) {
-          logger.warn(`Stats collection failed for ${service.name}: ${getErrorMessage(error)}`);
-        }
-      }
-    } catch (error) {
-      logger.error(`Broadcast stats error: ${getErrorMessage(error)}`);
-    }
+    // Stats broadcasting via WebSocket is not yet implemented in OpsServer.
+    // Metrics are still collected and stored in the DB by collectMetrics().
+    // The new UI fetches stats via TypedRequests on demand.
   }

   /**

@@ -501,36 +417,7 @@
   static async ensureNoDaemon(): Promise<void> {
     const running = await OneboxDaemon.isDaemonRunning();
     if (running) {
-      throw new Error('Daemon is already running. Please stop it first with: onebox daemon stop');
+      throw new Error('Daemon is already running. Please stop it first with: onebox systemd stop');
     }
   }
-
-  /**
-   * Get service status from systemd
-   */
-  async getServiceStatus(): Promise<string> {
-    try {
-      // Don't need smartdaemon to check status, just use systemctl directly
-      const command = new Deno.Command('systemctl', {
-        args: ['status', 'smartdaemon_onebox'],
-        stdout: 'piped',
-        stderr: 'piped',
-      });
-      const { code, stdout } = await command.output();
-      const output = new TextDecoder().decode(stdout);
-
-      if (code === 0 || output.includes('active (running)')) {
-        return 'running';
-      } else if (output.includes('inactive') || output.includes('dead')) {
-        return 'stopped';
-      } else if (output.includes('failed')) {
-        return 'failed';
-      } else {
-        return 'unknown';
-      }
-    } catch (error) {
-      return 'not-installed';
-    }
-  }
 }

View File

@@ -596,18 +596,26 @@ export class OneboxDockerManager {
   async getContainerStats(containerID: string): Promise<IContainerStats | null> {
     try {
       // Try to get container directly first
-      let container = await this.dockerClient!.getContainerById(containerID);
+      let container: any = null;
+      try {
+        container = await this.dockerClient!.getContainerById(containerID);
+      } catch {
+        // Container not found by ID — might be a Swarm service ID
+      }

       // If not found, it might be a service ID - try to get the actual container ID
       if (!container) {
         const serviceContainerId = await this.getContainerIdForService(containerID);
         if (serviceContainerId) {
-          container = await this.dockerClient!.getContainerById(serviceContainerId);
+          try {
+            container = await this.dockerClient!.getContainerById(serviceContainerId);
+          } catch {
+            // Service container also not found
+          }
         }
       }

       if (!container) {
-        // Container/service not found
         return null;
       }

@@ -849,7 +857,23 @@
     cmd: string[]
   ): Promise<{ stdout: string; stderr: string; exitCode: number }> {
     try {
-      const container = await this.dockerClient!.getContainerById(containerID);
+      let container: any = null;
+      try {
+        container = await this.dockerClient!.getContainerById(containerID);
+      } catch {
+        // Not a direct container ID — try Swarm service lookup
+      }
+
+      if (!container) {
+        const serviceContainerId = await this.getContainerIdForService(containerID);
+        if (serviceContainerId) {
+          try {
+            container = await this.dockerClient!.getContainerById(serviceContainerId);
+          } catch {
+            // Service container also not found
+          }
+        }
+      }

       if (!container) {
         throw new Error(`Container not found: ${containerID}`);

@@ -881,12 +905,12 @@
       ]);

       const execInfo = await inspect();
-      const exitCode = execInfo.ExitCode || 0;
+      const exitCode = execInfo.ExitCode ?? -1;

       return { stdout, stderr, exitCode };
     } catch (error) {
       logger.error(`Failed to exec in container ${containerID}: ${getErrorMessage(error)}`);
-      throw error;
+      return { stdout: '', stderr: getErrorMessage(error), exitCode: -1 };
     }
   }

@@ -1011,7 +1035,23 @@
     callback: (line: string, isError: boolean) => void
   ): Promise<void> {
     try {
-      const container = await this.dockerClient!.getContainerById(containerID);
+      let container: any = null;
+      try {
+        container = await this.dockerClient!.getContainerById(containerID);
+      } catch {
+        // Not a direct container ID — try Swarm service lookup
+      }
+
+      if (!container) {
+        const serviceContainerId = await this.getContainerIdForService(containerID);
+        if (serviceContainerId) {
+          try {
+            container = await this.dockerClient!.getContainerById(serviceContainerId);
+          } catch {
+            // Service container also not found
+          }
+        }
+      }

       if (!container) {
         throw new Error(`Container not found: ${containerID}`);

View File

@@ -347,6 +347,8 @@ export class OneboxHttpServer {
         return await this.handleDeleteBackupRequest(backupId);
       } else if (path === '/api/backups/restore' && method === 'POST') {
         return await this.handleRestoreBackupRequest(req);
+      } else if (path === '/api/backups/import' && method === 'POST') {
+        return await this.handleImportBackupRequest(req);
       } else if (path === '/api/settings/backup-password' && method === 'POST') {
         return await this.handleSetBackupPasswordRequest(req);
       } else if (path === '/api/settings/backup-password' && method === 'GET') {
@@ -2159,27 +2161,47 @@
    */
   private async handleDownloadBackupRequest(backupId: number): Promise<Response> {
     try {
-      const filePath = this.oneboxRef.backupManager.getBackupFilePath(backupId);
-      if (!filePath) {
+      const backup = this.oneboxRef.database.getBackupById(backupId);
+      if (!backup) {
         return this.jsonResponse({ success: false, error: 'Backup not found' }, 404);
       }

+      let downloadPath: string | null = null;
+      let tempExport = false;
+
+      if (backup.snapshotId) {
+        // ContainerArchive backup: export as encrypted tar
+        downloadPath = await this.oneboxRef.backupManager.getBackupExportPath(backupId);
+        tempExport = true;
+      } else {
+        // Legacy file-based backup
+        downloadPath = this.oneboxRef.backupManager.getBackupFilePath(backupId);
+      }
+
+      if (!downloadPath) {
+        return this.jsonResponse({ success: false, error: 'Backup file not available' }, 404);
+      }
+
       // Check if file exists
       try {
-        await Deno.stat(filePath);
+        await Deno.stat(downloadPath);
       } catch {
         return this.jsonResponse({ success: false, error: 'Backup file not found on disk' }, 404);
       }

-      // Read file and return as download
-      const backup = this.oneboxRef.database.getBackupById(backupId);
-      const file = await Deno.readFile(filePath);
+      const file = await Deno.readFile(downloadPath);
+      const filename = backup.filename || `${backup.serviceName}-${backup.createdAt}.tar.enc`;
+
+      // Clean up temp export file
+      if (tempExport) {
+        try { await Deno.remove(downloadPath); } catch { /* ignore */ }
+      }

       return new Response(file, {
         status: 200,
         headers: {
           'Content-Type': 'application/octet-stream',
-          'Content-Disposition': `attachment; filename="${backup?.filename || 'backup.tar.enc'}"`,
+          'Content-Disposition': `attachment; filename="${filename}"`,
           'Content-Length': String(file.length),
         },
       });

@@ -2239,12 +2261,6 @@
       }, 400);
     }

-    // Get backup file path
-    const filePath = this.oneboxRef.backupManager.getBackupFilePath(backupId);
-    if (!filePath) {
-      return this.jsonResponse({ success: false, error: 'Backup not found' }, 404);
-    }
-
     // Validate mode-specific requirements
     if ((mode === 'import' || mode === 'clone') && !newServiceName) {
       return this.jsonResponse({

@@ -2253,7 +2269,7 @@
       }, 400);
     }

-    const result = await this.oneboxRef.backupManager.restoreBackup(filePath, {
+    const result = await this.oneboxRef.backupManager.restoreBackup(backupId, {
       mode,
       newServiceName,
       overwriteExisting: overwriteExisting === true,

@@ -2278,6 +2294,118 @@
     }
   }
/**
* Import a backup from file upload or URL
*/
private async handleImportBackupRequest(req: Request): Promise<Response> {
try {
const contentType = req.headers.get('content-type') || '';
let filePath: string | null = null;
let newServiceName: string | undefined;
let tempFile = false;
if (contentType.includes('multipart/form-data')) {
// Handle file upload
const formData = await req.formData();
const file = formData.get('file');
newServiceName = formData.get('newServiceName')?.toString() || undefined;
if (!file || !(file instanceof File)) {
return this.jsonResponse({
success: false,
error: 'No file provided',
}, 400);
}
// Validate file extension
if (!file.name.endsWith('.tar.enc')) {
return this.jsonResponse({
success: false,
error: 'Invalid file format. Expected .tar.enc file',
}, 400);
}
// Save to temp location
const tempDir = './.nogit/temp-imports';
await Deno.mkdir(tempDir, { recursive: true });
filePath = `${tempDir}/${Date.now()}-${file.name}`;
tempFile = true;
const arrayBuffer = await file.arrayBuffer();
await Deno.writeFile(filePath, new Uint8Array(arrayBuffer));
logger.info(`Saved uploaded backup to ${filePath}`);
} else {
// Handle JSON body with URL
const body = await req.json();
const { url, newServiceName: serviceName } = body;
newServiceName = serviceName;
if (!url) {
return this.jsonResponse({
success: false,
error: 'URL is required when not uploading a file',
}, 400);
}
// Download from URL
const tempDir = './.nogit/temp-imports';
await Deno.mkdir(tempDir, { recursive: true });
const urlFilename = url.split('/').pop() || 'backup.tar.enc';
filePath = `${tempDir}/${Date.now()}-${urlFilename}`;
tempFile = true;
logger.info(`Downloading backup from ${url}...`);
const response = await fetch(url);
if (!response.ok) {
return this.jsonResponse({
success: false,
error: `Failed to download from URL: ${response.statusText}`,
}, 400);
}
const arrayBuffer = await response.arrayBuffer();
await Deno.writeFile(filePath, new Uint8Array(arrayBuffer));
logger.info(`Downloaded backup to ${filePath}`);
}
// Import using restoreBackup with mode='import'
const result = await this.oneboxRef.backupManager.restoreBackup(filePath, {
mode: 'import',
newServiceName,
overwriteExisting: false,
skipPlatformData: false,
});
// Clean up temp file
if (tempFile && filePath) {
try {
await Deno.remove(filePath);
} catch {
// Ignore cleanup errors
}
}
return this.jsonResponse({
success: true,
message: `Backup imported successfully as service '${result.service.name}'`,
data: {
service: result.service,
platformResourcesRestored: result.platformResourcesRestored,
warnings: result.warnings,
},
});
} catch (error) {
logger.error(`Failed to import backup: ${getErrorMessage(error)}`);
return this.jsonResponse({
success: false,
error: getErrorMessage(error) || 'Failed to import backup',
}, 500);
}
}
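
The new endpoint accepts either a multipart upload (field name file, plus an optional newServiceName) or a JSON body with a url. A minimal client sketch for the URL variant; the host, backup URL, and service name are placeholders:

// POST /api/backups/import with a JSON body (URL variant).
const res = await fetch('http://localhost:3000/api/backups/import', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    url: 'https://example.com/backups/myservice.tar.enc',
    newServiceName: 'myservice-restored',
  }),
});
const payload = await res.json();
if (!payload.success) throw new Error(payload.error);
console.log(payload.message); // e.g. "Backup imported successfully as service 'myservice-restored'"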
   /**
    * Set backup encryption password
    */

View File

@@ -14,14 +14,17 @@ import { OneboxReverseProxy } from './reverseproxy.ts';
 import { OneboxDnsManager } from './dns.ts';
 import { OneboxSslManager } from './ssl.ts';
 import { OneboxDaemon } from './daemon.ts';
+import { OneboxSystemd } from './systemd.ts';
 import { OneboxHttpServer } from './httpserver.ts';
 import { CloudflareDomainSync } from './cloudflare-sync.ts';
 import { CertRequirementManager } from './cert-requirement-manager.ts';
 import { RegistryManager } from './registry.ts';
 import { PlatformServicesManager } from './platform-services/index.ts';
+import { AppStoreManager } from './appstore.ts';
 import { CaddyLogReceiver } from './caddy-log-receiver.ts';
 import { BackupManager } from './backup-manager.ts';
 import { BackupScheduler } from './backup-scheduler.ts';
+import { OpsServer } from '../opsserver/index.ts';

 export class Onebox {
   public database: OneboxDatabase;

@@ -32,14 +35,17 @@ export class Onebox {
   public dns: OneboxDnsManager;
   public ssl: OneboxSslManager;
   public daemon: OneboxDaemon;
+  public systemd: OneboxSystemd;
   public httpServer: OneboxHttpServer;
   public cloudflareDomainSync: CloudflareDomainSync;
   public certRequirementManager: CertRequirementManager;
   public registry: RegistryManager;
   public platformServices: PlatformServicesManager;
+  public appStore: AppStoreManager;
   public caddyLogReceiver: CaddyLogReceiver;
   public backupManager: BackupManager;
   public backupScheduler: BackupScheduler;
+  public opsServer: OpsServer;

   private initialized = false;

@@ -55,6 +61,7 @@
     this.dns = new OneboxDnsManager(this);
     this.ssl = new OneboxSslManager(this);
     this.daemon = new OneboxDaemon(this);
+    this.systemd = new OneboxSystemd();
     this.httpServer = new OneboxHttpServer(this);
     this.registry = new RegistryManager({
       dataDir: './.nogit/registry-data',

@@ -69,6 +76,9 @@
     // Initialize platform services manager
     this.platformServices = new PlatformServicesManager(this);

+    // Initialize App Store manager
+    this.appStore = new AppStoreManager(this);
+
     // Initialize Caddy log receiver
     this.caddyLogReceiver = new CaddyLogReceiver(9999);

@@ -77,6 +87,9 @@
     // Initialize Backup scheduler
     this.backupScheduler = new BackupScheduler(this);
+
+    // Initialize OpsServer (TypedRequest-based server)
+    this.opsServer = new OpsServer(this);
   }

@@ -165,12 +178,28 @@
       logger.warn(`Error: ${getErrorMessage(error)}`);
     }

+    // Initialize App Store (non-critical)
+    try {
+      await this.appStore.init();
+    } catch (error) {
+      logger.warn('App Store initialization failed - app templates will be unavailable until reconnected');
+      logger.warn(`Error: ${getErrorMessage(error)}`);
+    }
+
     // Login to all registries
     await this.registries.loginToAllRegistries();

     // Start auto-update monitoring for registry services
     this.services.startAutoUpdateMonitoring();

+    // Initialize BackupManager (containerarchive repository, non-critical)
+    try {
+      await this.backupManager.init();
+    } catch (error) {
+      logger.warn('BackupManager initialization failed - backups will be limited');
+      logger.warn(`Error: ${getErrorMessage(error)}`);
+    }
+
     // Initialize Backup Scheduler (non-critical)
     try {
       await this.backupScheduler.init();

@@ -283,10 +312,65 @@
     // Sort expiring domains by days remaining (ascending)
     expiringDomains.sort((a, b) => a.daysRemaining - b.daysRemaining);

+    // Aggregate resource usage across all running service containers
+    let totalCpu = 0;
+    let totalMemoryUsed = 0;
+    let totalMemoryLimit = 0;
+    let totalNetworkIn = 0;
+    let totalNetworkOut = 0;
+
+    if (dockerRunning) {
+      const allServices = this.services.listServices();
+      const runningUserServices = allServices.filter((s) => s.status === 'running' && s.containerID);
+      logger.debug(`Resource stats: ${runningUserServices.length} running user services`);
+      const statsPromises = runningUserServices
+        .map((s) => {
+          logger.debug(`Fetching stats for user service: ${s.name} (${s.containerID})`);
+          return this.docker.getContainerStats(s.containerID!).catch((err) => {
+            logger.debug(`Stats failed for ${s.name}: ${(err as Error).message}`);
+            return null;
+          });
+        });
+
+      // Also get stats for platform service containers
+      const allPlatformServices = this.platformServices.getAllPlatformServices();
+      const runningPlatformServices = allPlatformServices.filter((s) => s.status === 'running' && s.containerId);
+      logger.debug(`Resource stats: ${runningPlatformServices.length} running platform services`);
+      const platformStatsPromises = runningPlatformServices
+        .map((s) => {
+          logger.debug(`Fetching stats for platform service: ${s.type} (${s.containerId})`);
+          return this.docker.getContainerStats(s.containerId!).catch((err) => {
+            logger.debug(`Stats failed for ${s.type}: ${(err as Error).message}`);
+            return null;
+          });
+        });
+
+      const allStats = await Promise.all([...statsPromises, ...platformStatsPromises]);
+      let successCount = 0;
+      for (const stats of allStats) {
+        if (stats) {
+          successCount++;
+          totalCpu += stats.cpuPercent;
+          totalMemoryUsed += stats.memoryUsed;
+          totalMemoryLimit = Math.max(totalMemoryLimit, stats.memoryLimit);
+          totalNetworkIn += stats.networkRx;
+          totalNetworkOut += stats.networkTx;
+        }
+      }
+      logger.debug(`Resource stats: ${successCount}/${allStats.length} containers returned stats. CPU: ${totalCpu}, Mem: ${totalMemoryUsed}`);
+    }
+
     return {
       docker: {
         running: dockerRunning,
         version: dockerRunning ? await this.docker.getDockerVersion() : null,
+        cpuUsage: Math.round(totalCpu * 10) / 10,
+        memoryUsage: totalMemoryUsed,
+        memoryTotal: totalMemoryLimit,
+        networkIn: totalNetworkIn,
+        networkOut: totalNetworkOut,
       },
       reverseProxy: proxyStatus,
       dns: {

@@ -316,31 +400,17 @@
   }

   /**
-   * Start daemon mode
-   */
-  async startDaemon(): Promise<void> {
-    await this.daemon.start();
-  }
-
-  /**
-   * Stop daemon mode
-   */
-  async stopDaemon(): Promise<void> {
-    await this.daemon.stop();
-  }
-
-  /**
-   * Start HTTP server
+   * Start OpsServer (TypedRequest-based, serves new UI)
    */
   async startHttpServer(port?: number): Promise<void> {
-    await this.httpServer.start(port);
+    await this.opsServer.start(port || 3000);
   }

   /**
-   * Stop HTTP server
+   * Stop OpsServer
    */
   async stopHttpServer(): Promise<void> {
-    await this.httpServer.stop();
+    await this.opsServer.stop();
   }

   /**

@@ -350,14 +420,17 @@
     try {
       logger.info('Shutting down Onebox...');

+      // Stop auto-update monitoring
+      this.services.stopAutoUpdateMonitoring();
+
       // Stop backup scheduler
       await this.backupScheduler.stop();

       // Stop daemon if running
       await this.daemon.stop();

-      // Stop HTTP server if running
-      await this.httpServer.stop();
+      // Stop OpsServer if running
+      await this.opsServer.stop();

       // Stop reverse proxy if running
       await this.reverseProxy.stop();

@@ -365,6 +438,9 @@
       // Stop Caddy log receiver
       await this.caddyLogReceiver.stop();

+      // Close backup archive
+      await this.backupManager.close();
+
       // Close database
       this.database.close();

View File

@@ -8,3 +8,6 @@ export type { IPlatformServiceProvider } from './providers/base.ts';
 export { BasePlatformServiceProvider } from './providers/base.ts';
 export { MongoDBProvider } from './providers/mongodb.ts';
 export { MinioProvider } from './providers/minio.ts';
+export { ClickHouseProvider } from './providers/clickhouse.ts';
+export { MariaDBProvider } from './providers/mariadb.ts';
+export { RedisProvider } from './providers/redis.ts';

View File

@@ -16,6 +16,8 @@ import { MongoDBProvider } from './providers/mongodb.ts';
 import { MinioProvider } from './providers/minio.ts';
 import { CaddyProvider } from './providers/caddy.ts';
 import { ClickHouseProvider } from './providers/clickhouse.ts';
+import { MariaDBProvider } from './providers/mariadb.ts';
+import { RedisProvider } from './providers/redis.ts';
 import { logger } from '../../logging.ts';
 import { getErrorMessage } from '../../utils/error.ts';
 import { credentialEncryption } from '../encryption.ts';

@@ -41,6 +43,8 @@ export class PlatformServicesManager {
     this.registerProvider(new MinioProvider(this.oneboxRef));
     this.registerProvider(new CaddyProvider(this.oneboxRef));
     this.registerProvider(new ClickHouseProvider(this.oneboxRef));
+    this.registerProvider(new MariaDBProvider(this.oneboxRef));
+    this.registerProvider(new RedisProvider(this.oneboxRef));

     logger.info(`Platform services manager initialized with ${this.providers.size} providers`);
   }

@@ -304,6 +308,60 @@ export class PlatformServicesManager {
       logger.success(`ClickHouse provisioned for service '${service.name}'`);
     }
// Provision Redis if requested
if (requirements.redis) {
logger.info(`Provisioning Redis for service '${service.name}'...`);
// Ensure Redis is running
const redisService = await this.ensureRunning('redis');
const provider = this.providers.get('redis')!;
// Provision cache resource
const result = await provider.provisionResource(service);
// Store resource record
const encryptedCreds = await credentialEncryption.encrypt(result.credentials);
this.oneboxRef.database.createPlatformResource({
platformServiceId: redisService.id!,
serviceId: service.id!,
resourceType: result.type,
resourceName: result.name,
credentialsEncrypted: encryptedCreds,
createdAt: Date.now(),
});
// Merge env vars
Object.assign(allEnvVars, result.envVars);
logger.success(`Redis provisioned for service '${service.name}'`);
}
// Provision MariaDB if requested
if (requirements.mariadb) {
logger.info(`Provisioning MariaDB for service '${service.name}'...`);
// Ensure MariaDB is running
const mariadbService = await this.ensureRunning('mariadb');
const provider = this.providers.get('mariadb')!;
// Provision database
const result = await provider.provisionResource(service);
// Store resource record
const encryptedCreds = await credentialEncryption.encrypt(result.credentials);
this.oneboxRef.database.createPlatformResource({
platformServiceId: mariadbService.id!,
serviceId: service.id!,
resourceType: result.type,
resourceName: result.name,
credentialsEncrypted: encryptedCreds,
createdAt: Date.now(),
});
// Merge env vars
Object.assign(allEnvVars, result.envVars);
logger.success(`MariaDB provisioned for service '${service.name}'`);
}
     return allEnvVars;
   }

View File

@@ -194,12 +194,6 @@ export class ClickHouseProvider extends BasePlatformServiceProvider {
     const adminCreds = await credentialEncryption.decrypt(platformService.adminCredentialsEncrypted);
     const containerName = this.getContainerName();

-    // Get container host port for connection from host (overlay network IPs not accessible from host)
-    const hostPort = await this.oneboxRef.docker.getContainerHostPort(platformService.containerId, 8123);
-    if (!hostPort) {
-      throw new Error('Could not get ClickHouse container host port');
-    }
-
     // Generate resource names and credentials
     const dbName = this.generateResourceName(userService.name);
     const username = this.generateResourceName(userService.name);

@@ -207,35 +201,16 @@ export class ClickHouseProvider extends BasePlatformServiceProvider {
     logger.info(`Provisioning ClickHouse database '${dbName}' for service '${userService.name}'...`);

-    // Connect to ClickHouse via localhost and the mapped host port
-    const baseUrl = `http://127.0.0.1:${hostPort}`;
-
-    // Create database
-    await this.executeQuery(
-      baseUrl,
-      adminCreds.username,
-      adminCreds.password,
-      `CREATE DATABASE IF NOT EXISTS ${dbName}`
-    );
-    logger.info(`Created ClickHouse database '${dbName}'`);
-
-    // Create user with access to this database
-    await this.executeQuery(
-      baseUrl,
-      adminCreds.username,
-      adminCreds.password,
-      `CREATE USER IF NOT EXISTS ${username} IDENTIFIED BY '${password}'`
-    );
-    logger.info(`Created ClickHouse user '${username}'`);
-
-    // Grant permissions on the database
-    await this.executeQuery(
-      baseUrl,
-      adminCreds.username,
-      adminCreds.password,
-      `GRANT ALL ON ${dbName}.* TO ${username}`
-    );
-    logger.info(`Granted permissions to user '${username}' on database '${dbName}'`);
+    // Use docker exec to provision inside the container (avoids host port mapping issues)
+    const queries = [
+      `CREATE DATABASE IF NOT EXISTS ${dbName}`,
+      `CREATE USER IF NOT EXISTS ${username} IDENTIFIED BY '${password}'`,
+      `GRANT ALL ON ${dbName}.* TO ${username}`,
+    ];
+    for (const query of queries) {
+      await this.execClickHouseQuery(platformService.containerId, adminCreds, query);
+    }

     logger.success(`ClickHouse database '${dbName}' provisioned with user '${username}'`);

@@ -274,37 +249,11 @@ export class ClickHouseProvider extends BasePlatformServiceProvider {
     const adminCreds = await credentialEncryption.decrypt(platformService.adminCredentialsEncrypted);

-    // Get container host port for connection from host (overlay network IPs not accessible from host)
-    const hostPort = await this.oneboxRef.docker.getContainerHostPort(platformService.containerId, 8123);
-    if (!hostPort) {
-      throw new Error('Could not get ClickHouse container host port');
-    }
-
     logger.info(`Deprovisioning ClickHouse database '${resource.resourceName}'...`);

-    const baseUrl = `http://127.0.0.1:${hostPort}`;
-
     try {
-      // Drop the user
-      try {
-        await this.executeQuery(
-          baseUrl,
-          adminCreds.username,
-          adminCreds.password,
-          `DROP USER IF EXISTS ${credentials.username}`
-        );
-        logger.info(`Dropped ClickHouse user '${credentials.username}'`);
-      } catch (e) {
-        logger.warn(`Could not drop ClickHouse user: ${getErrorMessage(e)}`);
-      }
-
-      // Drop the database
-      await this.executeQuery(
-        baseUrl,
-        adminCreds.username,
-        adminCreds.password,
-        `DROP DATABASE IF EXISTS ${resource.resourceName}`
-      );
+      await this.execClickHouseQuery(platformService.containerId, adminCreds, `DROP USER IF EXISTS ${credentials.username}`);
+      await this.execClickHouseQuery(platformService.containerId, adminCreds, `DROP DATABASE IF EXISTS ${resource.resourceName}`);
       logger.success(`ClickHouse database '${resource.resourceName}' dropped`);
     } catch (e) {
       logger.error(`Failed to deprovision ClickHouse database: ${getErrorMessage(e)}`);

@@ -313,26 +262,27 @@ export class ClickHouseProvider extends BasePlatformServiceProvider {
   }

   /**
-   * Execute a ClickHouse SQL query via HTTP interface
+   * Execute a ClickHouse SQL query via docker exec inside the container
    */
-  private async executeQuery(
-    baseUrl: string,
-    username: string,
-    password: string,
+  private async execClickHouseQuery(
+    containerId: string,
+    adminCreds: { username: string; password: string },
     query: string
   ): Promise<string> {
-    const url = `${baseUrl}/?user=${encodeURIComponent(username)}&password=${encodeURIComponent(password)}`;
-
-    const response = await fetch(url, {
-      method: 'POST',
-      body: query,
-    });
-
-    if (!response.ok) {
-      const errorText = await response.text();
-      throw new Error(`ClickHouse query failed: ${errorText}`);
+    const result = await this.oneboxRef.docker.execInContainer(
+      containerId,
+      [
+        'clickhouse-client',
+        '--user', adminCreds.username,
+        '--password', adminCreds.password,
+        '--query', query,
+      ]
+    );
+
+    if (result.exitCode !== 0) {
+      throw new Error(`ClickHouse query failed (exit ${result.exitCode}): ${result.stderr.substring(0, 200)}`);
     }
-
-    return await response.text();
+    return result.stdout;
   }
 }

View File

@@ -0,0 +1,279 @@
/**
* MariaDB Platform Service Provider
*/
import { BasePlatformServiceProvider } from './base.ts';
import type {
IService,
IPlatformResource,
IPlatformServiceConfig,
IProvisionedResource,
IEnvVarMapping,
TPlatformServiceType,
TPlatformResourceType,
} from '../../../types.ts';
import { logger } from '../../../logging.ts';
import { getErrorMessage } from '../../../utils/error.ts';
import { credentialEncryption } from '../../encryption.ts';
import type { Onebox } from '../../onebox.ts';
export class MariaDBProvider extends BasePlatformServiceProvider {
readonly type: TPlatformServiceType = 'mariadb';
readonly displayName = 'MariaDB';
readonly resourceTypes: TPlatformResourceType[] = ['database'];
constructor(oneboxRef: Onebox) {
super(oneboxRef);
}
getDefaultConfig(): IPlatformServiceConfig {
return {
image: 'mariadb:11',
port: 3306,
volumes: ['/var/lib/onebox/mariadb:/var/lib/mysql'],
environment: {
MARIADB_ROOT_PASSWORD: '',
// Password will be generated and stored encrypted
},
};
}
getEnvVarMappings(): IEnvVarMapping[] {
return [
{ envVar: 'MARIADB_HOST', credentialPath: 'host' },
{ envVar: 'MARIADB_PORT', credentialPath: 'port' },
{ envVar: 'MARIADB_DATABASE', credentialPath: 'database' },
{ envVar: 'MARIADB_USER', credentialPath: 'username' },
{ envVar: 'MARIADB_PASSWORD', credentialPath: 'password' },
{ envVar: 'MARIADB_URI', credentialPath: 'connectionString' },
];
}
async deployContainer(): Promise<string> {
const config = this.getDefaultConfig();
const containerName = this.getContainerName();
const dataDir = '/var/lib/onebox/mariadb';
logger.info(`Deploying MariaDB platform service as ${containerName}...`);
// Check if we have existing data and stored credentials
const platformService = this.oneboxRef.database.getPlatformServiceByType(this.type);
let adminCredentials: { username: string; password: string };
let dataExists = false;
// Check if data directory has existing MariaDB data
try {
const stat = await Deno.stat(`${dataDir}/ibdata1`);
dataExists = stat.isFile;
logger.info(`MariaDB data directory exists with ibdata1 file`);
} catch {
// ibdata1 file doesn't exist, this is a fresh install
dataExists = false;
}
if (dataExists && platformService?.adminCredentialsEncrypted) {
// Reuse existing credentials from database
logger.info('Reusing existing MariaDB credentials (data directory already initialized)');
adminCredentials = await credentialEncryption.decrypt(platformService.adminCredentialsEncrypted);
} else {
// Generate new credentials for fresh deployment
logger.info('Generating new MariaDB admin credentials');
adminCredentials = {
username: 'root',
password: credentialEncryption.generatePassword(32),
};
// If data exists but we don't have credentials, we need to wipe the data
if (dataExists) {
logger.warn('MariaDB data exists but no credentials in database - wiping data directory');
try {
await Deno.remove(dataDir, { recursive: true });
} catch (e) {
logger.error(`Failed to wipe MariaDB data directory: ${getErrorMessage(e)}`);
throw new Error('Cannot deploy MariaDB: data directory exists without credentials');
}
}
}
// Ensure data directory exists
try {
await Deno.mkdir(dataDir, { recursive: true });
} catch (e) {
// Directory might already exist
if (!(e instanceof Deno.errors.AlreadyExists)) {
logger.warn(`Could not create MariaDB data directory: ${getErrorMessage(e)}`);
}
}
// Create container using Docker API
const envVars = [
`MARIADB_ROOT_PASSWORD=${adminCredentials.password}`,
];
// Use Docker to create the container
const containerId = await this.oneboxRef.docker.createPlatformContainer({
name: containerName,
image: config.image,
port: config.port,
env: envVars,
volumes: config.volumes,
network: this.getNetworkName(),
});
// Store encrypted admin credentials (only update if new or changed)
const encryptedCreds = await credentialEncryption.encrypt(adminCredentials);
if (platformService) {
this.oneboxRef.database.updatePlatformService(platformService.id!, {
containerId,
adminCredentialsEncrypted: encryptedCreds,
status: 'starting',
});
}
logger.success(`MariaDB container created: ${containerId}`);
return containerId;
}
async stopContainer(containerId: string): Promise<void> {
logger.info(`Stopping MariaDB container ${containerId}...`);
await this.oneboxRef.docker.stopContainer(containerId);
logger.success('MariaDB container stopped');
}
async healthCheck(): Promise<boolean> {
try {
logger.info('MariaDB health check: starting...');
const platformService = this.oneboxRef.database.getPlatformServiceByType(this.type);
if (!platformService) {
logger.info('MariaDB health check: platform service not found in database');
return false;
}
if (!platformService.adminCredentialsEncrypted) {
logger.info('MariaDB health check: no admin credentials stored');
return false;
}
if (!platformService.containerId) {
logger.info('MariaDB health check: no container ID in database record');
return false;
}
logger.info(`MariaDB health check: using container ID ${platformService.containerId.substring(0, 12)}...`);
const adminCreds = await credentialEncryption.decrypt(platformService.adminCredentialsEncrypted);
// Use docker exec to run health check inside the container
const result = await this.oneboxRef.docker.execInContainer(
platformService.containerId,
['mariadb-admin', 'ping', '-u', 'root', `-p${adminCreds.password}`]
);
if (result.exitCode === 0) {
logger.info('MariaDB health check: success');
return true;
} else {
logger.info(`MariaDB health check failed: exit code ${result.exitCode}, stderr: ${result.stderr.substring(0, 200)}`);
return false;
}
} catch (error) {
logger.info(`MariaDB health check exception: ${getErrorMessage(error)}`);
return false;
}
}
async provisionResource(userService: IService): Promise<IProvisionedResource> {
const platformService = this.oneboxRef.database.getPlatformServiceByType(this.type);
if (!platformService || !platformService.adminCredentialsEncrypted || !platformService.containerId) {
throw new Error('MariaDB platform service not found or not configured');
}
const adminCreds = await credentialEncryption.decrypt(platformService.adminCredentialsEncrypted);
const containerName = this.getContainerName();
// Generate resource names and credentials
const dbName = this.generateResourceName(userService.name);
const username = this.generateResourceName(userService.name);
const password = credentialEncryption.generatePassword(32);
logger.info(`Provisioning MariaDB database '${dbName}' for service '${userService.name}'...`);
// Create database and user via mariadb inside the container
const sql = [
`CREATE DATABASE IF NOT EXISTS \`${dbName}\`;`,
`CREATE USER IF NOT EXISTS '${username}'@'%' IDENTIFIED BY '${password.replace(/'/g, "\\'")}';`,
`GRANT ALL PRIVILEGES ON \`${dbName}\`.* TO '${username}'@'%';`,
`FLUSH PRIVILEGES;`,
].join(' ');
const result = await this.oneboxRef.docker.execInContainer(
platformService.containerId,
[
'mariadb',
'-u', 'root',
`-p${adminCreds.password}`,
'-e', sql,
]
);
if (result.exitCode !== 0) {
throw new Error(`Failed to provision MariaDB database: exit code ${result.exitCode}, output: ${result.stdout.substring(0, 200)} ${result.stderr.substring(0, 200)}`);
}
logger.success(`MariaDB database '${dbName}' provisioned with user '${username}'`);
// Build the credentials and env vars
const credentials: Record<string, string> = {
host: containerName,
port: '3306',
database: dbName,
username,
password,
connectionString: `mysql://${username}:${password}@${containerName}:3306/${dbName}`,
};
// Map credentials to env vars
const envVars: Record<string, string> = {};
for (const mapping of this.getEnvVarMappings()) {
if (credentials[mapping.credentialPath]) {
envVars[mapping.envVar] = credentials[mapping.credentialPath];
}
}
return {
type: 'database',
name: dbName,
credentials,
envVars,
};
}
async deprovisionResource(resource: IPlatformResource, credentials: Record<string, string>): Promise<void> {
const platformService = this.oneboxRef.database.getPlatformServiceByType(this.type);
if (!platformService || !platformService.adminCredentialsEncrypted || !platformService.containerId) {
throw new Error('MariaDB platform service not found or not configured');
}
const adminCreds = await credentialEncryption.decrypt(platformService.adminCredentialsEncrypted);
logger.info(`Deprovisioning MariaDB database '${resource.resourceName}'...`);
const sql = [
`DROP USER IF EXISTS '${credentials.username}'@'%';`,
`DROP DATABASE IF EXISTS \`${resource.resourceName}\`;`,
].join(' ');
const result = await this.oneboxRef.docker.execInContainer(
platformService.containerId,
[
'mariadb',
'-u', 'root',
`-p${adminCreds.password}`,
'-e', sql,
]
);
if (result.exitCode !== 0) {
logger.warn(`MariaDB deprovision returned exit code ${result.exitCode}: ${result.stderr.substring(0, 200)}`);
}
logger.success(`MariaDB database '${resource.resourceName}' dropped`);
}
}
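
From the consuming service's point of view, the mappings in getEnvVarMappings() are what matter: after provisioning, the container sees MARIADB_HOST, MARIADB_PORT, MARIADB_DATABASE, MARIADB_USER, MARIADB_PASSWORD, and MARIADB_URI. A sketch of reading them inside a Deno-based user service; the connection step is left abstract since the client library is the service's choice:

// Inside the user service container: the provisioner injected these vars.
const uri = Deno.env.get('MARIADB_URI');
const host = Deno.env.get('MARIADB_HOST');
const database = Deno.env.get('MARIADB_DATABASE');

if (!uri || !host || !database) {
  throw new Error('MariaDB env vars missing: was platformRequirements.mariadb set?');
}

// Any MySQL-compatible client can consume the URI directly.
console.log(`Connecting to '${database}' on ${host} via ${uri.split('@')[1]}`);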

View File

@@ -196,84 +196,28 @@ export class MinioProvider extends BasePlatformServiceProvider {
    const adminCreds = await credentialEncryption.decrypt(platformService.adminCredentialsEncrypted);
    const containerName = this.getContainerName();
-    // Get container host port for connection from host (overlay network IPs not accessible from host)
-    const hostPort = await this.oneboxRef.docker.getContainerHostPort(platformService.containerId, 9000);
-    if (!hostPort) {
-      throw new Error('Could not get MinIO container host port');
-    }
-    // Generate bucket name and credentials
+    // Generate bucket name
    const bucketName = this.generateBucketName(userService.name);
-    const accessKey = credentialEncryption.generateAccessKey(20);
-    const secretKey = credentialEncryption.generateSecretKey(40);
    logger.info(`Provisioning MinIO bucket '${bucketName}' for service '${userService.name}'...`);
-    // Connect to MinIO via localhost and the mapped host port (for provisioning from host)
-    const provisioningEndpoint = `http://127.0.0.1:${hostPort}`;
-    // Import AWS S3 client
-    const { S3Client, CreateBucketCommand, PutBucketPolicyCommand } = await import('npm:@aws-sdk/client-s3@3');
-    // Create S3 client with admin credentials - connect via host port
-    const s3Client = new S3Client({
-      endpoint: provisioningEndpoint,
-      region: 'us-east-1',
-      credentials: {
-        accessKeyId: adminCreds.username,
-        secretAccessKey: adminCreds.password,
-      },
-      forcePathStyle: true,
-    });
-    // Create the bucket
-    try {
-      await s3Client.send(new CreateBucketCommand({
-        Bucket: bucketName,
-      }));
-      logger.info(`Created MinIO bucket '${bucketName}'`);
-    } catch (e: any) {
-      if (e.name !== 'BucketAlreadyOwnedByYou' && e.name !== 'BucketAlreadyExists') {
-        throw e;
-      }
-      logger.warn(`Bucket '${bucketName}' already exists`);
-    }
-    // Create service account/access key using MinIO Admin API
-    // MinIO Admin API requires mc client or direct API calls
-    // For simplicity, we'll use root credentials and bucket policy isolation
-    // In production, you'd use MinIO's Admin API to create service accounts
-    // Set bucket policy to allow access only with this bucket's credentials
-    const bucketPolicy = {
-      Version: '2012-10-17',
-      Statement: [
-        {
-          Effect: 'Allow',
-          Principal: { AWS: ['*'] },
-          Action: ['s3:GetObject', 's3:PutObject', 's3:DeleteObject', 's3:ListBucket'],
-          Resource: [
-            `arn:aws:s3:::${bucketName}`,
-            `arn:aws:s3:::${bucketName}/*`,
-          ],
-        },
-      ],
-    };
-    try {
-      await s3Client.send(new PutBucketPolicyCommand({
-        Bucket: bucketName,
-        Policy: JSON.stringify(bucketPolicy),
-      }));
-      logger.info(`Set bucket policy for '${bucketName}'`);
-    } catch (e) {
-      logger.warn(`Could not set bucket policy: ${getErrorMessage(e)}`);
-    }
-    // Note: For proper per-service credentials, MinIO Admin API should be used
-    // For now, we're providing the bucket with root access
-    // TODO: Implement MinIO service account creation
-    logger.warn('Using root credentials for MinIO access. Consider implementing service accounts for production.');
+    // Use docker exec with mc (MinIO Client) inside the container
+    // First configure mc alias for local server
+    await this.execMc(platformService.containerId, [
+      'alias', 'set', 'local', 'http://localhost:9000',
+      adminCreds.username, adminCreds.password,
+    ]);
+    // Create the bucket
+    await this.execMc(platformService.containerId, [
+      'mb', '--ignore-existing', `local/${bucketName}`,
+    ]);
+    logger.info(`Created MinIO bucket '${bucketName}'`);
+    // Disable anonymous access on the bucket (services on the same network use the root creds)
+    await this.execMc(platformService.containerId, [
+      'anonymous', 'set', 'none', `local/${bucketName}`,
+    ]);
    // Use container name for the endpoint in credentials (user services run in same network)
    const serviceEndpoint = `http://${containerName}:9000`;
@@ -281,7 +225,7 @@ export class MinioProvider extends BasePlatformServiceProvider {
    const credentials: Record<string, string> = {
      endpoint: serviceEndpoint,
      bucket: bucketName,
-      accessKey: adminCreds.username, // Using root for now
+      accessKey: adminCreds.username,
      secretKey: adminCreds.password,
      region: 'us-east-1',
    };
@@ -312,57 +256,37 @@ export class MinioProvider extends BasePlatformServiceProvider {
    const adminCreds = await credentialEncryption.decrypt(platformService.adminCredentialsEncrypted);
-    // Get container host port for connection from host (overlay network IPs not accessible from host)
-    const hostPort = await this.oneboxRef.docker.getContainerHostPort(platformService.containerId, 9000);
-    if (!hostPort) {
-      throw new Error('Could not get MinIO container host port');
-    }
    logger.info(`Deprovisioning MinIO bucket '${resource.resourceName}'...`);
-    const { S3Client, DeleteBucketCommand, ListObjectsV2Command, DeleteObjectsCommand } = await import('npm:@aws-sdk/client-s3@3');
-    const s3Client = new S3Client({
-      endpoint: `http://127.0.0.1:${hostPort}`,
-      region: 'us-east-1',
-      credentials: {
-        accessKeyId: adminCreds.username,
-        secretAccessKey: adminCreds.password,
-      },
-      forcePathStyle: true,
-    });
+    // Configure mc alias
+    await this.execMc(platformService.containerId, [
+      'alias', 'set', 'local', 'http://localhost:9000',
+      adminCreds.username, adminCreds.password,
+    ]);
    try {
-      // First, delete all objects in the bucket
-      let continuationToken: string | undefined;
-      do {
-        const listResponse = await s3Client.send(new ListObjectsV2Command({
-          Bucket: resource.resourceName,
-          ContinuationToken: continuationToken,
-        }));
-        if (listResponse.Contents && listResponse.Contents.length > 0) {
-          await s3Client.send(new DeleteObjectsCommand({
-            Bucket: resource.resourceName,
-            Delete: {
-              Objects: listResponse.Contents.map(obj => ({ Key: obj.Key! })),
-            },
-          }));
-          logger.info(`Deleted ${listResponse.Contents.length} objects from bucket`);
-        }
-        continuationToken = listResponse.IsTruncated ? listResponse.NextContinuationToken : undefined;
-      } while (continuationToken);
-      // Now delete the bucket
-      await s3Client.send(new DeleteBucketCommand({
-        Bucket: resource.resourceName,
-      }));
+      // Remove all objects and the bucket
+      await this.execMc(platformService.containerId, [
+        'rb', '--force', `local/${resource.resourceName}`,
+      ]);
      logger.success(`MinIO bucket '${resource.resourceName}' deleted`);
    } catch (e) {
      logger.error(`Failed to delete MinIO bucket: ${getErrorMessage(e)}`);
      throw e;
    }
  }
+  /**
+   * Execute mc (MinIO Client) command inside the container
+   */
+  private async execMc(
+    containerId: string,
+    args: string[],
+  ): Promise<{ stdout: string; stderr: string }> {
+    const result = await this.oneboxRef.docker.execInContainer(containerId, ['mc', ...args]);
+    if (result.exitCode !== 0) {
+      throw new Error(`mc command failed (exit ${result.exitCode}): ${result.stderr.substring(0, 200)}`);
+    }
+    return result;
+  }
}
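With per-service accounts gone, a consumer simply points any S3 client at the injected endpoint. A minimal sketch using the same npm:@aws-sdk/client-s3 package the removed code imported (the S3_* env var names are assumptions based on the credential keys; forcePathStyle stays mandatory for MinIO):

// Sketch of a user service writing to its provisioned bucket.
import { S3Client, PutObjectCommand } from 'npm:@aws-sdk/client-s3@3';

const s3 = new S3Client({
  endpoint: Deno.env.get('S3_ENDPOINT')!, // e.g. http://<containerName>:9000 (var names assumed)
  region: Deno.env.get('S3_REGION') ?? 'us-east-1',
  credentials: {
    accessKeyId: Deno.env.get('S3_ACCESS_KEY')!,
    secretAccessKey: Deno.env.get('S3_SECRET_KEY')!,
  },
  forcePathStyle: true, // MinIO addresses buckets by path, not by subdomain
});
await s3.send(new PutObjectCommand({
  Bucket: Deno.env.get('S3_BUCKET')!,
  Key: 'healthcheck.txt',
  Body: 'ok',
}));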

View File

@@ -28,7 +28,7 @@ export class MongoDBProvider extends BasePlatformServiceProvider {
  getDefaultConfig(): IPlatformServiceConfig {
    return {
-      image: 'mongo:7',
+      image: 'mongo:4.4',
      port: 27017,
      volumes: ['/var/lib/onebox/mongodb:/data/db'],
      environment: {
@@ -165,7 +165,7 @@ export class MongoDBProvider extends BasePlatformServiceProvider {
      // This avoids network issues with overlay networks
      const result = await this.oneboxRef.docker.execInContainer(
        platformService.containerId,
-        ['mongosh', '--eval', 'db.adminCommand("ping")', '--username', adminCreds.username, '--password', adminCreds.password, '--authenticationDatabase', 'admin', '--quiet']
+        ['mongo', '--eval', 'db.adminCommand("ping")', '--username', adminCreds.username, '--password', adminCreds.password, '--authenticationDatabase', 'admin', '--quiet']
      );
      if (result.exitCode === 0) {
@@ -190,12 +190,6 @@ export class MongoDBProvider extends BasePlatformServiceProvider {
    const adminCreds = await credentialEncryption.decrypt(platformService.adminCredentialsEncrypted);
    const containerName = this.getContainerName();
-    // Get container host port for connection from host (overlay network IPs not accessible from host)
-    const hostPort = await this.oneboxRef.docker.getContainerHostPort(platformService.containerId, 27017);
-    if (!hostPort) {
-      throw new Error('Could not get MongoDB container host port');
-    }
    // Generate resource names and credentials
    const dbName = this.generateResourceName(userService.name);
    const username = this.generateResourceName(userService.name);
@@ -203,32 +197,40 @@ export class MongoDBProvider extends BasePlatformServiceProvider {
    logger.info(`Provisioning MongoDB database '${dbName}' for service '${userService.name}'...`);
-    // Connect to MongoDB via localhost and the mapped host port
-    const { MongoClient } = await import('npm:mongodb@6');
-    const adminUri = `mongodb://${adminCreds.username}:${adminCreds.password}@127.0.0.1:${hostPort}/?authSource=admin`;
-    const client = new MongoClient(adminUri);
-    await client.connect();
-    try {
-      // Create the database by switching to it (MongoDB creates on first write)
-      const db = client.db(dbName);
-      // Create a collection to ensure the database exists
-      await db.createCollection('_onebox_init');
-      // Create user with readWrite access to this database
-      await db.command({
-        createUser: username,
-        pwd: password,
-        roles: [{ role: 'readWrite', db: dbName }],
-      });
-      logger.success(`MongoDB database '${dbName}' provisioned with user '${username}'`);
-    } finally {
-      await client.close();
-    }
+    // Use docker exec to provision inside the container (avoids host port mapping issues)
+    const escapedPassword = password.replace(/'/g, "'\\''");
+    const escapedAdminPassword = adminCreds.password.replace(/'/g, "'\\''");
+    // Create database and user via mongo inside the container
+    const mongoScript = `
+      db = db.getSiblingDB('${dbName}');
+      db.createCollection('_onebox_init');
+      db.createUser({
+        user: '${username}',
+        pwd: '${escapedPassword}',
+        roles: [{ role: 'readWrite', db: '${dbName}' }]
+      });
+      print('PROVISION_SUCCESS');
+    `;
+    const result = await this.oneboxRef.docker.execInContainer(
+      platformService.containerId,
+      [
+        'mongo',
+        '--username', adminCreds.username,
+        '--password', escapedAdminPassword,
+        '--authenticationDatabase', 'admin',
+        '--quiet',
+        '--eval', mongoScript,
+      ]
+    );
+    if (result.exitCode !== 0 || !result.stdout.includes('PROVISION_SUCCESS')) {
+      throw new Error(`Failed to provision MongoDB database: exit code ${result.exitCode}, output: ${result.stdout.substring(0, 200)} ${result.stderr.substring(0, 200)}`);
+    }
+    logger.success(`MongoDB database '${dbName}' provisioned with user '${username}'`);
    // Build the credentials and env vars
    const credentials: Record<string, string> = {
      host: containerName,
@@ -262,37 +264,33 @@ export class MongoDBProvider extends BasePlatformServiceProvider {
    }
    const adminCreds = await credentialEncryption.decrypt(platformService.adminCredentialsEncrypted);
+    const escapedAdminPassword = adminCreds.password.replace(/'/g, "'\\''");
-    // Get container host port for connection from host (overlay network IPs not accessible from host)
-    const hostPort = await this.oneboxRef.docker.getContainerHostPort(platformService.containerId, 27017);
-    if (!hostPort) {
-      throw new Error('Could not get MongoDB container host port');
-    }
    logger.info(`Deprovisioning MongoDB database '${resource.resourceName}'...`);
-    const { MongoClient } = await import('npm:mongodb@6');
-    const adminUri = `mongodb://${adminCreds.username}:${adminCreds.password}@127.0.0.1:${hostPort}/?authSource=admin`;
-    const client = new MongoClient(adminUri);
-    await client.connect();
-    try {
-      const db = client.db(resource.resourceName);
-      // Drop the user
-      try {
-        await db.command({ dropUser: credentials.username });
-        logger.info(`Dropped MongoDB user '${credentials.username}'`);
-      } catch (e) {
-        logger.warn(`Could not drop MongoDB user: ${getErrorMessage(e)}`);
-      }
-      // Drop the database
-      await db.dropDatabase();
-      logger.success(`MongoDB database '${resource.resourceName}' dropped`);
-    } finally {
-      await client.close();
-    }
+    const mongoScript = `
+      db = db.getSiblingDB('${resource.resourceName}');
+      try { db.dropUser('${credentials.username}'); } catch(e) { print('User drop failed: ' + e); }
+      db.dropDatabase();
+      print('DEPROVISION_SUCCESS');
+    `;
+    const result = await this.oneboxRef.docker.execInContainer(
+      platformService.containerId,
+      [
+        'mongo',
+        '--username', adminCreds.username,
+        '--password', escapedAdminPassword,
+        '--authenticationDatabase', 'admin',
+        '--quiet',
+        '--eval', mongoScript,
+      ]
    );
+    if (result.exitCode !== 0) {
+      logger.warn(`MongoDB deprovision returned exit code ${result.exitCode}: ${result.stderr.substring(0, 200)}`);
+    }
+    logger.success(`MongoDB database '${resource.resourceName}' dropped`);
  }
}
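On the consumer side the switch to docker exec is invisible; services still connect with the driver of their choice. A sketch with the official driver (MONGODB_URL is an assumed mapping name; the provider builds a connectionString credential analogous to the MariaDB one):

// Sketch of a user service connecting to its provisioned MongoDB database.
import { MongoClient } from 'npm:mongodb@6';

// Assumed env var name; getEnvVarMappings() defines the real one.
const uri = Deno.env.get('MONGODB_URL')!; // e.g. mongodb://user:pass@container:27017/db
const client = new MongoClient(uri);
await client.connect();
try {
  // db() without arguments uses the database named in the URI
  await client.db().command({ ping: 1 });
} finally {
  await client.close();
}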

View File

@@ -0,0 +1,283 @@
/**
* Redis Platform Service Provider
*/
import { BasePlatformServiceProvider } from './base.ts';
import type {
IService,
IPlatformResource,
IPlatformServiceConfig,
IProvisionedResource,
IEnvVarMapping,
TPlatformServiceType,
TPlatformResourceType,
} from '../../../types.ts';
import { logger } from '../../../logging.ts';
import { getErrorMessage } from '../../../utils/error.ts';
import { credentialEncryption } from '../../encryption.ts';
import type { Onebox } from '../../onebox.ts';
export class RedisProvider extends BasePlatformServiceProvider {
readonly type: TPlatformServiceType = 'redis';
readonly displayName = 'Redis';
readonly resourceTypes: TPlatformResourceType[] = ['cache'];
constructor(oneboxRef: Onebox) {
super(oneboxRef);
}
getDefaultConfig(): IPlatformServiceConfig {
return {
image: 'redis:7-alpine',
port: 6379,
volumes: ['/var/lib/onebox/redis:/data'],
environment: {},
};
}
getEnvVarMappings(): IEnvVarMapping[] {
return [
{ envVar: 'REDIS_HOST', credentialPath: 'host' },
{ envVar: 'REDIS_PORT', credentialPath: 'port' },
{ envVar: 'REDIS_PASSWORD', credentialPath: 'password' },
{ envVar: 'REDIS_DB', credentialPath: 'db' },
{ envVar: 'REDIS_URL', credentialPath: 'connectionString' },
];
}
async deployContainer(): Promise<string> {
const config = this.getDefaultConfig();
const containerName = this.getContainerName();
const dataDir = '/var/lib/onebox/redis';
logger.info(`Deploying Redis platform service as ${containerName}...`);
// Check if we have existing data and stored credentials
const platformService = this.oneboxRef.database.getPlatformServiceByType(this.type);
let adminCredentials: { username: string; password: string };
let dataExists = false;
// Check if data directory has existing Redis data
try {
const stat = await Deno.stat(`${dataDir}/dump.rdb`);
dataExists = stat.isFile;
logger.info(`Redis data directory exists with dump.rdb file`);
} catch {
// Also check for appendonly file
try {
const stat = await Deno.stat(`${dataDir}/appendonly.aof`);
dataExists = stat.isFile;
logger.info(`Redis data directory exists with appendonly.aof file`);
} catch {
dataExists = false;
}
}
if (dataExists && platformService?.adminCredentialsEncrypted) {
// Reuse existing credentials from database
logger.info('Reusing existing Redis credentials (data directory already initialized)');
adminCredentials = await credentialEncryption.decrypt(platformService.adminCredentialsEncrypted);
} else {
// Generate new credentials for fresh deployment
logger.info('Generating new Redis admin credentials');
adminCredentials = {
username: 'default',
password: credentialEncryption.generatePassword(32),
};
// If data exists but we don't have credentials, we need to wipe the data
if (dataExists) {
logger.warn('Redis data exists but no credentials in database - wiping data directory');
try {
await Deno.remove(dataDir, { recursive: true });
} catch (e) {
logger.error(`Failed to wipe Redis data directory: ${getErrorMessage(e)}`);
throw new Error('Cannot deploy Redis: data directory exists without credentials');
}
}
}
// Ensure data directory exists
try {
await Deno.mkdir(dataDir, { recursive: true });
} catch (e) {
// Directory might already exist
if (!(e instanceof Deno.errors.AlreadyExists)) {
logger.warn(`Could not create Redis data directory: ${getErrorMessage(e)}`);
}
}
// Redis uses command args for password, not env vars
const containerId = await this.oneboxRef.docker.createPlatformContainer({
name: containerName,
image: config.image,
port: config.port,
env: [],
volumes: config.volumes,
network: this.getNetworkName(),
command: ['redis-server', '--requirepass', adminCredentials.password, '--appendonly', 'yes'],
});
// Store encrypted admin credentials (only update if new or changed)
const encryptedCreds = await credentialEncryption.encrypt(adminCredentials);
if (platformService) {
this.oneboxRef.database.updatePlatformService(platformService.id!, {
containerId,
adminCredentialsEncrypted: encryptedCreds,
status: 'starting',
});
}
logger.success(`Redis container created: ${containerId}`);
return containerId;
}
async stopContainer(containerId: string): Promise<void> {
logger.info(`Stopping Redis container ${containerId}...`);
await this.oneboxRef.docker.stopContainer(containerId);
logger.success('Redis container stopped');
}
async healthCheck(): Promise<boolean> {
try {
logger.info('Redis health check: starting...');
const platformService = this.oneboxRef.database.getPlatformServiceByType(this.type);
if (!platformService) {
logger.info('Redis health check: platform service not found in database');
return false;
}
if (!platformService.adminCredentialsEncrypted) {
logger.info('Redis health check: no admin credentials stored');
return false;
}
if (!platformService.containerId) {
logger.info('Redis health check: no container ID in database record');
return false;
}
logger.info(`Redis health check: using container ID ${platformService.containerId.substring(0, 12)}...`);
const adminCreds = await credentialEncryption.decrypt(platformService.adminCredentialsEncrypted);
// Use docker exec to run health check inside the container
const result = await this.oneboxRef.docker.execInContainer(
platformService.containerId,
['redis-cli', '-a', adminCreds.password, 'ping']
);
if (result.exitCode === 0 && result.stdout.includes('PONG')) {
logger.info('Redis health check: success');
return true;
} else {
logger.info(`Redis health check failed: exit code ${result.exitCode}, stdout: ${result.stdout.substring(0, 200)}`);
return false;
}
} catch (error) {
logger.info(`Redis health check exception: ${getErrorMessage(error)}`);
return false;
}
}
async provisionResource(userService: IService): Promise<IProvisionedResource> {
const platformService = this.oneboxRef.database.getPlatformServiceByType(this.type);
if (!platformService || !platformService.adminCredentialsEncrypted) {
throw new Error('Redis platform service not found or not configured');
}
const adminCreds = await credentialEncryption.decrypt(platformService.adminCredentialsEncrypted);
const containerName = this.getContainerName();
// Determine the next available DB index (1-15, reserving 0 for admin)
const existingResources = this.oneboxRef.database.getPlatformResourcesByPlatformService(platformService.id!);
const usedIndexes = new Set<number>();
for (const resource of existingResources) {
try {
const creds = await credentialEncryption.decrypt(resource.credentialsEncrypted);
if (creds.db) {
usedIndexes.add(parseInt(creds.db, 10));
}
} catch {
// Skip resources with corrupt credentials
}
}
let dbIndex = -1;
for (let i = 1; i <= 15; i++) {
if (!usedIndexes.has(i)) {
dbIndex = i;
break;
}
}
if (dbIndex === -1) {
throw new Error('No available Redis database indexes (max 15 services per Redis instance)');
}
const resourceName = this.generateResourceName(userService.name);
logger.info(`Provisioning Redis database index ${dbIndex} for service '${userService.name}'...`);
// No server-side creation needed - Redis DB indexes exist implicitly
// Just verify connectivity
if (platformService.containerId) {
const result = await this.oneboxRef.docker.execInContainer(
platformService.containerId,
['redis-cli', '-a', adminCreds.password, '-n', String(dbIndex), 'ping']
);
if (result.exitCode !== 0 || !result.stdout.includes('PONG')) {
throw new Error(`Failed to verify Redis database ${dbIndex}: exit code ${result.exitCode}`);
}
}
logger.success(`Redis database index ${dbIndex} provisioned for service '${userService.name}'`);
// Build the credentials and env vars
const credentials: Record<string, string> = {
host: containerName,
port: '6379',
password: adminCreds.password,
db: String(dbIndex),
connectionString: `redis://:${adminCreds.password}@${containerName}:6379/${dbIndex}`,
};
// Map credentials to env vars
const envVars: Record<string, string> = {};
for (const mapping of this.getEnvVarMappings()) {
if (credentials[mapping.credentialPath]) {
envVars[mapping.envVar] = credentials[mapping.credentialPath];
}
}
return {
type: 'cache',
name: resourceName,
credentials,
envVars,
};
}
async deprovisionResource(resource: IPlatformResource, credentials: Record<string, string>): Promise<void> {
const platformService = this.oneboxRef.database.getPlatformServiceByType(this.type);
if (!platformService || !platformService.adminCredentialsEncrypted || !platformService.containerId) {
throw new Error('Redis platform service not found or not configured');
}
const adminCreds = await credentialEncryption.decrypt(platformService.adminCredentialsEncrypted);
const dbIndex = credentials.db || '0';
logger.info(`Deprovisioning Redis database index ${dbIndex} for resource '${resource.resourceName}'...`);
// Flush the specific database
const result = await this.oneboxRef.docker.execInContainer(
platformService.containerId,
['redis-cli', '-a', adminCreds.password, '-n', dbIndex, 'FLUSHDB']
);
if (result.exitCode !== 0) {
logger.warn(`Redis deprovision returned exit code ${result.exitCode}: ${result.stderr.substring(0, 200)}`);
}
logger.success(`Redis database index ${dbIndex} flushed for resource '${resource.resourceName}'`);
}
}
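Note that the index scheme is soft isolation: every tenant receives the shared requirepass secret, and deprovisioning only flushes the keys in the assigned index, so any service could technically SELECT into a neighbour's database. A consumer sketch (node-redis client chosen for illustration; only REDIS_URL comes from the mappings above):

// Sketch of a user service using its assigned Redis database index.
import { createClient } from 'npm:redis@4';

// Injected by the provider: redis://:<password>@<containerName>:6379/<dbIndex>
const redis = createClient({ url: Deno.env.get('REDIS_URL')! });
await redis.connect();
await redis.set('greeting', 'hello from onebox');
console.log(await redis.get('greeting'));
await redis.disconnect();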

View File

@@ -2,7 +2,7 @@
 * Onebox Registry Manager
 *
 * Manages the local Docker registry using:
- * - @push.rocks/smarts3 (S3-compatible server with filesystem storage)
+ * - @push.rocks/smartstorage (S3-compatible server with filesystem storage)
 * - @push.rocks/smartregistry (OCI-compliant Docker registry)
 */
@@ -27,7 +27,7 @@ export class RegistryManager {
  }
  /**
-   * Initialize the registry (start smarts3 and smartregistry)
+   * Initialize the registry (start smartstorage and smartregistry)
   */
  async init(): Promise<void> {
    if (this.isInitialized) {
@@ -39,10 +39,10 @@ export class RegistryManager {
    const dataDir = this.options.dataDir || './.nogit/registry-data';
    const port = this.options.port || 4000;
-    logger.info(`Starting smarts3 server on port ${port}...`);
-    // 1. Start smarts3 server (S3-compatible storage with filesystem backend)
-    this.s3Server = await plugins.smarts3.Smarts3.createAndStart({
+    logger.info(`Starting smartstorage server on port ${port}...`);
+    // 1. Start smartstorage server (S3-compatible storage with filesystem backend)
+    this.s3Server = await plugins.smartstorage.SmartStorage.createAndStart({
      server: {
        port: port,
        address: '0.0.0.0',
@@ -53,16 +53,16 @@ export class RegistryManager {
      },
    });
-    logger.success(`smarts3 server started on port ${port}`);
-    // 2. Configure smartregistry to use smarts3
+    logger.success(`smartstorage server started on port ${port}`);
+    // 2. Configure smartregistry to use smartstorage
    logger.info('Initializing smartregistry...');
    this.registry = new plugins.smartregistry.SmartRegistry({
      storage: {
        endpoint: 'localhost',
        port: port,
-        accessKey: 'onebox', // smarts3 doesn't validate credentials
+        accessKey: 'onebox', // smartstorage doesn't validate credentials
        accessSecret: 'onebox',
        useSsl: false,
        region: 'us-east-1',
@@ -314,15 +314,15 @@ export class RegistryManager {
  }
  /**
-   * Stop the registry and smarts3 server
+   * Stop the registry and smartstorage server
   */
  async stop(): Promise<void> {
    if (this.s3Server) {
      try {
        await this.s3Server.stop();
-        logger.info('smarts3 server stopped');
+        logger.info('smartstorage server stopped');
      } catch (error) {
-        logger.error(`Error stopping smarts3: ${getErrorMessage(error)}`);
+        logger.error(`Error stopping smartstorage: ${getErrorMessage(error)}`);
      }
    }

View File

@@ -15,6 +15,7 @@ export class OneboxServicesManager {
  private oneboxRef: any; // Will be Onebox instance
  private database: OneboxDatabase;
  private docker: OneboxDockerManager;
+  private autoUpdateIntervalId: number | null = null;
  constructor(oneboxRef: any) {
    this.oneboxRef = oneboxRef;
@@ -49,11 +50,13 @@ export class OneboxServicesManager {
    // Build platform requirements
    const platformRequirements: IPlatformRequirements | undefined =
-      (options.enableMongoDB || options.enableS3 || options.enableClickHouse)
+      (options.enableMongoDB || options.enableS3 || options.enableClickHouse || options.enableRedis || options.enableMariaDB)
        ? {
            mongodb: options.enableMongoDB,
            s3: options.enableS3,
            clickhouse: options.enableClickHouse,
+            redis: options.enableRedis,
+            mariadb: options.enableMariaDB,
          }
        : undefined;
@@ -75,6 +78,9 @@ export class OneboxServicesManager {
      autoUpdateOnPush: options.autoUpdateOnPush,
      // Platform requirements
      platformRequirements,
+      // App Store template tracking
+      appTemplateId: options.appTemplateId,
+      appTemplateVersion: options.appTemplateVersion,
    });
    // Provision platform resources if needed
@@ -681,7 +687,7 @@ export class OneboxServicesManager {
   */
  startAutoUpdateMonitoring(): void {
    // Check every 30 seconds
-    setInterval(async () => {
+    this.autoUpdateIntervalId = setInterval(async () => {
      try {
        await this.checkForRegistryUpdates();
      } catch (error) {
@@ -692,6 +698,17 @@ export class OneboxServicesManager {
    logger.info('Auto-update monitoring started (30s interval)');
  }
+  /**
+   * Stop auto-update monitoring
+   */
+  stopAutoUpdateMonitoring(): void {
+    if (this.autoUpdateIntervalId !== null) {
+      clearInterval(this.autoUpdateIntervalId);
+      this.autoUpdateIntervalId = null;
+      logger.debug('Auto-update monitoring stopped');
+    }
+  }
  /**
   * Check all services using onebox registry for updates
   */
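Storing the handle matters in Deno: setInterval returns a plain number, and an active timer keeps the event loop alive, so without stopAutoUpdateMonitoring() an otherwise-finished process would not exit. A sketch of the intended pairing (the shutdown call site shown is an assumption, not part of this diff):

// Sketch: pairing start/stop so shutdown can actually terminate the process.
// The onebox.shutdown() wiring here is hypothetical.
const services = onebox.services;
services.startAutoUpdateMonitoring();   // registers the 30s interval, stores its id

Deno.addSignalListener('SIGTERM', async () => {
  services.stopAutoUpdateMonitoring();  // clearInterval frees the event loop
  await onebox.shutdown();
  Deno.exit(0);
});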

ts/classes/systemd.ts · new file · 243 lines
View File

@@ -0,0 +1,243 @@
/**
* Systemd Service Manager for Onebox
*
* Handles systemd unit file installation, enabling, starting, stopping,
* and status checking. Modeled on nupst's direct systemctl approach —
* no external library dependencies.
*/
import { logger } from '../logging.ts';
import { getErrorMessage } from '../utils/error.ts';
const SERVICE_NAME = 'onebox';
const SERVICE_FILE_PATH = '/etc/systemd/system/onebox.service';
const SERVICE_UNIT_TEMPLATE = `[Unit]
Description=Onebox - Self-hosted container platform
After=network-online.target docker.service
Wants=network-online.target
Requires=docker.service
[Service]
Type=simple
ExecStart=/usr/local/bin/onebox systemd start-daemon
Restart=always
RestartSec=10
WorkingDirectory=/var/lib/onebox
Environment=PATH=/usr/bin:/usr/local/bin
Environment=HOME=/root
Environment=DENO_DIR=/root/.cache/deno
[Install]
WantedBy=multi-user.target
`;
export class OneboxSystemd {
/**
* Install and enable the systemd service
*/
async enable(): Promise<void> {
try {
// Ensure Docker is installed before writing unit file (it requires docker.service)
await this.ensureDocker();
// Write the unit file
logger.info('Writing systemd unit file...');
await Deno.writeTextFile(SERVICE_FILE_PATH, SERVICE_UNIT_TEMPLATE);
logger.info(`Unit file written to ${SERVICE_FILE_PATH}`);
// Reload systemd daemon
await this.runSystemctl(['daemon-reload']);
// Enable the service
const result = await this.runSystemctl(['enable', `${SERVICE_NAME}.service`]);
if (!result.success) {
throw new Error(`Failed to enable service: ${result.stderr}`);
}
logger.success('Onebox systemd service enabled');
logger.info('Start with: onebox systemd start');
} catch (error) {
logger.error(`Failed to enable service: ${getErrorMessage(error)}`);
throw error;
}
}
/**
* Stop, disable, and remove the systemd service
*/
async disable(): Promise<void> {
try {
// Stop the service (ignore errors if not running)
await this.runSystemctl(['stop', `${SERVICE_NAME}.service`]);
// Disable the service
await this.runSystemctl(['disable', `${SERVICE_NAME}.service`]);
// Remove the unit file
try {
await Deno.remove(SERVICE_FILE_PATH);
logger.info(`Removed ${SERVICE_FILE_PATH}`);
} catch {
// File might not exist
}
// Reload systemd daemon
await this.runSystemctl(['daemon-reload']);
logger.success('Onebox systemd service disabled and removed');
} catch (error) {
logger.error(`Failed to disable service: ${getErrorMessage(error)}`);
throw error;
}
}
/**
* Start the service via systemctl
*/
async start(): Promise<void> {
const result = await this.runSystemctl(['start', `${SERVICE_NAME}.service`]);
if (!result.success) {
logger.error(`Failed to start service: ${result.stderr}`);
throw new Error(`Failed to start onebox service`);
}
logger.success('Onebox service started');
}
/**
* Stop the service via systemctl
*/
async stop(): Promise<void> {
const result = await this.runSystemctl(['stop', `${SERVICE_NAME}.service`]);
if (!result.success) {
logger.error(`Failed to stop service: ${result.stderr}`);
throw new Error(`Failed to stop onebox service`);
}
logger.success('Onebox service stopped');
}
/**
* Get and display service status
*/
async getStatus(): Promise<string> {
const result = await this.runSystemctl(['status', `${SERVICE_NAME}.service`]);
const output = result.stdout;
let status: string;
if (output.includes('active (running)')) {
status = 'running';
} else if (output.includes('inactive') || output.includes('dead')) {
status = 'stopped';
} else if (output.includes('failed')) {
status = 'failed';
} else if (!result.success && result.stderr.includes('could not be found')) {
status = 'not-installed';
} else {
status = 'unknown';
}
// Print the raw systemctl output for full details
if (output.trim()) {
console.log(output);
}
return status;
}
/**
* Show service logs via journalctl
*/
async showLogs(): Promise<void> {
const cmd = new Deno.Command('journalctl', {
args: ['-u', `${SERVICE_NAME}.service`, '-f'],
stdout: 'inherit',
stderr: 'inherit',
});
await cmd.output();
}
/**
* Check if the service unit file is installed
*/
async isInstalled(): Promise<boolean> {
try {
await Deno.stat(SERVICE_FILE_PATH);
return true;
} catch {
return false;
}
}
/**
* Ensure Docker is installed, installing it if necessary
*/
private async ensureDocker(): Promise<void> {
try {
const cmd = new Deno.Command('docker', {
args: ['--version'],
stdout: 'piped',
stderr: 'piped',
});
const result = await cmd.output();
if (result.success) {
const version = new TextDecoder().decode(result.stdout).trim();
logger.info(`Docker found: ${version}`);
return;
}
} catch {
// docker command not found
}
logger.info('Docker not found. Installing Docker...');
const installCmd = new Deno.Command('bash', {
args: ['-c', 'curl -fsSL https://get.docker.com | sh'],
stdin: 'inherit',
stdout: 'inherit',
stderr: 'inherit',
});
const installResult = await installCmd.output();
if (!installResult.success) {
throw new Error('Failed to install Docker. Please install it manually: curl -fsSL https://get.docker.com | sh');
}
logger.success('Docker installed successfully');
// Initialize Docker Swarm
logger.info('Initializing Docker Swarm...');
const swarmCmd = new Deno.Command('docker', {
args: ['swarm', 'init'],
stdout: 'piped',
stderr: 'piped',
});
const swarmResult = await swarmCmd.output();
if (swarmResult.success) {
logger.success('Docker Swarm initialized');
} else {
const stderr = new TextDecoder().decode(swarmResult.stderr);
if (stderr.includes('already part of a swarm')) {
logger.info('Docker Swarm already initialized');
} else {
logger.warn(`Docker Swarm init warning: ${stderr.trim()}`);
}
}
}
/**
* Run a systemctl command and return results
*/
private async runSystemctl(
args: string[]
): Promise<{ success: boolean; stdout: string; stderr: string }> {
const cmd = new Deno.Command('systemctl', {
args,
stdout: 'piped',
stderr: 'piped',
});
const result = await cmd.output();
return {
success: result.success,
stdout: new TextDecoder().decode(result.stdout),
stderr: new TextDecoder().decode(result.stderr),
};
}
}
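One caveat: getStatus() infers state by substring-matching the human-oriented systemctl status output, which can shift between systemd versions and locales. systemctl is-active answers the same question with a stable exit code; a hedged alternative sketch, not what this file does:

// Sketch: probing unit state via `systemctl is-active` instead of parsing status text.
// Alternative approach for illustration only.
async function isUnitActive(unit: string): Promise<boolean> {
  const cmd = new Deno.Command('systemctl', {
    args: ['is-active', '--quiet', unit],
    stdout: 'null',
    stderr: 'null',
  });
  const { success } = await cmd.output(); // exit code 0 means active
  return success;
}

console.log(await isUnitActive('onebox.service'));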

ts/cli.ts · 166 changed lines
View File

@@ -7,6 +7,7 @@ import { projectInfo } from './info.ts';
import { getErrorMessage } from './utils/error.ts';
import { Onebox } from './classes/onebox.ts';
import { OneboxDaemon } from './classes/daemon.ts';
+import { OneboxSystemd } from './classes/systemd.ts';
export async function runCli(): Promise<void> {
  const args = Deno.args;
@@ -25,6 +26,19 @@ export async function runCli(): Promise<void> {
  const subcommand = args[1];
  try {
+    // === LIGHTWEIGHT COMMANDS (no init()) ===
+    if (command === 'systemd') {
+      await handleSystemdCommand(subcommand, args.slice(2));
+      return;
+    }
+    if (command === 'upgrade') {
+      await handleUpgradeCommand();
+      return;
+    }
+    // === HEAVY COMMANDS (require full init()) ===
    // Server command has special handling (doesn't shut down)
    if (command === 'server') {
      const onebox = new Onebox();
@@ -60,10 +74,6 @@ export async function runCli(): Promise<void> {
        await handleNginxCommand(onebox, subcommand, args.slice(2));
        break;
-      case 'daemon':
-        await handleDaemonCommand(onebox, subcommand, args.slice(2));
-        break;
      case 'config':
        await handleConfigCommand(onebox, subcommand, args.slice(2));
        break;
@@ -278,7 +288,7 @@ async function handleServerCommand(onebox: Onebox, args: string[]) {
      await OneboxDaemon.ensureNoDaemon();
    } catch (error) {
      logger.error('Cannot start in ephemeral mode: Daemon is already running');
-      logger.info('Stop the daemon first: onebox daemon stop');
+      logger.info('Stop the daemon first: onebox systemd stop');
      logger.info('Or run without --ephemeral to use the existing daemon');
      Deno.exit(1);
    }
@@ -286,8 +296,8 @@ async function handleServerCommand(onebox: Onebox, args: string[]) {
  logger.info('Starting Onebox server...');
-  // Start HTTP server
-  await onebox.httpServer.start(port);
+  // Start OpsServer (serves new UI + TypedRequest API)
+  await onebox.opsServer.start(port);
  // Start monitoring if requested
  if (monitor) {
@@ -308,7 +318,7 @@ async function handleServerCommand(onebox: Onebox, args: string[]) {
    if (monitor) {
      onebox.daemon.stopMonitoring();
    }
-    await onebox.httpServer.stop();
+    await onebox.opsServer.stop();
    await onebox.shutdown();
    Deno.exit(0);
  };
@@ -322,39 +332,49 @@ async function handleServerCommand(onebox: Onebox, args: string[]) {
  }
}
-// Daemon commands
-async function handleDaemonCommand(onebox: Onebox, subcommand: string, _args: string[]) {
+// Systemd service commands (lightweight — no Onebox init)
+async function handleSystemdCommand(subcommand: string, _args: string[]) {
+  const systemd = new OneboxSystemd();
  switch (subcommand) {
-    case 'install':
-      await onebox.daemon.installService();
+    case 'enable':
+      await systemd.enable();
+      break;
+    case 'disable':
+      await systemd.disable();
      break;
    case 'start':
-      await onebox.startDaemon();
+      await systemd.start();
      break;
    case 'stop':
-      await onebox.stopDaemon();
+      await systemd.stop();
      break;
-    case 'logs': {
-      const command = new Deno.Command('journalctl', {
-        args: ['-u', 'smartdaemon_onebox', '-f'],
-        stdout: 'inherit',
-        stderr: 'inherit',
-      });
-      await command.output();
-      break;
-    }
-    case 'status': {
-      const status = await onebox.daemon.getServiceStatus();
-      logger.info(`Daemon status: ${status}`);
-      break;
-    }
+    case 'status': {
+      const status = await systemd.getStatus();
+      logger.info(`Service status: ${status}`);
+      break;
+    }
+    case 'logs':
+      await systemd.showLogs();
+      break;
+    case 'start-daemon': {
+      // This is what systemd's ExecStart calls — full init + daemon loop
+      const onebox = new Onebox();
+      await onebox.init();
+      await onebox.daemon.start();
+      // start() blocks (keepAlive loop) until SIGTERM/SIGINT
+      break;
+    }
    default:
-      logger.error(`Unknown daemon subcommand: ${subcommand}`);
+      logger.error(`Unknown systemd subcommand: ${subcommand}`);
+      logger.info('Available: enable, disable, start, stop, status, logs');
  }
}
@@ -386,6 +406,78 @@ async function handleStatusCommand(onebox: Onebox) {
  console.log(JSON.stringify(status, null, 2));
}
// Upgrade command - self-update onebox to latest version
async function handleUpgradeCommand(): Promise<void> {
// Check if running as root
if (Deno.uid() !== 0) {
logger.error('This command must be run as root to upgrade Onebox.');
logger.info('Try: sudo onebox upgrade');
Deno.exit(1);
}
logger.info('Checking for updates...');
try {
// Get current version
const currentVersion = projectInfo.version;
// Fetch latest version from Gitea API
const apiUrl = 'https://code.foss.global/api/v1/repos/serve.zone/onebox/releases/latest';
const curlCmd = new Deno.Command('curl', {
args: ['-sSL', apiUrl],
stdout: 'piped',
stderr: 'piped',
});
const curlResult = await curlCmd.output();
const response = new TextDecoder().decode(curlResult.stdout);
const release = JSON.parse(response);
const latestVersion = release.tag_name as string; // e.g., "v1.11.0"
// Normalize versions for comparison (ensure both have "v" prefix)
const normalizedCurrent = currentVersion.startsWith('v')
? currentVersion
: `v${currentVersion}`;
const normalizedLatest = latestVersion.startsWith('v')
? latestVersion
: `v${latestVersion}`;
console.log(` Current version: ${normalizedCurrent}`);
console.log(` Latest version: ${normalizedLatest}`);
console.log('');
// Compare normalized versions
if (normalizedCurrent === normalizedLatest) {
logger.success('Already up to date!');
return;
}
logger.info(`New version available: ${latestVersion}`);
logger.info('Downloading and installing...');
console.log('');
// Download and run the install script
const installUrl = 'https://code.foss.global/serve.zone/onebox/raw/branch/main/install.sh';
const installCmd = new Deno.Command('bash', {
args: ['-c', `curl -sSL ${installUrl} | bash`],
stdin: 'inherit',
stdout: 'inherit',
stderr: 'inherit',
});
const installResult = await installCmd.output();
if (!installResult.success) {
logger.error('Upgrade failed');
Deno.exit(1);
}
console.log('');
logger.success(`Upgraded to ${latestVersion}`);
} catch (error) {
logger.error(`Upgrade failed: ${getErrorMessage(error)}`);
Deno.exit(1);
}
}
// Helpers
function getArg(args: string[], flag: string): string {
  const arg = args.find((a) => a.startsWith(`${flag}=`));
@@ -430,17 +522,21 @@ Commands:
  nginx test
  nginx status
-  daemon install
-  daemon start
-  daemon stop
-  daemon logs
-  daemon status
+  systemd enable     Install and enable systemd service
+  systemd disable    Stop, disable, and remove systemd service
+  systemd start      Start onebox via systemctl
+  systemd stop       Stop onebox via systemctl
+  systemd status     Show systemd service status
+  systemd logs       Follow service logs (journalctl)
  config show
  config set <key> <value>
  status
+  upgrade            Upgrade Onebox to the latest version (requires root)
Options:
  --help, -h Show this help message
  --version, -v Show version
@@ -451,15 +547,15 @@ Development Workflow:
  onebox service add ...      # In another terminal
Production Workflow:
-  onebox daemon install       # Install systemd service
-  onebox daemon start         # Start daemon
-  onebox service add ...      # CLI uses daemon
+  onebox systemd enable       # Install and enable systemd service
+  onebox systemd start        # Start via systemctl
+  onebox service add ...      # CLI manages services
Examples:
  onebox server --ephemeral   # Start dev server
  onebox service add myapp --image nginx:latest --domain app.example.com --port 80
  onebox registry add --url registry.example.com --username user --password pass
-  onebox daemon install
-  onebox daemon start
+  onebox systemd enable
+  onebox systemd start
`);
}
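Worth noting: the version check in handleUpgradeCommand is plain string equality, so a locally newer build (or a differently formatted tag) still triggers a reinstall. If ordering ever matters, a small numeric comparison would cover it; a sketch, not part of the commit:

// Sketch: numeric semver ordering so "v1.9.0" < "v1.10.0" compares correctly.
// Hypothetical helper, not in the diff above.
function compareSemver(a: string, b: string): number {
  const parse = (v: string) => v.replace(/^v/, '').split('.').map(Number);
  const [pa, pb] = [parse(a), parse(b)];
  for (let i = 0; i < 3; i++) {
    const diff = (pa[i] ?? 0) - (pb[i] ?? 0);
    if (diff !== 0) return diff;
  }
  return 0;
}

// compareSemver(currentVersion, latestVersion) >= 0 means already up to date: skip the reinstall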

View File

@@ -25,6 +25,7 @@ import type {
import type { TBindValue } from './types.ts';
import { logger } from '../logging.ts';
import { getErrorMessage } from '../utils/error.ts';
+import { MigrationRunner } from './migrations/index.ts';
// Import repositories
import {
@@ -71,7 +72,8 @@ export class OneboxDatabase {
    await this.createTables();
    // Run migrations if needed
-    await this.runMigrations();
+    const runner = new MigrationRunner(this.query.bind(this));
+    runner.run();
    // Initialize repositories with bound query function
    const queryFn = this.query.bind(this);
@@ -241,724 +243,6 @@ export class OneboxDatabase {
  /**
   * Run database migrations
   */
private async runMigrations(): Promise<void> {
if (!this.db) throw new Error('Database not initialized');
try {
const currentVersion = this.getMigrationVersion();
logger.info(`Current database migration version: ${currentVersion}`);
// Migration 1: Initial schema
if (currentVersion === 0) {
logger.info('Setting initial migration version to 1');
this.setMigrationVersion(1);
}
// Migration 2: Convert timestamp columns from INTEGER to REAL
const updatedVersion = this.getMigrationVersion();
if (updatedVersion < 2) {
logger.info('Running migration 2: Converting timestamps to REAL...');
// SSL certificates
this.query(`
CREATE TABLE ssl_certificates_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
domain TEXT NOT NULL UNIQUE,
cert_path TEXT NOT NULL,
key_path TEXT NOT NULL,
full_chain_path TEXT NOT NULL,
expiry_date REAL NOT NULL,
issuer TEXT NOT NULL,
created_at REAL NOT NULL,
updated_at REAL NOT NULL
)
`);
this.query(`INSERT INTO ssl_certificates_new SELECT * FROM ssl_certificates`);
this.query(`DROP TABLE ssl_certificates`);
this.query(`ALTER TABLE ssl_certificates_new RENAME TO ssl_certificates`);
// Services
this.query(`
CREATE TABLE services_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL UNIQUE,
image TEXT NOT NULL,
registry TEXT,
env_vars TEXT NOT NULL,
port INTEGER NOT NULL,
domain TEXT,
container_id TEXT,
status TEXT NOT NULL DEFAULT 'stopped',
created_at REAL NOT NULL,
updated_at REAL NOT NULL
)
`);
this.query(`INSERT INTO services_new SELECT * FROM services`);
this.query(`DROP TABLE services`);
this.query(`ALTER TABLE services_new RENAME TO services`);
// Registries
this.query(`
CREATE TABLE registries_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
url TEXT NOT NULL UNIQUE,
username TEXT NOT NULL,
password_encrypted TEXT NOT NULL,
created_at REAL NOT NULL
)
`);
this.query(`INSERT INTO registries_new SELECT * FROM registries`);
this.query(`DROP TABLE registries`);
this.query(`ALTER TABLE registries_new RENAME TO registries`);
// Nginx configs
this.query(`
CREATE TABLE nginx_configs_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
domain TEXT NOT NULL,
port INTEGER NOT NULL,
ssl_enabled INTEGER NOT NULL DEFAULT 0,
config_template TEXT NOT NULL,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
this.query(`INSERT INTO nginx_configs_new SELECT * FROM nginx_configs`);
this.query(`DROP TABLE nginx_configs`);
this.query(`ALTER TABLE nginx_configs_new RENAME TO nginx_configs`);
// DNS records
this.query(`
CREATE TABLE dns_records_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
domain TEXT NOT NULL UNIQUE,
type TEXT NOT NULL,
value TEXT NOT NULL,
cloudflare_id TEXT,
zone_id TEXT,
created_at REAL NOT NULL,
updated_at REAL NOT NULL
)
`);
this.query(`INSERT INTO dns_records_new SELECT * FROM dns_records`);
this.query(`DROP TABLE dns_records`);
this.query(`ALTER TABLE dns_records_new RENAME TO dns_records`);
// Metrics
this.query(`
CREATE TABLE metrics_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
timestamp REAL NOT NULL,
cpu_percent REAL NOT NULL,
memory_used INTEGER NOT NULL,
memory_limit INTEGER NOT NULL,
network_rx_bytes INTEGER NOT NULL,
network_tx_bytes INTEGER NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
this.query(`INSERT INTO metrics_new SELECT * FROM metrics`);
this.query(`DROP TABLE metrics`);
this.query(`ALTER TABLE metrics_new RENAME TO metrics`);
this.query(`CREATE INDEX IF NOT EXISTS idx_metrics_service_timestamp ON metrics(service_id, timestamp DESC)`);
// Logs
this.query(`
CREATE TABLE logs_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
timestamp REAL NOT NULL,
message TEXT NOT NULL,
level TEXT NOT NULL,
source TEXT NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
this.query(`INSERT INTO logs_new SELECT * FROM logs`);
this.query(`DROP TABLE logs`);
this.query(`ALTER TABLE logs_new RENAME TO logs`);
this.query(`CREATE INDEX IF NOT EXISTS idx_logs_service_timestamp ON logs(service_id, timestamp DESC)`);
// Users
this.query(`
CREATE TABLE users_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
username TEXT NOT NULL UNIQUE,
password_hash TEXT NOT NULL,
role TEXT NOT NULL DEFAULT 'user',
created_at REAL NOT NULL,
updated_at REAL NOT NULL
)
`);
this.query(`INSERT INTO users_new SELECT * FROM users`);
this.query(`DROP TABLE users`);
this.query(`ALTER TABLE users_new RENAME TO users`);
// Settings
this.query(`
CREATE TABLE settings_new (
key TEXT PRIMARY KEY,
value TEXT NOT NULL,
updated_at REAL NOT NULL
)
`);
this.query(`INSERT INTO settings_new SELECT * FROM settings`);
this.query(`DROP TABLE settings`);
this.query(`ALTER TABLE settings_new RENAME TO settings`);
// Migrations table itself
this.query(`
CREATE TABLE migrations_new (
version INTEGER PRIMARY KEY,
applied_at REAL NOT NULL
)
`);
this.query(`INSERT INTO migrations_new SELECT * FROM migrations`);
this.query(`DROP TABLE migrations`);
this.query(`ALTER TABLE migrations_new RENAME TO migrations`);
this.setMigrationVersion(2);
logger.success('Migration 2 completed: All timestamps converted to REAL');
}
// Migration 3: Domain management tables
const version3 = this.getMigrationVersion();
if (version3 < 3) {
logger.info('Running migration 3: Creating domain management tables...');
this.query(`
CREATE TABLE domains (
id INTEGER PRIMARY KEY AUTOINCREMENT,
domain TEXT NOT NULL UNIQUE,
dns_provider TEXT,
cloudflare_zone_id TEXT,
is_obsolete INTEGER NOT NULL DEFAULT 0,
default_wildcard INTEGER NOT NULL DEFAULT 1,
created_at REAL NOT NULL,
updated_at REAL NOT NULL
)
`);
this.query(`
CREATE TABLE certificates (
id INTEGER PRIMARY KEY AUTOINCREMENT,
domain_id INTEGER NOT NULL,
cert_domain TEXT NOT NULL,
is_wildcard INTEGER NOT NULL DEFAULT 0,
cert_path TEXT NOT NULL,
key_path TEXT NOT NULL,
full_chain_path TEXT NOT NULL,
expiry_date REAL NOT NULL,
issuer TEXT NOT NULL,
is_valid INTEGER NOT NULL DEFAULT 1,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (domain_id) REFERENCES domains(id) ON DELETE CASCADE
)
`);
this.query(`
CREATE TABLE cert_requirements (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
domain_id INTEGER NOT NULL,
subdomain TEXT NOT NULL,
certificate_id INTEGER,
status TEXT NOT NULL DEFAULT 'pending',
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE,
FOREIGN KEY (domain_id) REFERENCES domains(id) ON DELETE CASCADE,
FOREIGN KEY (certificate_id) REFERENCES certificates(id) ON DELETE SET NULL
)
`);
interface OldSslCert {
id?: number;
domain?: string;
cert_path?: string;
key_path?: string;
full_chain_path?: string;
expiry_date?: number;
issuer?: string;
created_at?: number;
updated_at?: number;
[key: number]: unknown;
}
const existingCerts = this.query<OldSslCert>('SELECT * FROM ssl_certificates');
const now = Date.now();
const domainMap = new Map<string, number>();
for (const cert of existingCerts) {
const domain = String(cert.domain ?? (cert as Record<number, unknown>)[1]);
if (!domainMap.has(domain)) {
this.query(
'INSERT INTO domains (domain, dns_provider, is_obsolete, default_wildcard, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?)',
[domain, null, 0, 1, now, now]
);
const result = this.query<{ id?: number; [key: number]: unknown }>('SELECT last_insert_rowid() as id');
const domainId = result[0].id ?? (result[0] as Record<number, unknown>)[0];
domainMap.set(domain, Number(domainId));
}
}
for (const cert of existingCerts) {
const domain = String(cert.domain ?? (cert as Record<number, unknown>)[1]);
const domainId = domainMap.get(domain);
this.query(
`INSERT INTO certificates (
domain_id, cert_domain, is_wildcard, cert_path, key_path, full_chain_path,
expiry_date, issuer, is_valid, created_at, updated_at
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
[
domainId,
domain,
0,
String(cert.cert_path ?? (cert as Record<number, unknown>)[2]),
String(cert.key_path ?? (cert as Record<number, unknown>)[3]),
String(cert.full_chain_path ?? (cert as Record<number, unknown>)[4]),
Number(cert.expiry_date ?? (cert as Record<number, unknown>)[5]),
String(cert.issuer ?? (cert as Record<number, unknown>)[6]),
1,
Number(cert.created_at ?? (cert as Record<number, unknown>)[7]),
Number(cert.updated_at ?? (cert as Record<number, unknown>)[8])
]
);
}
this.query('DROP TABLE ssl_certificates');
this.query('CREATE INDEX IF NOT EXISTS idx_domains_cloudflare_zone ON domains(cloudflare_zone_id)');
this.query('CREATE INDEX IF NOT EXISTS idx_certificates_domain ON certificates(domain_id)');
this.query('CREATE INDEX IF NOT EXISTS idx_certificates_expiry ON certificates(expiry_date)');
this.query('CREATE INDEX IF NOT EXISTS idx_cert_requirements_service ON cert_requirements(service_id)');
this.query('CREATE INDEX IF NOT EXISTS idx_cert_requirements_domain ON cert_requirements(domain_id)');
this.setMigrationVersion(3);
logger.success('Migration 3 completed: Domain management tables created');
}
// Migration 4: Add Onebox Registry support columns
const version4 = this.getMigrationVersion();
if (version4 < 4) {
logger.info('Running migration 4: Adding Onebox Registry columns to services table...');
this.query(`ALTER TABLE services ADD COLUMN use_onebox_registry INTEGER DEFAULT 0`);
this.query(`ALTER TABLE services ADD COLUMN registry_repository TEXT`);
this.query(`ALTER TABLE services ADD COLUMN registry_token TEXT`);
this.query(`ALTER TABLE services ADD COLUMN registry_image_tag TEXT DEFAULT 'latest'`);
this.query(`ALTER TABLE services ADD COLUMN auto_update_on_push INTEGER DEFAULT 0`);
this.query(`ALTER TABLE services ADD COLUMN image_digest TEXT`);
this.setMigrationVersion(4);
logger.success('Migration 4 completed: Onebox Registry columns added to services table');
}
// Migration 5: Registry tokens table
const version5 = this.getMigrationVersion();
if (version5 < 5) {
logger.info('Running migration 5: Creating registry_tokens table...');
this.query(`
CREATE TABLE registry_tokens (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
token_hash TEXT NOT NULL UNIQUE,
token_type TEXT NOT NULL,
scope TEXT NOT NULL,
expires_at REAL,
created_at REAL NOT NULL,
last_used_at REAL,
created_by TEXT NOT NULL
)
`);
this.query('CREATE INDEX IF NOT EXISTS idx_registry_tokens_type ON registry_tokens(token_type)');
this.query('CREATE INDEX IF NOT EXISTS idx_registry_tokens_hash ON registry_tokens(token_hash)');
this.setMigrationVersion(5);
logger.success('Migration 5 completed: Registry tokens table created');
}
// Migration 6: Drop registry_token column from services table
const version6 = this.getMigrationVersion();
if (version6 < 6) {
logger.info('Running migration 6: Dropping registry_token column from services table...');
this.query(`
CREATE TABLE services_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL UNIQUE,
image TEXT NOT NULL,
registry TEXT,
env_vars TEXT,
port INTEGER NOT NULL,
domain TEXT,
container_id TEXT,
status TEXT NOT NULL,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
use_onebox_registry INTEGER DEFAULT 0,
registry_repository TEXT,
registry_image_tag TEXT DEFAULT 'latest',
auto_update_on_push INTEGER DEFAULT 0,
image_digest TEXT
)
`);
this.query(`
INSERT INTO services_new (
id, name, image, registry, env_vars, port, domain, container_id, status,
created_at, updated_at, use_onebox_registry, registry_repository,
registry_image_tag, auto_update_on_push, image_digest
)
SELECT
id, name, image, registry, env_vars, port, domain, container_id, status,
created_at, updated_at, use_onebox_registry, registry_repository,
registry_image_tag, auto_update_on_push, image_digest
FROM services
`);
this.query('DROP TABLE services');
this.query('ALTER TABLE services_new RENAME TO services');
this.query('CREATE INDEX IF NOT EXISTS idx_services_name ON services(name)');
this.query('CREATE INDEX IF NOT EXISTS idx_services_status ON services(status)');
this.setMigrationVersion(6);
logger.success('Migration 6 completed: registry_token column dropped from services table');
}
// Migration 7: Platform services tables
const version7 = this.getMigrationVersion();
if (version7 < 7) {
logger.info('Running migration 7: Creating platform services tables...');
this.query(`
CREATE TABLE platform_services (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL UNIQUE,
type TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'stopped',
container_id TEXT,
config TEXT NOT NULL DEFAULT '{}',
admin_credentials_encrypted TEXT,
created_at REAL NOT NULL,
updated_at REAL NOT NULL
)
`);
this.query(`
CREATE TABLE platform_resources (
id INTEGER PRIMARY KEY AUTOINCREMENT,
platform_service_id INTEGER NOT NULL,
service_id INTEGER NOT NULL,
resource_type TEXT NOT NULL,
resource_name TEXT NOT NULL,
credentials_encrypted TEXT NOT NULL,
created_at REAL NOT NULL,
FOREIGN KEY (platform_service_id) REFERENCES platform_services(id) ON DELETE CASCADE,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
this.query(`ALTER TABLE services ADD COLUMN platform_requirements TEXT DEFAULT '{}'`);
this.query('CREATE INDEX IF NOT EXISTS idx_platform_services_type ON platform_services(type)');
this.query('CREATE INDEX IF NOT EXISTS idx_platform_resources_service ON platform_resources(service_id)');
this.query('CREATE INDEX IF NOT EXISTS idx_platform_resources_platform ON platform_resources(platform_service_id)');
this.setMigrationVersion(7);
logger.success('Migration 7 completed: Platform services tables created');
}
// Migration 8: Convert certificates table to store PEM content
const version8 = this.getMigrationVersion();
if (version8 < 8) {
logger.info('Running migration 8: Converting certificates table to store PEM content...');
this.query(`
CREATE TABLE certificates_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
domain_id INTEGER NOT NULL,
cert_domain TEXT NOT NULL,
is_wildcard INTEGER NOT NULL DEFAULT 0,
cert_pem TEXT NOT NULL DEFAULT '',
key_pem TEXT NOT NULL DEFAULT '',
fullchain_pem TEXT NOT NULL DEFAULT '',
expiry_date REAL NOT NULL,
issuer TEXT NOT NULL,
is_valid INTEGER NOT NULL DEFAULT 1,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (domain_id) REFERENCES domains(id) ON DELETE CASCADE
)
`);
this.query(`
INSERT INTO certificates_new (id, domain_id, cert_domain, is_wildcard, cert_pem, key_pem, fullchain_pem, expiry_date, issuer, is_valid, created_at, updated_at)
SELECT id, domain_id, cert_domain, is_wildcard, '', '', '', expiry_date, issuer, 0, created_at, updated_at FROM certificates
`);
this.query('DROP TABLE certificates');
this.query('ALTER TABLE certificates_new RENAME TO certificates');
this.query('CREATE INDEX IF NOT EXISTS idx_certificates_domain ON certificates(domain_id)');
this.query('CREATE INDEX IF NOT EXISTS idx_certificates_expiry ON certificates(expiry_date)');
this.setMigrationVersion(8);
logger.success('Migration 8 completed: Certificates table now stores PEM content');
}
// Migration 9: Backup system tables
const version9 = this.getMigrationVersion();
if (version9 < 9) {
logger.info('Running migration 9: Creating backup system tables...');
// Add include_image_in_backup column to services table
this.query(`ALTER TABLE services ADD COLUMN include_image_in_backup INTEGER DEFAULT 1`);
// Create backups table
this.query(`
CREATE TABLE backups (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
service_name TEXT NOT NULL,
filename TEXT NOT NULL,
size_bytes INTEGER NOT NULL,
created_at REAL NOT NULL,
includes_image INTEGER NOT NULL,
platform_resources TEXT NOT NULL DEFAULT '[]',
checksum TEXT NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
this.query('CREATE INDEX IF NOT EXISTS idx_backups_service ON backups(service_id)');
this.query('CREATE INDEX IF NOT EXISTS idx_backups_created ON backups(created_at DESC)');
this.setMigrationVersion(9);
logger.success('Migration 9 completed: Backup system tables created');
}
// Migration 10: Backup schedules table and extend backups table
const version10 = this.getMigrationVersion();
if (version10 < 10) {
logger.info('Running migration 10: Creating backup schedules table...');
// Create backup_schedules table
this.query(`
CREATE TABLE backup_schedules (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
service_name TEXT NOT NULL,
cron_expression TEXT NOT NULL,
retention_tier TEXT NOT NULL,
enabled INTEGER NOT NULL DEFAULT 1,
last_run_at REAL,
next_run_at REAL,
last_status TEXT,
last_error TEXT,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
this.query('CREATE INDEX IF NOT EXISTS idx_backup_schedules_service ON backup_schedules(service_id)');
this.query('CREATE INDEX IF NOT EXISTS idx_backup_schedules_enabled ON backup_schedules(enabled)');
// Extend backups table with retention_tier and schedule_id columns
this.query('ALTER TABLE backups ADD COLUMN retention_tier TEXT');
this.query('ALTER TABLE backups ADD COLUMN schedule_id INTEGER REFERENCES backup_schedules(id) ON DELETE SET NULL');
this.setMigrationVersion(10);
logger.success('Migration 10 completed: Backup schedules table created');
}
// Migration 11: Add scope columns for global/pattern backup schedules
const version11 = this.getMigrationVersion();
if (version11 < 11) {
logger.info('Running migration 11: Adding scope columns to backup_schedules...');
// Recreate backup_schedules table with nullable service_id/service_name and new scope columns
this.query(`
CREATE TABLE backup_schedules_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
scope_type TEXT NOT NULL DEFAULT 'service',
scope_pattern TEXT,
service_id INTEGER,
service_name TEXT,
cron_expression TEXT NOT NULL,
retention_tier TEXT NOT NULL,
enabled INTEGER NOT NULL DEFAULT 1,
last_run_at REAL,
next_run_at REAL,
last_status TEXT,
last_error TEXT,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
// Copy existing schedules (all are service-specific)
this.query(`
INSERT INTO backup_schedules_new (
id, scope_type, scope_pattern, service_id, service_name, cron_expression,
retention_tier, enabled, last_run_at, next_run_at, last_status, last_error,
created_at, updated_at
)
SELECT
id, 'service', NULL, service_id, service_name, cron_expression,
retention_tier, enabled, last_run_at, next_run_at, last_status, last_error,
created_at, updated_at
FROM backup_schedules
`);
this.query('DROP TABLE backup_schedules');
this.query('ALTER TABLE backup_schedules_new RENAME TO backup_schedules');
this.query('CREATE INDEX IF NOT EXISTS idx_backup_schedules_service ON backup_schedules(service_id)');
this.query('CREATE INDEX IF NOT EXISTS idx_backup_schedules_enabled ON backup_schedules(enabled)');
this.query('CREATE INDEX IF NOT EXISTS idx_backup_schedules_scope ON backup_schedules(scope_type)');
this.setMigrationVersion(11);
logger.success('Migration 11 completed: Scope columns added to backup_schedules');
}
// Migration 12: GFS retention policy - replace retention_tier with per-tier retention counts
const version12 = this.getMigrationVersion();
if (version12 < 12) {
logger.info('Running migration 12: Updating backup system for GFS retention policy...');
// Recreate backup_schedules table with new retention columns
this.query(`
CREATE TABLE backup_schedules_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
scope_type TEXT NOT NULL DEFAULT 'service',
scope_pattern TEXT,
service_id INTEGER,
service_name TEXT,
cron_expression TEXT NOT NULL,
retention_hourly INTEGER NOT NULL DEFAULT 0,
retention_daily INTEGER NOT NULL DEFAULT 7,
retention_weekly INTEGER NOT NULL DEFAULT 4,
retention_monthly INTEGER NOT NULL DEFAULT 12,
enabled INTEGER NOT NULL DEFAULT 1,
last_run_at REAL,
next_run_at REAL,
last_status TEXT,
last_error TEXT,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
// Migrate existing data - convert old retention_tier to new format
// daily -> H0/D7/W4/M12, weekly -> H0/D0/W4/M12, monthly -> H0/D0/W0/M12, yearly -> H0/D0/W0/M24 (yearly becomes extended monthly retention)
this.query(`
INSERT INTO backup_schedules_new (
id, scope_type, scope_pattern, service_id, service_name, cron_expression,
retention_hourly, retention_daily, retention_weekly, retention_monthly,
enabled, last_run_at, next_run_at, last_status, last_error, created_at, updated_at
)
SELECT
id, scope_type, scope_pattern, service_id, service_name, cron_expression,
0, -- retention_hourly
CASE WHEN retention_tier = 'daily' THEN 7 ELSE 0 END,
CASE WHEN retention_tier IN ('daily', 'weekly') THEN 4 ELSE 0 END,
CASE WHEN retention_tier IN ('daily', 'weekly', 'monthly') THEN 12
WHEN retention_tier = 'yearly' THEN 24 ELSE 12 END,
enabled, last_run_at, next_run_at, last_status, last_error, created_at, updated_at
FROM backup_schedules
`);
this.query('DROP TABLE backup_schedules');
this.query('ALTER TABLE backup_schedules_new RENAME TO backup_schedules');
this.query('CREATE INDEX IF NOT EXISTS idx_backup_schedules_service ON backup_schedules(service_id)');
this.query('CREATE INDEX IF NOT EXISTS idx_backup_schedules_enabled ON backup_schedules(enabled)');
this.query('CREATE INDEX IF NOT EXISTS idx_backup_schedules_scope ON backup_schedules(scope_type)');
// Recreate backups table without retention_tier column
this.query(`
CREATE TABLE backups_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
service_name TEXT NOT NULL,
filename TEXT NOT NULL,
size_bytes INTEGER NOT NULL,
created_at REAL NOT NULL,
includes_image INTEGER NOT NULL,
platform_resources TEXT NOT NULL DEFAULT '[]',
checksum TEXT NOT NULL,
schedule_id INTEGER REFERENCES backup_schedules(id) ON DELETE SET NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
this.query(`
INSERT INTO backups_new (
id, service_id, service_name, filename, size_bytes, created_at,
includes_image, platform_resources, checksum, schedule_id
)
SELECT
id, service_id, service_name, filename, size_bytes, created_at,
includes_image, platform_resources, checksum, schedule_id
FROM backups
`);
this.query('DROP TABLE backups');
this.query('ALTER TABLE backups_new RENAME TO backups');
this.query('CREATE INDEX IF NOT EXISTS idx_backups_service ON backups(service_id)');
this.query('CREATE INDEX IF NOT EXISTS idx_backups_created ON backups(created_at DESC)');
this.query('CREATE INDEX IF NOT EXISTS idx_backups_schedule ON backups(schedule_id)');
this.setMigrationVersion(12);
logger.success('Migration 12 completed: GFS retention policy schema updated');
}
} catch (error) {
logger.error(`Migration failed: ${getErrorMessage(error)}`);
if (error instanceof Error && error.stack) {
logger.error(`Stack: ${error.stack}`);
}
throw error;
}
}
/**
* Get current migration version
*/
private getMigrationVersion(): number {
if (!this.db) throw new Error('Database not initialized');
try {
const result = this.query<{ version?: number | null; [key: number]: unknown }>('SELECT MAX(version) as version FROM migrations');
if (result.length === 0) return 0;
const versionValue = result[0].version ?? (result[0] as Record<number, unknown>)[0];
return versionValue !== null && versionValue !== undefined ? Number(versionValue) : 0;
} catch (error) {
logger.warn(`Error getting migration version: ${getErrorMessage(error)}, defaulting to 0`);
return 0;
}
}
/**
* Set migration version
*/
private setMigrationVersion(version: number): void {
if (!this.db) throw new Error('Database not initialized');
this.query('INSERT INTO migrations (version, applied_at) VALUES (?, ?)', [
version,
Date.now(),
]);
logger.debug(`Migration version set to ${version}`);
}
/**
* Close database connection
*/
@@ -1323,6 +607,10 @@ export class OneboxDatabase {
return this.backupRepo.getBySchedule(scheduleId);
}
+ getBackupBySnapshotId(snapshotId: string): IBackup | null {
+ return this.backupRepo.getBySnapshotId(snapshotId);
+ }
// ============ Backup Schedules (delegated to repository) ============
createBackupSchedule(schedule: Omit<IBackupSchedule, 'id'>): IBackupSchedule {

View File

@@ -0,0 +1,22 @@
/**
* Abstract base class for database migrations.
* All migrations must extend this class and implement the abstract members.
*/
import type { TQueryFunction } from '../types.ts';
export abstract class BaseMigration {
/** The migration version number (must be unique and sequential) */
abstract readonly version: number;
/** A short description of what this migration does */
abstract readonly description: string;
/** Execute the migration's SQL statements */
abstract up(query: TQueryFunction): void;
/** Returns a human-readable name for logging */
getName(): string {
return `Migration ${this.version}: ${this.description}`;
}
}

View File

@@ -0,0 +1,2 @@
export { BaseMigration } from './base-migration.ts';
export { MigrationRunner } from './migration-runner.ts';

View File

@@ -0,0 +1,12 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration001Initial extends BaseMigration {
readonly version = 1;
readonly description = 'Initial schema';
up(_query: TQueryFunction): void {
// Initial schema is created by createTables() in the database class.
// This migration just marks the initial version.
}
}

View File

@@ -0,0 +1,170 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration002TimestampsToReal extends BaseMigration {
readonly version = 2;
readonly description = 'Convert timestamp columns from INTEGER to REAL';
up(query: TQueryFunction): void {
// SSL certificates
query(`
CREATE TABLE ssl_certificates_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
domain TEXT NOT NULL UNIQUE,
cert_path TEXT NOT NULL,
key_path TEXT NOT NULL,
full_chain_path TEXT NOT NULL,
expiry_date REAL NOT NULL,
issuer TEXT NOT NULL,
created_at REAL NOT NULL,
updated_at REAL NOT NULL
)
`);
query(`INSERT INTO ssl_certificates_new SELECT * FROM ssl_certificates`);
query(`DROP TABLE ssl_certificates`);
query(`ALTER TABLE ssl_certificates_new RENAME TO ssl_certificates`);
// Services
query(`
CREATE TABLE services_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL UNIQUE,
image TEXT NOT NULL,
registry TEXT,
env_vars TEXT NOT NULL,
port INTEGER NOT NULL,
domain TEXT,
container_id TEXT,
status TEXT NOT NULL DEFAULT 'stopped',
created_at REAL NOT NULL,
updated_at REAL NOT NULL
)
`);
query(`INSERT INTO services_new SELECT * FROM services`);
query(`DROP TABLE services`);
query(`ALTER TABLE services_new RENAME TO services`);
// Registries
query(`
CREATE TABLE registries_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
url TEXT NOT NULL UNIQUE,
username TEXT NOT NULL,
password_encrypted TEXT NOT NULL,
created_at REAL NOT NULL
)
`);
query(`INSERT INTO registries_new SELECT * FROM registries`);
query(`DROP TABLE registries`);
query(`ALTER TABLE registries_new RENAME TO registries`);
// Nginx configs
query(`
CREATE TABLE nginx_configs_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
domain TEXT NOT NULL,
port INTEGER NOT NULL,
ssl_enabled INTEGER NOT NULL DEFAULT 0,
config_template TEXT NOT NULL,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
query(`INSERT INTO nginx_configs_new SELECT * FROM nginx_configs`);
query(`DROP TABLE nginx_configs`);
query(`ALTER TABLE nginx_configs_new RENAME TO nginx_configs`);
// DNS records
query(`
CREATE TABLE dns_records_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
domain TEXT NOT NULL UNIQUE,
type TEXT NOT NULL,
value TEXT NOT NULL,
cloudflare_id TEXT,
zone_id TEXT,
created_at REAL NOT NULL,
updated_at REAL NOT NULL
)
`);
query(`INSERT INTO dns_records_new SELECT * FROM dns_records`);
query(`DROP TABLE dns_records`);
query(`ALTER TABLE dns_records_new RENAME TO dns_records`);
// Metrics
query(`
CREATE TABLE metrics_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
timestamp REAL NOT NULL,
cpu_percent REAL NOT NULL,
memory_used INTEGER NOT NULL,
memory_limit INTEGER NOT NULL,
network_rx_bytes INTEGER NOT NULL,
network_tx_bytes INTEGER NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
query(`INSERT INTO metrics_new SELECT * FROM metrics`);
query(`DROP TABLE metrics`);
query(`ALTER TABLE metrics_new RENAME TO metrics`);
query(`CREATE INDEX IF NOT EXISTS idx_metrics_service_timestamp ON metrics(service_id, timestamp DESC)`);
// Logs
query(`
CREATE TABLE logs_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
timestamp REAL NOT NULL,
message TEXT NOT NULL,
level TEXT NOT NULL,
source TEXT NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
query(`INSERT INTO logs_new SELECT * FROM logs`);
query(`DROP TABLE logs`);
query(`ALTER TABLE logs_new RENAME TO logs`);
query(`CREATE INDEX IF NOT EXISTS idx_logs_service_timestamp ON logs(service_id, timestamp DESC)`);
// Users
query(`
CREATE TABLE users_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
username TEXT NOT NULL UNIQUE,
password_hash TEXT NOT NULL,
role TEXT NOT NULL DEFAULT 'user',
created_at REAL NOT NULL,
updated_at REAL NOT NULL
)
`);
query(`INSERT INTO users_new SELECT * FROM users`);
query(`DROP TABLE users`);
query(`ALTER TABLE users_new RENAME TO users`);
// Settings
query(`
CREATE TABLE settings_new (
key TEXT PRIMARY KEY,
value TEXT NOT NULL,
updated_at REAL NOT NULL
)
`);
query(`INSERT INTO settings_new SELECT * FROM settings`);
query(`DROP TABLE settings`);
query(`ALTER TABLE settings_new RENAME TO settings`);
// Migrations table itself
query(`
CREATE TABLE migrations_new (
version INTEGER PRIMARY KEY,
applied_at REAL NOT NULL
)
`);
query(`INSERT INTO migrations_new SELECT * FROM migrations`);
query(`DROP TABLE migrations`);
query(`ALTER TABLE migrations_new RENAME TO migrations`);
}
}

View File

@@ -0,0 +1,125 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration003DomainManagement extends BaseMigration {
readonly version = 3;
readonly description = 'Domain management tables';
up(query: TQueryFunction): void {
query(`
CREATE TABLE domains (
id INTEGER PRIMARY KEY AUTOINCREMENT,
domain TEXT NOT NULL UNIQUE,
dns_provider TEXT,
cloudflare_zone_id TEXT,
is_obsolete INTEGER NOT NULL DEFAULT 0,
default_wildcard INTEGER NOT NULL DEFAULT 1,
created_at REAL NOT NULL,
updated_at REAL NOT NULL
)
`);
query(`
CREATE TABLE certificates (
id INTEGER PRIMARY KEY AUTOINCREMENT,
domain_id INTEGER NOT NULL,
cert_domain TEXT NOT NULL,
is_wildcard INTEGER NOT NULL DEFAULT 0,
cert_path TEXT NOT NULL,
key_path TEXT NOT NULL,
full_chain_path TEXT NOT NULL,
expiry_date REAL NOT NULL,
issuer TEXT NOT NULL,
is_valid INTEGER NOT NULL DEFAULT 1,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (domain_id) REFERENCES domains(id) ON DELETE CASCADE
)
`);
query(`
CREATE TABLE cert_requirements (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
domain_id INTEGER NOT NULL,
subdomain TEXT NOT NULL,
certificate_id INTEGER,
status TEXT NOT NULL DEFAULT 'pending',
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE,
FOREIGN KEY (domain_id) REFERENCES domains(id) ON DELETE CASCADE,
FOREIGN KEY (certificate_id) REFERENCES certificates(id) ON DELETE SET NULL
)
`);
// Migrate data from old ssl_certificates table
interface OldSslCert {
id?: number;
domain?: string;
cert_path?: string;
key_path?: string;
full_chain_path?: string;
expiry_date?: number;
issuer?: string;
created_at?: number;
updated_at?: number;
[key: number]: unknown;
}
const existingCerts = query<OldSslCert>('SELECT * FROM ssl_certificates');
const now = Date.now();
const domainMap = new Map<string, number>();
for (const cert of existingCerts) {
const domain = String(cert.domain ?? (cert as Record<number, unknown>)[1]);
if (!domainMap.has(domain)) {
query(
'INSERT INTO domains (domain, dns_provider, is_obsolete, default_wildcard, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?)',
[domain, null, 0, 1, now, now],
);
const result = query<{ id?: number; [key: number]: unknown }>(
'SELECT last_insert_rowid() as id',
);
const domainId = result[0].id ?? (result[0] as Record<number, unknown>)[0];
domainMap.set(domain, Number(domainId));
}
}
for (const cert of existingCerts) {
const domain = String(cert.domain ?? (cert as Record<number, unknown>)[1]);
const domainId = domainMap.get(domain);
query(
`INSERT INTO certificates (
domain_id, cert_domain, is_wildcard, cert_path, key_path, full_chain_path,
expiry_date, issuer, is_valid, created_at, updated_at
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
[
domainId,
domain,
0,
String(cert.cert_path ?? (cert as Record<number, unknown>)[2]),
String(cert.key_path ?? (cert as Record<number, unknown>)[3]),
String(cert.full_chain_path ?? (cert as Record<number, unknown>)[4]),
Number(cert.expiry_date ?? (cert as Record<number, unknown>)[5]),
String(cert.issuer ?? (cert as Record<number, unknown>)[6]),
1,
Number(cert.created_at ?? (cert as Record<number, unknown>)[7]),
Number(cert.updated_at ?? (cert as Record<number, unknown>)[8]),
],
);
}
query('DROP TABLE ssl_certificates');
query('CREATE INDEX IF NOT EXISTS idx_domains_cloudflare_zone ON domains(cloudflare_zone_id)');
query('CREATE INDEX IF NOT EXISTS idx_certificates_domain ON certificates(domain_id)');
query('CREATE INDEX IF NOT EXISTS idx_certificates_expiry ON certificates(expiry_date)');
query(
'CREATE INDEX IF NOT EXISTS idx_cert_requirements_service ON cert_requirements(service_id)',
);
query(
'CREATE INDEX IF NOT EXISTS idx_cert_requirements_domain ON cert_requirements(domain_id)',
);
}
}

View File

@@ -0,0 +1,16 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration004RegistryColumns extends BaseMigration {
readonly version = 4;
readonly description = 'Add Onebox Registry columns to services table';
up(query: TQueryFunction): void {
query(`ALTER TABLE services ADD COLUMN use_onebox_registry INTEGER DEFAULT 0`);
query(`ALTER TABLE services ADD COLUMN registry_repository TEXT`);
query(`ALTER TABLE services ADD COLUMN registry_token TEXT`);
query(`ALTER TABLE services ADD COLUMN registry_image_tag TEXT DEFAULT 'latest'`);
query(`ALTER TABLE services ADD COLUMN auto_update_on_push INTEGER DEFAULT 0`);
query(`ALTER TABLE services ADD COLUMN image_digest TEXT`);
}
}

View File

@@ -0,0 +1,30 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration005RegistryTokens extends BaseMigration {
readonly version = 5;
readonly description = 'Registry tokens table';
up(query: TQueryFunction): void {
query(`
CREATE TABLE registry_tokens (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
token_hash TEXT NOT NULL UNIQUE,
token_type TEXT NOT NULL,
scope TEXT NOT NULL,
expires_at REAL,
created_at REAL NOT NULL,
last_used_at REAL,
created_by TEXT NOT NULL
)
`);
query(
'CREATE INDEX IF NOT EXISTS idx_registry_tokens_type ON registry_tokens(token_type)',
);
query(
'CREATE INDEX IF NOT EXISTS idx_registry_tokens_hash ON registry_tokens(token_hash)',
);
}
}

View File

@@ -0,0 +1,48 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration006DropRegistryToken extends BaseMigration {
readonly version = 6;
readonly description = 'Drop registry_token column from services table';
up(query: TQueryFunction): void {
query(`
CREATE TABLE services_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL UNIQUE,
image TEXT NOT NULL,
registry TEXT,
env_vars TEXT,
port INTEGER NOT NULL,
domain TEXT,
container_id TEXT,
status TEXT NOT NULL,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
use_onebox_registry INTEGER DEFAULT 0,
registry_repository TEXT,
registry_image_tag TEXT DEFAULT 'latest',
auto_update_on_push INTEGER DEFAULT 0,
image_digest TEXT
)
`);
query(`
INSERT INTO services_new (
id, name, image, registry, env_vars, port, domain, container_id, status,
created_at, updated_at, use_onebox_registry, registry_repository,
registry_image_tag, auto_update_on_push, image_digest
)
SELECT
id, name, image, registry, env_vars, port, domain, container_id, status,
created_at, updated_at, use_onebox_registry, registry_repository,
registry_image_tag, auto_update_on_push, image_digest
FROM services
`);
query('DROP TABLE services');
query('ALTER TABLE services_new RENAME TO services');
query('CREATE INDEX IF NOT EXISTS idx_services_name ON services(name)');
query('CREATE INDEX IF NOT EXISTS idx_services_status ON services(status)');
}
}

View File

@@ -0,0 +1,49 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration007PlatformServices extends BaseMigration {
readonly version = 7;
readonly description = 'Platform services tables';
up(query: TQueryFunction): void {
query(`
CREATE TABLE platform_services (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL UNIQUE,
type TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'stopped',
container_id TEXT,
config TEXT NOT NULL DEFAULT '{}',
admin_credentials_encrypted TEXT,
created_at REAL NOT NULL,
updated_at REAL NOT NULL
)
`);
query(`
CREATE TABLE platform_resources (
id INTEGER PRIMARY KEY AUTOINCREMENT,
platform_service_id INTEGER NOT NULL,
service_id INTEGER NOT NULL,
resource_type TEXT NOT NULL,
resource_name TEXT NOT NULL,
credentials_encrypted TEXT NOT NULL,
created_at REAL NOT NULL,
FOREIGN KEY (platform_service_id) REFERENCES platform_services(id) ON DELETE CASCADE,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
query(`ALTER TABLE services ADD COLUMN platform_requirements TEXT DEFAULT '{}'`);
query(
'CREATE INDEX IF NOT EXISTS idx_platform_services_type ON platform_services(type)',
);
query(
'CREATE INDEX IF NOT EXISTS idx_platform_resources_service ON platform_resources(service_id)',
);
query(
'CREATE INDEX IF NOT EXISTS idx_platform_resources_platform ON platform_resources(platform_service_id)',
);
}
}

View File

@@ -0,0 +1,41 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration008CertPemContent extends BaseMigration {
readonly version = 8;
readonly description = 'Convert certificates table to store PEM content';
up(query: TQueryFunction): void {
query(`
CREATE TABLE certificates_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
domain_id INTEGER NOT NULL,
cert_domain TEXT NOT NULL,
is_wildcard INTEGER NOT NULL DEFAULT 0,
cert_pem TEXT NOT NULL DEFAULT '',
key_pem TEXT NOT NULL DEFAULT '',
fullchain_pem TEXT NOT NULL DEFAULT '',
expiry_date REAL NOT NULL,
issuer TEXT NOT NULL,
is_valid INTEGER NOT NULL DEFAULT 1,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (domain_id) REFERENCES domains(id) ON DELETE CASCADE
)
`);
query(`
INSERT INTO certificates_new (id, domain_id, cert_domain, is_wildcard, cert_pem, key_pem, fullchain_pem, expiry_date, issuer, is_valid, created_at, updated_at)
SELECT id, domain_id, cert_domain, is_wildcard, '', '', '', expiry_date, issuer, 0, created_at, updated_at FROM certificates
`);
query('DROP TABLE certificates');
query('ALTER TABLE certificates_new RENAME TO certificates');
query(
'CREATE INDEX IF NOT EXISTS idx_certificates_domain ON certificates(domain_id)',
);
query(
'CREATE INDEX IF NOT EXISTS idx_certificates_expiry ON certificates(expiry_date)',
);
}
}

View File

@@ -0,0 +1,29 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration009BackupSystem extends BaseMigration {
readonly version = 9;
readonly description = 'Backup system tables';
up(query: TQueryFunction): void {
query(`ALTER TABLE services ADD COLUMN include_image_in_backup INTEGER DEFAULT 1`);
query(`
CREATE TABLE backups (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
service_name TEXT NOT NULL,
filename TEXT NOT NULL,
size_bytes INTEGER NOT NULL,
created_at REAL NOT NULL,
includes_image INTEGER NOT NULL,
platform_resources TEXT NOT NULL DEFAULT '[]',
checksum TEXT NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
query('CREATE INDEX IF NOT EXISTS idx_backups_service ON backups(service_id)');
query('CREATE INDEX IF NOT EXISTS idx_backups_created ON backups(created_at DESC)');
}
}

View File

@@ -0,0 +1,39 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration010BackupSchedules extends BaseMigration {
readonly version = 10;
readonly description = 'Backup schedules table';
up(query: TQueryFunction): void {
query(`
CREATE TABLE backup_schedules (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
service_name TEXT NOT NULL,
cron_expression TEXT NOT NULL,
retention_tier TEXT NOT NULL,
enabled INTEGER NOT NULL DEFAULT 1,
last_run_at REAL,
next_run_at REAL,
last_status TEXT,
last_error TEXT,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
query(
'CREATE INDEX IF NOT EXISTS idx_backup_schedules_service ON backup_schedules(service_id)',
);
query(
'CREATE INDEX IF NOT EXISTS idx_backup_schedules_enabled ON backup_schedules(enabled)',
);
query('ALTER TABLE backups ADD COLUMN retention_tier TEXT');
query(
'ALTER TABLE backups ADD COLUMN schedule_id INTEGER REFERENCES backup_schedules(id) ON DELETE SET NULL',
);
}
}

View File

@@ -0,0 +1,54 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration011ScopeColumns extends BaseMigration {
readonly version = 11;
readonly description = 'Add scope columns to backup_schedules';
up(query: TQueryFunction): void {
query(`
CREATE TABLE backup_schedules_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
scope_type TEXT NOT NULL DEFAULT 'service',
scope_pattern TEXT,
service_id INTEGER,
service_name TEXT,
cron_expression TEXT NOT NULL,
retention_tier TEXT NOT NULL,
enabled INTEGER NOT NULL DEFAULT 1,
last_run_at REAL,
next_run_at REAL,
last_status TEXT,
last_error TEXT,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
query(`
INSERT INTO backup_schedules_new (
id, scope_type, scope_pattern, service_id, service_name, cron_expression,
retention_tier, enabled, last_run_at, next_run_at, last_status, last_error,
created_at, updated_at
)
SELECT
id, 'service', NULL, service_id, service_name, cron_expression,
retention_tier, enabled, last_run_at, next_run_at, last_status, last_error,
created_at, updated_at
FROM backup_schedules
`);
query('DROP TABLE backup_schedules');
query('ALTER TABLE backup_schedules_new RENAME TO backup_schedules');
query(
'CREATE INDEX IF NOT EXISTS idx_backup_schedules_service ON backup_schedules(service_id)',
);
query(
'CREATE INDEX IF NOT EXISTS idx_backup_schedules_enabled ON backup_schedules(enabled)',
);
query(
'CREATE INDEX IF NOT EXISTS idx_backup_schedules_scope ON backup_schedules(scope_type)',
);
}
}

View File

@@ -0,0 +1,97 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration012GfsRetention extends BaseMigration {
readonly version = 12;
readonly description = 'GFS retention policy schema';
up(query: TQueryFunction): void {
// Recreate backup_schedules with GFS retention columns
query(`
CREATE TABLE backup_schedules_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
scope_type TEXT NOT NULL DEFAULT 'service',
scope_pattern TEXT,
service_id INTEGER,
service_name TEXT,
cron_expression TEXT NOT NULL,
retention_hourly INTEGER NOT NULL DEFAULT 0,
retention_daily INTEGER NOT NULL DEFAULT 7,
retention_weekly INTEGER NOT NULL DEFAULT 4,
retention_monthly INTEGER NOT NULL DEFAULT 12,
enabled INTEGER NOT NULL DEFAULT 1,
last_run_at REAL,
next_run_at REAL,
last_status TEXT,
last_error TEXT,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
// Migrate existing data - convert old retention_tier to new format
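// Resulting counts (hourly/daily/weekly/monthly): daily -> 0/7/4/12, weekly -> 0/0/4/12, monthly -> 0/0/0/12, yearly -> 0/0/0/24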
query(`
INSERT INTO backup_schedules_new (
id, scope_type, scope_pattern, service_id, service_name, cron_expression,
retention_hourly, retention_daily, retention_weekly, retention_monthly,
enabled, last_run_at, next_run_at, last_status, last_error, created_at, updated_at
)
SELECT
id, scope_type, scope_pattern, service_id, service_name, cron_expression,
0,
CASE WHEN retention_tier = 'daily' THEN 7 ELSE 0 END,
CASE WHEN retention_tier IN ('daily', 'weekly') THEN 4 ELSE 0 END,
CASE WHEN retention_tier IN ('daily', 'weekly', 'monthly') THEN 12
WHEN retention_tier = 'yearly' THEN 24 ELSE 12 END,
enabled, last_run_at, next_run_at, last_status, last_error, created_at, updated_at
FROM backup_schedules
`);
query('DROP TABLE backup_schedules');
query('ALTER TABLE backup_schedules_new RENAME TO backup_schedules');
query(
'CREATE INDEX IF NOT EXISTS idx_backup_schedules_service ON backup_schedules(service_id)',
);
query(
'CREATE INDEX IF NOT EXISTS idx_backup_schedules_enabled ON backup_schedules(enabled)',
);
query(
'CREATE INDEX IF NOT EXISTS idx_backup_schedules_scope ON backup_schedules(scope_type)',
);
// Recreate backups table without retention_tier column
query(`
CREATE TABLE backups_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
service_name TEXT NOT NULL,
filename TEXT NOT NULL,
size_bytes INTEGER NOT NULL,
created_at REAL NOT NULL,
includes_image INTEGER NOT NULL,
platform_resources TEXT NOT NULL DEFAULT '[]',
checksum TEXT NOT NULL,
schedule_id INTEGER REFERENCES backup_schedules(id) ON DELETE SET NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
query(`
INSERT INTO backups_new (
id, service_id, service_name, filename, size_bytes, created_at,
includes_image, platform_resources, checksum, schedule_id
)
SELECT
id, service_id, service_name, filename, size_bytes, created_at,
includes_image, platform_resources, checksum, schedule_id
FROM backups
`);
query('DROP TABLE backups');
query('ALTER TABLE backups_new RENAME TO backups');
query('CREATE INDEX IF NOT EXISTS idx_backups_service ON backups(service_id)');
query('CREATE INDEX IF NOT EXISTS idx_backups_created ON backups(created_at DESC)');
query('CREATE INDEX IF NOT EXISTS idx_backups_schedule ON backups(schedule_id)');
}
}

View File

@@ -0,0 +1,12 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration013AppTemplateVersion extends BaseMigration {
readonly version = 13;
readonly description = 'Add app template tracking columns to services';
up(query: TQueryFunction): void {
query('ALTER TABLE services ADD COLUMN app_template_id TEXT');
query('ALTER TABLE services ADD COLUMN app_template_version TEXT');
}
}

View File

@@ -0,0 +1,13 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration014ContainerArchive extends BaseMigration {
readonly version = 14;
readonly description = 'Add containerarchive snapshot tracking to backups';
up(query: TQueryFunction): void {
query('ALTER TABLE backups ADD COLUMN snapshot_id TEXT');
query('ALTER TABLE backups ADD COLUMN stored_size_bytes INTEGER DEFAULT 0');
query('CREATE INDEX IF NOT EXISTS idx_backups_snapshot ON backups(snapshot_id)');
}
}

View File

@@ -0,0 +1,104 @@
/**
* Migration runner - discovers, orders, and executes database migrations.
* Mirrors the pattern from @serve.zone/nupst.
*/
import type { TQueryFunction } from '../types.ts';
import { logger } from '../../logging.ts';
import { getErrorMessage } from '../../utils/error.ts';
import { Migration001Initial } from './migration-001-initial.ts';
import { Migration002TimestampsToReal } from './migration-002-timestamps-to-real.ts';
import { Migration003DomainManagement } from './migration-003-domain-management.ts';
import { Migration004RegistryColumns } from './migration-004-registry-columns.ts';
import { Migration005RegistryTokens } from './migration-005-registry-tokens.ts';
import { Migration006DropRegistryToken } from './migration-006-drop-registry-token.ts';
import { Migration007PlatformServices } from './migration-007-platform-services.ts';
import { Migration008CertPemContent } from './migration-008-cert-pem-content.ts';
import { Migration009BackupSystem } from './migration-009-backup-system.ts';
import { Migration010BackupSchedules } from './migration-010-backup-schedules.ts';
import { Migration011ScopeColumns } from './migration-011-scope-columns.ts';
import { Migration012GfsRetention } from './migration-012-gfs-retention.ts';
import { Migration013AppTemplateVersion } from './migration-013-app-template-version.ts';
import { Migration014ContainerArchive } from './migration-014-containerarchive.ts';
import type { BaseMigration } from './base-migration.ts';
export class MigrationRunner {
private query: TQueryFunction;
private migrations: BaseMigration[];
constructor(query: TQueryFunction) {
this.query = query;
// Register all migrations in order
this.migrations = [
new Migration001Initial(),
new Migration002TimestampsToReal(),
new Migration003DomainManagement(),
new Migration004RegistryColumns(),
new Migration005RegistryTokens(),
new Migration006DropRegistryToken(),
new Migration007PlatformServices(),
new Migration008CertPemContent(),
new Migration009BackupSystem(),
new Migration010BackupSchedules(),
new Migration011ScopeColumns(),
new Migration012GfsRetention(),
new Migration013AppTemplateVersion(),
new Migration014ContainerArchive(),
].sort((a, b) => a.version - b.version);
}
/** Run all pending migrations */
run(): void {
try {
const currentVersion = this.getMigrationVersion();
logger.info(`Current database migration version: ${currentVersion}`);
let applied = 0;
for (const migration of this.migrations) {
if (migration.version <= currentVersion) continue;
logger.info(`Running ${migration.getName()}...`);
migration.up(this.query);
this.setMigrationVersion(migration.version);
logger.success(`${migration.getName()} completed`);
applied++;
}
if (applied > 0) {
logger.success(`Applied ${applied} migration(s)`);
}
} catch (error) {
logger.error(`Migration failed: ${getErrorMessage(error)}`);
if (error instanceof Error && error.stack) {
logger.error(`Stack: ${error.stack}`);
}
throw error;
}
}
/** Get current migration version from the migrations table */
private getMigrationVersion(): number {
try {
const result = this.query<{ version?: number | null; [key: number]: unknown }>(
'SELECT MAX(version) as version FROM migrations',
);
if (result.length === 0) return 0;
const versionValue = result[0].version ?? (result[0] as Record<number, unknown>)[0];
return versionValue !== null && versionValue !== undefined ? Number(versionValue) : 0;
} catch {
// Table might not exist yet on fresh databases
return 0;
}
}
/** Record a migration version as applied */
private setMigrationVersion(version: number): void {
this.query('INSERT INTO migrations (version, applied_at) VALUES (?, ?)', [
version,
Date.now(),
]);
}
}
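For orientation, a minimal wiring sketch (the bootstrapMigrations name and the CREATE TABLE bootstrap are assumptions, not code from this changeset): the runner only needs a TQueryFunction and a pre-existing migrations bookkeeping table.

// Sketch only: adapt an existing synchronous query helper and run pending migrations.
import { MigrationRunner } from './migrations/index.ts';
import type { TQueryFunction } from './types.ts';

function bootstrapMigrations(query: TQueryFunction): void {
  // The runner reads MAX(version) from this table, so it must exist first
  // (assumed to be handled by createTables() in the real database class).
  query(`CREATE TABLE IF NOT EXISTS migrations (version INTEGER PRIMARY KEY, applied_at REAL NOT NULL)`);
  new MigrationRunner(query).run();
}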

View File

@@ -20,8 +20,9 @@ export class BackupRepository extends BaseRepository {
this.query(
`INSERT INTO backups (
service_id, service_name, filename, size_bytes, created_at,
- includes_image, platform_resources, checksum, schedule_id
- ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+ includes_image, platform_resources, checksum, schedule_id,
+ snapshot_id, stored_size_bytes
+ ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
[
backup.serviceId,
backup.serviceName,
@@ -32,6 +33,8 @@ export class BackupRepository extends BaseRepository {
JSON.stringify(backup.platformResources),
backup.checksum,
backup.scheduleId ?? null,
+ backup.snapshotId ?? null,
+ backup.storedSizeBytes ?? 0,
]
);
@@ -78,6 +81,14 @@ export class BackupRepository extends BaseRepository {
return rows.map((row) => this.rowToBackup(row));
}
+ getBySnapshotId(snapshotId: string): IBackup | null {
+ const rows = this.query(
+ 'SELECT * FROM backups WHERE snapshot_id = ?',
+ [snapshotId]
+ );
+ return rows.length > 0 ? this.rowToBackup(rows[0]) : null;
+ }
private rowToBackup(row: any): IBackup {
let platformResources: TPlatformServiceType[] = [];
const platformResourcesRaw = row.platform_resources;
@@ -94,7 +105,9 @@ export class BackupRepository extends BaseRepository {
serviceId: Number(row.service_id),
serviceName: String(row.service_name),
filename: String(row.filename),
+ snapshotId: row.snapshot_id ? String(row.snapshot_id) : undefined,
sizeBytes: Number(row.size_bytes),
+ storedSizeBytes: row.stored_size_bytes ? Number(row.stored_size_bytes) : undefined,
createdAt: Number(row.created_at),
includesImage: Boolean(row.includes_image),
platformResources,

View File

@@ -17,8 +17,9 @@ export class ServiceRepository extends BaseRepository {
name, image, registry, env_vars, port, domain, container_id, status,
created_at, updated_at,
use_onebox_registry, registry_repository, registry_image_tag,
- auto_update_on_push, image_digest, platform_requirements
- ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+ auto_update_on_push, image_digest, platform_requirements,
+ app_template_id, app_template_version
+ ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
[
service.name,
service.image,
@@ -36,6 +37,8 @@ export class ServiceRepository extends BaseRepository {
service.autoUpdateOnPush ? 1 : 0,
service.imageDigest || null,
JSON.stringify(service.platformRequirements || {}),
+ service.appTemplateId || null,
+ service.appTemplateVersion || null,
]
);
@@ -123,6 +126,14 @@ export class ServiceRepository extends BaseRepository {
fields.push('include_image_in_backup = ?');
values.push(updates.includeImageInBackup ? 1 : 0);
}
+ if (updates.appTemplateId !== undefined) {
+ fields.push('app_template_id = ?');
+ values.push(updates.appTemplateId);
+ }
+ if (updates.appTemplateVersion !== undefined) {
+ fields.push('app_template_version = ?');
+ values.push(updates.appTemplateVersion);
+ }
fields.push('updated_at = ?');
values.push(Date.now());
@@ -179,6 +190,8 @@ export class ServiceRepository extends BaseRepository {
includeImageInBackup: row.include_image_in_backup !== undefined
? Boolean(row.include_image_in_backup)
: true, // Default to true
+ appTemplateId: row.app_template_id ? String(row.app_template_id) : undefined,
+ appTemplateVersion: row.app_template_version ? String(row.app_template_version) : undefined,
};
}
}

View File

@@ -12,6 +12,7 @@ export { OneboxReverseProxy } from './classes/reverseproxy.ts';
export { OneboxDnsManager } from './classes/dns.ts';
export { OneboxSslManager } from './classes/ssl.ts';
export { OneboxDaemon } from './classes/daemon.ts';
+ export { OneboxSystemd } from './classes/systemd.ts';
export { OneboxHttpServer } from './classes/httpserver.ts';
export { OneboxApiClient } from './classes/apiclient.ts';

View File

@@ -0,0 +1,80 @@
import * as plugins from '../plugins.ts';
import { logger } from '../logging.ts';
import type { Onebox } from '../classes/onebox.ts';
import * as handlers from './handlers/index.ts';
import { files as bundledFiles } from '../../ts_bundled/bundle.ts';
export class OpsServer {
public oneboxRef: Onebox;
public typedrouter = new plugins.typedrequest.TypedRouter();
public server!: plugins.typedserver.utilityservers.UtilityWebsiteServer;
// Handler instances
public adminHandler!: handlers.AdminHandler;
public statusHandler!: handlers.StatusHandler;
public servicesHandler!: handlers.ServicesHandler;
public platformHandler!: handlers.PlatformHandler;
public sslHandler!: handlers.SslHandler;
public domainsHandler!: handlers.DomainsHandler;
public dnsHandler!: handlers.DnsHandler;
public registryHandler!: handlers.RegistryHandler;
public networkHandler!: handlers.NetworkHandler;
public backupsHandler!: handlers.BackupsHandler;
public schedulesHandler!: handlers.SchedulesHandler;
public settingsHandler!: handlers.SettingsHandler;
public logsHandler!: handlers.LogsHandler;
public workspaceHandler!: handlers.WorkspaceHandler;
public appStoreHandler!: handlers.AppStoreHandler;
constructor(oneboxRef: Onebox) {
this.oneboxRef = oneboxRef;
}
public async start(port = 3000) {
this.server = new plugins.typedserver.utilityservers.UtilityWebsiteServer({
domain: 'localhost',
feedMetadata: undefined,
bundledContent: bundledFiles,
});
// Chain typedrouters: server -> opsServer -> individual handlers
this.server.typedrouter.addTypedRouter(this.typedrouter);
// Set up all handlers
await this.setupHandlers();
await this.server.start(port);
logger.success(`OpsServer started on http://localhost:${port}`);
}
private async setupHandlers(): Promise<void> {
// AdminHandler requires async initialization for JWT key generation
this.adminHandler = new handlers.AdminHandler(this);
await this.adminHandler.initialize();
// All other handlers self-register in their constructors
this.statusHandler = new handlers.StatusHandler(this);
this.servicesHandler = new handlers.ServicesHandler(this);
this.platformHandler = new handlers.PlatformHandler(this);
this.sslHandler = new handlers.SslHandler(this);
this.domainsHandler = new handlers.DomainsHandler(this);
this.dnsHandler = new handlers.DnsHandler(this);
this.registryHandler = new handlers.RegistryHandler(this);
this.networkHandler = new handlers.NetworkHandler(this);
this.backupsHandler = new handlers.BackupsHandler(this);
this.schedulesHandler = new handlers.SchedulesHandler(this);
this.settingsHandler = new handlers.SettingsHandler(this);
this.logsHandler = new handlers.LogsHandler(this);
this.workspaceHandler = new handlers.WorkspaceHandler(this);
this.appStoreHandler = new handlers.AppStoreHandler(this);
logger.success('OpsServer TypedRequest handlers initialized');
}
public async stop() {
if (this.server) {
await this.server.stop();
logger.success('OpsServer stopped');
}
}
}
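As a rough usage sketch (the Onebox construction details are not part of this diff and are assumed here), the server is driven from a parent Onebox instance:

// Sketch only - exact Onebox bootstrapping is an assumption.
import { Onebox } from '../classes/onebox.ts';
import { OpsServer } from './classes.opsserver.ts';

const onebox = new Onebox(); // assumed constructor signature
const opsServer = new OpsServer(onebox);
await opsServer.start(3000); // serves the bundled UI and TypedRequest API on http://localhost:3000
// ... later, on shutdown
await opsServer.stop();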

View File

@@ -0,0 +1,175 @@
import * as plugins from '../../plugins.ts';
import { logger } from '../../logging.ts';
import type { OpsServer } from '../classes.opsserver.ts';
import * as interfaces from '../../../ts_interfaces/index.ts';
export interface IJwtData {
userId: string;
status: 'loggedIn' | 'loggedOut';
expiresAt: number;
}
export class AdminHandler {
public typedrouter = new plugins.typedrequest.TypedRouter();
public smartjwtInstance!: plugins.smartjwt.SmartJwt<IJwtData>;
constructor(private opsServerRef: OpsServer) {
this.opsServerRef.typedrouter.addTypedRouter(this.typedrouter);
}
public async initialize(): Promise<void> {
this.smartjwtInstance = new plugins.smartjwt.SmartJwt();
await this.smartjwtInstance.init();
await this.smartjwtInstance.createNewKeyPair();
this.registerHandlers();
}
private registerHandlers(): void {
// Login
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_AdminLoginWithUsernameAndPassword>(
'adminLoginWithUsernameAndPassword',
async (dataArg) => {
try {
const user = this.opsServerRef.oneboxRef.database.getUserByUsername(dataArg.username);
if (!user) {
throw new plugins.typedrequest.TypedResponseError('Invalid credentials');
}
// Verify password (base64 comparison to match existing DB scheme)
const passwordHash = btoa(dataArg.password);
if (passwordHash !== user.passwordHash) {
throw new plugins.typedrequest.TypedResponseError('Invalid credentials');
}
const expiresAt = Date.now() + 24 * 3600 * 1000;
const userId = String(user.id || user.username);
const jwt = await this.smartjwtInstance.createJWT({
userId,
status: 'loggedIn',
expiresAt,
});
logger.info(`User logged in: ${user.username}`);
return {
identity: {
jwt,
userId,
username: user.username,
expiresAt,
role: user.role,
},
};
} catch (error) {
if (error instanceof plugins.typedrequest.TypedResponseError) throw error;
throw new plugins.typedrequest.TypedResponseError('Login failed');
}
},
),
);
// Logout
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_AdminLogout>(
'adminLogout',
async (_dataArg) => {
return { ok: true };
},
),
);
// Verify Identity
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_VerifyIdentity>(
'verifyIdentity',
async (dataArg) => {
if (!dataArg.identity?.jwt) {
return { valid: false };
}
try {
const jwtData = await this.smartjwtInstance.verifyJWTAndGetData(dataArg.identity.jwt);
if (jwtData.expiresAt < Date.now()) return { valid: false };
if (jwtData.status !== 'loggedIn') return { valid: false };
return {
valid: true,
identity: {
jwt: dataArg.identity.jwt,
userId: jwtData.userId,
username: dataArg.identity.username,
expiresAt: jwtData.expiresAt,
role: dataArg.identity.role,
},
};
} catch {
return { valid: false };
}
},
),
);
// Change Password
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_ChangePassword>(
'changePassword',
async (dataArg) => {
await this.requireValidIdentity(dataArg);
const user = this.opsServerRef.oneboxRef.database.getUserByUsername(dataArg.identity.username);
if (!user) {
throw new plugins.typedrequest.TypedResponseError('User not found');
}
const currentHash = btoa(dataArg.currentPassword);
if (currentHash !== user.passwordHash) {
throw new plugins.typedrequest.TypedResponseError('Current password is incorrect');
}
const newHash = btoa(dataArg.newPassword);
this.opsServerRef.oneboxRef.database.updateUserPassword(user.username, newHash);
logger.info(`Password changed for user: ${user.username}`);
return { ok: true };
},
),
);
}
private async requireValidIdentity(dataArg: { identity: interfaces.data.IIdentity }): Promise<void> {
const passed = await this.validIdentityGuard.exec({ identity: dataArg.identity });
if (!passed) {
throw new plugins.typedrequest.TypedResponseError('Valid identity required');
}
}
// Guard for valid identity
public validIdentityGuard = new plugins.smartguard.Guard<{
identity: interfaces.data.IIdentity;
}>(
async (dataArg) => {
if (!dataArg.identity?.jwt) return false;
try {
const jwtData = await this.smartjwtInstance.verifyJWTAndGetData(dataArg.identity.jwt);
if (jwtData.expiresAt < Date.now()) return false;
if (jwtData.status !== 'loggedIn') return false;
if (dataArg.identity.expiresAt !== jwtData.expiresAt) return false;
if (dataArg.identity.userId !== jwtData.userId) return false;
return true;
} catch {
return false;
}
},
{ failedHint: 'identity is not valid', name: 'validIdentityGuard' },
);
// Guard for admin identity
public adminIdentityGuard = new plugins.smartguard.Guard<{
identity: interfaces.data.IIdentity;
}>(
async (dataArg) => {
const isValid = await this.validIdentityGuard.exec(dataArg);
if (!isValid) return false;
return dataArg.identity.role === 'admin';
},
{ failedHint: 'user is not admin', name: 'adminIdentityGuard' },
);
}
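The handlers that follow import a requireValidIdentity helper from helpers/guards.ts, which is not included in this excerpt. A plausible sketch, assuming it simply delegates to the AdminHandler guard shown above and throws on failure:

// Hypothetical reconstruction of helpers/guards.ts - the real file is not shown in this diff.
import * as plugins from '../../plugins.ts';
import type * as interfaces from '../../../ts_interfaces/index.ts';
import type { AdminHandler } from '../handlers/handler.admin.ts'; // assumed path

export async function requireValidIdentity(
  adminHandler: AdminHandler,
  dataArg: { identity: interfaces.data.IIdentity },
): Promise<void> {
  const passed = await adminHandler.validIdentityGuard.exec({ identity: dataArg.identity });
  if (!passed) {
    throw new plugins.typedrequest.TypedResponseError('Valid identity required');
  }
}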

View File

@@ -0,0 +1,104 @@
import * as plugins from '../../plugins.ts';
import { logger } from '../../logging.ts';
import type { OpsServer } from '../classes.opsserver.ts';
import * as interfaces from '../../../ts_interfaces/index.ts';
import { requireValidIdentity } from '../helpers/guards.ts';
export class AppStoreHandler {
public typedrouter = new plugins.typedrequest.TypedRouter();
constructor(private opsServerRef: OpsServer) {
this.opsServerRef.typedrouter.addTypedRouter(this.typedrouter);
this.registerHandlers();
}
private registerHandlers(): void {
// Get app templates (catalog)
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetAppTemplates>(
'getAppTemplates',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const apps = await this.opsServerRef.oneboxRef.appStore.getApps();
return { apps };
},
),
);
// Get app config for a specific version
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetAppConfig>(
'getAppConfig',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const config = await this.opsServerRef.oneboxRef.appStore.getAppVersionConfig(
dataArg.appId,
dataArg.version,
);
const appMeta = await this.opsServerRef.oneboxRef.appStore.getAppMeta(dataArg.appId);
return { config, appMeta };
},
),
);
// Get services with available upgrades
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetUpgradeableServices>(
'getUpgradeableServices',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const services = await this.opsServerRef.oneboxRef.appStore.getUpgradeableServices();
return { services };
},
),
);
// Upgrade a service to a new template version
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_UpgradeService>(
'upgradeService',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const existingService = this.opsServerRef.oneboxRef.database.getServiceByName(dataArg.serviceName);
if (!existingService) {
throw new plugins.typedrequest.TypedResponseError(`Service not found: ${dataArg.serviceName}`);
}
if (!existingService.appTemplateId) {
throw new plugins.typedrequest.TypedResponseError('Service was not deployed from an app template');
}
if (!existingService.appTemplateVersion) {
throw new plugins.typedrequest.TypedResponseError('Service has no tracked template version');
}
logger.info(`Upgrading service '${dataArg.serviceName}' from v${existingService.appTemplateVersion} to v${dataArg.targetVersion}`);
// Execute migration
const migrationResult = await this.opsServerRef.oneboxRef.appStore.executeMigration(
existingService,
existingService.appTemplateVersion,
dataArg.targetVersion,
);
if (!migrationResult.success) {
throw new plugins.typedrequest.TypedResponseError(
`Migration failed: ${migrationResult.warnings.join('; ')}`,
);
}
// Apply the upgrade
const updatedService = await this.opsServerRef.oneboxRef.appStore.applyUpgrade(
dataArg.serviceName,
migrationResult,
dataArg.targetVersion,
);
return {
service: updatedService,
warnings: migrationResult.warnings,
};
},
),
);
}
}

View File

@@ -0,0 +1,93 @@
import * as plugins from '../../plugins.ts';
import type { OpsServer } from '../classes.opsserver.ts';
import * as interfaces from '../../../ts_interfaces/index.ts';
import { requireValidIdentity } from '../helpers/guards.ts';
export class BackupsHandler {
public typedrouter = new plugins.typedrequest.TypedRouter();
constructor(private opsServerRef: OpsServer) {
this.opsServerRef.typedrouter.addTypedRouter(this.typedrouter);
this.registerHandlers();
}
private registerHandlers(): void {
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetBackups>(
'getBackups',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const backups = this.opsServerRef.oneboxRef.backupManager.listBackups();
return { backups };
},
),
);
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetBackup>(
'getBackup',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const backup = this.opsServerRef.oneboxRef.database.getBackupById(dataArg.backupId);
if (!backup) {
throw new plugins.typedrequest.TypedResponseError('Backup not found');
}
return { backup };
},
),
);
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_DeleteBackup>(
'deleteBackup',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
await this.opsServerRef.oneboxRef.backupManager.deleteBackup(dataArg.backupId);
return { ok: true };
},
),
);
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_RestoreBackup>(
'restoreBackup',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const rawResult = await this.opsServerRef.oneboxRef.backupManager.restoreBackup(
dataArg.backupId,
dataArg.options,
);
return {
result: {
service: {
name: rawResult.service.name,
status: rawResult.service.status,
},
platformResourcesRestored: rawResult.platformResourcesRestored,
warnings: rawResult.warnings,
},
};
},
),
);
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_DownloadBackup>(
'downloadBackup',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const backup = this.opsServerRef.oneboxRef.database.getBackupById(dataArg.backupId);
if (!backup) {
throw new plugins.typedrequest.TypedResponseError('Backup not found');
}
// Return a download URL that the client can fetch directly
const filename = backup.filename || `${backup.serviceName}-${backup.createdAt}.tar.enc`;
return {
downloadUrl: `/api/backups/${dataArg.backupId}/download`,
filename,
};
},
),
);
}
}
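
downloadBackup deliberately returns a URL rather than the archive itself, keeping bulk bytes out of the typed-request channel. On the client that splits into two steps; a sketch assuming a browser context, with the endpoint path hypothetical and `identity` obtained elsewhere:

// Sketch: two-step backup download on the client
import * as typedrequest from '@api.global/typedrequest';
import type * as interfaces from '../ts_interfaces/index.ts';

const downloadRequest = new typedrequest.TypedRequest<interfaces.requests.IReq_DownloadBackup>(
  '/typedrequest', // hypothetical endpoint on the same origin
  'downloadBackup',
);
const { downloadUrl, filename } = await downloadRequest.fire({ identity, backupId: 42 });
// Step 2: a plain fetch of the actual archive bytes; the relative URL
// resolves against the ops server origin
const blob = await (await fetch(downloadUrl)).blob();
console.log(`Fetched ${filename}: ${blob.size} bytes`);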

View File

@@ -0,0 +1,65 @@
import * as plugins from '../../plugins.ts';
import type { OpsServer } from '../classes.opsserver.ts';
import * as interfaces from '../../../ts_interfaces/index.ts';
import { requireValidIdentity } from '../helpers/guards.ts';
export class DnsHandler {
public typedrouter = new plugins.typedrequest.TypedRouter();
constructor(private opsServerRef: OpsServer) {
this.opsServerRef.typedrouter.addTypedRouter(this.typedrouter);
this.registerHandlers();
}
private registerHandlers(): void {
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetDnsRecords>(
'getDnsRecords',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const records = this.opsServerRef.oneboxRef.dns.listDNSRecords();
return { records };
},
),
);
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_CreateDnsRecord>(
'createDnsRecord',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
await this.opsServerRef.oneboxRef.dns.addDNSRecord(dataArg.domain, dataArg.value);
const records = this.opsServerRef.oneboxRef.dns.listDNSRecords();
const record = records.find((r: any) => r.domain === dataArg.domain);
if (!record) {
throw new plugins.typedrequest.TypedResponseError(`DNS record was not found after creation: ${dataArg.domain}`);
}
return { record };
},
),
);
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_DeleteDnsRecord>(
'deleteDnsRecord',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
await this.opsServerRef.oneboxRef.dns.removeDNSRecord(dataArg.domain);
return { ok: true };
},
),
);
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_SyncDns>(
'syncDns',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
if (!this.opsServerRef.oneboxRef.dns.isConfigured()) {
throw new plugins.typedrequest.TypedResponseError('DNS manager not configured');
}
await this.opsServerRef.oneboxRef.dns.syncFromCloudflare();
const records = this.opsServerRef.oneboxRef.dns.listDNSRecords();
return { records };
},
),
);
}
}

View File

@@ -0,0 +1,101 @@
import * as plugins from '../../plugins.ts';
import type { OpsServer } from '../classes.opsserver.ts';
import * as interfaces from '../../../ts_interfaces/index.ts';
import { requireValidIdentity } from '../helpers/guards.ts';
export class DomainsHandler {
public typedrouter = new plugins.typedrequest.TypedRouter();
constructor(private opsServerRef: OpsServer) {
this.opsServerRef.typedrouter.addTypedRouter(this.typedrouter);
this.registerHandlers();
}
private buildDomainViews(): interfaces.data.IDomainDetail[] {
const domains = this.opsServerRef.oneboxRef.database.getAllDomains();
const allServices = this.opsServerRef.oneboxRef.database.getAllServices();
return domains.map((domain: any) => {
const certificates = this.opsServerRef.oneboxRef.database.getCertificatesByDomain(domain.id!);
const requirements = this.opsServerRef.oneboxRef.database.getCertRequirementsByDomain(domain.id!);
const serviceCount = allServices.filter((service: any) => {
if (!service.domain) return false;
const baseDomain = service.domain.split('.').slice(-2).join('.');
return baseDomain === domain.domain;
}).length;
let certificateStatus: 'valid' | 'expiring-soon' | 'expired' | 'pending' | 'none' = 'none';
let daysRemaining: number | null = null;
const validCerts = certificates.filter((cert: any) => cert.isValid && cert.expiryDate > Date.now());
if (validCerts.length > 0) {
const latestCert = validCerts.reduce((latest: any, cert: any) =>
cert.expiryDate > latest.expiryDate ? cert : latest
);
daysRemaining = Math.floor((latestCert.expiryDate - Date.now()) / (24 * 60 * 60 * 1000));
certificateStatus = daysRemaining <= 30 ? 'expiring-soon' : 'valid';
} else if (certificates.some((cert: any) => !cert.isValid)) {
certificateStatus = 'expired';
} else if (requirements.some((req: any) => req.status === 'pending')) {
certificateStatus = 'pending';
}
return {
domain,
certificates,
requirements,
serviceCount,
certificateStatus,
daysRemaining,
};
});
}
private registerHandlers(): void {
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetDomains>(
'getDomains',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const domains = this.buildDomainViews();
return { domains };
},
),
);
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetDomain>(
'getDomain',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const domain = this.opsServerRef.oneboxRef.database.getDomainByName(dataArg.domainName);
if (!domain) {
throw new plugins.typedrequest.TypedResponseError('Domain not found');
}
const views = this.buildDomainViews();
const domainView = views.find((v) => v.domain.domain === dataArg.domainName);
if (!domainView) {
throw new plugins.typedrequest.TypedResponseError('Domain not found');
}
return { domain: domainView };
},
),
);
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_SyncDomains>(
'syncDomains',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
if (!this.opsServerRef.oneboxRef.cloudflareDomainSync) {
throw new plugins.typedrequest.TypedResponseError('Cloudflare domain sync not configured');
}
await this.opsServerRef.oneboxRef.cloudflareDomainSync.syncZones();
const domains = this.buildDomainViews();
return { domains };
},
),
);
}
}
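
The certificate status logic in buildDomainViews boils down to timestamp arithmetic on the freshest valid certificate. A deterministic worked example of that math, restated as a standalone sketch:

// Sketch: the same expiry math as buildDomainViews, isolated for illustration
const DAY_MS = 24 * 60 * 60 * 1000;

function statusForExpiry(expiryDate: number, now: number): { status: string; daysRemaining: number } {
  const daysRemaining = Math.floor((expiryDate - now) / DAY_MS);
  return { status: daysRemaining <= 30 ? 'expiring-soon' : 'valid', daysRemaining };
}

const now = 1_700_000_000_000; // fixed reference time so the example is deterministic
// A cert expiring in 12 days is flagged early...
console.log(statusForExpiry(now + 12 * DAY_MS, now)); // { status: 'expiring-soon', daysRemaining: 12 }
// ...while one with 90 days left is simply valid
console.log(statusForExpiry(now + 90 * DAY_MS, now)); // { status: 'valid', daysRemaining: 90 }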

View File

@@ -0,0 +1,15 @@
export * from './admin.handler.ts';
export * from './status.handler.ts';
export * from './services.handler.ts';
export * from './platform.handler.ts';
export * from './ssl.handler.ts';
export * from './domains.handler.ts';
export * from './dns.handler.ts';
export * from './registry.handler.ts';
export * from './network.handler.ts';
export * from './backups.handler.ts';
export * from './schedules.handler.ts';
export * from './settings.handler.ts';
export * from './logs.handler.ts';
export * from './workspace.handler.ts';
export * from './appstore.handler.ts';

View File

@@ -0,0 +1,219 @@
import * as plugins from '../../plugins.ts';
import { logger } from '../../logging.ts';
import type { OpsServer } from '../classes.opsserver.ts';
import * as interfaces from '../../../ts_interfaces/index.ts';
import { requireValidIdentity } from '../helpers/guards.ts';
export class LogsHandler {
public typedrouter = new plugins.typedrequest.TypedRouter();
constructor(private opsServerRef: OpsServer) {
this.opsServerRef.typedrouter.addTypedRouter(this.typedrouter);
this.registerHandlers();
}
private registerHandlers(): void {
// Service log stream
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetServiceLogStream>(
'getServiceLogStream',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const service = this.opsServerRef.oneboxRef.database.getServiceByName(dataArg.serviceName);
if (!service) {
throw new plugins.typedrequest.TypedResponseError('Service not found');
}
const virtualStream = new plugins.typedrequest.VirtualStream<Uint8Array>();
const encoder = new TextEncoder();
// Get container and start streaming in background
(async () => {
try {
let container = await this.opsServerRef.oneboxRef.docker.getContainerById(service.containerID!);
if (!container) {
// Try finding by service label
const containers = await this.opsServerRef.oneboxRef.docker.listAllContainers();
const serviceContainer = containers.find((c: any) => {
const labels = c.Labels || {};
return labels['com.docker.swarm.service.id'] === service.containerID;
});
if (serviceContainer) {
container = await this.opsServerRef.oneboxRef.docker.getContainerById(serviceContainer.Id);
}
}
if (!container) {
virtualStream.sendData(encoder.encode(JSON.stringify({ error: 'Container not found' })));
return;
}
const logStream = await container.streamLogs({
stdout: true,
stderr: true,
timestamps: true,
tail: 100,
});
let buffer = new Uint8Array(0);
logStream.on('data', (chunk: Uint8Array) => {
// Append to buffer
const newBuffer = new Uint8Array(buffer.length + chunk.length);
newBuffer.set(buffer);
newBuffer.set(chunk, buffer.length);
buffer = newBuffer;
// Process Docker multiplexed frames
while (buffer.length >= 8) {
const frameSize = (buffer[4] << 24) | (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
if (buffer.length < 8 + frameSize) break;
const frameData = buffer.slice(8, 8 + frameSize);
try {
virtualStream.sendData(frameData);
} catch {
logStream.destroy();
return;
}
buffer = buffer.slice(8 + frameSize);
}
});
logStream.on('error', (error: Error) => {
logger.error(`Log stream error for ${dataArg.serviceName}: ${error.message}`);
});
} catch (error) {
logger.error(`Failed to start log stream: ${error}`);
}
})();
return { logStream: virtualStream as any };
},
),
);
// Platform service log stream
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetPlatformServiceLogStream>(
'getPlatformServiceLogStream',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const platformService = this.opsServerRef.oneboxRef.database.getPlatformServiceByType(
dataArg.serviceType,
);
if (!platformService || !platformService.containerId) {
throw new plugins.typedrequest.TypedResponseError('Platform service has no container');
}
const virtualStream = new plugins.typedrequest.VirtualStream<Uint8Array>();
(async () => {
try {
const container = await this.opsServerRef.oneboxRef.docker.getContainerById(
platformService.containerId!,
);
if (!container) return;
const logStream = await container.streamLogs({
stdout: true,
stderr: true,
timestamps: true,
tail: 100,
});
let buffer = new Uint8Array(0);
logStream.on('data', (chunk: Uint8Array) => {
const newBuffer = new Uint8Array(buffer.length + chunk.length);
newBuffer.set(buffer);
newBuffer.set(chunk, buffer.length);
buffer = newBuffer;
while (buffer.length >= 8) {
const frameSize = (buffer[4] << 24) | (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
if (buffer.length < 8 + frameSize) break;
const frameData = buffer.slice(8, 8 + frameSize);
try {
virtualStream.sendData(frameData);
} catch {
logStream.destroy();
return;
}
buffer = buffer.slice(8 + frameSize);
}
});
} catch (error) {
logger.error(`Failed to start platform log stream: ${error}`);
}
})();
return { logStream: virtualStream as any };
},
),
);
// Network log stream
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetNetworkLogStream>(
'getNetworkLogStream',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const virtualStream = new plugins.typedrequest.VirtualStream<Uint8Array>();
const encoder = new TextEncoder();
const clientId = crypto.randomUUID();
// Create a mock WebSocket-like object for the CaddyLogReceiver
const mockSocket = {
readyState: 1, // WebSocket.OPEN
send: (data: string) => {
try {
virtualStream.sendData(encoder.encode(data));
} catch {
this.opsServerRef.oneboxRef.caddyLogReceiver.removeClient(clientId);
}
},
};
const filter = dataArg.filter || {};
this.opsServerRef.oneboxRef.caddyLogReceiver.addClient(
clientId,
mockSocket as any,
filter,
);
return { logStream: virtualStream as any };
},
),
);
// Event stream (general updates)
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetEventStream>(
'getEventStream',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const virtualStream = new plugins.typedrequest.VirtualStream<Uint8Array>();
const encoder = new TextEncoder();
// Send initial connection message
virtualStream.sendData(
encoder.encode(
JSON.stringify({
type: 'connected',
message: 'Connected to Onebox event stream',
timestamp: Date.now(),
}),
),
);
return { eventStream: virtualStream as any };
},
),
);
}
}
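
Both streaming handlers above decode Docker's multiplexed log format by hand: every frame starts with an 8-byte header whose first byte is the stream type (1 = stdout, 2 = stderr) and whose last four bytes are a big-endian payload length. A self-contained sketch of that framing:

// Sketch: decoding one Docker multiplexed log frame
// (header layout: [type, 0, 0, 0, len3, len2, len1, len0], then payload)
function decodeFrame(buf: Uint8Array): { stream: 'stdout' | 'stderr'; payload: Uint8Array; rest: Uint8Array } | null {
  if (buf.length < 8) return null; // incomplete header
  const size = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
  if (buf.length < 8 + size) return null; // incomplete payload, wait for more data
  return {
    stream: buf[0] === 2 ? 'stderr' : 'stdout',
    payload: buf.slice(8, 8 + size),
    rest: buf.slice(8 + size),
  };
}

// A 5-byte stdout frame carrying "hello"
const frame = new Uint8Array([1, 0, 0, 0, 0, 0, 0, 5, 104, 101, 108, 108, 111]);
const decoded = decodeFrame(frame)!;
console.log(decoded.stream, new TextDecoder().decode(decoded.payload)); // stdout hello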

View File

@@ -0,0 +1,124 @@
import * as plugins from '../../plugins.ts';
import type { OpsServer } from '../classes.opsserver.ts';
import * as interfaces from '../../../ts_interfaces/index.ts';
import { requireValidIdentity } from '../helpers/guards.ts';
import type { TPlatformServiceType } from '../../types.ts';
export class NetworkHandler {
public typedrouter = new plugins.typedrequest.TypedRouter();
constructor(private opsServerRef: OpsServer) {
this.opsServerRef.typedrouter.addTypedRouter(this.typedrouter);
this.registerHandlers();
}
private getPlatformServicePort(type: TPlatformServiceType): number {
const ports: Record<TPlatformServiceType, number> = {
mongodb: 27017,
minio: 9000,
redis: 6379,
postgresql: 5432,
rabbitmq: 5672,
caddy: 80,
clickhouse: 8123,
mariadb: 3306,
};
return ports[type] || 0;
}
private registerHandlers(): void {
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetNetworkTargets>(
'getNetworkTargets',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const targets: interfaces.data.INetworkTarget[] = [];
// Services
const services = this.opsServerRef.oneboxRef.services.listServices();
for (const svc of services) {
targets.push({
type: 'service',
name: svc.name,
domain: svc.domain || null,
targetHost: (svc as any).containerIP || svc.containerID || 'unknown',
targetPort: svc.port || 80,
status: svc.status,
});
}
// Registry
const registryStatus = this.opsServerRef.oneboxRef.registry.getStatus();
if (registryStatus.running) {
targets.push({
type: 'registry',
name: 'onebox-registry',
domain: null,
targetHost: 'localhost',
targetPort: registryStatus.port,
status: 'running',
});
}
// Platform services
const platformServices = this.opsServerRef.oneboxRef.platformServices.getAllPlatformServices();
for (const ps of platformServices) {
const provider = this.opsServerRef.oneboxRef.platformServices.getProvider(ps.type);
targets.push({
type: 'platform',
name: provider?.displayName || ps.type,
domain: null,
targetHost: 'localhost',
targetPort: this.getPlatformServicePort(ps.type),
status: ps.status,
});
}
return { targets };
},
),
);
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetNetworkStats>(
'getNetworkStats',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const proxyStatus = this.opsServerRef.oneboxRef.reverseProxy.getStatus() as any;
const logReceiverStats = this.opsServerRef.oneboxRef.caddyLogReceiver.getStats();
return {
stats: {
proxy: {
running: proxyStatus.running ?? proxyStatus.http?.running ?? false,
httpPort: proxyStatus.httpPort ?? proxyStatus.http?.port ?? 80,
httpsPort: proxyStatus.httpsPort ?? proxyStatus.https?.port ?? 443,
routes: proxyStatus.routes ?? 0,
certificates: proxyStatus.certificates ?? proxyStatus.https?.certificates ?? 0,
},
logReceiver: {
running: logReceiverStats.running,
port: logReceiverStats.port,
clients: logReceiverStats.clients,
connections: logReceiverStats.connections,
sampleRate: logReceiverStats.sampleRate,
recentLogsCount: logReceiverStats.recentLogsCount,
},
},
};
},
),
);
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetTrafficStats>(
'getTrafficStats',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const trafficStats = this.opsServerRef.oneboxRef.caddyLogReceiver.getTrafficStats(60);
return { stats: trafficStats };
},
),
);
}
}

View File

@@ -0,0 +1,329 @@
import * as plugins from '../../plugins.ts';
import { logger } from '../../logging.ts';
import type { OpsServer } from '../classes.opsserver.ts';
import * as interfaces from '../../../ts_interfaces/index.ts';
import { requireValidIdentity } from '../helpers/guards.ts';
export class PlatformHandler {
public typedrouter = new plugins.typedrequest.TypedRouter();
private activeLogStreams = new Map<string, boolean>();
constructor(private opsServerRef: OpsServer) {
this.opsServerRef.typedrouter.addTypedRouter(this.typedrouter);
this.registerHandlers();
this.startLogStreaming();
}
/**
* Start streaming logs from all running containers (platform + user services)
* and push new entries to connected dashboard clients via TypedSocket
*/
private async startLogStreaming(): Promise<void> {
const checkAndStream = async () => {
// Stream platform service containers
const platformServices = this.opsServerRef.oneboxRef.database.getAllPlatformServices();
for (const service of platformServices) {
if (service.status !== 'running' || !service.containerId) continue;
const key = `platform:${service.type}`;
if (this.activeLogStreams.has(key)) continue;
this.activeLogStreams.set(key, true);
logger.info(`Starting log stream for platform service: ${service.type}`);
try {
await this.opsServerRef.oneboxRef.docker.streamContainerLogs(
service.containerId,
(line: string, isError: boolean) => {
this.pushPlatformLogToClients(service.type as interfaces.data.TPlatformServiceType, line, isError);
}
);
} catch (err) {
logger.warn(`Log stream failed for ${service.type}: ${(err as Error).message}`);
this.activeLogStreams.delete(key);
}
}
// Stream user service containers
const userServices = this.opsServerRef.oneboxRef.services.listServices();
for (const service of userServices) {
if (service.status !== 'running' || !service.containerID) continue;
const key = `service:${service.name}`;
if (this.activeLogStreams.has(key)) continue;
this.activeLogStreams.set(key, true);
logger.info(`Starting log stream for user service: ${service.name}`);
try {
await this.opsServerRef.oneboxRef.docker.streamContainerLogs(
service.containerID,
(line: string, isError: boolean) => {
this.pushServiceLogToClients(service.name, line, isError);
}
);
} catch (err) {
logger.warn(`Log stream failed for ${service.name}: ${(err as Error).message}`);
this.activeLogStreams.delete(key);
}
}
};
// Initial check after a short delay (let services start first)
setTimeout(() => checkAndStream(), 5000);
// Re-check periodically for newly started services
setInterval(() => checkAndStream(), 15000);
}
private parseLogLine(line: string, isError: boolean): { timestamp: string; level: string; message: string } {
const tsMatch = line.match(/^(\d{4}-\d{2}-\d{2}T[\d:.]+Z?)\s+(.*)/);
const timestamp = tsMatch ? tsMatch[1] : new Date().toISOString();
const message = tsMatch ? tsMatch[2] : line;
const msgLower = message.toLowerCase();
const level = isError || msgLower.includes('error') || msgLower.includes('fatal')
? 'error'
: msgLower.includes('warn')
? 'warn'
: 'info';
return { timestamp, level, message };
}
private pushPlatformLogToClients(
serviceType: interfaces.data.TPlatformServiceType,
line: string,
isError: boolean,
): void {
const typedsocket = (this.opsServerRef.server as any)?.typedserver?.typedsocket;
if (!typedsocket) return;
const entry = this.parseLogLine(line, isError);
typedsocket.findAllTargetConnectionsByTag('role', 'ops_dashboard')
.then((connections: any[]) => {
for (const conn of connections) {
typedsocket.createTypedRequest<interfaces.requests.IReq_PushPlatformServiceLog>(
'pushPlatformServiceLog',
conn,
).fire({ serviceType, entry }).catch(() => {});
}
})
.catch(() => {});
}
private pushServiceLogToClients(
serviceName: string,
line: string,
isError: boolean,
): void {
const typedsocket = (this.opsServerRef.server as any)?.typedserver?.typedsocket;
if (!typedsocket) return;
const entry = this.parseLogLine(line, isError);
typedsocket.findAllTargetConnectionsByTag('role', 'ops_dashboard')
.then((connections: any[]) => {
for (const conn of connections) {
typedsocket.createTypedRequest<interfaces.requests.IReq_PushServiceLog>(
'pushServiceLog',
conn,
).fire({ serviceName, entry }).catch(() => {});
}
})
.catch(() => {});
}
private registerHandlers(): void {
// Get all platform services
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetPlatformServices>(
'getPlatformServices',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const platformServices = this.opsServerRef.oneboxRef.platformServices.getAllPlatformServices();
const providers = this.opsServerRef.oneboxRef.platformServices.getAllProviders();
const result = providers.map((provider: any) => {
const service = platformServices.find((s: any) => s.type === provider.type);
const isCore = 'isCore' in provider && (provider as any).isCore === true;
let status: string = service?.status || 'not-deployed';
if (provider.type === 'caddy') {
const proxyStatus = this.opsServerRef.oneboxRef.reverseProxy.getStatus() as any;
status = (proxyStatus.running ?? proxyStatus.http?.running) ? 'running' : 'stopped';
}
return {
type: provider.type,
displayName: provider.displayName,
resourceTypes: provider.resourceTypes,
status: status as interfaces.data.TPlatformServiceStatus,
containerId: service?.containerId,
isCore,
createdAt: service?.createdAt,
updatedAt: service?.updatedAt,
};
});
return { platformServices: result as interfaces.data.IPlatformService[] };
},
),
);
// Get specific platform service
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetPlatformService>(
'getPlatformService',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const provider = this.opsServerRef.oneboxRef.platformServices.getProvider(dataArg.serviceType);
if (!provider) {
throw new plugins.typedrequest.TypedResponseError(`Unknown platform service type: ${dataArg.serviceType}`);
}
const service = this.opsServerRef.oneboxRef.database.getPlatformServiceByType(dataArg.serviceType);
const isCore = 'isCore' in provider && (provider as any).isCore === true;
let rawStatus: string = service?.status || 'not-deployed';
if (dataArg.serviceType === 'caddy') {
const proxyStatus = this.opsServerRef.oneboxRef.reverseProxy.getStatus() as any;
rawStatus = (proxyStatus.running ?? proxyStatus.http?.running) ? 'running' : 'stopped';
}
return {
platformService: {
type: provider.type,
displayName: provider.displayName,
resourceTypes: provider.resourceTypes,
status: rawStatus as interfaces.data.TPlatformServiceStatus,
containerId: service?.containerId,
isCore,
createdAt: service?.createdAt,
updatedAt: service?.updatedAt,
} as interfaces.data.IPlatformService,
};
},
),
);
// Start platform service
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_StartPlatformService>(
'startPlatformService',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const provider = this.opsServerRef.oneboxRef.platformServices.getProvider(dataArg.serviceType);
if (!provider) {
throw new plugins.typedrequest.TypedResponseError(`Unknown platform service type: ${dataArg.serviceType}`);
}
logger.info(`Starting platform service: ${dataArg.serviceType}`);
const service = await this.opsServerRef.oneboxRef.platformServices.ensureRunning(dataArg.serviceType);
return {
platformService: {
type: service.type,
displayName: provider.displayName,
resourceTypes: provider.resourceTypes,
status: service.status,
containerId: service.containerId,
},
};
},
),
);
// Stop platform service
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_StopPlatformService>(
'stopPlatformService',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const provider = this.opsServerRef.oneboxRef.platformServices.getProvider(dataArg.serviceType);
if (!provider) {
throw new plugins.typedrequest.TypedResponseError(`Unknown platform service type: ${dataArg.serviceType}`);
}
const isCore = 'isCore' in provider && (provider as any).isCore === true;
if (isCore) {
throw new plugins.typedrequest.TypedResponseError(
`${provider.displayName} is a core service and cannot be stopped`,
);
}
logger.info(`Stopping platform service: ${dataArg.serviceType}`);
await this.opsServerRef.oneboxRef.platformServices.stopPlatformService(dataArg.serviceType);
return {
platformService: {
type: dataArg.serviceType,
displayName: provider.displayName,
resourceTypes: provider.resourceTypes,
status: 'stopped' as const,
},
};
},
),
);
// Get platform service stats
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetPlatformServiceStats>(
'getPlatformServiceStats',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const service = this.opsServerRef.oneboxRef.database.getPlatformServiceByType(dataArg.serviceType);
if (!service || !service.containerId) {
throw new plugins.typedrequest.TypedResponseError('Platform service has no container');
}
const stats = await this.opsServerRef.oneboxRef.docker.getContainerStats(service.containerId);
if (!stats) {
throw new plugins.typedrequest.TypedResponseError('Could not retrieve container stats');
}
return { stats };
},
),
);
// Get platform service logs
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetPlatformServiceLogs>(
'getPlatformServiceLogs',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const service = this.opsServerRef.oneboxRef.database.getPlatformServiceByType(dataArg.serviceType);
if (!service || !service.containerId) {
throw new plugins.typedrequest.TypedResponseError('Platform service has no container');
}
const tail = dataArg.tail || 100;
const rawLogs = await this.opsServerRef.oneboxRef.docker.getContainerLogs(service.containerId, tail);
// Parse raw log output into structured entries
const logLines = (rawLogs.stdout + rawLogs.stderr)
.split('\n')
.filter((line: string) => line.trim());
const logs = logLines.map((line: string, index: number) => {
// Try to parse Docker timestamp from beginning of line
const tsMatch = line.match(/^(\d{4}-\d{2}-\d{2}T[\d:.]+Z?)\s+(.*)/);
const timestamp = tsMatch ? new Date(tsMatch[1]).getTime() : Date.now();
const message = tsMatch ? tsMatch[2] : line;
const msgLower = message.toLowerCase();
const isError = msgLower.includes('error') || msgLower.includes('fatal');
const isWarn = msgLower.includes('warn');
return {
id: index,
serviceId: 0,
timestamp,
message,
level: (isError ? 'error' : isWarn ? 'warn' : 'info') as 'info' | 'warn' | 'error' | 'debug',
source: 'stdout' as const,
};
});
return { logs };
},
),
);
}
}
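
parseLogLine above is a heuristic: it peels off a leading RFC 3339 timestamp if Docker emitted one, then infers a level from keywords in the remainder. The same steps applied to a sample line:

// Sketch: the timestamp/level heuristic from parseLogLine, applied to a sample line
const line = '2026-03-24T20:08:25.123Z ERROR: connection refused';
const tsMatch = line.match(/^(\d{4}-\d{2}-\d{2}T[\d:.]+Z?)\s+(.*)/);
const message = tsMatch ? tsMatch[2] : line; // 'ERROR: connection refused'
const level = message.toLowerCase().includes('error') ? 'error' : 'info';
console.log(tsMatch?.[1], level); // 2026-03-24T20:08:25.123Z error
// A line without a timestamp falls back to new Date().toISOString() and level 'info'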

View File

@@ -0,0 +1,147 @@
import * as plugins from '../../plugins.ts';
import type { OpsServer } from '../classes.opsserver.ts';
import * as interfaces from '../../../ts_interfaces/index.ts';
import { requireValidIdentity } from '../helpers/guards.ts';
export class RegistryHandler {
public typedrouter = new plugins.typedrequest.TypedRouter();
constructor(private opsServerRef: OpsServer) {
this.opsServerRef.typedrouter.addTypedRouter(this.typedrouter);
this.registerHandlers();
}
private registerHandlers(): void {
// Get registry tags
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetRegistryTags>(
'getRegistryTags',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const tags = await this.opsServerRef.oneboxRef.registry.getImageTags(dataArg.serviceName);
return { tags };
},
),
);
// Get registry tokens
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetRegistryTokens>(
'getRegistryTokens',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const rawTokens = this.opsServerRef.oneboxRef.database.getAllRegistryTokens();
const now = Date.now();
const tokens = rawTokens.map((token: any) => {
const isExpired = token.expiresAt !== null && token.expiresAt < now;
let scopeDisplay: string;
if (token.scope === 'all') {
scopeDisplay = 'All services';
} else if (Array.isArray(token.scope)) {
scopeDisplay = token.scope.length === 1 ? token.scope[0] : `${token.scope.length} services`;
} else {
scopeDisplay = 'Unknown';
}
return {
id: token.id!,
name: token.name,
type: token.type,
scope: token.scope,
scopeDisplay,
expiresAt: token.expiresAt,
createdAt: token.createdAt,
lastUsedAt: token.lastUsedAt,
createdBy: token.createdBy,
isExpired,
};
});
return { tokens };
},
),
);
// Create registry token
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_CreateRegistryToken>(
'createRegistryToken',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const config = dataArg.tokenConfig;
// Calculate expiration
const now = Date.now();
let expiresAt: number | null = null;
if (config.expiresIn !== 'never') {
const daysMap: Record<string, number> = { '30d': 30, '90d': 90, '365d': 365 };
const days = daysMap[config.expiresIn];
if (days) expiresAt = now + days * 24 * 60 * 60 * 1000;
}
// Generate token
const plainToken = crypto.randomUUID() + crypto.randomUUID();
const encoder = new TextEncoder();
const hashBuffer = await crypto.subtle.digest('SHA-256', encoder.encode(plainToken));
const hashArray = Array.from(new Uint8Array(hashBuffer));
const tokenHash = hashArray.map((b) => b.toString(16).padStart(2, '0')).join('');
const token = this.opsServerRef.oneboxRef.database.createRegistryToken({
name: config.name,
tokenHash,
type: config.type,
scope: config.scope,
expiresAt,
createdAt: now,
lastUsedAt: null,
createdBy: dataArg.identity.username,
});
let scopeDisplay: string;
if (token.scope === 'all') {
scopeDisplay = 'All services';
} else if (Array.isArray(token.scope)) {
scopeDisplay = token.scope.length === 1 ? token.scope[0] : `${token.scope.length} services`;
} else {
scopeDisplay = 'Unknown';
}
return {
result: {
token: {
id: token.id!,
name: token.name,
type: token.type,
scope: token.scope,
scopeDisplay,
expiresAt: token.expiresAt,
createdAt: token.createdAt,
lastUsedAt: token.lastUsedAt,
createdBy: token.createdBy,
isExpired: false,
},
plainToken,
},
};
},
),
);
// Delete registry token
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_DeleteRegistryToken>(
'deleteRegistryToken',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const token = this.opsServerRef.oneboxRef.database.getRegistryTokenById(dataArg.tokenId);
if (!token) {
throw new plugins.typedrequest.TypedResponseError('Token not found');
}
this.opsServerRef.oneboxRef.database.deleteRegistryToken(dataArg.tokenId);
return { ok: true };
},
),
);
}
}
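
Because only the SHA-256 hash of a token is persisted, the verification side (not part of this diff) has to hash whatever token a registry client presents and compare digests. A sketch of that lookup, mirroring the hashing code in createRegistryToken; the db parameter shape is an assumption beyond the getAllRegistryTokens call shown above:

// Sketch: validating a presented registry token against stored hashes
async function sha256Hex(input: string): Promise<string> {
  const digest = await crypto.subtle.digest('SHA-256', new TextEncoder().encode(input));
  return Array.from(new Uint8Array(digest))
    .map((b) => b.toString(16).padStart(2, '0'))
    .join('');
}

async function findTokenByPlaintext(db: { getAllRegistryTokens(): any[] }, presented: string) {
  const hash = await sha256Hex(presented);
  const token = db.getAllRegistryTokens().find((t) => t.tokenHash === hash);
  if (!token) return null;
  if (token.expiresAt !== null && token.expiresAt < Date.now()) return null; // expired
  return token;
}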

View File

@@ -0,0 +1,93 @@
import * as plugins from '../../plugins.ts';
import type { OpsServer } from '../classes.opsserver.ts';
import * as interfaces from '../../../ts_interfaces/index.ts';
import { requireValidIdentity } from '../helpers/guards.ts';
export class SchedulesHandler {
public typedrouter = new plugins.typedrequest.TypedRouter();
constructor(private opsServerRef: OpsServer) {
this.opsServerRef.typedrouter.addTypedRouter(this.typedrouter);
this.registerHandlers();
}
private registerHandlers(): void {
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetBackupSchedules>(
'getBackupSchedules',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const schedules = this.opsServerRef.oneboxRef.backupScheduler.getAllSchedules();
return { schedules };
},
),
);
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_CreateBackupSchedule>(
'createBackupSchedule',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const schedule = await this.opsServerRef.oneboxRef.backupScheduler.createSchedule(
dataArg.scheduleConfig,
);
return { schedule };
},
),
);
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetBackupSchedule>(
'getBackupSchedule',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const schedule = this.opsServerRef.oneboxRef.backupScheduler.getScheduleById(dataArg.scheduleId);
if (!schedule) {
throw new plugins.typedrequest.TypedResponseError('Schedule not found');
}
return { schedule };
},
),
);
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_UpdateBackupSchedule>(
'updateBackupSchedule',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const schedule = await this.opsServerRef.oneboxRef.backupScheduler.updateSchedule(
dataArg.scheduleId,
dataArg.updates,
);
return { schedule };
},
),
);
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_DeleteBackupSchedule>(
'deleteBackupSchedule',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
await this.opsServerRef.oneboxRef.backupScheduler.deleteSchedule(dataArg.scheduleId);
return { ok: true };
},
),
);
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_TriggerBackupSchedule>(
'triggerBackupSchedule',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
await this.opsServerRef.oneboxRef.backupScheduler.triggerBackup(dataArg.scheduleId);
// triggerBackup resolves void; the scheduler creates the backup asynchronously,
// so return the most recent backup recorded for this schedule (it may predate
// the run triggered above)
const allBackups = this.opsServerRef.oneboxRef.backupManager.listBackups();
const latestBackup = allBackups.find((b: any) => b.scheduleId === dataArg.scheduleId);
return { backup: latestBackup! };
},
),
);
}
}

View File

@@ -0,0 +1,244 @@
import * as plugins from '../../plugins.ts';
import { logger } from '../../logging.ts';
import type { OpsServer } from '../classes.opsserver.ts';
import * as interfaces from '../../../ts_interfaces/index.ts';
import { requireValidIdentity } from '../helpers/guards.ts';
export class ServicesHandler {
public typedrouter = new plugins.typedrequest.TypedRouter();
constructor(private opsServerRef: OpsServer) {
this.opsServerRef.typedrouter.addTypedRouter(this.typedrouter);
this.registerHandlers();
}
private registerHandlers(): void {
// Get all services
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetServices>(
'getServices',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const services = this.opsServerRef.oneboxRef.services.listServices();
return { services };
},
),
);
// Get single service
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetService>(
'getService',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const service = this.opsServerRef.oneboxRef.services.getService(dataArg.serviceName);
if (!service) {
throw new plugins.typedrequest.TypedResponseError('Service not found');
}
return { service };
},
),
);
// Create service
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_CreateService>(
'createService',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const service = await this.opsServerRef.oneboxRef.services.deployService(dataArg.serviceConfig);
return { service };
},
),
);
// Update service
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_UpdateService>(
'updateService',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const service = await this.opsServerRef.oneboxRef.services.updateService(
dataArg.serviceName,
dataArg.updates,
);
return { service };
},
),
);
// Delete service
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_DeleteService>(
'deleteService',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
await this.opsServerRef.oneboxRef.services.removeService(dataArg.serviceName);
return { ok: true };
},
),
);
// Start service
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_StartService>(
'startService',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
await this.opsServerRef.oneboxRef.services.startService(dataArg.serviceName);
const service = this.opsServerRef.oneboxRef.services.getService(dataArg.serviceName);
return { service: service! };
},
),
);
// Stop service
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_StopService>(
'stopService',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
await this.opsServerRef.oneboxRef.services.stopService(dataArg.serviceName);
const service = this.opsServerRef.oneboxRef.services.getService(dataArg.serviceName);
return { service: service! };
},
),
);
// Restart service
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_RestartService>(
'restartService',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
await this.opsServerRef.oneboxRef.services.restartService(dataArg.serviceName);
const service = this.opsServerRef.oneboxRef.services.getService(dataArg.serviceName);
return { service: service! };
},
),
);
// Get service logs
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetServiceLogs>(
'getServiceLogs',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const logs = await this.opsServerRef.oneboxRef.services.getServiceLogs(dataArg.serviceName);
return { logs: String(logs) };
},
),
);
// Get service stats
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetServiceStats>(
'getServiceStats',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const service = this.opsServerRef.oneboxRef.services.getService(dataArg.serviceName);
if (!service || !service.containerID) {
throw new plugins.typedrequest.TypedResponseError('Service has no container');
}
const stats = await this.opsServerRef.oneboxRef.docker.getContainerStats(service.containerID);
if (!stats) {
throw new plugins.typedrequest.TypedResponseError('Could not retrieve container stats');
}
return { stats };
},
),
);
// Get service metrics
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetServiceMetrics>(
'getServiceMetrics',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const service = this.opsServerRef.oneboxRef.services.getService(dataArg.serviceName);
if (!service || !service.id) {
throw new plugins.typedrequest.TypedResponseError('Service not found');
}
const metrics = this.opsServerRef.oneboxRef.database.getMetrics(service.id, dataArg.limit || 60);
return { metrics };
},
),
);
// Get service platform resources
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetServicePlatformResources>(
'getServicePlatformResources',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const rawResources = await this.opsServerRef.oneboxRef.services.getServicePlatformResources(
dataArg.serviceName,
);
const resources = rawResources.map((r: any) => ({
id: r.resource.id,
resourceType: r.resource.resourceType,
resourceName: r.resource.resourceName,
platformService: {
type: r.platformService.type,
name: r.platformService.name,
status: r.platformService.status,
},
envVars: Object.keys(r.credentials).reduce((acc: Record<string, string>, key: string) => {
const value = r.credentials[key];
if (key.toLowerCase().includes('password') || key.toLowerCase().includes('secret')) {
acc[key] = '********';
} else {
acc[key] = value;
}
return acc;
}, {}),
createdAt: r.resource.createdAt,
}));
return { resources };
},
),
);
// Get service backups
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetServiceBackups>(
'getServiceBackups',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const backups = this.opsServerRef.oneboxRef.backupManager.listBackups(dataArg.serviceName);
return { backups };
},
),
);
// Create service backup
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_CreateServiceBackup>(
'createServiceBackup',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const result = await this.opsServerRef.oneboxRef.backupManager.createBackup(dataArg.serviceName);
return { backup: result.backup };
},
),
);
// Get service backup schedules
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetServiceBackupSchedules>(
'getServiceBackupSchedules',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const service = this.opsServerRef.oneboxRef.services.getService(dataArg.serviceName);
if (!service) {
throw new plugins.typedrequest.TypedResponseError('Service not found');
}
const schedules = this.opsServerRef.oneboxRef.backupScheduler.getSchedulesForService(
dataArg.serviceName,
);
return { schedules };
},
),
);
}
}
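
getServicePlatformResources masks any credential whose key mentions "password" or "secret" before it crosses the wire. The same masking in isolation, using a regex variant with identical effect:

// Sketch: key-based masking of platform resource env vars
const credentials: Record<string, string> = {
  MYSQL_HOST: 'mariadb',
  MYSQL_PASSWORD: 'hunter2',
  S3_SECRET_KEY: 'abc123',
};
const masked = Object.fromEntries(
  Object.entries(credentials).map(([key, value]) => [
    key,
    /password|secret/i.test(key) ? '********' : value,
  ]),
);
console.log(masked); // { MYSQL_HOST: 'mariadb', MYSQL_PASSWORD: '********', S3_SECRET_KEY: '********' }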

View File

@@ -0,0 +1,86 @@
import * as plugins from '../../plugins.ts';
import type { OpsServer } from '../classes.opsserver.ts';
import * as interfaces from '../../../ts_interfaces/index.ts';
import { requireValidIdentity } from '../helpers/guards.ts';
export class SettingsHandler {
public typedrouter = new plugins.typedrequest.TypedRouter();
constructor(private opsServerRef: OpsServer) {
this.opsServerRef.typedrouter.addTypedRouter(this.typedrouter);
this.registerHandlers();
}
private getSettingsObject(): interfaces.data.ISettings {
const db = this.opsServerRef.oneboxRef.database;
const settingsMap = db.getAllSettings(); // Returns Record<string, string>
return {
cloudflareToken: settingsMap['cloudflareToken'] || '',
cloudflareZoneId: settingsMap['cloudflareZoneId'] || '',
autoRenewCerts: settingsMap['autoRenewCerts'] === 'true',
renewalThreshold: parseInt(settingsMap['renewalThreshold'] || '30', 10),
acmeEmail: settingsMap['acmeEmail'] || '',
httpPort: parseInt(settingsMap['httpPort'] || '80', 10),
httpsPort: parseInt(settingsMap['httpsPort'] || '443', 10),
forceHttps: settingsMap['forceHttps'] === 'true',
};
}
private registerHandlers(): void {
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetSettings>(
'getSettings',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const settings = this.getSettingsObject();
return { settings };
},
),
);
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_UpdateSettings>(
'updateSettings',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const db = this.opsServerRef.oneboxRef.database;
const updates = dataArg.settings;
// Store each setting as key-value pair
for (const [key, value] of Object.entries(updates)) {
if (value !== undefined) {
db.setSetting(key, String(value));
}
}
const settings = this.getSettingsObject();
return { settings };
},
),
);
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_SetBackupPassword>(
'setBackupPassword',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
this.opsServerRef.oneboxRef.database.setSetting('backupPassword', dataArg.password);
return { ok: true };
},
),
);
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetBackupPasswordStatus>(
'getBackupPasswordStatus',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const backupPassword = this.opsServerRef.oneboxRef.database.getSetting('backupPassword');
const isConfigured = !!backupPassword;
return { status: { isConfigured } };
},
),
);
}
}
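
Settings live in the database as plain string key-value pairs; typing happens only on read in getSettingsObject. That makes the round-trip easy to reason about:

// Sketch: the string round-trip behind updateSettings/getSettingsObject
const stored: Record<string, string> = {};
const setSetting = (k: string, v: string) => { stored[k] = v; };

// updateSettings stringifies every defined value...
setSetting('forceHttps', String(true)); // stored as 'true'
setSetting('httpsPort', String(8443));  // stored as '8443'

// ...and getSettingsObject parses them back, with defaults for missing keys
const forceHttps = stored['forceHttps'] === 'true';           // true
const httpsPort = parseInt(stored['httpsPort'] || '443', 10); // 8443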

View File

@@ -0,0 +1,64 @@
import * as plugins from '../../plugins.ts';
import type { OpsServer } from '../classes.opsserver.ts';
import * as interfaces from '../../../ts_interfaces/index.ts';
import { requireValidIdentity } from '../helpers/guards.ts';
export class SslHandler {
public typedrouter = new plugins.typedrequest.TypedRouter();
constructor(private opsServerRef: OpsServer) {
this.opsServerRef.typedrouter.addTypedRouter(this.typedrouter);
this.registerHandlers();
}
private registerHandlers(): void {
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_ObtainCertificate>(
'obtainCertificate',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
await this.opsServerRef.oneboxRef.ssl.obtainCertificate(dataArg.domain, false);
const certificate = this.opsServerRef.oneboxRef.ssl.getCertificate(dataArg.domain);
return { certificate: certificate as unknown as interfaces.data.ICertificate };
},
),
);
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_ListCertificates>(
'listCertificates',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const certificates = this.opsServerRef.oneboxRef.ssl.listCertificates();
return { certificates: certificates as unknown as interfaces.data.ICertificate[] };
},
),
);
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetCertificate>(
'getCertificate',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const certificate = this.opsServerRef.oneboxRef.ssl.getCertificate(dataArg.domain);
if (!certificate) {
throw new plugins.typedrequest.TypedResponseError('Certificate not found');
}
return { certificate: certificate as unknown as interfaces.data.ICertificate };
},
),
);
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_RenewCertificate>(
'renewCertificate',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
await this.opsServerRef.oneboxRef.ssl.renewCertificate(dataArg.domain);
const certificate = this.opsServerRef.oneboxRef.ssl.getCertificate(dataArg.domain);
return { certificate: certificate as unknown as interfaces.data.ICertificate };
},
),
);
}
}

View File

@@ -0,0 +1,26 @@
import * as plugins from '../../plugins.ts';
import type { OpsServer } from '../classes.opsserver.ts';
import * as interfaces from '../../../ts_interfaces/index.ts';
import { requireValidIdentity } from '../helpers/guards.ts';
export class StatusHandler {
public typedrouter = new plugins.typedrequest.TypedRouter();
constructor(private opsServerRef: OpsServer) {
this.opsServerRef.typedrouter.addTypedRouter(this.typedrouter);
this.registerHandlers();
}
private registerHandlers(): void {
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_GetSystemStatus>(
'getSystemStatus',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const status = await this.opsServerRef.oneboxRef.getSystemStatus();
return { status: status as unknown as interfaces.data.ISystemStatus };
},
),
);
}
}

View File

@@ -0,0 +1,181 @@
import * as plugins from '../../plugins.ts';
import { logger } from '../../logging.ts';
import type { OpsServer } from '../classes.opsserver.ts';
import * as interfaces from '../../../ts_interfaces/index.ts';
import { requireValidIdentity } from '../helpers/guards.ts';
import { getErrorMessage } from '../../utils/error.ts';
export class WorkspaceHandler {
public typedrouter = new plugins.typedrequest.TypedRouter();
constructor(private opsServerRef: OpsServer) {
this.opsServerRef.typedrouter.addTypedRouter(this.typedrouter);
this.registerHandlers();
}
/**
* Resolve a service name to its recorded container ID (for Swarm deployments this is the stored service ID)
*/
private async resolveContainerId(serviceName: string): Promise<string> {
const service = this.opsServerRef.oneboxRef.services.getService(serviceName);
if (!service || !service.containerID) {
throw new plugins.typedrequest.TypedResponseError(`Service not found or has no container: ${serviceName}`);
}
return service.containerID;
}
private registerHandlers(): void {
// Read file from container
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_WorkspaceReadFile>(
'workspaceReadFile',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const containerId = await this.resolveContainerId(dataArg.serviceName);
const result = await this.opsServerRef.oneboxRef.docker.execInContainer(
containerId,
['cat', dataArg.path],
);
if (result.exitCode !== 0) {
throw new plugins.typedrequest.TypedResponseError(`Failed to read file: ${result.stderr || 'File not found'}`);
}
return { content: result.stdout };
},
),
);
// Write file to container
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_WorkspaceWriteFile>(
'workspaceWriteFile',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const containerId = await this.resolveContainerId(dataArg.serviceName);
// Use sh -c with printf to write content; single-quote both the content and
// the target path so special characters and spaces survive the shell
const escapedContent = dataArg.content.replace(/'/g, "'\\''");
const escapedPath = dataArg.path.replace(/'/g, "'\\''");
const result = await this.opsServerRef.oneboxRef.docker.execInContainer(
containerId,
['sh', '-c', `printf '%s' '${escapedContent}' > '${escapedPath}'`],
);
if (result.exitCode !== 0) {
throw new plugins.typedrequest.TypedResponseError(`Failed to write file: ${result.stderr}`);
}
return {};
},
),
);
// Read directory from container
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_WorkspaceReadDir>(
'workspaceReadDir',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const containerId = await this.resolveContainerId(dataArg.serviceName);
// Use ls with -1 -F to get entries with type indicators (/ for dirs)
const result = await this.opsServerRef.oneboxRef.docker.execInContainer(
containerId,
['ls', '-1', '-F', dataArg.path],
);
if (result.exitCode !== 0) {
throw new plugins.typedrequest.TypedResponseError(`Failed to read directory: ${result.stderr}`);
}
const entries = result.stdout
.split('\n')
.filter((line) => line.trim())
.map((line) => {
const isDir = line.endsWith('/');
const name = isDir ? line.slice(0, -1) : line.replace(/[*@=|]$/, '');
const basePath = dataArg.path.endsWith('/') ? dataArg.path : dataArg.path + '/';
return {
type: (isDir ? 'directory' : 'file') as 'file' | 'directory',
name,
path: basePath + name,
};
});
return { entries };
},
),
);
// Create directory in container
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_WorkspaceMkdir>(
'workspaceMkdir',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const containerId = await this.resolveContainerId(dataArg.serviceName);
const result = await this.opsServerRef.oneboxRef.docker.execInContainer(
containerId,
['mkdir', '-p', dataArg.path],
);
if (result.exitCode !== 0) {
throw new plugins.typedrequest.TypedResponseError(`Failed to create directory: ${result.stderr}`);
}
return {};
},
),
);
// Remove file/directory from container
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_WorkspaceRm>(
'workspaceRm',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const containerId = await this.resolveContainerId(dataArg.serviceName);
const args = dataArg.recursive ? ['rm', '-rf', dataArg.path] : ['rm', '-f', dataArg.path];
const result = await this.opsServerRef.oneboxRef.docker.execInContainer(
containerId,
args,
);
if (result.exitCode !== 0) {
throw new plugins.typedrequest.TypedResponseError(`Failed to remove: ${result.stderr}`);
}
return {};
},
),
);
// Check if path exists in container
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_WorkspaceExists>(
'workspaceExists',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const containerId = await this.resolveContainerId(dataArg.serviceName);
const result = await this.opsServerRef.oneboxRef.docker.execInContainer(
containerId,
['test', '-e', dataArg.path],
);
return { exists: result.exitCode === 0 };
},
),
);
// Execute a command in the container (non-interactive)
this.typedrouter.addTypedHandler(
new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_WorkspaceExec>(
'workspaceExec',
async (dataArg) => {
await requireValidIdentity(this.opsServerRef.adminHandler, dataArg);
const containerId = await this.resolveContainerId(dataArg.serviceName);
const cmd = dataArg.args
? [dataArg.command, ...dataArg.args]
: [dataArg.command];
const result = await this.opsServerRef.oneboxRef.docker.execInContainer(
containerId,
cmd,
);
return {
stdout: result.stdout,
stderr: result.stderr,
exitCode: result.exitCode,
};
},
),
);
logger.info('Workspace handler registered');
}
}
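
workspaceWriteFile leans on the classic POSIX trick for embedding arbitrary text in a single-quoted shell string: close the quote, emit an escaped literal quote, reopen. A worked example of what that replace produces:

// Sketch: how the '\'' escaping turns content into a safe single-quoted string
const content = "it's 100% done";
const escaped = content.replace(/'/g, "'\\''"); // it'\''s 100% done
const shellArg = `printf '%s' '${escaped}'`;
// The shell parses this as 'it' + \' + 's 100% done' and prints: it's 100% done
console.log(shellArg); // printf '%s' 'it'\''s 100% done'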

View File

@@ -0,0 +1,29 @@
import * as plugins from '../../plugins.ts';
import type { AdminHandler } from '../handlers/admin.handler.ts';
import * as interfaces from '../../../ts_interfaces/index.ts';
export async function requireValidIdentity<T extends { identity?: interfaces.data.IIdentity }>(
adminHandler: AdminHandler,
dataArg: T,
): Promise<void> {
if (!dataArg.identity) {
throw new plugins.typedrequest.TypedResponseError('No identity provided');
}
const passed = await adminHandler.validIdentityGuard.exec({ identity: dataArg.identity });
if (!passed) {
throw new plugins.typedrequest.TypedResponseError('Valid identity required');
}
}
export async function requireAdminIdentity<T extends { identity?: interfaces.data.IIdentity }>(
adminHandler: AdminHandler,
dataArg: T,
): Promise<void> {
if (!dataArg.identity) {
throw new plugins.typedrequest.TypedResponseError('No identity provided');
}
const passed = await adminHandler.adminIdentityGuard.exec({ identity: dataArg.identity });
if (!passed) {
throw new plugins.typedrequest.TypedResponseError('Admin access required');
}
}
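
requireValidIdentity is the guard every handler above calls first; requireAdminIdentity is the stricter variant for admin-only operations, not exercised in the handlers shown here. Inside a handler class like those above, usage would look the same; a sketch with a hypothetical request type and method name:

// Sketch: gating a hypothetical admin-only handler with requireAdminIdentity
this.typedrouter.addTypedHandler(
  new plugins.typedrequest.TypedHandler<interfaces.requests.IReq_DangerousAction>( // hypothetical type
    'dangerousAction', // hypothetical method
    async (dataArg) => {
      await requireAdminIdentity(this.opsServerRef.adminHandler, dataArg);
      // ...only admin identities reach this point
      return { ok: true };
    },
  ),
);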

ts/opsserver/index.ts Normal file
View File

@@ -0,0 +1 @@
export * from './classes.opsserver.ts';

View File

@@ -17,10 +17,6 @@ export { path, fs, http, encoding };
import { Database } from '@db/sqlite';
export const sqlite = { DB: Database };
-// Systemd Daemon Integration
-import * as smartdaemon from '@push.rocks/smartdaemon';
-export { smartdaemon };
// Docker API Client
import { DockerHost } from '@apiclient.xyz/docker';
export const docker = { Docker: DockerHost };
@@ -38,8 +34,8 @@ import * as smartregistry from '@push.rocks/smartregistry';
export { smartregistry };
// S3-compatible storage server
-import * as smarts3 from '@push.rocks/smarts3';
-export { smarts3 };
+import * as smartstorage from '@push.rocks/smartstorage';
+export { smartstorage };
// Task scheduling and cron jobs
import * as taskbuffer from '@push.rocks/taskbuffer';
@@ -61,3 +57,22 @@ export { crypto };
import * as nodeHttps from 'node:https';
import * as nodeHttp from 'node:http';
export { nodeHttps, nodeHttp };
+// TypedRequest/TypedServer infrastructure
+import * as typedrequest from '@api.global/typedrequest';
+import * as typedserver from '@api.global/typedserver';
+export { typedrequest, typedserver };
+// Auth & Guards
+import * as smartguard from '@push.rocks/smartguard';
+import * as smartjwt from '@push.rocks/smartjwt';
+export { smartguard, smartjwt };
+// Backup archive (content-addressed dedup storage)
+import { ContainerArchive } from '@serve.zone/containerarchive';
+export { ContainerArchive };
+// Node.js compat for streaming
+import * as nodeFs from 'node:fs';
+import * as nodeStream from 'node:stream';
+export { nodeFs, nodeStream };


@@ -25,6 +25,9 @@ export interface IService {
platformRequirements?: IPlatformRequirements;
// Backup settings
includeImageInBackup?: boolean;
+// App Store template tracking
+appTemplateId?: string;
+appTemplateVersion?: string;
}
// Registry types
@@ -75,7 +78,7 @@ export interface ITokenCreatedResponse {
}
// Platform service types
-export type TPlatformServiceType = 'mongodb' | 'minio' | 'redis' | 'postgresql' | 'rabbitmq' | 'caddy' | 'clickhouse';
+export type TPlatformServiceType = 'mongodb' | 'minio' | 'redis' | 'postgresql' | 'rabbitmq' | 'caddy' | 'clickhouse' | 'mariadb';
export type TPlatformResourceType = 'database' | 'bucket' | 'cache' | 'queue';
export type TPlatformServiceStatus = 'stopped' | 'starting' | 'running' | 'stopping' | 'failed';
@@ -113,6 +116,8 @@ export interface IPlatformRequirements {
mongodb?: boolean;
s3?: boolean;
clickhouse?: boolean;
+redis?: boolean;
+mariadb?: boolean;
}
export interface IProvisionedResource {
@@ -291,6 +296,11 @@ export interface IServiceDeployOptions {
enableMongoDB?: boolean;
enableS3?: boolean;
enableClickHouse?: boolean;
+enableRedis?: boolean;
+enableMariaDB?: boolean;
+// App Store template tracking
+appTemplateId?: string;
+appTemplateVersion?: string;
}
// HTTP API request/response types
@@ -346,7 +356,9 @@ export interface IBackup {
serviceId: number;
serviceName: string; // Denormalized for display
filename: string;
+snapshotId?: string; // ContainerArchive snapshot ID (new backups)
sizeBytes: number;
+storedSizeBytes?: number; // Actual stored size after dedup+compression
createdAt: number;
includesImage: boolean;
platformResources: TPlatformServiceType[]; // Which platform types were backed up
@@ -389,7 +401,8 @@ export interface IBackupPlatformResource {
export interface IBackupResult {
backup: IBackup;
-filePath: string;
+filePath?: string; // Legacy file-based backups only
+snapshotId?: string; // ContainerArchive snapshot ID
}
export interface IRestoreOptions {

ts_bundled/bundle.ts Normal file

File diff suppressed because one or more lines are too long

@@ -0,0 +1,16 @@
/**
* Auth-related data shapes for Onebox
*/
export interface IIdentity {
jwt: string;
userId: string;
username: string;
expiresAt: number;
role: 'admin' | 'user';
}
export interface IUser {
username: string;
role: 'admin' | 'user';
}


@@ -0,0 +1,91 @@
/**
* Backup-related data shapes for Onebox
*/
import type { TPlatformServiceType } from './platform.ts';
export type TBackupRestoreMode = 'restore' | 'import' | 'clone';
export type TBackupScheduleScope = 'all' | 'pattern' | 'service';
export interface IRetentionPolicy {
hourly: number;
daily: number;
weekly: number;
monthly: number;
}
export const RETENTION_PRESETS = {
standard: { hourly: 0, daily: 7, weekly: 4, monthly: 12 },
frequent: { hourly: 24, daily: 7, weekly: 4, monthly: 12 },
minimal: { hourly: 0, daily: 3, weekly: 2, monthly: 6 },
longterm: { hourly: 0, daily: 14, weekly: 8, monthly: 24 },
} as const;
export type TRetentionPreset = keyof typeof RETENTION_PRESETS | 'custom';
export interface IBackup {
id?: number;
serviceId: number;
serviceName: string;
filename: string;
snapshotId?: string;
sizeBytes: number;
storedSizeBytes?: number;
createdAt: number;
includesImage: boolean;
platformResources: TPlatformServiceType[];
checksum: string;
scheduleId?: number;
}
export interface IBackupSchedule {
id?: number;
scopeType: TBackupScheduleScope;
scopePattern?: string;
serviceId?: number;
serviceName?: string;
cronExpression: string;
retention: IRetentionPolicy;
enabled: boolean;
lastRunAt: number | null;
nextRunAt: number | null;
lastStatus: 'success' | 'failed' | null;
lastError: string | null;
createdAt: number;
updatedAt: number;
}
export interface IBackupScheduleCreate {
scopeType: TBackupScheduleScope;
scopePattern?: string;
serviceName?: string;
cronExpression: string;
retention: IRetentionPolicy;
enabled?: boolean;
}
export interface IBackupScheduleUpdate {
cronExpression?: string;
retention?: IRetentionPolicy;
enabled?: boolean;
}
export interface IRestoreOptions {
mode: TBackupRestoreMode;
newServiceName?: string;
overwriteExisting?: boolean;
skipPlatformData?: boolean;
}
export interface IRestoreResult {
service: {
name: string;
status: string;
};
platformResourcesRestored: number;
warnings: string[];
}
export interface IBackupPasswordStatus {
isConfigured: boolean;
}
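
Worth noting from this file: `RETENTION_PRESETS` maps one-to-one onto `IRetentionPolicy`, with `'custom'` signalling a caller-supplied policy. A small sketch of resolving a preset name to a concrete policy (the helper name is illustrative, not part of this diff):

// Illustrative helper: turn a preset name into a concrete retention policy.
function resolveRetention(
  preset: TRetentionPreset,
  custom?: IRetentionPolicy,
): IRetentionPolicy {
  if (preset === 'custom') {
    if (!custom) {
      throw new Error("preset 'custom' requires an explicit IRetentionPolicy");
    }
    return custom;
  }
  // Spread so callers cannot mutate the shared preset constants.
  return { ...RETENTION_PRESETS[preset] };
}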


@@ -0,0 +1,59 @@
/**
* Domain, DNS, and certificate data shapes for Onebox
*/
export interface IDomain {
id?: number;
domain: string;
dnsProvider: 'cloudflare' | 'manual' | null;
cloudflareZoneId?: string;
isObsolete: boolean;
defaultWildcard: boolean;
createdAt: number;
updatedAt: number;
}
export interface ICertificate {
id?: number;
domainId: number;
certDomain: string;
isWildcard: boolean;
certPem: string;
keyPem: string;
fullchainPem: string;
expiryDate: number;
issuer: string;
isValid: boolean;
createdAt: number;
updatedAt: number;
}
export interface ICertRequirement {
id?: number;
domainId: number;
serviceId: number;
subdomain: string;
status: 'pending' | 'active' | 'renewing' | 'failed';
certificateId?: number;
createdAt: number;
updatedAt: number;
}
export interface IDomainDetail {
domain: IDomain;
certificates: ICertificate[];
requirements: ICertRequirement[];
serviceCount: number;
certificateStatus: 'valid' | 'expiring-soon' | 'expired' | 'pending' | 'none';
daysRemaining: number | null;
}
export interface IDnsRecord {
id?: number;
domain: string;
type: 'A' | 'AAAA' | 'CNAME';
value: string;
cloudflareID?: string;
createdAt: number;
updatedAt: number;
}
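
`IDomainDetail.certificateStatus` and `daysRemaining` are derived rather than stored. A hedged sketch of how they could be computed from a certificate's `expiryDate`, assuming epoch milliseconds and a 30-day "expiring soon" window; both are assumptions, since this diff does not spell them out:

// Assumptions: expiryDate is epoch milliseconds; 30 days counts as "expiring-soon".
function deriveCertificateStatus(
  cert: ICertificate,
  now: number = Date.now(),
): Pick<IDomainDetail, 'certificateStatus' | 'daysRemaining'> {
  const daysRemaining = Math.floor((cert.expiryDate - now) / 86_400_000);
  if (daysRemaining < 0) {
    return { certificateStatus: 'expired', daysRemaining };
  }
  if (daysRemaining <= 30) {
    return { certificateStatus: 'expiring-soon', daysRemaining };
  }
  return { certificateStatus: 'valid', daysRemaining };
}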


@@ -0,0 +1,9 @@
export * from './auth.ts';
export * from './service.ts';
export * from './platform.ts';
export * from './network.ts';
export * from './domain.ts';
export * from './registry.ts';
export * from './backup.ts';
export * from './settings.ts';
export * from './system.ts';


@@ -0,0 +1,64 @@
/**
* Network-related data shapes for Onebox
*/
export type TNetworkTargetType = 'service' | 'registry' | 'platform';
export interface INetworkTarget {
type: TNetworkTargetType;
name: string;
domain: string | null;
targetHost: string;
targetPort: number;
status: string;
}
export interface INetworkStats {
proxy: {
running: boolean;
httpPort: number;
httpsPort: number;
routes: number;
certificates: number;
};
logReceiver: {
running: boolean;
port: number;
clients: number;
connections: number;
sampleRate: number;
recentLogsCount: number;
};
}
export interface ITrafficStats {
requestCount: number;
errorCount: number;
avgResponseTime: number;
totalBytes: number;
statusCounts: Record<string, number>;
requestsPerMinute: number;
errorRate: number;
}
export interface ICaddyAccessLog {
ts: number;
request: {
remote_ip: string;
method: string;
host: string;
uri: string;
proto: string;
};
status: number;
duration: number;
size: number;
}
export interface INetworkLogMessage {
type: 'connected' | 'access_log' | 'filter_updated';
clientId?: string;
filter?: { domain?: string; sampleRate?: number };
data?: ICaddyAccessLog;
timestamp: number;
}


@@ -0,0 +1,39 @@
/**
* Platform service data shapes for Onebox
*/
export type TPlatformServiceType = 'mongodb' | 'minio' | 'redis' | 'postgresql' | 'rabbitmq' | 'caddy' | 'clickhouse' | 'mariadb';
export type TPlatformServiceStatus = 'not-deployed' | 'stopped' | 'starting' | 'running' | 'stopping' | 'failed';
export type TPlatformResourceType = 'database' | 'bucket' | 'cache' | 'queue';
export interface IPlatformRequirements {
mongodb?: boolean;
s3?: boolean;
clickhouse?: boolean;
redis?: boolean;
mariadb?: boolean;
}
export interface IPlatformService {
type: TPlatformServiceType;
displayName: string;
resourceTypes: TPlatformResourceType[];
status: TPlatformServiceStatus;
containerId?: string;
isCore?: boolean;
createdAt?: number;
updatedAt?: number;
}
export interface IPlatformResource {
id: number;
resourceType: TPlatformResourceType;
resourceName: string;
platformService: {
type: TPlatformServiceType;
name: string;
status: TPlatformServiceStatus;
};
envVars: Record<string, string>;
createdAt: number;
}


@@ -0,0 +1,35 @@
/**
* Registry-related data shapes for Onebox
*/
export interface IRegistry {
id?: number;
url: string;
username: string;
createdAt: number;
}
export interface IRegistryToken {
id: number;
name: string;
type: 'global' | 'ci';
scope: 'all' | string[];
scopeDisplay: string;
expiresAt: number | null;
createdAt: number;
lastUsedAt: number | null;
createdBy: string;
isExpired: boolean;
}
export interface ICreateTokenRequest {
name: string;
type: 'global' | 'ci';
scope: 'all' | string[];
expiresIn: '30d' | '90d' | '365d' | 'never';
}
export interface ITokenCreatedResponse {
token: IRegistryToken;
plainToken: string;
}


@@ -0,0 +1,89 @@
/**
* Service-related data shapes for Onebox
*/
import type { IPlatformRequirements } from './platform.ts';
export type TServiceStatus = 'stopped' | 'starting' | 'running' | 'stopping' | 'failed';
export interface IService {
id?: number;
name: string;
image: string;
registry?: string;
envVars: Record<string, string>;
port: number;
domain?: string;
containerID?: string;
status: TServiceStatus;
createdAt: number;
updatedAt: number;
// Onebox Registry fields
useOneboxRegistry?: boolean;
registryRepository?: string;
registryImageTag?: string;
autoUpdateOnPush?: boolean;
imageDigest?: string;
// Platform service requirements
platformRequirements?: IPlatformRequirements;
// Backup settings
includeImageInBackup?: boolean;
// App Store template tracking
appTemplateId?: string;
appTemplateVersion?: string;
}
export interface IServiceCreate {
name: string;
image: string;
port: number;
domain?: string;
envVars?: Record<string, string>;
useOneboxRegistry?: boolean;
registryImageTag?: string;
autoUpdateOnPush?: boolean;
enableMongoDB?: boolean;
enableS3?: boolean;
enableClickHouse?: boolean;
enableRedis?: boolean;
enableMariaDB?: boolean;
appTemplateId?: string;
appTemplateVersion?: string;
}
export interface IServiceUpdate {
image?: string;
registry?: string;
port?: number;
domain?: string;
envVars?: Record<string, string>;
}
export interface IContainerStats {
cpuPercent: number;
memoryUsed: number;
memoryLimit: number;
memoryPercent: number;
networkRx: number;
networkTx: number;
}
export interface IMetric {
id?: number;
serviceId: number;
timestamp: number;
cpuPercent: number;
memoryUsed: number;
memoryLimit: number;
networkRxBytes: number;
networkTxBytes: number;
}
export interface ILogEntry {
id?: number;
serviceId: number;
timestamp: number;
message: string;
level: 'info' | 'warn' | 'error' | 'debug';
source: 'stdout' | 'stderr';
}
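
`IServiceCreate` above is the deployment payload: the `enable*` flags line up with `IPlatformRequirements`, and the `appTemplate*` fields record which App Store template a service came from. An illustrative payload (every value here is an example, not a default):

// Example values only; a real deployment supplies its own image, port, and domain.
const createRequest: IServiceCreate = {
  name: 'ghost-blog',
  image: 'ghost:5',
  port: 2368,
  domain: 'blog.example.com',
  envVars: { NODE_ENV: 'production' },
  enableMariaDB: true,
  appTemplateId: 'ghost',
  appTemplateVersion: '5.0.0',
};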


@@ -0,0 +1,14 @@
/**
* Settings data shapes for Onebox
*/
export interface ISettings {
cloudflareToken: string;
cloudflareZoneId: string;
autoRenewCerts: boolean;
renewalThreshold: number;
acmeEmail: string;
httpPort: number;
httpsPort: number;
forceHttps: boolean;
}


@@ -0,0 +1,37 @@
/**
* System status data shapes for Onebox
*/
import type { TPlatformServiceType, TPlatformServiceStatus } from './platform.ts';
export interface ISystemStatus {
docker: {
running: boolean;
version: unknown;
cpuUsage: number;
memoryUsage: number;
memoryTotal: number;
networkIn: number;
networkOut: number;
};
reverseProxy: {
http: { running: boolean; port: number };
https: { running: boolean; port: number; certificates: number };
routes: number;
};
dns: { configured: boolean };
ssl: { configured: boolean; certificateCount: number };
services: { total: number; running: number; stopped: number };
platformServices: Array<{
type: TPlatformServiceType;
displayName: string;
status: TPlatformServiceStatus;
resourceCount: number;
}>;
certificateHealth: {
valid: number;
expiringSoon: number;
expired: number;
expiringDomains: Array<{ domain: string; daysRemaining: number }>;
};
}

ts_interfaces/index.ts Normal file

@@ -0,0 +1,9 @@
export * from './plugins.ts';
// Data types
import * as data from './data/index.ts';
export { data };
// Request interfaces
import * as requests from './requests/index.ts';
export { requests };

ts_interfaces/plugins.ts Normal file

@@ -0,0 +1,6 @@
// @apiglobal scope
import * as typedrequestInterfaces from '@api.global/typedrequest-interfaces';
export {
typedrequestInterfaces,
};


@@ -0,0 +1,58 @@
import * as plugins from '../plugins.ts';
import * as data from '../data/index.ts';
export interface IReq_AdminLoginWithUsernameAndPassword extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_AdminLoginWithUsernameAndPassword
> {
method: 'adminLoginWithUsernameAndPassword';
request: {
username: string;
password: string;
};
response: {
identity?: data.IIdentity;
};
}
export interface IReq_AdminLogout extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_AdminLogout
> {
method: 'adminLogout';
request: {
identity: data.IIdentity;
};
response: {
ok: boolean;
};
}
export interface IReq_VerifyIdentity extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_VerifyIdentity
> {
method: 'verifyIdentity';
request: {
identity: data.IIdentity;
};
response: {
valid: boolean;
identity?: data.IIdentity;
};
}
export interface IReq_ChangePassword extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_ChangePassword
> {
method: 'changePassword';
request: {
identity: data.IIdentity;
currentPassword: string;
newPassword: string;
};
response: {
ok: boolean;
};
}


@@ -0,0 +1,106 @@
import * as plugins from '../plugins.ts';
import * as data from '../data/index.ts';
export interface ICatalogApp {
id: string;
name: string;
description: string;
category: string;
iconName?: string;
iconUrl?: string;
latestVersion: string;
tags?: string[];
}
export interface IAppVersionConfig {
image: string;
port: number;
envVars?: Array<{ key: string; value: string; description: string; required?: boolean }>;
volumes?: string[];
platformRequirements?: {
mongodb?: boolean;
s3?: boolean;
clickhouse?: boolean;
redis?: boolean;
mariadb?: boolean;
};
minOneboxVersion?: string;
}
export interface IAppMeta {
id: string;
name: string;
description: string;
category: string;
iconName?: string;
latestVersion: string;
versions: string[];
maintainer?: string;
links?: Record<string, string>;
}
export interface IUpgradeableService {
serviceName: string;
appTemplateId: string;
currentVersion: string;
latestVersion: string;
hasMigration: boolean;
}
export interface IReq_GetAppTemplates extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_GetAppTemplates
> {
method: 'getAppTemplates';
request: {
identity: data.IIdentity;
};
response: {
apps: ICatalogApp[];
};
}
export interface IReq_GetAppConfig extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_GetAppConfig
> {
method: 'getAppConfig';
request: {
identity: data.IIdentity;
appId: string;
version: string;
};
response: {
config: IAppVersionConfig;
appMeta: IAppMeta;
};
}
export interface IReq_GetUpgradeableServices extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_GetUpgradeableServices
> {
method: 'getUpgradeableServices';
request: {
identity: data.IIdentity;
};
response: {
services: IUpgradeableService[];
};
}
export interface IReq_UpgradeService extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_UpgradeService
> {
method: 'upgradeService';
request: {
identity: data.IIdentity;
serviceName: string;
targetVersion: string;
};
response: {
service: data.IService;
warnings: string[];
};
}
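
Each interface in this file pins a `method` name to its request and response shapes, so the web UI and the ops server share one contract. A sketch of a request body typed against `IReq_GetAppConfig`; how it is dispatched depends on the typedrequest wiring, which sits outside this diff, and the values are illustrative:

// Sketch: `identity` would come from a prior adminLoginWithUsernameAndPassword call.
declare const identity: data.IIdentity;

const getAppConfigPayload: IReq_GetAppConfig['request'] = {
  identity,
  appId: 'ghost',
  version: '5.0.0',
};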


@@ -0,0 +1,86 @@
import * as plugins from '../plugins.ts';
import * as data from '../data/index.ts';
export interface IReq_GetBackupSchedules extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_GetBackupSchedules
> {
method: 'getBackupSchedules';
request: {
identity: data.IIdentity;
};
response: {
schedules: data.IBackupSchedule[];
};
}
export interface IReq_CreateBackupSchedule extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_CreateBackupSchedule
> {
method: 'createBackupSchedule';
request: {
identity: data.IIdentity;
scheduleConfig: data.IBackupScheduleCreate;
};
response: {
schedule: data.IBackupSchedule;
};
}
export interface IReq_GetBackupSchedule extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_GetBackupSchedule
> {
method: 'getBackupSchedule';
request: {
identity: data.IIdentity;
scheduleId: number;
};
response: {
schedule: data.IBackupSchedule;
};
}
export interface IReq_UpdateBackupSchedule extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_UpdateBackupSchedule
> {
method: 'updateBackupSchedule';
request: {
identity: data.IIdentity;
scheduleId: number;
updates: data.IBackupScheduleUpdate;
};
response: {
schedule: data.IBackupSchedule;
};
}
export interface IReq_DeleteBackupSchedule extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_DeleteBackupSchedule
> {
method: 'deleteBackupSchedule';
request: {
identity: data.IIdentity;
scheduleId: number;
};
response: {
ok: boolean;
};
}
export interface IReq_TriggerBackupSchedule extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_TriggerBackupSchedule
> {
method: 'triggerBackupSchedule';
request: {
identity: data.IIdentity;
scheduleId: number;
};
response: {
backup: data.IBackup;
};
}


@@ -0,0 +1,73 @@
import * as plugins from '../plugins.ts';
import * as data from '../data/index.ts';
export interface IReq_GetBackups extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_GetBackups
> {
method: 'getBackups';
request: {
identity: data.IIdentity;
};
response: {
backups: data.IBackup[];
};
}
export interface IReq_GetBackup extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_GetBackup
> {
method: 'getBackup';
request: {
identity: data.IIdentity;
backupId: number;
};
response: {
backup: data.IBackup;
};
}
export interface IReq_DeleteBackup extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_DeleteBackup
> {
method: 'deleteBackup';
request: {
identity: data.IIdentity;
backupId: number;
};
response: {
ok: boolean;
};
}
export interface IReq_RestoreBackup extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_RestoreBackup
> {
method: 'restoreBackup';
request: {
identity: data.IIdentity;
backupId: number;
options: data.IRestoreOptions;
};
response: {
result: data.IRestoreResult;
};
}
export interface IReq_DownloadBackup extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_DownloadBackup
> {
method: 'downloadBackup';
request: {
identity: data.IIdentity;
backupId: number;
};
response: {
downloadUrl: string;
filename: string;
};
}


@@ -0,0 +1,58 @@
import * as plugins from '../plugins.ts';
import * as data from '../data/index.ts';
export interface IReq_GetDnsRecords extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_GetDnsRecords
> {
method: 'getDnsRecords';
request: {
identity: data.IIdentity;
};
response: {
records: data.IDnsRecord[];
};
}
export interface IReq_CreateDnsRecord extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_CreateDnsRecord
> {
method: 'createDnsRecord';
request: {
identity: data.IIdentity;
domain: string;
type: 'A' | 'AAAA' | 'CNAME';
value: string;
};
response: {
record: data.IDnsRecord;
};
}
export interface IReq_DeleteDnsRecord extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_DeleteDnsRecord
> {
method: 'deleteDnsRecord';
request: {
identity: data.IIdentity;
domain: string;
};
response: {
ok: boolean;
};
}
export interface IReq_SyncDns extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_SyncDns
> {
method: 'syncDns';
request: {
identity: data.IIdentity;
};
response: {
records: data.IDnsRecord[];
};
}


@@ -0,0 +1,42 @@
import * as plugins from '../plugins.ts';
import * as data from '../data/index.ts';
export interface IReq_GetDomains extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_GetDomains
> {
method: 'getDomains';
request: {
identity: data.IIdentity;
};
response: {
domains: data.IDomainDetail[];
};
}
export interface IReq_GetDomain extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_GetDomain
> {
method: 'getDomain';
request: {
identity: data.IIdentity;
domainName: string;
};
response: {
domain: data.IDomainDetail;
};
}
export interface IReq_SyncDomains extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_SyncDomains
> {
method: 'syncDomains';
request: {
identity: data.IIdentity;
};
response: {
domains: data.IDomainDetail[];
};
}


@@ -0,0 +1,15 @@
export * from './admin.ts';
export * from './status.ts';
export * from './services.ts';
export * from './platform-services.ts';
export * from './ssl.ts';
export * from './domains.ts';
export * from './dns.ts';
export * from './registry.ts';
export * from './network.ts';
export * from './backups.ts';
export * from './backup-schedules.ts';
export * from './settings.ts';
export * from './logs.ts';
export * from './workspace.ts';
export * from './appstore.ts';


@@ -0,0 +1,60 @@
import * as plugins from '../plugins.ts';
import * as data from '../data/index.ts';
export interface IReq_GetServiceLogStream extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_GetServiceLogStream
> {
method: 'getServiceLogStream';
request: {
identity: data.IIdentity;
serviceName: string;
};
response: {
logStream: plugins.typedrequestInterfaces.IVirtualStream;
};
}
export interface IReq_GetPlatformServiceLogStream extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_GetPlatformServiceLogStream
> {
method: 'getPlatformServiceLogStream';
request: {
identity: data.IIdentity;
serviceType: data.TPlatformServiceType;
};
response: {
logStream: plugins.typedrequestInterfaces.IVirtualStream;
};
}
export interface IReq_GetNetworkLogStream extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_GetNetworkLogStream
> {
method: 'getNetworkLogStream';
request: {
identity: data.IIdentity;
filter?: {
domain?: string;
sampleRate?: number;
};
};
response: {
logStream: plugins.typedrequestInterfaces.IVirtualStream;
};
}
export interface IReq_GetEventStream extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_GetEventStream
> {
method: 'getEventStream';
request: {
identity: data.IIdentity;
};
response: {
eventStream: plugins.typedrequestInterfaces.IVirtualStream;
};
}


@@ -0,0 +1,41 @@
import * as plugins from '../plugins.ts';
import * as data from '../data/index.ts';
export interface IReq_GetNetworkTargets extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_GetNetworkTargets
> {
method: 'getNetworkTargets';
request: {
identity: data.IIdentity;
};
response: {
targets: data.INetworkTarget[];
};
}
export interface IReq_GetNetworkStats extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_GetNetworkStats
> {
method: 'getNetworkStats';
request: {
identity: data.IIdentity;
};
response: {
stats: data.INetworkStats;
};
}
export interface IReq_GetTrafficStats extends plugins.typedrequestInterfaces.implementsTR<
plugins.typedrequestInterfaces.ITypedRequest,
IReq_GetTrafficStats
> {
method: 'getTrafficStats';
request: {
identity: data.IIdentity;
};
response: {
stats: data.ITrafficStats;
};
}

Some files were not shown because too many files have changed in this diff.