Compare commits


25 Commits

Author SHA1 Message Date
dfb2d3b340 v1.13.8
Some checks failed
Publish to npm / npm-publish (push) Failing after 9s
CI / Build Test (Current Platform) (push) Failing after 15s
CI / Build All Platforms (push) Failing after 16s
CI / Type Check & Lint (push) Failing after 38s
Release / build-and-release (push) Failing after 31s
2026-03-15 18:12:48 +00:00
6a19ab05e3 fix(repo): no changes to commit 2026-03-15 18:12:48 +00:00
7b718da7a2 v1.13.7
Some checks failed
CI / Build All Platforms (push) Failing after 13m21s
CI / Build Test (Current Platform) (push) Failing after 13m23s
CI / Type Check & Lint (push) Failing after 13m25s
Release / build-and-release (push) Failing after 15s
Publish to npm / npm-publish (push) Failing after 6s
2026-03-15 16:38:24 +00:00
ebaf545418 fix(repo): no changes to commit 2026-03-15 16:38:24 +00:00
2cdfdaed55 v1.13.6
Some checks failed
CI / Build All Platforms (push) Has been cancelled
CI / Build Test (Current Platform) (push) Has been cancelled
Publish to npm / npm-publish (push) Has been cancelled
Release / build-and-release (push) Has been cancelled
CI / Type Check & Lint (push) Failing after 53m53s
2026-03-15 15:49:42 +00:00
2216804652 fix(ci): correct workflow container image registry path 2026-03-15 15:49:42 +00:00
1b177037f5 v1.13.5
Some checks failed
CI / Type Check & Lint (push) Failing after 1s
CI / Build Test (Current Platform) (push) Failing after 1s
CI / Build All Platforms (push) Failing after 1s
Publish to npm / npm-publish (push) Failing after 1s
Release / build-and-release (push) Failing after 1s
2026-03-15 15:47:21 +00:00
9d6590927c fix(workflows): switch Gitea workflow containers from ht-docker-dbase to ht-docker-node 2026-03-15 15:47:21 +00:00
eaf401200c v1.13.4
Some checks failed
CI / Type Check & Lint (push) Failing after 2s
CI / Build Test (Current Platform) (push) Failing after 1s
CI / Build All Platforms (push) Failing after 1s
Publish to npm / npm-publish (push) Failing after 1s
Release / build-and-release (push) Failing after 1s
2026-03-15 15:44:54 +00:00
e97a4d53ae fix(ci): run workflows in the shared build container and enable corepack for pnpm installs 2026-03-15 15:44:54 +00:00
ca2b3b25a5 v1.13.3
Some checks failed
Publish to npm / npm-publish (push) Failing after 7s
CI / Build Test (Current Platform) (push) Failing after 12s
CI / Build All Platforms (push) Failing after 13s
Release / build-and-release (push) Failing after 13s
CI / Type Check & Lint (push) Failing after 24s
2026-03-15 15:41:37 +00:00
19703de50d fix(build): replace custom Deno compile scripts with tsdeno-based binary builds in CI and release workflows 2026-03-15 15:41:37 +00:00
bcab4f274e v1.13.2
Some checks failed
CI / Type Check & Lint (push) Failing after 9s
CI / Build Test (Current Platform) (push) Failing after 9s
Release / build-and-release (push) Failing after 9s
Publish to npm / npm-publish (push) Failing after 1m54s
CI / Build All Platforms (push) Successful in 3m37s
2026-03-15 13:37:03 +00:00
64e947735f fix(scripts): install production dependencies before compiling binaries and exclude local node_modules from builds 2026-03-15 13:37:03 +00:00
1e05c08002 v1.13.1
Some checks failed
CI / Type Check & Lint (push) Failing after 8s
CI / Build Test (Current Platform) (push) Failing after 8s
CI / Build All Platforms (push) Failing after 9s
Publish to npm / npm-publish (push) Failing after 9s
Release / build-and-release (push) Failing after 9s
2026-03-15 13:31:26 +00:00
167df321f9 fix(deno): remove nodeModulesDir from Deno configuration 2026-03-15 13:31:25 +00:00
49998c4c32 add migration
Some checks failed
CI / Type Check & Lint (push) Failing after 36s
CI / Build Test (Current Platform) (push) Failing after 1m8s
CI / Build All Platforms (push) Successful in 8m29s
2026-03-15 12:45:13 +00:00
8045ec38df v1.13.0
Some checks failed
CI / Build Test (Current Platform) (push) Failing after 1m0s
CI / Type Check & Lint (push) Failing after 1m10s
Release / build-and-release (push) Failing after 4m38s
Publish to npm / npm-publish (push) Failing after 5m34s
CI / Build All Platforms (push) Successful in 10m5s
2026-03-15 12:24:48 +00:00
793fb18b43 feat(install): improve installer with version selection, service restart handling, and upgrade documentation 2026-03-15 12:24:48 +00:00
09534fd899 v1.12.1
Some checks failed
CI / Type Check & Lint (push) Failing after 45s
CI / Build Test (Current Platform) (push) Failing after 1m24s
CI / Build All Platforms (push) Failing after 3m27s
Publish to npm / npm-publish (push) Failing after 3m21s
Release / build-and-release (push) Failing after 4m45s
2026-03-15 12:07:15 +00:00
5f3783a5e9 fix(package.json): update package metadata 2026-03-15 12:07:15 +00:00
92555c5a5e v1.12.0
Some checks failed
Publish to npm / npm-publish (push) Failing after 4m33s
Release / build-and-release (push) Failing after 4m49s
2026-03-15 12:06:55 +00:00
ddc7fa4bee feat(cli,release): add self-upgrade command and automate CI, release, and npm publishing workflows 2026-03-15 12:06:55 +00:00
eceb5d99c8 v1.11.0 2026-03-03 11:57:41 +00:00
0631b7731f feat(services): map backend service data to UI components, add stats & logs parsing, fetch service stats, and fix logs request param 2026-03-03 11:57:41 +00:00
35 changed files with 1979 additions and 1096 deletions


@@ -1,140 +0,0 @@
# Onebox Development Notes
## ⚠️ CRITICAL DEVELOPMENT RULES ⚠️
### NEVER GUESS - ALWAYS READ THE ACTUAL CODE
**FUCKING ALWAYS look at the dependency actual code. Don't start fucking guessing stuff.**
run "pnpm run watch" when starting to do stuff, so the UI gets recompiled and the server automatically restarts on file changes.
When working with any dependency:
1. **READ the actual source code** in `node_modules/` or check the package documentation
2. **CHECK the exact API** - don't assume based on similar libraries
3. **VERIFY method names, return types, and property structures** before using them
4. **TEST with the actual implementation** - APIs change between versions
Common mistakes to avoid:
- ❌ Assuming API structure based on similar libraries
- ❌ Guessing method names or property paths
- ❌ Using outdated documentation without checking current version
- ✅ Read the actual TypeScript definitions in node_modules
- ✅ Check the package's README and changelog
- ✅ Test the actual behavior before implementing
## Architecture Changes
### Reverse Proxy Implementation
- **Replaced Nginx** with native Deno reverse proxy (`ts/classes/reverseproxy.ts`)
- Features:
- HTTP/HTTPS dual servers (ports 80/443)
- TLS/SSL certificate management with hot-reload
- WebSocket bidirectional proxying
- Dynamic routing from database
- SNI (Server Name Indication) support
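A minimal sketch of the host-based routing idea, assuming a hard-coded route map in place of the database-backed routing table (the real `ts/classes/reverseproxy.ts` adds TLS/SNI, WebSocket pass-through, and dynamic routes):
```typescript
// Illustrative sketch only — the route map stands in for database-driven routing.
const routes: Record<string, string> = {
  "app.example.com": "http://127.0.0.1:8080", // hypothetical upstream
};

Deno.serve({ port: 80 }, async (req) => {
  const host = req.headers.get("host")?.split(":")[0] ?? "";
  const upstream = routes[host];
  if (!upstream) return new Response("No route for host", { status: 502 });
  const url = new URL(req.url);
  // Forward method, headers, and body to the selected upstream.
  return await fetch(`${upstream}${url.pathname}${url.search}`, {
    method: req.method,
    headers: req.headers,
    body: req.body,
  });
});
```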
### Code Organization
- Removed "onebox." prefix from all TypeScript files
- Organized into subfolders:
- `ts/classes/` - All class implementations
- `ts/` - Root level utilities (logging, types, plugins, cli, info)
### WebSocket Real-time Communication
- **Backend**: WebSocket endpoint at `/api/ws` (`ts/classes/httpserver.ts:96-174`)
- Connection management with client Set tracking
- Broadcast methods: `broadcast()`, `broadcastServiceUpdate()`, `broadcastServiceStatus()`
- Integrated with service lifecycle (start/stop/restart actions)
- Status monitoring loop broadcasts changes automatically
- **Frontend**: Angular WebSocket service (`ui/src/app/core/services/websocket.service.ts`)
- Auto-connects on app initialization
- Exponential backoff reconnection (max 5 attempts)
- RxJS Observable-based message streaming
- Components subscribe to real-time updates
- **Message Types**:
- `connected` - Initial connection confirmation
- `service_update` - Service lifecycle changes (action: created/updated/deleted/started/stopped)
- `service_status` - Real-time status changes from monitoring loop
- `system_status` - System-wide updates
- **Testing**: Use `.nogit/test-ws-updates.ts` to monitor WebSocket messages
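A monitor along those lines might look like the following (hypothetical sketch — the exact message shape, including the `type` field, should be verified against `ts/classes/httpserver.ts`):
```typescript
// Connect to the backend WebSocket endpoint and log the documented message types.
const ws = new WebSocket("ws://localhost:3000/api/ws");

ws.onopen = () => console.log("connected to /api/ws");
ws.onmessage = (event) => {
  const msg = JSON.parse(event.data); // assumes JSON messages with a `type` field
  switch (msg.type) {
    case "connected":      console.log("handshake confirmed"); break;
    case "service_update": console.log("service lifecycle:", msg); break;
    case "service_status": console.log("status change:", msg); break;
    case "system_status":  console.log("system update:", msg); break;
    default:               console.log("unrecognized message:", msg);
  }
};
ws.onclose = () => console.log("disconnected");
```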
### Docker Configuration
- **System Docker**: Uses root Docker at `/var/run/docker.sock` (NOT rootless)
- **Swarm Mode**: Enabled for service orchestration
- **API Access**: Interact with Docker via direct API calls to the socket
- ❌ DO NOT switch Docker CLI contexts
- ✅ Use curl/HTTP requests to `/var/run/docker.sock`
- **Network**: Overlay network `onebox-network` with `Attachable: true`
- **Services vs Containers**: All workloads run as Swarm services (not standalone containers)
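For example, hitting the Engine API directly over the socket (assumes a curl build with `--unix-socket` support):
```bash
# Engine version info via the socket — no CLI context involved
curl -s --unix-socket /var/run/docker.sock http://localhost/version
# List Swarm services (all workloads run as services, not containers)
curl -s --unix-socket /var/run/docker.sock http://localhost/services
```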
## Debugging Tips
### Backend Logs
Use the background bash task to check server logs:
```bash
# Check for specific patterns (e.g., Login attempts)
BashOutput tool with filter: "Login|error|Error"
# Check all recent output
BashOutput tool without filter
```
The dev server runs with `--watch` so it auto-restarts on file changes.
### Frontend Testing
Use Playwright for UI testing:
```typescript
// Navigate to app
mcp__playwright__browser_navigate({ url: "http://localhost:3000" })
// Fill login form
mcp__playwright__browser_fill_form({
fields: [
{ name: "Username", type: "textbox", ref: "...", value: "admin" },
{ name: "Password", type: "textbox", ref: "...", value: "admin" }
]
})
// Click button
mcp__playwright__browser_click({ element: "Sign in button", ref: "..." })
// Check console errors
// Playwright automatically shows console messages in results
```
### Common Issues
#### Login Issue (Fixed)
**Problem**: `admin/admin` credentials returned "Invalid credentials"
**Root Cause**: `rowToUser()` function in database.ts was accessing rows as arrays `row[2]` instead of objects `row.password_hash`. The @db/sqlite library returns rows as objects with snake_case column names.
**Fix**: Updated `rowToUser()` to support both access patterns:
```typescript
private rowToUser(row: any): IUser {
return {
    passwordHash: String(row.password_hash || row[2]), // object key first, positional index as fallback
// ... other fields
};
}
```
**Location**: `ts/classes/database.ts:506-515`
## Default Credentials
- Username: `admin`
- Password: `admin`
- ⚠️ Change immediately after first login!
## Development Server
```bash
# Main server (port 3000)
deno task dev
# Check server status
curl http://localhost:3000/api/status
```
## API Endpoints
- `POST /api/auth/login` - Login (returns JWT-like token)
- `GET /api/status` - System status (requires auth)
- `GET /api/services` - List services (requires auth)
- See `ts/classes/httpserver.ts` for full API
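A quick end-to-end smoke test from the shell might look like this (the request/response field names are assumptions — check `ts/classes/httpserver.ts` for the actual schema):
```bash
# Log in with the default credentials and capture the token (field names assumed)
TOKEN=$(curl -s -X POST http://localhost:3000/api/auth/login \
  -H "Content-Type: application/json" \
  -d '{"username":"admin","password":"admin"}' | jq -r '.token')

# Call an authenticated endpoint
curl -s http://localhost:3000/api/status -H "Authorization: Bearer $TOKEN"
```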


@@ -0,0 +1,37 @@
## Onebox {{VERSION}}
Pre-compiled binaries for multiple platforms.
### Installation
#### Option 1: Via npm (recommended)
```bash
npm install -g @serve.zone/onebox
```
#### Option 2: Via installer script
```bash
curl -sSL https://code.foss.global/serve.zone/onebox/raw/branch/main/install.sh | sudo bash
```
#### Option 3: Direct binary download
Download the appropriate binary for your platform from the assets below and make it executable.
### Supported Platforms
- Linux x86_64 (x64)
- Linux ARM64 (aarch64)
- macOS x86_64 (Intel)
- macOS ARM64 (Apple Silicon)
- Windows x86_64
### Checksums
SHA256 checksums are provided in `SHA256SUMS.txt` for binary verification.
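For example, with a binary and `SHA256SUMS.txt` downloaded into the same directory:
```bash
# Verify only the files that are actually present locally
sha256sum -c SHA256SUMS.txt --ignore-missing
```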
### npm Package
The npm package includes automatic binary detection and installation for your platform.
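A sketch of the detection idea — not the package's actual postinstall code — mapping Node's platform/arch identifiers onto the binary names listed above:
```typescript
// Hypothetical sketch; asset names follow the onebox-<os>-<arch> convention above.
const osMap: Record<string, string> = { linux: "linux", darwin: "macos", win32: "windows" };
const archMap: Record<string, string> = { x64: "x64", arm64: "arm64" };

const os = osMap[process.platform];
const arch = archMap[process.arch];
if (!os || !arch) {
  throw new Error(`Unsupported platform: ${process.platform}/${process.arch}`);
}
const binary = `onebox-${os}-${arch}${os === "windows" ? ".exe" : ""}`;
console.log(`Selected binary asset: ${binary}`);
```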

.gitea/workflows/ci.yml

@@ -0,0 +1,114 @@
name: CI
on:
push:
branches:
- main
pull_request:
branches:
- main
jobs:
check:
name: Type Check & Lint
runs-on: ubuntu-latest
container:
image: code.foss.global/host.today/ht-docker-node:latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Deno
uses: denoland/setup-deno@v1
with:
deno-version: v2.x
- name: Install dependencies
run: deno install --entrypoint mod.ts
- name: Check TypeScript types
run: deno check mod.ts
- name: Lint code
run: deno lint
continue-on-error: true
- name: Format check
run: deno fmt --check
continue-on-error: true
build:
name: Build Test (Current Platform)
runs-on: ubuntu-latest
container:
image: code.foss.global/host.today/ht-docker-node:latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Deno
uses: denoland/setup-deno@v1
with:
deno-version: v2.x
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '22'
- name: Enable corepack
run: corepack enable
- name: Install dependencies
run: pnpm install
- name: Compile for current platform
run: |
echo "Testing compilation for Linux x86_64..."
npx tsdeno compile --allow-all --no-check \
--output onebox-test \
--target x86_64-unknown-linux-gnu mod.ts
- name: Test binary execution
run: |
chmod +x onebox-test
./onebox-test --version
./onebox-test --help
build-all:
name: Build All Platforms
runs-on: ubuntu-latest
container:
image: code.foss.global/host.today/ht-docker-node:latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Deno
uses: denoland/setup-deno@v1
with:
deno-version: v2.x
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '22'
- name: Enable corepack
run: corepack enable
- name: Install dependencies
run: pnpm install
- name: Compile all platform binaries
run: mkdir -p dist/binaries && npx tsdeno compile
- name: Upload all binaries as artifact
uses: actions/upload-artifact@v3
with:
name: onebox-binaries.zip
path: dist/binaries/*
retention-days: 30


@@ -0,0 +1,131 @@
name: Publish to npm
on:
push:
tags:
- 'v*'
jobs:
npm-publish:
runs-on: ubuntu-latest
container:
image: code.foss.global/host.today/ht-docker-node:latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Deno
uses: denoland/setup-deno@v1
with:
deno-version: v2.x
- name: Setup Node.js for npm publishing
uses: actions/setup-node@v4
with:
node-version: '18.x'
registry-url: 'https://registry.npmjs.org/'
- name: Get version from tag
id: version
run: |
VERSION=${GITHUB_REF#refs/tags/}
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "version_number=${VERSION#v}" >> $GITHUB_OUTPUT
echo "Publishing version: $VERSION"
- name: Verify deno.json version matches tag
run: |
DENO_VERSION=$(grep -o '"version": "[^"]*"' deno.json | cut -d'"' -f4)
TAG_VERSION="${{ steps.version.outputs.version_number }}"
echo "deno.json version: $DENO_VERSION"
echo "Tag version: $TAG_VERSION"
if [ "$DENO_VERSION" != "$TAG_VERSION" ]; then
echo "ERROR: Version mismatch!"
echo "deno.json has version $DENO_VERSION but tag is $TAG_VERSION"
exit 1
fi
- name: Compile binaries for npm package
run: |
echo "Compiling binaries for npm package..."
deno task compile
echo ""
echo "Binary sizes:"
ls -lh dist/binaries/
- name: Generate SHA256 checksums
run: |
cd dist/binaries
sha256sum * > SHA256SUMS
cat SHA256SUMS
cd ../..
- name: Sync package.json version
run: |
VERSION="${{ steps.version.outputs.version_number }}"
echo "Syncing package.json to version ${VERSION}..."
npm version ${VERSION} --no-git-tag-version --allow-same-version
echo "package.json version: $(grep '"version"' package.json | head -1)"
- name: Create npm package
run: |
echo "Creating npm package..."
npm pack
echo ""
echo "Package created:"
ls -lh *.tgz
- name: Test local installation
run: |
echo "Testing local package installation..."
PACKAGE_FILE=$(ls *.tgz)
npm install -g ${PACKAGE_FILE}
echo ""
echo "Testing onebox command:"
onebox --version || echo "Note: Binary execution may fail in CI environment"
echo ""
echo "Checking installed files:"
npm ls -g @serve.zone/onebox || true
- name: Publish to npm
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
run: |
echo "Publishing to npm registry..."
npm publish --access public
echo ""
echo "Successfully published @serve.zone/onebox to npm!"
echo ""
echo "Package info:"
npm view @serve.zone/onebox
- name: Verify npm package
run: |
echo "Waiting for npm propagation..."
sleep 30
echo ""
echo "Verifying published package..."
npm view @serve.zone/onebox
echo ""
echo "Testing installation from npm:"
npm install -g @serve.zone/onebox
echo ""
echo "Package installed successfully!"
which onebox || echo "Binary location check skipped"
- name: Publish Summary
run: |
echo "================================================"
echo " npm Publish Complete!"
echo "================================================"
echo ""
echo "Package: @serve.zone/onebox"
echo "Version: ${{ steps.version.outputs.version }}"
echo ""
echo "Installation:"
echo " npm install -g @serve.zone/onebox"
echo ""
echo "Registry:"
echo " https://www.npmjs.com/package/@serve.zone/onebox"
echo ""


@@ -0,0 +1,211 @@
name: Release
on:
push:
tags:
- 'v*'
jobs:
build-and-release:
runs-on: ubuntu-latest
container:
image: code.foss.global/host.today/ht-docker-node:latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Deno
uses: denoland/setup-deno@v1
with:
deno-version: v2.x
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '22'
- name: Enable corepack
run: corepack enable
- name: Install dependencies
run: pnpm install
- name: Get version from tag
id: version
run: |
VERSION=${GITHUB_REF#refs/tags/}
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "version_number=${VERSION#v}" >> $GITHUB_OUTPUT
echo "Building version: $VERSION"
- name: Verify deno.json version matches tag
run: |
DENO_VERSION=$(grep -o '"version": "[^"]*"' deno.json | cut -d'"' -f4)
TAG_VERSION="${{ steps.version.outputs.version_number }}"
echo "deno.json version: $DENO_VERSION"
echo "Tag version: $TAG_VERSION"
if [ "$DENO_VERSION" != "$TAG_VERSION" ]; then
echo "ERROR: Version mismatch!"
echo "deno.json has version $DENO_VERSION but tag is $TAG_VERSION"
exit 1
fi
- name: Compile binaries for all platforms
run: mkdir -p dist/binaries && npx tsdeno compile
- name: Generate SHA256 checksums
run: |
cd dist/binaries
sha256sum * > SHA256SUMS.txt
cat SHA256SUMS.txt
cd ../..
- name: Extract changelog for this version
id: changelog
run: |
VERSION="${{ steps.version.outputs.version }}"
# Check if CHANGELOG.md exists
if [ ! -f CHANGELOG.md ] && [ ! -f changelog.md ]; then
echo "No changelog found, using default release notes"
cat > /tmp/release_notes.md << EOF
## Onebox $VERSION
Pre-compiled binaries for multiple platforms.
### Installation
Use the installation script:
\`\`\`bash
curl -sSL https://code.foss.global/serve.zone/onebox/raw/branch/main/install.sh | sudo bash
\`\`\`
Or download the binary for your platform and make it executable.
### Supported Platforms
- Linux x86_64 (x64)
- Linux ARM64 (aarch64)
- macOS x86_64 (Intel)
- macOS ARM64 (Apple Silicon)
- Windows x86_64
### Checksums
SHA256 checksums are provided in SHA256SUMS.txt
EOF
else
CHANGELOG_FILE=$([ -f CHANGELOG.md ] && echo "CHANGELOG.md" || echo "changelog.md")
awk "/## \[$VERSION\]/,/## \[/" "$CHANGELOG_FILE" | sed '$d' > /tmp/release_notes.md || cat > /tmp/release_notes.md << EOF
## Onebox $VERSION
See changelog.md for full details.
### Installation
Use the installation script:
\`\`\`bash
curl -sSL https://code.foss.global/serve.zone/onebox/raw/branch/main/install.sh | sudo bash
\`\`\`
EOF
fi
echo "Release notes:"
cat /tmp/release_notes.md
- name: Delete existing release if it exists
run: |
VERSION="${{ steps.version.outputs.version }}"
echo "Checking for existing release $VERSION..."
# Try to get existing release by tag
EXISTING_RELEASE_ID=$(curl -s \
-H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
"https://code.foss.global/api/v1/repos/serve.zone/onebox/releases/tags/$VERSION" \
| jq -r '.id // empty')
if [ -n "$EXISTING_RELEASE_ID" ]; then
echo "Found existing release (ID: $EXISTING_RELEASE_ID), deleting..."
curl -X DELETE -s \
-H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
"https://code.foss.global/api/v1/repos/serve.zone/onebox/releases/$EXISTING_RELEASE_ID"
echo "Existing release deleted"
sleep 2
else
echo "No existing release found, proceeding with creation"
fi
- name: Create Gitea Release
run: |
VERSION="${{ steps.version.outputs.version }}"
RELEASE_NOTES=$(cat /tmp/release_notes.md)
# Create the release
echo "Creating release for $VERSION..."
RELEASE_ID=$(curl -X POST -s \
-H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
-H "Content-Type: application/json" \
"https://code.foss.global/api/v1/repos/serve.zone/onebox/releases" \
-d "{
\"tag_name\": \"$VERSION\",
\"name\": \"Onebox $VERSION\",
\"body\": $(jq -Rs . /tmp/release_notes.md),
\"draft\": false,
\"prerelease\": false
}" | jq -r '.id')
echo "Release created with ID: $RELEASE_ID"
# Upload binaries as release assets
for binary in dist/binaries/*; do
filename=$(basename "$binary")
echo "Uploading $filename..."
curl -X POST -s \
-H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
-H "Content-Type: application/octet-stream" \
--data-binary "@$binary" \
"https://code.foss.global/api/v1/repos/serve.zone/onebox/releases/$RELEASE_ID/assets?name=$filename"
done
echo "All assets uploaded successfully"
- name: Clean up old releases
run: |
echo "Cleaning up old releases (keeping only last 3)..."
# Fetch all releases sorted by creation date
RELEASES=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
"https://code.foss.global/api/v1/repos/serve.zone/onebox/releases" | \
jq -r 'sort_by(.created_at) | reverse | .[3:] | .[].id')
# Delete old releases
if [ -n "$RELEASES" ]; then
echo "Found releases to delete:"
for release_id in $RELEASES; do
echo " Deleting release ID: $release_id"
curl -X DELETE -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
"https://code.foss.global/api/v1/repos/serve.zone/onebox/releases/$release_id"
done
echo "Old releases deleted successfully"
else
echo "No old releases to delete (less than 4 releases total)"
fi
echo ""
- name: Release Summary
run: |
echo "================================================"
echo " Release ${{ steps.version.outputs.version }} Complete!"
echo "================================================"
echo ""
echo "Binaries published:"
ls -lh dist/binaries/
echo ""
echo "Release URL:"
echo "https://code.foss.global/serve.zone/onebox/releases/tag/${{ steps.version.outputs.version }}"
echo ""
echo "Installation command:"
echo "curl -sSL https://code.foss.global/serve.zone/onebox/raw/branch/main/install.sh | sudo bash"
echo ""


@@ -1,5 +1,79 @@
# Changelog
## 2026-03-15 - 1.13.8 - fix(repo)
no changes to commit
## 2026-03-15 - 1.13.7 - fix(repo)
no changes to commit
## 2026-03-15 - 1.13.6 - fix(ci)
correct workflow container image registry path
- Update Gitea CI, release, and npm publish workflows to use the corrected ht-docker-node image path
- Align all workflow container references from hosttoday to host.today to prevent pipeline image resolution issues
## 2026-03-15 - 1.13.5 - fix(workflows)
switch Gitea workflow containers from ht-docker-dbase to ht-docker-node
- Updates the CI, release, and npm publish workflows to use the Node-focused container image consistently.
- Aligns workflow runtime images with the project's Node and Deno build and publish steps.
## 2026-03-15 - 1.13.4 - fix(ci)
run workflows in the shared build container and enable corepack for pnpm installs
- adds the ht-docker-dbase container image to CI, release, and npm publish workflows
- enables corepack before pnpm install in build and release jobs to ensure package manager availability
## 2026-03-15 - 1.13.3 - fix(build)
replace custom Deno compile scripts with tsdeno-based binary builds in CI and release workflows
- adds @git.zone/tsdeno as a dev dependency and configures compile targets in npmextra.json
- updates CI and release workflows to install Node.js dependencies before running tsdeno compile
- removes the legacy scripts/compile-all.sh script and points the compile task to tsdeno compile
## 2026-03-15 - 1.13.2 - fix(scripts)
install production dependencies before compiling binaries and exclude local node_modules from builds
- Adds a dependency installation step using the application entrypoint before cross-platform compilation
- Updates all deno compile targets to use --node-modules-dir=none to avoid bundling local node_modules
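Per target, the compile invocation then looks roughly like this (reconstructed from the legacy script's flags plus the new option):
```bash
deno compile --allow-all --no-check --node-modules-dir=none \
  --output dist/binaries/onebox-linux-x64 \
  --target x86_64-unknown-linux-gnu \
  mod.ts
```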
## 2026-03-15 - 1.13.1 - fix(deno)
remove nodeModulesDir from Deno configuration
- Drops the explicit nodeModulesDir setting from deno.json.
- Keeps the package version unchanged at 1.13.0 while simplifying runtime configuration.
## 2026-03-15 - 1.13.0 - feat(install)
improve installer with version selection, service restart handling, and upgrade documentation
- Adds installer command-line options for help, specific version selection, and custom install directory.
- Fetches the latest release from the Gitea API when no version is provided and installs the matching platform binary.
- Preserves Onebox data directories, stops and restarts the systemd service during updates, and refreshes installation instructions in the README including upgrade usage.
## 2026-03-15 - 1.12.1 - fix(package.json)
update package metadata
- Single metadata-only file changed (+1, -1)
- No source code or runtime behavior modified; safe patch release
## 2026-03-15 - 1.12.0 - feat(cli,release)
add self-upgrade command and automate CI, release, and npm publishing workflows
- adds a new `onebox upgrade` CLI command that checks the latest release and reinstalls the current binary via the installer script
- introduces Gitea CI workflows for type checks, build verification, multi-platform binary compilation, release creation, and npm publishing
- adds a reusable release template describing installation options, supported platforms, and checksum availability
## 2026-03-03 - 1.11.0 - feat(services)
map backend service data to UI components, add stats & logs parsing, fetch service stats, and fix logs request param
- Fix: rename service logs request property from 'lines' to 'tail' when calling typedRequest
- Add data transformation helpers: formatBytes, parseImageString, mapStatus, toServiceDetail, toServiceStats, parseLogs
- Transform service list and detail props to match @serve.zone/catalog component interfaces (map status, image, repo/tag, timestamps, registry)
- Dispatch fetchServiceStatsAction on service click and surface transformed stats with default values to avoid nulls
- Parse and normalize logs into timestamp/message pairs for the detail view
## 2026-03-02 - 1.10.3 - fix(bin)
make bin/onebox-wrapper.js executable


@@ -1,12 +1,11 @@
{
"name": "@serve.zone/onebox",
"version": "1.10.3",
"version": "1.13.8",
"exports": "./mod.ts",
"nodeModulesDir": "auto",
"tasks": {
"test": "deno test --allow-all test/",
"test:watch": "deno test --allow-all --watch test/",
"compile": "bash scripts/compile-all.sh",
"compile": "tsdeno compile",
"dev": "pnpm run watch"
},
"imports": {


@@ -1,192 +1,308 @@
#!/bin/bash
# Onebox Installer Script
# Downloads and installs pre-compiled Onebox binary from Gitea releases
#
# Onebox installer script
# Usage:
# Direct piped installation (recommended):
# curl -sSL https://code.foss.global/serve.zone/onebox/raw/branch/main/install.sh | sudo bash
#
# With version specification:
# curl -sSL https://code.foss.global/serve.zone/onebox/raw/branch/main/install.sh | sudo bash -s -- --version v1.11.0
#
# Options:
# -h, --help Show this help message
# --version VERSION Install specific version (e.g., v1.11.0)
# --install-dir DIR Installation directory (default: /opt/onebox)
set -e
# Configuration
REPO_URL="https://code.foss.global/serve.zone/onebox"
# Default values
SHOW_HELP=0
SPECIFIED_VERSION=""
INSTALL_DIR="/opt/onebox"
BIN_LINK="/usr/local/bin/onebox"
GITEA_BASE_URL="https://code.foss.global"
GITEA_REPO="serve.zone/onebox"
SERVICE_NAME="smartdaemon_onebox"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Parse command line arguments
while [[ $# -gt 0 ]]; do
case $1 in
-h|--help)
SHOW_HELP=1
shift
;;
--version)
SPECIFIED_VERSION="$2"
shift 2
;;
--install-dir)
INSTALL_DIR="$2"
shift 2
;;
*)
echo "Unknown option: $1"
echo "Use -h or --help for usage information"
exit 1
;;
esac
done
# Functions
error() {
echo -e "${RED}Error: $1${NC}" >&2
exit 1
}
info() {
echo -e "${GREEN}$1${NC}"
}
warn() {
echo -e "${YELLOW}$1${NC}"
}
# Detect platform and architecture
detect_platform() {
OS=$(uname -s | tr '[:upper:]' '[:lower:]')
ARCH=$(uname -m)
case "$OS" in
linux)
PLATFORM="linux"
;;
darwin)
PLATFORM="macos"
;;
*)
error "Unsupported operating system: $OS"
;;
esac
case "$ARCH" in
x86_64|amd64)
ARCH="x64"
;;
aarch64|arm64)
ARCH="arm64"
;;
*)
error "Unsupported architecture: $ARCH"
;;
esac
BINARY_NAME="onebox-${PLATFORM}-${ARCH}"
}
# Get latest version from Gitea API
get_latest_version() {
info "Fetching latest version..."
VERSION=$(curl -s "${REPO_URL}/releases" | grep -o '"tag_name":"v[^"]*' | head -1 | cut -d'"' -f4 | cut -c2-)
if [ -z "$VERSION" ]; then
warn "Could not fetch latest version, using 'main' branch"
VERSION="main"
else
info "Latest version: v${VERSION}"
fi
}
if [ $SHOW_HELP -eq 1 ]; then
echo "Onebox Installer Script"
echo "Downloads and installs pre-compiled Onebox binary"
echo ""
echo "Usage: $0 [options]"
echo ""
echo "Options:"
echo " -h, --help Show this help message"
echo " --version VERSION Install specific version (e.g., v1.11.0)"
echo " --install-dir DIR Installation directory (default: /opt/onebox)"
echo ""
echo "Examples:"
echo " # Install latest version"
echo " curl -sSL https://code.foss.global/serve.zone/onebox/raw/branch/main/install.sh | sudo bash"
echo ""
echo " # Install specific version"
echo " curl -sSL https://code.foss.global/serve.zone/onebox/raw/branch/main/install.sh | sudo bash -s -- --version v1.11.0"
exit 0
fi
# Check if running as root
check_root() {
if [ "$EUID" -ne 0 ]; then
error "This script must be run as root (use sudo)"
fi
if [ "$EUID" -ne 0 ]; then
echo "Please run as root (sudo bash install.sh or pipe to sudo bash)"
exit 1
fi
# Helper function to detect OS and architecture
detect_platform() {
local os=$(uname -s)
local arch=$(uname -m)
# Map OS
case "$os" in
Linux)
os_name="linux"
;;
Darwin)
os_name="macos"
;;
MINGW*|MSYS*|CYGWIN*)
os_name="windows"
;;
*)
echo "Error: Unsupported operating system: $os"
echo "Supported: Linux, macOS, Windows"
exit 1
;;
esac
# Map architecture
case "$arch" in
x86_64|amd64)
arch_name="x64"
;;
aarch64|arm64)
arch_name="arm64"
;;
*)
echo "Error: Unsupported architecture: $arch"
echo "Supported: x86_64/amd64 (x64), aarch64/arm64 (arm64)"
exit 1
;;
esac
# Construct binary name
if [ "$os_name" = "windows" ]; then
echo "onebox-${os_name}-${arch_name}.exe"
else
echo "onebox-${os_name}-${arch_name}"
fi
}
# Get latest release version from Gitea API
get_latest_version() {
echo "Fetching latest release version from Gitea..." >&2
local api_url="${GITEA_BASE_URL}/api/v1/repos/${GITEA_REPO}/releases/latest"
local response=$(curl -sSL "$api_url" 2>/dev/null)
if [ $? -ne 0 ] || [ -z "$response" ]; then
echo "Error: Failed to fetch latest release information from Gitea API" >&2
echo "URL: $api_url" >&2
exit 1
fi
# Extract tag_name from JSON response
local version=$(echo "$response" | grep -o '"tag_name":"[^"]*"' | cut -d'"' -f4)
if [ -z "$version" ]; then
echo "Error: Could not determine latest version from API response" >&2
exit 1
fi
echo "$version"
}
# Main installation process
echo "================================================"
echo " Onebox Installation Script"
echo "================================================"
echo ""
# Detect platform
BINARY_NAME=$(detect_platform)
echo "Detected platform: $BINARY_NAME"
echo ""
# Determine version to install
if [ -n "$SPECIFIED_VERSION" ]; then
VERSION="$SPECIFIED_VERSION"
echo "Installing specified version: $VERSION"
else
VERSION=$(get_latest_version)
echo "Installing latest version: $VERSION"
fi
echo ""
# Construct download URL
DOWNLOAD_URL="${GITEA_BASE_URL}/${GITEA_REPO}/releases/download/${VERSION}/${BINARY_NAME}"
echo "Download URL: $DOWNLOAD_URL"
echo ""
# Check if service is running and stop it
SERVICE_WAS_RUNNING=0
if systemctl is-enabled --quiet "$SERVICE_NAME" 2>/dev/null || systemctl is-active --quiet "$SERVICE_NAME" 2>/dev/null; then
SERVICE_WAS_RUNNING=1
if systemctl is-active --quiet "$SERVICE_NAME" 2>/dev/null; then
echo "Stopping Onebox service..."
systemctl stop "$SERVICE_NAME"
fi
fi
# Clean installation directory - ensure only binary exists
if [ -d "$INSTALL_DIR" ]; then
echo "Cleaning installation directory: $INSTALL_DIR"
rm -rf "$INSTALL_DIR"
fi
# Create fresh installation directory
echo "Creating installation directory: $INSTALL_DIR"
mkdir -p "$INSTALL_DIR"
# Download binary
download_binary() {
info "Downloading Onebox ${VERSION} for ${PLATFORM}-${ARCH}..."
echo "Downloading Onebox binary..."
TEMP_FILE="$INSTALL_DIR/onebox.download"
curl -sSL "$DOWNLOAD_URL" -o "$TEMP_FILE"
# Create temp directory
TMP_DIR=$(mktemp -d)
TMP_FILE="${TMP_DIR}/${BINARY_NAME}"
if [ $? -ne 0 ]; then
echo "Error: Failed to download binary from $DOWNLOAD_URL"
echo ""
echo "Please check:"
echo " 1. Your internet connection"
echo " 2. The specified version exists: ${GITEA_BASE_URL}/${GITEA_REPO}/releases"
echo " 3. The platform binary is available for this release"
rm -f "$TEMP_FILE"
exit 1
fi
# Try release download first
if [ "$VERSION" != "main" ]; then
DOWNLOAD_URL="${REPO_URL}/releases/download/v${VERSION}/${BINARY_NAME}"
else
DOWNLOAD_URL="${REPO_URL}/raw/branch/main/dist/binaries/${BINARY_NAME}"
fi
# Check if download was successful (file exists and not empty)
if [ ! -s "$TEMP_FILE" ]; then
echo "Error: Downloaded file is empty or does not exist"
rm -f "$TEMP_FILE"
exit 1
fi
if ! curl -L -f -o "$TMP_FILE" "$DOWNLOAD_URL"; then
error "Failed to download binary from $DOWNLOAD_URL"
fi
# Move to final location
BINARY_PATH="$INSTALL_DIR/onebox"
mv "$TEMP_FILE" "$BINARY_PATH"
# Verify download
if [ ! -f "$TMP_FILE" ] || [ ! -s "$TMP_FILE" ]; then
error "Downloaded file is empty or missing"
fi
if [ $? -ne 0 ] || [ ! -f "$BINARY_PATH" ]; then
echo "Error: Failed to move binary to $BINARY_PATH"
rm -f "$TEMP_FILE" 2>/dev/null
exit 1
fi
info "✓ Download complete"
}
# Make executable
chmod +x "$BINARY_PATH"
# Install binary
install_binary() {
info "Installing Onebox to ${INSTALL_DIR}..."
if [ $? -ne 0 ]; then
echo "Error: Failed to make binary executable"
exit 1
fi
# Create install directory
mkdir -p "$INSTALL_DIR"
echo "Binary installed successfully to: $BINARY_PATH"
echo ""
# Copy binary
cp "$TMP_FILE" "${INSTALL_DIR}/onebox"
chmod +x "${INSTALL_DIR}/onebox"
# Check if /usr/local/bin is in PATH
if [[ ":$PATH:" == *":/usr/local/bin:"* ]]; then
BIN_DIR="/usr/local/bin"
else
BIN_DIR="/usr/bin"
fi
# Create symlink
ln -sf "${INSTALL_DIR}/onebox" "$BIN_LINK"
# Create symlink for global access
ln -sf "$BINARY_PATH" "$BIN_DIR/onebox"
echo "Symlink created: $BIN_DIR/onebox -> $BINARY_PATH"
echo ""
# Cleanup temp files
rm -rf "$TMP_DIR"
# Create data directories
mkdir -p /var/lib/onebox
mkdir -p /var/www/certbot
info "✓ Installation complete"
}
# Restart service if it was running before update
if [ $SERVICE_WAS_RUNNING -eq 1 ]; then
echo "Restarting Onebox service..."
systemctl restart "$SERVICE_NAME"
echo "Service restarted successfully."
echo ""
fi
# Initialize database and config
initialize() {
info "Initializing Onebox..."
echo "================================================"
echo " Onebox Installation Complete!"
echo "================================================"
echo ""
echo "Installation details:"
echo " Binary location: $BINARY_PATH"
echo " Symlink location: $BIN_DIR/onebox"
echo " Version: $VERSION"
echo ""
# Create data directory
mkdir -p /var/lib/onebox
# Create certbot directory for ACME challenges
mkdir -p /var/www/certbot
info "✓ Initialization complete"
}
# Print success message
print_success() {
echo ""
info "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
info " Onebox installed successfully!"
info "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
echo "Next steps:"
echo ""
echo "1. Configure Cloudflare (optional):"
echo " onebox config set cloudflareAPIKey <key>"
echo " onebox config set cloudflareEmail <email>"
echo " onebox config set cloudflareZoneID <zone-id>"
echo " onebox config set serverIP <your-server-ip>"
echo ""
echo "2. Configure ACME email:"
echo " onebox config set acmeEmail <your@email.com>"
echo ""
echo "3. Install daemon:"
echo " onebox daemon install"
echo ""
echo "4. Start daemon:"
echo " onebox daemon start"
echo ""
echo "5. Deploy your first service:"
echo " onebox service add myapp --image nginx:latest --domain app.example.com"
echo ""
echo "Web UI: http://localhost:3000"
echo "Default credentials: admin / admin"
echo ""
}
# Main installation flow
main() {
info "Onebox Installer"
echo ""
check_root
detect_platform
get_latest_version
download_binary
install_binary
initialize
print_success
}
# Run main function
main
# Check if database exists (indicates existing installation)
if [ -f "/var/lib/onebox/onebox.db" ]; then
echo "Data directory: /var/lib/onebox (preserved)"
echo ""
echo "Your existing data has been preserved."
if [ $SERVICE_WAS_RUNNING -eq 1 ]; then
echo "The service has been restarted with your current settings."
else
echo "Start the service with: onebox daemon start"
fi
else
echo "Get started:"
echo ""
echo " onebox --version"
echo " onebox --help"
echo ""
echo " 1. Configure Cloudflare (optional):"
echo " onebox config set cloudflareAPIKey <key>"
echo " onebox config set cloudflareEmail <email>"
echo " onebox config set cloudflareZoneID <zone-id>"
echo " onebox config set serverIP <your-server-ip>"
echo ""
echo " 2. Configure ACME email:"
echo " onebox config set acmeEmail <your@email.com>"
echo ""
echo " 3. Install daemon:"
echo " onebox daemon install"
echo ""
echo " 4. Start daemon:"
echo " onebox daemon start"
echo ""
echo " 5. Deploy your first service:"
echo " onebox service add myapp --image nginx:latest --domain app.example.com"
echo ""
echo " Web UI: http://localhost:3000"
echo " Default credentials: admin / admin"
fi
echo ""


@@ -11,6 +11,26 @@
}
]
},
"@git.zone/tsdeno": {
"compileTargets": [
{
"name": "onebox-linux-x64",
"entryPoint": "mod.ts",
"outDir": "dist/binaries",
"target": "x86_64-unknown-linux-gnu",
"permissions": ["--allow-all"],
"noCheck": true
},
{
"name": "onebox-linux-arm64",
"entryPoint": "mod.ts",
"outDir": "dist/binaries",
"target": "aarch64-unknown-linux-gnu",
"permissions": ["--allow-all"],
"noCheck": true
}
]
},
"@git.zone/tswatch": {
"bundles": [
{


@@ -1,6 +1,6 @@
{
"name": "@serve.zone/onebox",
"version": "1.10.3",
"version": "1.13.8",
"description": "Self-hosted container platform with automatic SSL and DNS - a mini Heroku for single servers",
"main": "mod.ts",
"type": "module",
@@ -61,6 +61,7 @@
},
"devDependencies": {
"@git.zone/tsbundle": "^2.9.0",
"@git.zone/tsdeno": "^1.1.1",
"@git.zone/tswatch": "^3.2.0"
}
}

pnpm-lock.yaml (generated)

@@ -24,6 +24,9 @@ importers:
'@git.zone/tsbundle':
specifier: ^2.9.0
version: 2.9.0
'@git.zone/tsdeno':
specifier: ^1.1.1
version: 1.1.1
'@git.zone/tswatch':
specifier: ^3.2.0
version: 3.2.0(@tiptap/pm@2.27.2)
@@ -270,6 +273,10 @@ packages:
resolution: {integrity: sha512-itXX/oiJjrRHUlIGTHUEqSwPuGwsG4Cq8kh7aqFOm8mYzJwtXYE1gBqLJTWZma6gI5n+xAk5qTxTyfikuPgWQA==}
hasBin: true
'@git.zone/tsdeno@1.1.1':
resolution: {integrity: sha512-+ECRtHZFyG1U50lb2sJsy51940sFBUnmM7aEKcRPplz9uLm6i6uSVZJFgdGGVtzRvW646GvBvRpWzYPyXcMclw==}
hasBin: true
'@git.zone/tsrun@2.0.1':
resolution: {integrity: sha512-NEcnsjvlC1o3Z6SS3VhKCf6Ev+Sh4EAinmggslrIR/ppMrvjDbXNFXoyr3PB+GLeSAR0JRZ1fGvVYjpEzjBdIg==}
hasBin: true
@@ -500,6 +507,9 @@ packages:
'@push.rocks/smartexit@1.1.0':
resolution: {integrity: sha512-GD8VLIbxQuwvhPXwK4eH162XAYSj+M3wGKWGNO3i1iY4bj8P3BARcgsWx6/ntN3aCo5ygWtrevrfD5iecYY2Ng==}
'@push.rocks/smartexit@2.0.3':
resolution: {integrity: sha512-ZWpZ3Elorpv/rKtUcCUejUHG4BIE5B3QWysBAgb7lTcA7y0vGdFY32Y5/Q5tHpZM6PPxl/WTdUOYtSojQTq+pA==}
'@push.rocks/smartfeed@1.4.0':
resolution: {integrity: sha512-bvj/3cGQI6TbbjbqrgC1uufcqprd/VthefuIsS8KHiHyCqYD5Z6RTjrbQY9WOCsmub/dcuMavfXQZqe9g2+OrQ==}
@@ -515,6 +525,9 @@ packages:
'@push.rocks/smartfs@1.3.1':
resolution: {integrity: sha512-ZSduVS8tM+/erbyCTvRRvc9gLWwbpqN5xdIIkMr+gub7fowSeJb7tR2rnGwySa63DyimU0q2KTp79VV9YqGLeg==}
'@push.rocks/smartfs@1.5.0':
resolution: {integrity: sha512-QwMD44HgX3d9PPxUwR0uS+0PEMtesKvKbZR+s4pezL2er6oPneKJMLkO6TJPvJ38nug6Lmlk9Bu7UrwR2kS3Vw==}
'@push.rocks/smartguard@3.1.0':
resolution: {integrity: sha512-J23q84f1O+TwFGmd4lrO9XLHUh2DaLXo9PN/9VmTWYzTkQDv5JehmifXVI0esophXcCIfbdIu6hbt7/aHlDF4A==}
@@ -584,6 +597,9 @@ packages:
'@push.rocks/smartrouter@1.3.3':
resolution: {integrity: sha512-1+xZEnWlhzqLWAaJ1zFNhQ0zgbfCWQl1DBT72LygLxTs+P0K8AwJKgqo/IX6CT55kGCFnPAZIYSbVJlGsgrB0w==}
'@push.rocks/smartrust@1.3.2':
resolution: {integrity: sha512-HPzSJgDnKUdE5fkn2+BC9JvFXk7wl6aURAiHAXjHSCBLtzfgB7jEXjlg+K6CEfMjwQV7sy+hYABlq5DLXcFseQ==}
'@push.rocks/smartrx@3.0.10':
resolution: {integrity: sha512-USjIYcsSfzn14cwOsxgq/bBmWDTTzy3ouWAnW5NdMyRRzEbmeNrvmy6TRqNeDlJ2PsYNTt1rr/zGUqvIy72ITg==}
@@ -593,6 +609,9 @@ packages:
'@push.rocks/smartshell@3.3.0':
resolution: {integrity: sha512-m0w618H6YBs+vXGz1CgS4nPi5CUAnqRtckcS9/koGwfcIx1IpjqmiP47BoCTbdgcv0IPUxQVBG1IXTHPuZ8Z5g==}
'@push.rocks/smartshell@3.3.7':
resolution: {integrity: sha512-b3st2+FjHUVhZZRlXfw93+SQA0UMVlURqe55uVpWdjJX7jeGXTTeszuYygtiR99zC5iZ8WZhGDct3N2L1qc/qw==}
'@push.rocks/smartsitemap@2.0.4':
resolution: {integrity: sha512-76dYWG/o/EjV4vYCK7ZKM35T9xgrI+oHEiiIE6E2MDaFIU6QnSfciTfbscH5nc0vxx8Ah+I0HPEJO94BM2S39w==}
@@ -1565,6 +1584,10 @@ packages:
resolution: {integrity: sha512-6B3tLtFqtQS4ekarvLVMZ+X+VlvQekbe4taUkf/rhVO3d/h0M2rfARm/pXLcPEsjjMsFgrFgSrhQIxcSVrBz8w==}
engines: {node: '>=18'}
isexe@4.0.0:
resolution: {integrity: sha512-FFUtZMpoZ8RqHS3XeXEmHWLA4thH+ZxCv2lOiPIn1Xc7CxrqhWzNSDzD+/chS/zbYezmiwWLdQC09JdQKmthOw==}
engines: {node: '>=20'}
jackspeak@4.2.3:
resolution: {integrity: sha512-ykkVRwrYvFm1nb2AJfKKYPr0emF6IiXDYUaFx4Zn9ZuIH7MrzEZ3sD5RlqGXNRpHtvUHJyOnCEFxOlNDtGo7wg==}
engines: {node: 20 || >=22}
@@ -2276,6 +2299,11 @@ packages:
engines: {node: ^18.17.0 || >=20.5.0}
hasBin: true
which@6.0.1:
resolution: {integrity: sha512-oGLe46MIrCRqX7ytPUf66EAYvdeMIZYn3WaocqqKZAxrBpkqHfL/qvTyJ/bTk5+AqHCjXmrv3CEWgy368zhRUg==}
engines: {node: ^20.17.0 || >=22.9.0}
hasBin: true
wrap-ansi@6.2.0:
resolution: {integrity: sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==}
engines: {node: '>=8'}
@@ -2652,6 +2680,19 @@ snapshots:
- supports-color
- vue
'@git.zone/tsdeno@1.1.1':
dependencies:
'@push.rocks/early': 4.0.4
'@push.rocks/npmextra': 5.3.3
'@push.rocks/smartcli': 4.0.20
'@push.rocks/smartfs': 1.5.0
'@push.rocks/smartshell': 3.3.7
transitivePeerDependencies:
- '@nuxt/kit'
- react
- supports-color
- vue
'@git.zone/tsrun@2.0.1':
dependencies:
'@push.rocks/smartfile': 13.1.2
@@ -3001,6 +3042,11 @@ snapshots:
'@push.rocks/smartpromise': 4.2.3
tree-kill: 1.2.2
'@push.rocks/smartexit@2.0.3':
dependencies:
'@push.rocks/lik': 6.3.1
'@push.rocks/smartpromise': 4.2.3
'@push.rocks/smartfeed@1.4.0':
dependencies:
'@tsclass/tsclass': 9.3.0
@@ -3047,6 +3093,11 @@ snapshots:
dependencies:
'@push.rocks/smartpath': 6.0.0
'@push.rocks/smartfs@1.5.0':
dependencies:
'@push.rocks/smartpath': 6.0.0
'@push.rocks/smartrust': 1.3.2
'@push.rocks/smartguard@3.1.0':
dependencies:
'@push.rocks/smartpromise': 4.2.3
@@ -3194,6 +3245,10 @@ snapshots:
'@push.rocks/smartrx': 3.0.10
path-to-regexp: 8.3.0
'@push.rocks/smartrust@1.3.2':
dependencies:
'@push.rocks/smartpath': 6.0.0
'@push.rocks/smartrx@3.0.10':
dependencies:
'@push.rocks/smartpromise': 4.2.3
@@ -3221,6 +3276,14 @@ snapshots:
tree-kill: 1.2.2
which: 5.0.0
'@push.rocks/smartshell@3.3.7':
dependencies:
'@push.rocks/smartdelay': 3.0.5
'@push.rocks/smartexit': 2.0.3
'@push.rocks/smartpromise': 4.2.3
'@types/which': 3.0.4
which: 6.0.1
'@push.rocks/smartsitemap@2.0.4':
dependencies:
'@push.rocks/smartcache': 1.0.18
@@ -4281,6 +4344,8 @@ snapshots:
isexe@3.1.5: {}
isexe@4.0.0: {}
jackspeak@4.2.3:
dependencies:
'@isaacs/cliui': 9.0.0
@@ -5247,6 +5312,10 @@ snapshots:
dependencies:
isexe: 3.1.5
which@6.0.1:
dependencies:
isexe: 4.0.0
wrap-ansi@6.2.0:
dependencies:
ansi-styles: 4.3.0


@@ -47,10 +47,11 @@ For reporting bugs, issues, or security vulnerabilities, please visit [community
### Installation
```bash
# Download the latest release for your platform
curl -sSL https://code.foss.global/serve.zone/onebox/releases/latest/download/onebox-linux-x64 -o onebox
chmod +x onebox
sudo mv onebox /usr/local/bin/
# One-line install (recommended)
curl -sSL https://code.foss.global/serve.zone/onebox/raw/branch/main/install.sh | sudo bash
# Install a specific version
curl -sSL https://code.foss.global/serve.zone/onebox/raw/branch/main/install.sh | sudo bash -s -- --version v1.11.0
# Or install from npm
pnpm install -g @serve.zone/onebox
@@ -242,6 +243,13 @@ onebox config set cloudflareZoneID your-zone-id
onebox status
```
### Upgrade
```bash
# Upgrade to the latest version (requires root)
sudo onebox upgrade
```
## Configuration 🔧
### System Requirements


@@ -1,56 +0,0 @@
#!/bin/bash
#
# Compile Onebox for all platforms
#
set -e
VERSION=$(grep '"version"' deno.json | cut -d'"' -f4)
echo "Compiling Onebox v${VERSION} for all platforms..."
# Create dist directory
mkdir -p dist/binaries
# Compile for each platform
echo "Compiling for Linux x64..."
deno compile --allow-all --no-check \
--output "dist/binaries/onebox-linux-x64" \
--target x86_64-unknown-linux-gnu \
mod.ts
echo "Compiling for Linux ARM64..."
deno compile --allow-all --no-check \
--output "dist/binaries/onebox-linux-arm64" \
--target aarch64-unknown-linux-gnu \
mod.ts
echo "Compiling for macOS x64..."
deno compile --allow-all --no-check \
--output "dist/binaries/onebox-macos-x64" \
--target x86_64-apple-darwin \
mod.ts
echo "Compiling for macOS ARM64..."
deno compile --allow-all --no-check \
--output "dist/binaries/onebox-macos-arm64" \
--target aarch64-apple-darwin \
mod.ts
echo "Compiling for Windows x64..."
deno compile --allow-all --no-check \
--output "dist/binaries/onebox-windows-x64.exe" \
--target x86_64-pc-windows-msvc \
mod.ts
echo ""
echo "✓ Compilation complete!"
echo ""
echo "Binaries:"
ls -lh dist/binaries/
echo ""
echo "Next steps:"
echo "1. Test binaries on their respective platforms"
echo "2. Create git tag: git tag v${VERSION}"
echo "3. Push tag: git push origin v${VERSION}"
echo "4. Upload binaries to Gitea release"
echo "5. Publish to npm: pnpm publish"


@@ -3,6 +3,6 @@
*/
export const commitinfo = {
name: '@serve.zone/onebox',
version: '1.10.3',
version: '1.13.8',
description: 'Self-hosted container platform with automatic SSL and DNS - a mini Heroku for single servers'
}


@@ -72,6 +72,10 @@ export async function runCli(): Promise<void> {
await handleStatusCommand(onebox);
break;
case 'upgrade':
await handleUpgradeCommand();
break;
default:
logger.error(`Unknown command: ${command}`);
printHelp();
@@ -386,6 +390,78 @@ async function handleStatusCommand(onebox: Onebox) {
console.log(JSON.stringify(status, null, 2));
}
// Upgrade command - self-update onebox to latest version
async function handleUpgradeCommand(): Promise<void> {
// Check if running as root
if (Deno.uid() !== 0) {
logger.error('This command must be run as root to upgrade Onebox.');
logger.info('Try: sudo onebox upgrade');
Deno.exit(1);
}
logger.info('Checking for updates...');
try {
// Get current version
const currentVersion = projectInfo.version;
// Fetch latest version from Gitea API
const apiUrl = 'https://code.foss.global/api/v1/repos/serve.zone/onebox/releases/latest';
const curlCmd = new Deno.Command('curl', {
args: ['-sSL', apiUrl],
stdout: 'piped',
stderr: 'piped',
});
const curlResult = await curlCmd.output();
const response = new TextDecoder().decode(curlResult.stdout);
const release = JSON.parse(response);
const latestVersion = release.tag_name as string; // e.g., "v1.11.0"
// Normalize versions for comparison (ensure both have "v" prefix)
const normalizedCurrent = currentVersion.startsWith('v')
? currentVersion
: `v${currentVersion}`;
const normalizedLatest = latestVersion.startsWith('v')
? latestVersion
: `v${latestVersion}`;
console.log(` Current version: ${normalizedCurrent}`);
console.log(` Latest version: ${normalizedLatest}`);
console.log('');
// Compare normalized versions
if (normalizedCurrent === normalizedLatest) {
logger.success('Already up to date!');
return;
}
logger.info(`New version available: ${latestVersion}`);
logger.info('Downloading and installing...');
console.log('');
// Download and run the install script
const installUrl = 'https://code.foss.global/serve.zone/onebox/raw/branch/main/install.sh';
const installCmd = new Deno.Command('bash', {
args: ['-c', `curl -sSL ${installUrl} | bash`],
stdin: 'inherit',
stdout: 'inherit',
stderr: 'inherit',
});
const installResult = await installCmd.output();
if (!installResult.success) {
logger.error('Upgrade failed');
Deno.exit(1);
}
console.log('');
logger.success(`Upgraded to ${latestVersion}`);
} catch (error) {
logger.error(`Upgrade failed: ${getErrorMessage(error)}`);
Deno.exit(1);
}
}
// Helpers
function getArg(args: string[], flag: string): string {
const arg = args.find((a) => a.startsWith(`${flag}=`));
@@ -441,6 +517,9 @@ Commands:
status
upgrade
Upgrade Onebox to the latest version (requires root)
Options:
--help, -h Show this help message
--version, -v Show version


@@ -25,6 +25,7 @@ import type {
import type { TBindValue } from './types.ts';
import { logger } from '../logging.ts';
import { getErrorMessage } from '../utils/error.ts';
import { MigrationRunner } from './migrations/index.ts';
// Import repositories
import {
@@ -71,7 +72,8 @@ export class OneboxDatabase {
await this.createTables();
// Run migrations if needed
await this.runMigrations();
const runner = new MigrationRunner(this.query.bind(this));
runner.run();
// Initialize repositories with bound query function
const queryFn = this.query.bind(this);
@@ -241,724 +243,6 @@ export class OneboxDatabase {
/**
* Run database migrations
*/
private async runMigrations(): Promise<void> {
if (!this.db) throw new Error('Database not initialized');
try {
const currentVersion = this.getMigrationVersion();
logger.info(`Current database migration version: ${currentVersion}`);
// Migration 1: Initial schema
if (currentVersion === 0) {
logger.info('Setting initial migration version to 1');
this.setMigrationVersion(1);
}
// Migration 2: Convert timestamp columns from INTEGER to REAL
const updatedVersion = this.getMigrationVersion();
if (updatedVersion < 2) {
logger.info('Running migration 2: Converting timestamps to REAL...');
// SSL certificates
this.query(`
CREATE TABLE ssl_certificates_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
domain TEXT NOT NULL UNIQUE,
cert_path TEXT NOT NULL,
key_path TEXT NOT NULL,
full_chain_path TEXT NOT NULL,
expiry_date REAL NOT NULL,
issuer TEXT NOT NULL,
created_at REAL NOT NULL,
updated_at REAL NOT NULL
)
`);
this.query(`INSERT INTO ssl_certificates_new SELECT * FROM ssl_certificates`);
this.query(`DROP TABLE ssl_certificates`);
this.query(`ALTER TABLE ssl_certificates_new RENAME TO ssl_certificates`);
// Services
this.query(`
CREATE TABLE services_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL UNIQUE,
image TEXT NOT NULL,
registry TEXT,
env_vars TEXT NOT NULL,
port INTEGER NOT NULL,
domain TEXT,
container_id TEXT,
status TEXT NOT NULL DEFAULT 'stopped',
created_at REAL NOT NULL,
updated_at REAL NOT NULL
)
`);
this.query(`INSERT INTO services_new SELECT * FROM services`);
this.query(`DROP TABLE services`);
this.query(`ALTER TABLE services_new RENAME TO services`);
// Registries
this.query(`
CREATE TABLE registries_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
url TEXT NOT NULL UNIQUE,
username TEXT NOT NULL,
password_encrypted TEXT NOT NULL,
created_at REAL NOT NULL
)
`);
this.query(`INSERT INTO registries_new SELECT * FROM registries`);
this.query(`DROP TABLE registries`);
this.query(`ALTER TABLE registries_new RENAME TO registries`);
// Nginx configs
this.query(`
CREATE TABLE nginx_configs_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
domain TEXT NOT NULL,
port INTEGER NOT NULL,
ssl_enabled INTEGER NOT NULL DEFAULT 0,
config_template TEXT NOT NULL,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
this.query(`INSERT INTO nginx_configs_new SELECT * FROM nginx_configs`);
this.query(`DROP TABLE nginx_configs`);
this.query(`ALTER TABLE nginx_configs_new RENAME TO nginx_configs`);
// DNS records
this.query(`
CREATE TABLE dns_records_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
domain TEXT NOT NULL UNIQUE,
type TEXT NOT NULL,
value TEXT NOT NULL,
cloudflare_id TEXT,
zone_id TEXT,
created_at REAL NOT NULL,
updated_at REAL NOT NULL
)
`);
this.query(`INSERT INTO dns_records_new SELECT * FROM dns_records`);
this.query(`DROP TABLE dns_records`);
this.query(`ALTER TABLE dns_records_new RENAME TO dns_records`);
// Metrics
this.query(`
CREATE TABLE metrics_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
timestamp REAL NOT NULL,
cpu_percent REAL NOT NULL,
memory_used INTEGER NOT NULL,
memory_limit INTEGER NOT NULL,
network_rx_bytes INTEGER NOT NULL,
network_tx_bytes INTEGER NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
this.query(`INSERT INTO metrics_new SELECT * FROM metrics`);
this.query(`DROP TABLE metrics`);
this.query(`ALTER TABLE metrics_new RENAME TO metrics`);
this.query(`CREATE INDEX IF NOT EXISTS idx_metrics_service_timestamp ON metrics(service_id, timestamp DESC)`);
// Logs
this.query(`
CREATE TABLE logs_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
timestamp REAL NOT NULL,
message TEXT NOT NULL,
level TEXT NOT NULL,
source TEXT NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
this.query(`INSERT INTO logs_new SELECT * FROM logs`);
this.query(`DROP TABLE logs`);
this.query(`ALTER TABLE logs_new RENAME TO logs`);
this.query(`CREATE INDEX IF NOT EXISTS idx_logs_service_timestamp ON logs(service_id, timestamp DESC)`);
// Users
this.query(`
CREATE TABLE users_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
username TEXT NOT NULL UNIQUE,
password_hash TEXT NOT NULL,
role TEXT NOT NULL DEFAULT 'user',
created_at REAL NOT NULL,
updated_at REAL NOT NULL
)
`);
this.query(`INSERT INTO users_new SELECT * FROM users`);
this.query(`DROP TABLE users`);
this.query(`ALTER TABLE users_new RENAME TO users`);
// Settings
this.query(`
CREATE TABLE settings_new (
key TEXT PRIMARY KEY,
value TEXT NOT NULL,
updated_at REAL NOT NULL
)
`);
this.query(`INSERT INTO settings_new SELECT * FROM settings`);
this.query(`DROP TABLE settings`);
this.query(`ALTER TABLE settings_new RENAME TO settings`);
// Migrations table itself
this.query(`
CREATE TABLE migrations_new (
version INTEGER PRIMARY KEY,
applied_at REAL NOT NULL
)
`);
this.query(`INSERT INTO migrations_new SELECT * FROM migrations`);
this.query(`DROP TABLE migrations`);
this.query(`ALTER TABLE migrations_new RENAME TO migrations`);
this.setMigrationVersion(2);
logger.success('Migration 2 completed: All timestamps converted to REAL');
}
// Migration 3: Domain management tables
const version3 = this.getMigrationVersion();
if (version3 < 3) {
logger.info('Running migration 3: Creating domain management tables...');
this.query(`
CREATE TABLE domains (
id INTEGER PRIMARY KEY AUTOINCREMENT,
domain TEXT NOT NULL UNIQUE,
dns_provider TEXT,
cloudflare_zone_id TEXT,
is_obsolete INTEGER NOT NULL DEFAULT 0,
default_wildcard INTEGER NOT NULL DEFAULT 1,
created_at REAL NOT NULL,
updated_at REAL NOT NULL
)
`);
this.query(`
CREATE TABLE certificates (
id INTEGER PRIMARY KEY AUTOINCREMENT,
domain_id INTEGER NOT NULL,
cert_domain TEXT NOT NULL,
is_wildcard INTEGER NOT NULL DEFAULT 0,
cert_path TEXT NOT NULL,
key_path TEXT NOT NULL,
full_chain_path TEXT NOT NULL,
expiry_date REAL NOT NULL,
issuer TEXT NOT NULL,
is_valid INTEGER NOT NULL DEFAULT 1,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (domain_id) REFERENCES domains(id) ON DELETE CASCADE
)
`);
this.query(`
CREATE TABLE cert_requirements (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
domain_id INTEGER NOT NULL,
subdomain TEXT NOT NULL,
certificate_id INTEGER,
status TEXT NOT NULL DEFAULT 'pending',
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE,
FOREIGN KEY (domain_id) REFERENCES domains(id) ON DELETE CASCADE,
FOREIGN KEY (certificate_id) REFERENCES certificates(id) ON DELETE SET NULL
)
`);
interface OldSslCert {
id?: number;
domain?: string;
cert_path?: string;
key_path?: string;
full_chain_path?: string;
expiry_date?: number;
issuer?: string;
created_at?: number;
updated_at?: number;
[key: number]: unknown;
}
const existingCerts = this.query<OldSslCert>('SELECT * FROM ssl_certificates');
const now = Date.now();
const domainMap = new Map<string, number>();
for (const cert of existingCerts) {
const domain = String(cert.domain ?? (cert as Record<number, unknown>)[1]);
if (!domainMap.has(domain)) {
this.query(
'INSERT INTO domains (domain, dns_provider, is_obsolete, default_wildcard, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?)',
[domain, null, 0, 1, now, now]
);
const result = this.query<{ id?: number; [key: number]: unknown }>('SELECT last_insert_rowid() as id');
const domainId = result[0].id ?? (result[0] as Record<number, unknown>)[0];
domainMap.set(domain, Number(domainId));
}
}
for (const cert of existingCerts) {
const domain = String(cert.domain ?? (cert as Record<number, unknown>)[1]);
const domainId = domainMap.get(domain);
this.query(
`INSERT INTO certificates (
domain_id, cert_domain, is_wildcard, cert_path, key_path, full_chain_path,
expiry_date, issuer, is_valid, created_at, updated_at
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
[
domainId,
domain,
0,
String(cert.cert_path ?? (cert as Record<number, unknown>)[2]),
String(cert.key_path ?? (cert as Record<number, unknown>)[3]),
String(cert.full_chain_path ?? (cert as Record<number, unknown>)[4]),
Number(cert.expiry_date ?? (cert as Record<number, unknown>)[5]),
String(cert.issuer ?? (cert as Record<number, unknown>)[6]),
1,
Number(cert.created_at ?? (cert as Record<number, unknown>)[7]),
Number(cert.updated_at ?? (cert as Record<number, unknown>)[8])
]
);
}
this.query('DROP TABLE ssl_certificates');
this.query('CREATE INDEX IF NOT EXISTS idx_domains_cloudflare_zone ON domains(cloudflare_zone_id)');
this.query('CREATE INDEX IF NOT EXISTS idx_certificates_domain ON certificates(domain_id)');
this.query('CREATE INDEX IF NOT EXISTS idx_certificates_expiry ON certificates(expiry_date)');
this.query('CREATE INDEX IF NOT EXISTS idx_cert_requirements_service ON cert_requirements(service_id)');
this.query('CREATE INDEX IF NOT EXISTS idx_cert_requirements_domain ON cert_requirements(domain_id)');
this.setMigrationVersion(3);
logger.success('Migration 3 completed: Domain management tables created');
}
// Migration 4: Add Onebox Registry support columns
const version4 = this.getMigrationVersion();
if (version4 < 4) {
logger.info('Running migration 4: Adding Onebox Registry columns to services table...');
this.query(`ALTER TABLE services ADD COLUMN use_onebox_registry INTEGER DEFAULT 0`);
this.query(`ALTER TABLE services ADD COLUMN registry_repository TEXT`);
this.query(`ALTER TABLE services ADD COLUMN registry_token TEXT`);
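// NOTE: registry_token (added just above) is superseded by the registry_tokens
// table in migration 5 and dropped from services again in migration 6.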
this.query(`ALTER TABLE services ADD COLUMN registry_image_tag TEXT DEFAULT 'latest'`);
this.query(`ALTER TABLE services ADD COLUMN auto_update_on_push INTEGER DEFAULT 0`);
this.query(`ALTER TABLE services ADD COLUMN image_digest TEXT`);
this.setMigrationVersion(4);
logger.success('Migration 4 completed: Onebox Registry columns added to services table');
}
// Migration 5: Registry tokens table
const version5 = this.getMigrationVersion();
if (version5 < 5) {
logger.info('Running migration 5: Creating registry_tokens table...');
this.query(`
CREATE TABLE registry_tokens (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
token_hash TEXT NOT NULL UNIQUE,
token_type TEXT NOT NULL,
scope TEXT NOT NULL,
expires_at REAL,
created_at REAL NOT NULL,
last_used_at REAL,
created_by TEXT NOT NULL
)
`);
this.query('CREATE INDEX IF NOT EXISTS idx_registry_tokens_type ON registry_tokens(token_type)');
this.query('CREATE INDEX IF NOT EXISTS idx_registry_tokens_hash ON registry_tokens(token_hash)');
this.setMigrationVersion(5);
logger.success('Migration 5 completed: Registry tokens table created');
}
// Migration 6: Drop registry_token column from services table
const version6 = this.getMigrationVersion();
if (version6 < 6) {
logger.info('Running migration 6: Dropping registry_token column from services table...');
this.query(`
CREATE TABLE services_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL UNIQUE,
image TEXT NOT NULL,
registry TEXT,
env_vars TEXT,
port INTEGER NOT NULL,
domain TEXT,
container_id TEXT,
status TEXT NOT NULL,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
use_onebox_registry INTEGER DEFAULT 0,
registry_repository TEXT,
registry_image_tag TEXT DEFAULT 'latest',
auto_update_on_push INTEGER DEFAULT 0,
image_digest TEXT
)
`);
this.query(`
INSERT INTO services_new (
id, name, image, registry, env_vars, port, domain, container_id, status,
created_at, updated_at, use_onebox_registry, registry_repository,
registry_image_tag, auto_update_on_push, image_digest
)
SELECT
id, name, image, registry, env_vars, port, domain, container_id, status,
created_at, updated_at, use_onebox_registry, registry_repository,
registry_image_tag, auto_update_on_push, image_digest
FROM services
`);
this.query('DROP TABLE services');
this.query('ALTER TABLE services_new RENAME TO services');
this.query('CREATE INDEX IF NOT EXISTS idx_services_name ON services(name)');
this.query('CREATE INDEX IF NOT EXISTS idx_services_status ON services(status)');
this.setMigrationVersion(6);
logger.success('Migration 6 completed: registry_token column dropped from services table');
}
// Migration 7: Platform services tables
const version7 = this.getMigrationVersion();
if (version7 < 7) {
logger.info('Running migration 7: Creating platform services tables...');
this.query(`
CREATE TABLE platform_services (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL UNIQUE,
type TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'stopped',
container_id TEXT,
config TEXT NOT NULL DEFAULT '{}',
admin_credentials_encrypted TEXT,
created_at REAL NOT NULL,
updated_at REAL NOT NULL
)
`);
this.query(`
CREATE TABLE platform_resources (
id INTEGER PRIMARY KEY AUTOINCREMENT,
platform_service_id INTEGER NOT NULL,
service_id INTEGER NOT NULL,
resource_type TEXT NOT NULL,
resource_name TEXT NOT NULL,
credentials_encrypted TEXT NOT NULL,
created_at REAL NOT NULL,
FOREIGN KEY (platform_service_id) REFERENCES platform_services(id) ON DELETE CASCADE,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
this.query(`ALTER TABLE services ADD COLUMN platform_requirements TEXT DEFAULT '{}'`);
this.query('CREATE INDEX IF NOT EXISTS idx_platform_services_type ON platform_services(type)');
this.query('CREATE INDEX IF NOT EXISTS idx_platform_resources_service ON platform_resources(service_id)');
this.query('CREATE INDEX IF NOT EXISTS idx_platform_resources_platform ON platform_resources(platform_service_id)');
this.setMigrationVersion(7);
logger.success('Migration 7 completed: Platform services tables created');
}
// Migration 8: Convert certificates table to store PEM content
const version8 = this.getMigrationVersion();
if (version8 < 8) {
logger.info('Running migration 8: Converting certificates table to store PEM content...');
this.query(`
CREATE TABLE certificates_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
domain_id INTEGER NOT NULL,
cert_domain TEXT NOT NULL,
is_wildcard INTEGER NOT NULL DEFAULT 0,
cert_pem TEXT NOT NULL DEFAULT '',
key_pem TEXT NOT NULL DEFAULT '',
fullchain_pem TEXT NOT NULL DEFAULT '',
expiry_date REAL NOT NULL,
issuer TEXT NOT NULL,
is_valid INTEGER NOT NULL DEFAULT 1,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (domain_id) REFERENCES domains(id) ON DELETE CASCADE
)
`);
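// Copy the metadata but blank the PEM columns and set is_valid = 0: the old
// path-based certificate files cannot be read back at this point, so the
// existing rows are marked invalid and the certs must be re-issued.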
this.query(`
INSERT INTO certificates_new (id, domain_id, cert_domain, is_wildcard, cert_pem, key_pem, fullchain_pem, expiry_date, issuer, is_valid, created_at, updated_at)
SELECT id, domain_id, cert_domain, is_wildcard, '', '', '', expiry_date, issuer, 0, created_at, updated_at FROM certificates
`);
this.query('DROP TABLE certificates');
this.query('ALTER TABLE certificates_new RENAME TO certificates');
this.query('CREATE INDEX IF NOT EXISTS idx_certificates_domain ON certificates(domain_id)');
this.query('CREATE INDEX IF NOT EXISTS idx_certificates_expiry ON certificates(expiry_date)');
this.setMigrationVersion(8);
logger.success('Migration 8 completed: Certificates table now stores PEM content');
}
// Migration 9: Backup system tables
const version9 = this.getMigrationVersion();
if (version9 < 9) {
logger.info('Running migration 9: Creating backup system tables...');
// Add include_image_in_backup column to services table
this.query(`ALTER TABLE services ADD COLUMN include_image_in_backup INTEGER DEFAULT 1`);
// Create backups table
this.query(`
CREATE TABLE backups (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
service_name TEXT NOT NULL,
filename TEXT NOT NULL,
size_bytes INTEGER NOT NULL,
created_at REAL NOT NULL,
includes_image INTEGER NOT NULL,
platform_resources TEXT NOT NULL DEFAULT '[]',
checksum TEXT NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
this.query('CREATE INDEX IF NOT EXISTS idx_backups_service ON backups(service_id)');
this.query('CREATE INDEX IF NOT EXISTS idx_backups_created ON backups(created_at DESC)');
this.setMigrationVersion(9);
logger.success('Migration 9 completed: Backup system tables created');
}
// Migration 10: Backup schedules table and extend backups table
const version10 = this.getMigrationVersion();
if (version10 < 10) {
logger.info('Running migration 10: Creating backup schedules table...');
// Create backup_schedules table
this.query(`
CREATE TABLE backup_schedules (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
service_name TEXT NOT NULL,
cron_expression TEXT NOT NULL,
retention_tier TEXT NOT NULL,
enabled INTEGER NOT NULL DEFAULT 1,
last_run_at REAL,
next_run_at REAL,
last_status TEXT,
last_error TEXT,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
this.query('CREATE INDEX IF NOT EXISTS idx_backup_schedules_service ON backup_schedules(service_id)');
this.query('CREATE INDEX IF NOT EXISTS idx_backup_schedules_enabled ON backup_schedules(enabled)');
// Extend backups table with retention_tier and schedule_id columns
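// (With foreign keys enabled, SQLite only permits ADD COLUMN with a
// REFERENCES clause when the column's default is NULL, which is the case here.)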
this.query('ALTER TABLE backups ADD COLUMN retention_tier TEXT');
this.query('ALTER TABLE backups ADD COLUMN schedule_id INTEGER REFERENCES backup_schedules(id) ON DELETE SET NULL');
this.setMigrationVersion(10);
logger.success('Migration 10 completed: Backup schedules table created');
}
// Migration 11: Add scope columns for global/pattern backup schedules
const version11 = this.getMigrationVersion();
if (version11 < 11) {
logger.info('Running migration 11: Adding scope columns to backup_schedules...');
// Recreate backup_schedules table with nullable service_id/service_name and new scope columns
this.query(`
CREATE TABLE backup_schedules_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
scope_type TEXT NOT NULL DEFAULT 'service',
scope_pattern TEXT,
service_id INTEGER,
service_name TEXT,
cron_expression TEXT NOT NULL,
retention_tier TEXT NOT NULL,
enabled INTEGER NOT NULL DEFAULT 1,
last_run_at REAL,
next_run_at REAL,
last_status TEXT,
last_error TEXT,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
// Copy existing schedules (all are service-specific)
this.query(`
INSERT INTO backup_schedules_new (
id, scope_type, scope_pattern, service_id, service_name, cron_expression,
retention_tier, enabled, last_run_at, next_run_at, last_status, last_error,
created_at, updated_at
)
SELECT
id, 'service', NULL, service_id, service_name, cron_expression,
retention_tier, enabled, last_run_at, next_run_at, last_status, last_error,
created_at, updated_at
FROM backup_schedules
`);
this.query('DROP TABLE backup_schedules');
this.query('ALTER TABLE backup_schedules_new RENAME TO backup_schedules');
this.query('CREATE INDEX IF NOT EXISTS idx_backup_schedules_service ON backup_schedules(service_id)');
this.query('CREATE INDEX IF NOT EXISTS idx_backup_schedules_enabled ON backup_schedules(enabled)');
this.query('CREATE INDEX IF NOT EXISTS idx_backup_schedules_scope ON backup_schedules(scope_type)');
this.setMigrationVersion(11);
logger.success('Migration 11 completed: Scope columns added to backup_schedules');
}
// Migration 12: GFS retention policy - replace retention_tier with per-tier retention counts
const version12 = this.getMigrationVersion();
if (version12 < 12) {
logger.info('Running migration 12: Updating backup system for GFS retention policy...');
// Recreate backup_schedules table with new retention columns
this.query(`
CREATE TABLE backup_schedules_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
scope_type TEXT NOT NULL DEFAULT 'service',
scope_pattern TEXT,
service_id INTEGER,
service_name TEXT,
cron_expression TEXT NOT NULL,
retention_hourly INTEGER NOT NULL DEFAULT 0,
retention_daily INTEGER NOT NULL DEFAULT 7,
retention_weekly INTEGER NOT NULL DEFAULT 4,
retention_monthly INTEGER NOT NULL DEFAULT 12,
enabled INTEGER NOT NULL DEFAULT 1,
last_run_at REAL,
next_run_at REAL,
last_status TEXT,
last_error TEXT,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
// Migrate existing data: map the old retention_tier onto per-tier counts.
// daily -> 7 daily / 4 weekly / 12 monthly; weekly -> 4 weekly / 12 monthly;
// monthly -> 12 monthly; yearly -> 24 monthly (yearly becomes long monthly retention); hourly is always 0.
this.query(`
INSERT INTO backup_schedules_new (
id, scope_type, scope_pattern, service_id, service_name, cron_expression,
retention_hourly, retention_daily, retention_weekly, retention_monthly,
enabled, last_run_at, next_run_at, last_status, last_error, created_at, updated_at
)
SELECT
id, scope_type, scope_pattern, service_id, service_name, cron_expression,
0, -- retention_hourly
CASE WHEN retention_tier = 'daily' THEN 7 ELSE 0 END,
CASE WHEN retention_tier IN ('daily', 'weekly') THEN 4 ELSE 0 END,
CASE WHEN retention_tier IN ('daily', 'weekly', 'monthly') THEN 12
WHEN retention_tier = 'yearly' THEN 24 ELSE 12 END,
enabled, last_run_at, next_run_at, last_status, last_error, created_at, updated_at
FROM backup_schedules
`);
this.query('DROP TABLE backup_schedules');
this.query('ALTER TABLE backup_schedules_new RENAME TO backup_schedules');
this.query('CREATE INDEX IF NOT EXISTS idx_backup_schedules_service ON backup_schedules(service_id)');
this.query('CREATE INDEX IF NOT EXISTS idx_backup_schedules_enabled ON backup_schedules(enabled)');
this.query('CREATE INDEX IF NOT EXISTS idx_backup_schedules_scope ON backup_schedules(scope_type)');
// Recreate backups table without retention_tier column
this.query(`
CREATE TABLE backups_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
service_name TEXT NOT NULL,
filename TEXT NOT NULL,
size_bytes INTEGER NOT NULL,
created_at REAL NOT NULL,
includes_image INTEGER NOT NULL,
platform_resources TEXT NOT NULL DEFAULT '[]',
checksum TEXT NOT NULL,
schedule_id INTEGER REFERENCES backup_schedules(id) ON DELETE SET NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
this.query(`
INSERT INTO backups_new (
id, service_id, service_name, filename, size_bytes, created_at,
includes_image, platform_resources, checksum, schedule_id
)
SELECT
id, service_id, service_name, filename, size_bytes, created_at,
includes_image, platform_resources, checksum, schedule_id
FROM backups
`);
this.query('DROP TABLE backups');
this.query('ALTER TABLE backups_new RENAME TO backups');
this.query('CREATE INDEX IF NOT EXISTS idx_backups_service ON backups(service_id)');
this.query('CREATE INDEX IF NOT EXISTS idx_backups_created ON backups(created_at DESC)');
this.query('CREATE INDEX IF NOT EXISTS idx_backups_schedule ON backups(schedule_id)');
this.setMigrationVersion(12);
logger.success('Migration 12 completed: GFS retention policy schema updated');
}
} catch (error) {
logger.error(`Migration failed: ${getErrorMessage(error)}`);
if (error instanceof Error && error.stack) {
logger.error(`Stack: ${error.stack}`);
}
throw error;
}
}
/**
* Get current migration version
*/
private getMigrationVersion(): number {
if (!this.db) throw new Error('Database not initialized');
try {
const result = this.query<{ version?: number | null; [key: number]: unknown }>('SELECT MAX(version) as version FROM migrations');
if (result.length === 0) return 0;
const versionValue = result[0].version ?? (result[0] as Record<number, unknown>)[0];
return versionValue !== null && versionValue !== undefined ? Number(versionValue) : 0;
} catch (error) {
logger.warn(`Error getting migration version: ${getErrorMessage(error)}, defaulting to 0`);
return 0;
}
}
/**
* Set migration version
*/
private setMigrationVersion(version: number): void {
if (!this.db) throw new Error('Database not initialized');
this.query('INSERT INTO migrations (version, applied_at) VALUES (?, ?)', [
version,
Date.now(),
]);
logger.debug(`Migration version set to ${version}`);
}
/**
* Close database connection
*/

View File: base-migration.ts

@@ -0,0 +1,22 @@
/**
* Abstract base class for database migrations.
* All migrations must extend this class and implement the abstract members.
*/
import type { TQueryFunction } from '../types.ts';
export abstract class BaseMigration {
/** The migration version number (must be unique and sequential) */
abstract readonly version: number;
/** A short description of what this migration does */
abstract readonly description: string;
/** Execute the migration's SQL statements */
abstract up(query: TQueryFunction): void;
/** Returns a human-readable name for logging */
getName(): string {
return `Migration ${this.version}: ${this.description}`;
}
}
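
As a sketch of the contract (Migration013, its name, and its SQL are invented for illustration and are not part of this changeset), a new migration would extend the base class like so, and would then be registered in MigrationRunner's migrations array (see migration-runner.ts below):

import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';

// Hypothetical example: every name and statement here is illustrative only.
export class Migration013Example extends BaseMigration {
  readonly version = 13;
  readonly description = 'Example: add a notes column to services';
  up(query: TQueryFunction): void {
    query(`ALTER TABLE services ADD COLUMN notes TEXT`);
  }
}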

View File: index.ts

@@ -0,0 +1,2 @@
export { BaseMigration } from './base-migration.ts';
export { MigrationRunner } from './migration-runner.ts';

View File: migration-001-initial.ts

@@ -0,0 +1,12 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration001Initial extends BaseMigration {
readonly version = 1;
readonly description = 'Initial schema';
up(_query: TQueryFunction): void {
// Initial schema is created by createTables() in the database class.
// This migration just marks the initial version.
}
}

View File: migration-002-timestamps-to-real.ts

@@ -0,0 +1,170 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration002TimestampsToReal extends BaseMigration {
readonly version = 2;
readonly description = 'Convert timestamp columns from INTEGER to REAL';
up(query: TQueryFunction): void {
// SSL certificates
query(`
CREATE TABLE ssl_certificates_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
domain TEXT NOT NULL UNIQUE,
cert_path TEXT NOT NULL,
key_path TEXT NOT NULL,
full_chain_path TEXT NOT NULL,
expiry_date REAL NOT NULL,
issuer TEXT NOT NULL,
created_at REAL NOT NULL,
updated_at REAL NOT NULL
)
`);
query(`INSERT INTO ssl_certificates_new SELECT * FROM ssl_certificates`);
query(`DROP TABLE ssl_certificates`);
query(`ALTER TABLE ssl_certificates_new RENAME TO ssl_certificates`);
// Services
query(`
CREATE TABLE services_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL UNIQUE,
image TEXT NOT NULL,
registry TEXT,
env_vars TEXT NOT NULL,
port INTEGER NOT NULL,
domain TEXT,
container_id TEXT,
status TEXT NOT NULL DEFAULT 'stopped',
created_at REAL NOT NULL,
updated_at REAL NOT NULL
)
`);
query(`INSERT INTO services_new SELECT * FROM services`);
query(`DROP TABLE services`);
query(`ALTER TABLE services_new RENAME TO services`);
// Registries
query(`
CREATE TABLE registries_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
url TEXT NOT NULL UNIQUE,
username TEXT NOT NULL,
password_encrypted TEXT NOT NULL,
created_at REAL NOT NULL
)
`);
query(`INSERT INTO registries_new SELECT * FROM registries`);
query(`DROP TABLE registries`);
query(`ALTER TABLE registries_new RENAME TO registries`);
// Nginx configs
query(`
CREATE TABLE nginx_configs_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
domain TEXT NOT NULL,
port INTEGER NOT NULL,
ssl_enabled INTEGER NOT NULL DEFAULT 0,
config_template TEXT NOT NULL,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
query(`INSERT INTO nginx_configs_new SELECT * FROM nginx_configs`);
query(`DROP TABLE nginx_configs`);
query(`ALTER TABLE nginx_configs_new RENAME TO nginx_configs`);
// DNS records
query(`
CREATE TABLE dns_records_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
domain TEXT NOT NULL UNIQUE,
type TEXT NOT NULL,
value TEXT NOT NULL,
cloudflare_id TEXT,
zone_id TEXT,
created_at REAL NOT NULL,
updated_at REAL NOT NULL
)
`);
query(`INSERT INTO dns_records_new SELECT * FROM dns_records`);
query(`DROP TABLE dns_records`);
query(`ALTER TABLE dns_records_new RENAME TO dns_records`);
// Metrics
query(`
CREATE TABLE metrics_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
timestamp REAL NOT NULL,
cpu_percent REAL NOT NULL,
memory_used INTEGER NOT NULL,
memory_limit INTEGER NOT NULL,
network_rx_bytes INTEGER NOT NULL,
network_tx_bytes INTEGER NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
query(`INSERT INTO metrics_new SELECT * FROM metrics`);
query(`DROP TABLE metrics`);
query(`ALTER TABLE metrics_new RENAME TO metrics`);
query(`CREATE INDEX IF NOT EXISTS idx_metrics_service_timestamp ON metrics(service_id, timestamp DESC)`);
// Logs
query(`
CREATE TABLE logs_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
timestamp REAL NOT NULL,
message TEXT NOT NULL,
level TEXT NOT NULL,
source TEXT NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
query(`INSERT INTO logs_new SELECT * FROM logs`);
query(`DROP TABLE logs`);
query(`ALTER TABLE logs_new RENAME TO logs`);
query(`CREATE INDEX IF NOT EXISTS idx_logs_service_timestamp ON logs(service_id, timestamp DESC)`);
// Users
query(`
CREATE TABLE users_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
username TEXT NOT NULL UNIQUE,
password_hash TEXT NOT NULL,
role TEXT NOT NULL DEFAULT 'user',
created_at REAL NOT NULL,
updated_at REAL NOT NULL
)
`);
query(`INSERT INTO users_new SELECT * FROM users`);
query(`DROP TABLE users`);
query(`ALTER TABLE users_new RENAME TO users`);
// Settings
query(`
CREATE TABLE settings_new (
key TEXT PRIMARY KEY,
value TEXT NOT NULL,
updated_at REAL NOT NULL
)
`);
query(`INSERT INTO settings_new SELECT * FROM settings`);
query(`DROP TABLE settings`);
query(`ALTER TABLE settings_new RENAME TO settings`);
// Migrations table itself
query(`
CREATE TABLE migrations_new (
version INTEGER PRIMARY KEY,
applied_at REAL NOT NULL
)
`);
query(`INSERT INTO migrations_new SELECT * FROM migrations`);
query(`DROP TABLE migrations`);
query(`ALTER TABLE migrations_new RENAME TO migrations`);
}
}

View File: migration-003-domain-management.ts

@@ -0,0 +1,125 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration003DomainManagement extends BaseMigration {
readonly version = 3;
readonly description = 'Domain management tables';
up(query: TQueryFunction): void {
query(`
CREATE TABLE domains (
id INTEGER PRIMARY KEY AUTOINCREMENT,
domain TEXT NOT NULL UNIQUE,
dns_provider TEXT,
cloudflare_zone_id TEXT,
is_obsolete INTEGER NOT NULL DEFAULT 0,
default_wildcard INTEGER NOT NULL DEFAULT 1,
created_at REAL NOT NULL,
updated_at REAL NOT NULL
)
`);
query(`
CREATE TABLE certificates (
id INTEGER PRIMARY KEY AUTOINCREMENT,
domain_id INTEGER NOT NULL,
cert_domain TEXT NOT NULL,
is_wildcard INTEGER NOT NULL DEFAULT 0,
cert_path TEXT NOT NULL,
key_path TEXT NOT NULL,
full_chain_path TEXT NOT NULL,
expiry_date REAL NOT NULL,
issuer TEXT NOT NULL,
is_valid INTEGER NOT NULL DEFAULT 1,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (domain_id) REFERENCES domains(id) ON DELETE CASCADE
)
`);
query(`
CREATE TABLE cert_requirements (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
domain_id INTEGER NOT NULL,
subdomain TEXT NOT NULL,
certificate_id INTEGER,
status TEXT NOT NULL DEFAULT 'pending',
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE,
FOREIGN KEY (domain_id) REFERENCES domains(id) ON DELETE CASCADE,
FOREIGN KEY (certificate_id) REFERENCES certificates(id) ON DELETE SET NULL
)
`);
// Migrate data from old ssl_certificates table
interface OldSslCert {
id?: number;
domain?: string;
cert_path?: string;
key_path?: string;
full_chain_path?: string;
expiry_date?: number;
issuer?: string;
created_at?: number;
updated_at?: number;
[key: number]: unknown;
}
const existingCerts = query<OldSslCert>('SELECT * FROM ssl_certificates');
const now = Date.now();
const domainMap = new Map<string, number>();
for (const cert of existingCerts) {
const domain = String(cert.domain ?? (cert as Record<number, unknown>)[1]);
if (!domainMap.has(domain)) {
query(
'INSERT INTO domains (domain, dns_provider, is_obsolete, default_wildcard, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?)',
[domain, null, 0, 1, now, now],
);
const result = query<{ id?: number; [key: number]: unknown }>(
'SELECT last_insert_rowid() as id',
);
const domainId = result[0].id ?? (result[0] as Record<number, unknown>)[0];
domainMap.set(domain, Number(domainId));
}
}
for (const cert of existingCerts) {
const domain = String(cert.domain ?? (cert as Record<number, unknown>)[1]);
const domainId = domainMap.get(domain);
query(
`INSERT INTO certificates (
domain_id, cert_domain, is_wildcard, cert_path, key_path, full_chain_path,
expiry_date, issuer, is_valid, created_at, updated_at
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
[
domainId,
domain,
0,
String(cert.cert_path ?? (cert as Record<number, unknown>)[2]),
String(cert.key_path ?? (cert as Record<number, unknown>)[3]),
String(cert.full_chain_path ?? (cert as Record<number, unknown>)[4]),
Number(cert.expiry_date ?? (cert as Record<number, unknown>)[5]),
String(cert.issuer ?? (cert as Record<number, unknown>)[6]),
1,
Number(cert.created_at ?? (cert as Record<number, unknown>)[7]),
Number(cert.updated_at ?? (cert as Record<number, unknown>)[8]),
],
);
}
query('DROP TABLE ssl_certificates');
query('CREATE INDEX IF NOT EXISTS idx_domains_cloudflare_zone ON domains(cloudflare_zone_id)');
query('CREATE INDEX IF NOT EXISTS idx_certificates_domain ON certificates(domain_id)');
query('CREATE INDEX IF NOT EXISTS idx_certificates_expiry ON certificates(expiry_date)');
query(
'CREATE INDEX IF NOT EXISTS idx_cert_requirements_service ON cert_requirements(service_id)',
);
query(
'CREATE INDEX IF NOT EXISTS idx_cert_requirements_domain ON cert_requirements(domain_id)',
);
}
}

View File: migration-004-registry-columns.ts

@@ -0,0 +1,16 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration004RegistryColumns extends BaseMigration {
readonly version = 4;
readonly description = 'Add Onebox Registry columns to services table';
up(query: TQueryFunction): void {
query(`ALTER TABLE services ADD COLUMN use_onebox_registry INTEGER DEFAULT 0`);
query(`ALTER TABLE services ADD COLUMN registry_repository TEXT`);
query(`ALTER TABLE services ADD COLUMN registry_token TEXT`);
query(`ALTER TABLE services ADD COLUMN registry_image_tag TEXT DEFAULT 'latest'`);
query(`ALTER TABLE services ADD COLUMN auto_update_on_push INTEGER DEFAULT 0`);
query(`ALTER TABLE services ADD COLUMN image_digest TEXT`);
}
}

View File: migration-005-registry-tokens.ts

@@ -0,0 +1,30 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration005RegistryTokens extends BaseMigration {
readonly version = 5;
readonly description = 'Registry tokens table';
up(query: TQueryFunction): void {
query(`
CREATE TABLE registry_tokens (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
token_hash TEXT NOT NULL UNIQUE,
token_type TEXT NOT NULL,
scope TEXT NOT NULL,
expires_at REAL,
created_at REAL NOT NULL,
last_used_at REAL,
created_by TEXT NOT NULL
)
`);
query(
'CREATE INDEX IF NOT EXISTS idx_registry_tokens_type ON registry_tokens(token_type)',
);
query(
'CREATE INDEX IF NOT EXISTS idx_registry_tokens_hash ON registry_tokens(token_hash)',
);
}
}

View File: migration-006-drop-registry-token.ts

@@ -0,0 +1,48 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration006DropRegistryToken extends BaseMigration {
readonly version = 6;
readonly description = 'Drop registry_token column from services table';
up(query: TQueryFunction): void {
query(`
CREATE TABLE services_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL UNIQUE,
image TEXT NOT NULL,
registry TEXT,
env_vars TEXT,
port INTEGER NOT NULL,
domain TEXT,
container_id TEXT,
status TEXT NOT NULL,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
use_onebox_registry INTEGER DEFAULT 0,
registry_repository TEXT,
registry_image_tag TEXT DEFAULT 'latest',
auto_update_on_push INTEGER DEFAULT 0,
image_digest TEXT
)
`);
query(`
INSERT INTO services_new (
id, name, image, registry, env_vars, port, domain, container_id, status,
created_at, updated_at, use_onebox_registry, registry_repository,
registry_image_tag, auto_update_on_push, image_digest
)
SELECT
id, name, image, registry, env_vars, port, domain, container_id, status,
created_at, updated_at, use_onebox_registry, registry_repository,
registry_image_tag, auto_update_on_push, image_digest
FROM services
`);
query('DROP TABLE services');
query('ALTER TABLE services_new RENAME TO services');
query('CREATE INDEX IF NOT EXISTS idx_services_name ON services(name)');
query('CREATE INDEX IF NOT EXISTS idx_services_status ON services(status)');
}
}

View File: migration-007-platform-services.ts

@@ -0,0 +1,49 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration007PlatformServices extends BaseMigration {
readonly version = 7;
readonly description = 'Platform services tables';
up(query: TQueryFunction): void {
query(`
CREATE TABLE platform_services (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL UNIQUE,
type TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'stopped',
container_id TEXT,
config TEXT NOT NULL DEFAULT '{}',
admin_credentials_encrypted TEXT,
created_at REAL NOT NULL,
updated_at REAL NOT NULL
)
`);
query(`
CREATE TABLE platform_resources (
id INTEGER PRIMARY KEY AUTOINCREMENT,
platform_service_id INTEGER NOT NULL,
service_id INTEGER NOT NULL,
resource_type TEXT NOT NULL,
resource_name TEXT NOT NULL,
credentials_encrypted TEXT NOT NULL,
created_at REAL NOT NULL,
FOREIGN KEY (platform_service_id) REFERENCES platform_services(id) ON DELETE CASCADE,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
query(`ALTER TABLE services ADD COLUMN platform_requirements TEXT DEFAULT '{}'`);
query(
'CREATE INDEX IF NOT EXISTS idx_platform_services_type ON platform_services(type)',
);
query(
'CREATE INDEX IF NOT EXISTS idx_platform_resources_service ON platform_resources(service_id)',
);
query(
'CREATE INDEX IF NOT EXISTS idx_platform_resources_platform ON platform_resources(platform_service_id)',
);
}
}

View File: migration-008-cert-pem-content.ts

@@ -0,0 +1,41 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration008CertPemContent extends BaseMigration {
readonly version = 8;
readonly description = 'Convert certificates table to store PEM content';
up(query: TQueryFunction): void {
query(`
CREATE TABLE certificates_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
domain_id INTEGER NOT NULL,
cert_domain TEXT NOT NULL,
is_wildcard INTEGER NOT NULL DEFAULT 0,
cert_pem TEXT NOT NULL DEFAULT '',
key_pem TEXT NOT NULL DEFAULT '',
fullchain_pem TEXT NOT NULL DEFAULT '',
expiry_date REAL NOT NULL,
issuer TEXT NOT NULL,
is_valid INTEGER NOT NULL DEFAULT 1,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (domain_id) REFERENCES domains(id) ON DELETE CASCADE
)
`);
query(`
INSERT INTO certificates_new (id, domain_id, cert_domain, is_wildcard, cert_pem, key_pem, fullchain_pem, expiry_date, issuer, is_valid, created_at, updated_at)
SELECT id, domain_id, cert_domain, is_wildcard, '', '', '', expiry_date, issuer, 0, created_at, updated_at FROM certificates
`);
query('DROP TABLE certificates');
query('ALTER TABLE certificates_new RENAME TO certificates');
query(
'CREATE INDEX IF NOT EXISTS idx_certificates_domain ON certificates(domain_id)',
);
query(
'CREATE INDEX IF NOT EXISTS idx_certificates_expiry ON certificates(expiry_date)',
);
}
}

View File: migration-009-backup-system.ts

@@ -0,0 +1,29 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration009BackupSystem extends BaseMigration {
readonly version = 9;
readonly description = 'Backup system tables';
up(query: TQueryFunction): void {
query(`ALTER TABLE services ADD COLUMN include_image_in_backup INTEGER DEFAULT 1`);
query(`
CREATE TABLE backups (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
service_name TEXT NOT NULL,
filename TEXT NOT NULL,
size_bytes INTEGER NOT NULL,
created_at REAL NOT NULL,
includes_image INTEGER NOT NULL,
platform_resources TEXT NOT NULL DEFAULT '[]',
checksum TEXT NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
query('CREATE INDEX IF NOT EXISTS idx_backups_service ON backups(service_id)');
query('CREATE INDEX IF NOT EXISTS idx_backups_created ON backups(created_at DESC)');
}
}

View File: migration-010-backup-schedules.ts

@@ -0,0 +1,39 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration010BackupSchedules extends BaseMigration {
readonly version = 10;
readonly description = 'Backup schedules table';
up(query: TQueryFunction): void {
query(`
CREATE TABLE backup_schedules (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
service_name TEXT NOT NULL,
cron_expression TEXT NOT NULL,
retention_tier TEXT NOT NULL,
enabled INTEGER NOT NULL DEFAULT 1,
last_run_at REAL,
next_run_at REAL,
last_status TEXT,
last_error TEXT,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
query(
'CREATE INDEX IF NOT EXISTS idx_backup_schedules_service ON backup_schedules(service_id)',
);
query(
'CREATE INDEX IF NOT EXISTS idx_backup_schedules_enabled ON backup_schedules(enabled)',
);
query('ALTER TABLE backups ADD COLUMN retention_tier TEXT');
query(
'ALTER TABLE backups ADD COLUMN schedule_id INTEGER REFERENCES backup_schedules(id) ON DELETE SET NULL',
);
}
}

View File: migration-011-scope-columns.ts

@@ -0,0 +1,54 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration011ScopeColumns extends BaseMigration {
readonly version = 11;
readonly description = 'Add scope columns to backup_schedules';
up(query: TQueryFunction): void {
query(`
CREATE TABLE backup_schedules_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
scope_type TEXT NOT NULL DEFAULT 'service',
scope_pattern TEXT,
service_id INTEGER,
service_name TEXT,
cron_expression TEXT NOT NULL,
retention_tier TEXT NOT NULL,
enabled INTEGER NOT NULL DEFAULT 1,
last_run_at REAL,
next_run_at REAL,
last_status TEXT,
last_error TEXT,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
query(`
INSERT INTO backup_schedules_new (
id, scope_type, scope_pattern, service_id, service_name, cron_expression,
retention_tier, enabled, last_run_at, next_run_at, last_status, last_error,
created_at, updated_at
)
SELECT
id, 'service', NULL, service_id, service_name, cron_expression,
retention_tier, enabled, last_run_at, next_run_at, last_status, last_error,
created_at, updated_at
FROM backup_schedules
`);
query('DROP TABLE backup_schedules');
query('ALTER TABLE backup_schedules_new RENAME TO backup_schedules');
query(
'CREATE INDEX IF NOT EXISTS idx_backup_schedules_service ON backup_schedules(service_id)',
);
query(
'CREATE INDEX IF NOT EXISTS idx_backup_schedules_enabled ON backup_schedules(enabled)',
);
query(
'CREATE INDEX IF NOT EXISTS idx_backup_schedules_scope ON backup_schedules(scope_type)',
);
}
}

View File: migration-012-gfs-retention.ts

@@ -0,0 +1,97 @@
import { BaseMigration } from './base-migration.ts';
import type { TQueryFunction } from '../types.ts';
export class Migration012GfsRetention extends BaseMigration {
readonly version = 12;
readonly description = 'GFS retention policy schema';
up(query: TQueryFunction): void {
// Recreate backup_schedules with GFS retention columns
query(`
CREATE TABLE backup_schedules_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
scope_type TEXT NOT NULL DEFAULT 'service',
scope_pattern TEXT,
service_id INTEGER,
service_name TEXT,
cron_expression TEXT NOT NULL,
retention_hourly INTEGER NOT NULL DEFAULT 0,
retention_daily INTEGER NOT NULL DEFAULT 7,
retention_weekly INTEGER NOT NULL DEFAULT 4,
retention_monthly INTEGER NOT NULL DEFAULT 12,
enabled INTEGER NOT NULL DEFAULT 1,
last_run_at REAL,
next_run_at REAL,
last_status TEXT,
last_error TEXT,
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
// Migrate existing data - convert old retention_tier to new format
query(`
INSERT INTO backup_schedules_new (
id, scope_type, scope_pattern, service_id, service_name, cron_expression,
retention_hourly, retention_daily, retention_weekly, retention_monthly,
enabled, last_run_at, next_run_at, last_status, last_error, created_at, updated_at
)
SELECT
id, scope_type, scope_pattern, service_id, service_name, cron_expression,
0,
CASE WHEN retention_tier = 'daily' THEN 7 ELSE 0 END,
CASE WHEN retention_tier IN ('daily', 'weekly') THEN 4 ELSE 0 END,
CASE WHEN retention_tier IN ('daily', 'weekly', 'monthly') THEN 12
WHEN retention_tier = 'yearly' THEN 24 ELSE 12 END,
enabled, last_run_at, next_run_at, last_status, last_error, created_at, updated_at
FROM backup_schedules
`);
query('DROP TABLE backup_schedules');
query('ALTER TABLE backup_schedules_new RENAME TO backup_schedules');
query(
'CREATE INDEX IF NOT EXISTS idx_backup_schedules_service ON backup_schedules(service_id)',
);
query(
'CREATE INDEX IF NOT EXISTS idx_backup_schedules_enabled ON backup_schedules(enabled)',
);
query(
'CREATE INDEX IF NOT EXISTS idx_backup_schedules_scope ON backup_schedules(scope_type)',
);
// Recreate backups table without retention_tier column
query(`
CREATE TABLE backups_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
service_id INTEGER NOT NULL,
service_name TEXT NOT NULL,
filename TEXT NOT NULL,
size_bytes INTEGER NOT NULL,
created_at REAL NOT NULL,
includes_image INTEGER NOT NULL,
platform_resources TEXT NOT NULL DEFAULT '[]',
checksum TEXT NOT NULL,
schedule_id INTEGER REFERENCES backup_schedules(id) ON DELETE SET NULL,
FOREIGN KEY (service_id) REFERENCES services(id) ON DELETE CASCADE
)
`);
query(`
INSERT INTO backups_new (
id, service_id, service_name, filename, size_bytes, created_at,
includes_image, platform_resources, checksum, schedule_id
)
SELECT
id, service_id, service_name, filename, size_bytes, created_at,
includes_image, platform_resources, checksum, schedule_id
FROM backups
`);
query('DROP TABLE backups');
query('ALTER TABLE backups_new RENAME TO backups');
query('CREATE INDEX IF NOT EXISTS idx_backups_service ON backups(service_id)');
query('CREATE INDEX IF NOT EXISTS idx_backups_created ON backups(created_at DESC)');
query('CREATE INDEX IF NOT EXISTS idx_backups_schedule ON backups(schedule_id)');
}
}

View File: migration-runner.ts

@@ -0,0 +1,100 @@
/**
* Migration runner - discovers, orders, and executes database migrations.
* Mirrors the pattern from @serve.zone/nupst.
*/
import type { TQueryFunction } from '../types.ts';
import { logger } from '../../logging.ts';
import { getErrorMessage } from '../../utils/error.ts';
import { Migration001Initial } from './migration-001-initial.ts';
import { Migration002TimestampsToReal } from './migration-002-timestamps-to-real.ts';
import { Migration003DomainManagement } from './migration-003-domain-management.ts';
import { Migration004RegistryColumns } from './migration-004-registry-columns.ts';
import { Migration005RegistryTokens } from './migration-005-registry-tokens.ts';
import { Migration006DropRegistryToken } from './migration-006-drop-registry-token.ts';
import { Migration007PlatformServices } from './migration-007-platform-services.ts';
import { Migration008CertPemContent } from './migration-008-cert-pem-content.ts';
import { Migration009BackupSystem } from './migration-009-backup-system.ts';
import { Migration010BackupSchedules } from './migration-010-backup-schedules.ts';
import { Migration011ScopeColumns } from './migration-011-scope-columns.ts';
import { Migration012GfsRetention } from './migration-012-gfs-retention.ts';
import type { BaseMigration } from './base-migration.ts';
export class MigrationRunner {
private query: TQueryFunction;
private migrations: BaseMigration[];
constructor(query: TQueryFunction) {
this.query = query;
// Register all migrations in order
this.migrations = [
new Migration001Initial(),
new Migration002TimestampsToReal(),
new Migration003DomainManagement(),
new Migration004RegistryColumns(),
new Migration005RegistryTokens(),
new Migration006DropRegistryToken(),
new Migration007PlatformServices(),
new Migration008CertPemContent(),
new Migration009BackupSystem(),
new Migration010BackupSchedules(),
new Migration011ScopeColumns(),
new Migration012GfsRetention(),
].sort((a, b) => a.version - b.version);
}
/** Run all pending migrations */
run(): void {
try {
const currentVersion = this.getMigrationVersion();
logger.info(`Current database migration version: ${currentVersion}`);
let applied = 0;
for (const migration of this.migrations) {
if (migration.version <= currentVersion) continue;
logger.info(`Running ${migration.getName()}...`);
migration.up(this.query);
this.setMigrationVersion(migration.version);
logger.success(`${migration.getName()} completed`);
applied++;
}
if (applied > 0) {
logger.success(`Applied ${applied} migration(s)`);
}
} catch (error) {
logger.error(`Migration failed: ${getErrorMessage(error)}`);
if (error instanceof Error && error.stack) {
logger.error(`Stack: ${error.stack}`);
}
throw error;
}
}
/** Get current migration version from the migrations table */
private getMigrationVersion(): number {
try {
const result = this.query<{ version?: number | null; [key: number]: unknown }>(
'SELECT MAX(version) as version FROM migrations',
);
if (result.length === 0) return 0;
const versionValue = result[0].version ?? (result[0] as Record<number, unknown>)[0];
return versionValue !== null && versionValue !== undefined ? Number(versionValue) : 0;
} catch {
// Table might not exist yet on fresh databases
return 0;
}
}
/** Record a migration version as applied */
private setMigrationVersion(version: number): void {
this.query('INSERT INTO migrations (version, applied_at) VALUES (?, ?)', [
version,
Date.now(),
]);
}
}
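
For orientation, a minimal wiring sketch of how the database class might hand its query function to the runner. The class name, import path, and method bodies below are assumptions; the actual call site is not shown in this diff.

// Sketch only: OneboxDatabase and the import path are illustrative.
import { MigrationRunner } from './migrations/index.ts';

class OneboxDatabase {
  // query<T> is assumed to match TQueryFunction: (sql, params?) => rows
  query<T>(sql: string, params?: unknown[]): T[] {
    // ...delegate to the underlying SQLite driver...
    return [];
  }

  runMigrations(): void {
    // The runner compares each migration's version against MAX(version)
    // in the migrations table and applies only the pending ones.
    new MigrationRunner(this.query.bind(this)).run();
  }
}

Adding a future migration then means creating one new file and appending it to the migrations array in the runner's constructor; the sort by version keeps execution order correct even if the registration order slips.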

File diff suppressed because one or more lines are too long

View File

@@ -3,6 +3,6 @@
*/
export const commitinfo = {
name: '@serve.zone/onebox',
version: '1.10.3',
version: '1.13.8',
description: 'Self-hosted container platform with automatic SSL and DNS - a mini Heroku for single servers'
}

View File

@@ -381,7 +381,7 @@ export const fetchServiceLogsAction = servicesStatePart.createAction<{
const response = await typedRequest.fire({
identity: context.identity!,
serviceName: dataArg.name,
lines: dataArg.lines || 200,
tail: dataArg.lines || 200,
});
return { ...statePartArg.getState(), currentServiceLogs: response.logs };
} catch (err) {

View File

@@ -1,6 +1,7 @@
import * as plugins from '../plugins.js';
import * as shared from './shared/index.js';
import * as appstate from '../appstate.js';
import * as interfaces from '../../ts_interfaces/index.js';
import {
DeesElement,
customElement,
@@ -11,6 +12,91 @@ import {
type TemplateResult,
} from '@design.estate/dees-element';
// ============================================================================
// Data transformation helpers
// Maps backend data shapes to @serve.zone/catalog component interfaces
// ============================================================================
function formatBytes(bytes: number): string {
if (!bytes || bytes === 0) return '0 B';
const units = ['B', 'KB', 'MB', 'GB', 'TB'];
const k = 1024;
const i = Math.floor(Math.log(bytes) / Math.log(k));
const value = bytes / Math.pow(k, i);
return `${value.toFixed(1)} ${units[i]}`;
}
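// e.g. formatBytes(1536) === '1.5 KB'; formatBytes(0) === '0 B'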
function parseImageString(image: string): { repository: string; tag: string } {
const lastColon = image.lastIndexOf(':');
const lastSlash = image.lastIndexOf('/');
if (lastColon > lastSlash && lastColon > 0) {
return {
repository: image.substring(0, lastColon),
tag: image.substring(lastColon + 1),
};
}
return { repository: image, tag: 'latest' };
}
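// e.g. parseImageString('ghcr.io/acme/app:1.2') → { repository: 'ghcr.io/acme/app', tag: '1.2' };
// a registry port alone ('localhost:5000/app') is not mistaken for a tag.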
function mapStatus(status: string): 'running' | 'stopped' | 'starting' | 'error' {
switch (status) {
case 'running': return 'running';
case 'starting': return 'starting';
case 'failed': return 'error';
case 'stopped':
case 'stopping':
default: return 'stopped';
}
}
function toServiceDetail(service: interfaces.data.IService) {
const parsed = parseImageString(service.image);
return {
name: service.name,
status: mapStatus(service.status),
image: service.image,
port: service.port,
domain: service.domain || null,
containerId: service.containerID || '',
created: service.createdAt ? new Date(service.createdAt).toLocaleString() : '-',
updated: service.updatedAt ? new Date(service.updatedAt).toLocaleString() : '-',
registry: service.useOneboxRegistry ? 'Onebox Registry' : (service.registry || 'Docker Hub'),
repository: service.registryRepository || parsed.repository,
tag: service.registryImageTag || parsed.tag,
};
}
function toServiceStats(stats: interfaces.data.IContainerStats) {
return {
cpu: stats.cpuPercent,
memory: formatBytes(stats.memoryUsed),
memoryLimit: formatBytes(stats.memoryLimit),
networkIn: formatBytes(stats.networkRx),
networkOut: formatBytes(stats.networkTx),
};
}
function parseLogs(logs: any): Array<{ timestamp: string; message: string }> {
if (Array.isArray(logs)) {
return logs.map((entry: any) => ({
timestamp: entry.timestamp ? String(entry.timestamp) : '',
message: entry.message || String(entry),
}));
}
if (typeof logs === 'string' && logs.trim()) {
return logs.split('\n').filter((line: string) => line.trim()).map((line: string) => {
const match = line.match(/^(\d{4}-\d{2}-\d{2}T[\d:.]+Z?)\s+(.*)/);
if (match) {
return { timestamp: match[1], message: match[2] };
}
return { timestamp: '', message: line };
});
}
return [];
}
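// Accepts both structured log entries and a raw newline-delimited string from
// the API; lines prefixed with an ISO-8601 timestamp are split into { timestamp, message }.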
const defaultStats = { cpu: 0, memory: '0 B', memoryLimit: '0 B', networkIn: '0 B', networkOut: '0 B' };
@customElement('ob-view-services')
export class ObViewServices extends DeesElement {
@state()
@@ -86,10 +172,16 @@ export class ObViewServices extends DeesElement {
}
private renderListView(): TemplateResult {
const mappedServices = this.servicesState.services.map((s) => ({
name: s.name,
image: s.image,
domain: s.domain || null,
status: mapStatus(s.status),
}));
return html`
<ob-sectionheading>Services</ob-sectionheading>
<sz-services-list-view
.services=${this.servicesState.services}
.services=${mappedServices}
@service-click=${(e: CustomEvent) => {
this.selectedServiceName = e.detail.name || e.detail.service?.name;
appstate.servicesStatePart.dispatchAction(appstate.fetchServiceAction, {
@@ -98,6 +190,9 @@ export class ObViewServices extends DeesElement {
appstate.servicesStatePart.dispatchAction(appstate.fetchServiceLogsAction, {
name: this.selectedServiceName,
});
appstate.servicesStatePart.dispatchAction(appstate.fetchServiceStatsAction, {
name: this.selectedServiceName,
});
this.currentView = 'detail';
}}
@service-action=${(e: CustomEvent) => this.handleServiceAction(e)}
@@ -124,12 +219,19 @@ export class ObViewServices extends DeesElement {
}
private renderDetailView(): TemplateResult {
const service = this.servicesState.currentService;
const transformedService = service ? toServiceDetail(service) : null;
const transformedStats = this.servicesState.currentServiceStats
? toServiceStats(this.servicesState.currentServiceStats)
: defaultStats;
const transformedLogs = parseLogs(this.servicesState.currentServiceLogs);
return html`
<ob-sectionheading>Service Details</ob-sectionheading>
<sz-service-detail-view
.service=${this.servicesState.currentService}
.logs=${this.servicesState.currentServiceLogs}
.stats=${this.servicesState.currentServiceStats}
.service=${transformedService}
.logs=${transformedLogs}
.stats=${transformedStats}
@back=${() => {
this.currentView = 'list';
}}