Compare commits
14 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| ac22617849 | |||
| e5a91f298c | |||
| 5e93710c42 | |||
| 331b5c8d3f | |||
| bf3418d0ed | |||
| 6d5e6f60f8 | |||
| de8922148e | |||
| e84eecf82c | |||
| c7641853cf | |||
| 6e2025db3e | |||
| 693031ecdd | |||
| a2cdadc5e3 | |||
| 948032fc9e | |||
| a400945371 |
2
.vscode/settings.json
vendored
2
.vscode/settings.json
vendored
@@ -1,7 +1,7 @@
|
|||||||
{
|
{
|
||||||
"json.schemas": [
|
"json.schemas": [
|
||||||
{
|
{
|
||||||
"fileMatch": ["/npmextra.json"],
|
"fileMatch": ["/.smartconfig.json"],
|
||||||
"schema": {
|
"schema": {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
|
|||||||
47
changelog.md
47
changelog.md
@@ -1,5 +1,52 @@
|
|||||||
# Changelog
|
# Changelog
|
||||||
|
|
||||||
|
## 2026-03-26 - 4.14.2 - fix(hub-core)
|
||||||
|
improve stream shutdown handling and connection cleanup in hub and edge
|
||||||
|
|
||||||
|
- Cancel edge upload loops immediately when the hub closes a stream instead of waiting for the window stall timeout.
|
||||||
|
- Reduce stalled stream timeouts from 120s to 55s to detect broken connections faster.
|
||||||
|
- Allow hub writer tasks to shut down gracefully before aborting to avoid unnecessary TCP resets.
|
||||||
|
- Enable TCP keepalive on hub upstream connections to detect silent SmartProxy failures.
|
||||||
|
- Remove leaked QUIC UDP session entries when setup fails or sessions end.
|
||||||
|
- Rename npmextra.json to .smartconfig.json and update package packaging references.
|
||||||
|
|
||||||
|
## 2026-03-21 - 4.14.1 - fix(remoteingress edge/hub crash recovery)
|
||||||
|
prevent duplicate crash recovery listeners and reset saved runtime state on shutdown
|
||||||
|
|
||||||
|
- Removes existing exit listeners before re-registering crash recovery handlers for edge and hub processes.
|
||||||
|
- Clears saved edge and hub configuration on stop to avoid stale restart state.
|
||||||
|
- Resets orphaned edge status intervals and restarts periodic status logging after successful crash recovery.
|
||||||
|
|
||||||
|
## 2026-03-20 - 4.14.0 - feat(quic)
|
||||||
|
add QUIC stability test coverage and bridge logging for hub and edge
|
||||||
|
|
||||||
|
- adds a long-running QUIC stability test with periodic echo probes and disconnect detection
|
||||||
|
- enables prefixed bridge logging for RemoteIngressHub and RemoteIngressEdge to improve runtime diagnostics
|
||||||
|
|
||||||
|
## 2026-03-20 - 4.13.2 - fix(remoteingress-core)
|
||||||
|
preserve reconnected edge entries during disconnect cleanup
|
||||||
|
|
||||||
|
- Guard edge removal so disconnect handlers only delete entries whose cancel token is already cancelled
|
||||||
|
- Prevents stale TCP and QUIC disconnect paths from removing a newer connection after an edge reconnects
|
||||||
|
|
||||||
|
## 2026-03-19 - 4.13.1 - fix(remoteingress-core)
|
||||||
|
default edge transport mode to QUIC with fallback
|
||||||
|
|
||||||
|
- Changes the default transport mode in edge connections from TCP/TLS to QUIC with fallback when no transport mode is explicitly configured.
|
||||||
|
|
||||||
|
## 2026-03-19 - 4.13.0 - feat(docs)
|
||||||
|
document TCP and UDP tunneling over TLS and QUIC
|
||||||
|
|
||||||
|
- update package description to reflect TCP and UDP support and TLS or QUIC transports
|
||||||
|
- refresh README architecture, features, and usage examples for UDP forwarding, QUIC transport, and PROXY protocol v1/v2 support
|
||||||
|
|
||||||
|
## 2026-03-19 - 4.12.1 - fix(remoteingress-core)
|
||||||
|
send PROXY v2 headers for UDP upstream sessions and expire idle UDP sessions
|
||||||
|
|
||||||
|
- Adds periodic idle UDP session expiry in edge tunnel and QUIC loops, including UDP close signaling for expired tunnel sessions.
|
||||||
|
- Sends the PROXY v2 header as the first datagram for UDP upstream connections in both standard and QUIC hub paths.
|
||||||
|
- Updates the UDP node test server to ignore the initial PROXY v2 datagram per source before echoing payload traffic.
|
||||||
|
|
||||||
## 2026-03-19 - 4.12.0 - feat(remoteingress-core)
|
## 2026-03-19 - 4.12.0 - feat(remoteingress-core)
|
||||||
add UDP tunneling over QUIC datagrams and expand transport-specific test coverage
|
add UDP tunneling over QUIC datagrams and expand transport-specific test coverage
|
||||||
|
|
||||||
|
|||||||
21
license.md
Normal file
21
license.md
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2024 Task Venture Capital GmbH
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
20
package.json
20
package.json
@@ -1,8 +1,8 @@
|
|||||||
{
|
{
|
||||||
"name": "@serve.zone/remoteingress",
|
"name": "@serve.zone/remoteingress",
|
||||||
"version": "4.12.0",
|
"version": "4.14.2",
|
||||||
"private": false,
|
"private": false,
|
||||||
"description": "Edge ingress tunnel for DcRouter - accepts incoming TCP connections at network edge and tunnels them to DcRouter SmartProxy preserving client IP via PROXY protocol v1.",
|
"description": "Edge ingress tunnel for DcRouter - tunnels TCP and UDP traffic from the network edge to SmartProxy over TLS or QUIC, preserving client IP via PROXY protocol.",
|
||||||
"main": "dist_ts/index.js",
|
"main": "dist_ts/index.js",
|
||||||
"typings": "dist_ts/index.d.ts",
|
"typings": "dist_ts/index.d.ts",
|
||||||
"type": "module",
|
"type": "module",
|
||||||
@@ -14,17 +14,17 @@
|
|||||||
"buildDocs": "(tsdoc)"
|
"buildDocs": "(tsdoc)"
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"@git.zone/tsbuild": "^4.1.2",
|
"@git.zone/tsbuild": "^4.4.0",
|
||||||
"@git.zone/tsbundle": "^2.8.3",
|
"@git.zone/tsbundle": "^2.10.0",
|
||||||
"@git.zone/tsrun": "^2.0.1",
|
"@git.zone/tsrun": "^2.0.2",
|
||||||
"@git.zone/tsrust": "^1.3.0",
|
"@git.zone/tsrust": "^1.3.2",
|
||||||
"@git.zone/tstest": "^3.1.8",
|
"@git.zone/tstest": "^3.6.0",
|
||||||
"@push.rocks/tapbundle": "^6.0.3",
|
"@push.rocks/tapbundle": "^6.0.3",
|
||||||
"@types/node": "^25.3.0"
|
"@types/node": "^25.5.0"
|
||||||
},
|
},
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@push.rocks/qenv": "^6.1.3",
|
"@push.rocks/qenv": "^6.1.3",
|
||||||
"@push.rocks/smartrust": "^1.2.1"
|
"@push.rocks/smartrust": "^1.3.2"
|
||||||
},
|
},
|
||||||
"repository": {
|
"repository": {
|
||||||
"type": "git",
|
"type": "git",
|
||||||
@@ -47,7 +47,7 @@
|
|||||||
"dist_rust/**/*",
|
"dist_rust/**/*",
|
||||||
"assets/**/*",
|
"assets/**/*",
|
||||||
"cli.js",
|
"cli.js",
|
||||||
"npmextra.json",
|
".smartconfig.json",
|
||||||
"readme.md"
|
"readme.md"
|
||||||
],
|
],
|
||||||
"keywords": [
|
"keywords": [
|
||||||
|
|||||||
2725
pnpm-lock.yaml
generated
2725
pnpm-lock.yaml
generated
File diff suppressed because it is too large
Load Diff
311
readme.md
311
readme.md
@@ -1,6 +1,6 @@
|
|||||||
# @serve.zone/remoteingress
|
# @serve.zone/remoteingress
|
||||||
|
|
||||||
Edge ingress tunnel for DcRouter — accepts incoming TCP connections at the network edge and tunnels them over a single encrypted TLS connection to a DcRouter SmartProxy instance, preserving the original client IP via PROXY protocol v1.
|
Edge ingress tunnel for DcRouter — tunnels **TCP and UDP** traffic from the network edge to a private DcRouter/SmartProxy cluster over encrypted TLS or QUIC connections, preserving the original client IP via PROXY protocol.
|
||||||
|
|
||||||
## Issue Reporting and Security
|
## Issue Reporting and Security
|
||||||
|
|
||||||
@@ -17,43 +17,46 @@ pnpm install @serve.zone/remoteingress
|
|||||||
`@serve.zone/remoteingress` uses a **Hub/Edge** topology with a high-performance Rust core and a TypeScript API surface:
|
`@serve.zone/remoteingress` uses a **Hub/Edge** topology with a high-performance Rust core and a TypeScript API surface:
|
||||||
|
|
||||||
```
|
```
|
||||||
┌─────────────────────┐ TLS Tunnel ┌─────────────────────┐
|
┌─────────────────────┐ TLS or QUIC Tunnel ┌─────────────────────┐
|
||||||
│ Network Edge │ ◄══════════════════════════► │ Private Cluster │
|
│ Network Edge │ ◄══════════════════════════► │ Private Cluster │
|
||||||
│ │ (multiplexed frames + │ │
|
│ │ TCP+TLS: frame mux │ │
|
||||||
│ RemoteIngressEdge │ shared-secret auth) │ RemoteIngressHub │
|
│ RemoteIngressEdge │ QUIC: native streams │ RemoteIngressHub │
|
||||||
│ Accepts client TCP │ │ Forwards to │
|
│ │ UDP: QUIC datagrams │ │
|
||||||
│ connections on │ │ SmartProxy on │
|
│ Accepts TCP & UDP │ │ Forwards to │
|
||||||
│ hub-assigned ports │ │ local ports │
|
│ on hub-assigned │ │ SmartProxy on │
|
||||||
|
│ ports │ │ local ports │
|
||||||
└─────────────────────┘ └─────────────────────┘
|
└─────────────────────┘ └─────────────────────┘
|
||||||
▲ │
|
▲ │
|
||||||
│ TCP from end users ▼
|
│ TCP + UDP from end users ▼
|
||||||
Internet DcRouter / SmartProxy
|
Internet DcRouter / SmartProxy
|
||||||
```
|
```
|
||||||
|
|
||||||
| Component | Role |
|
| Component | Role |
|
||||||
|-----------|------|
|
|-----------|------|
|
||||||
| **RemoteIngressEdge** | Deployed at the network edge (e.g. a VPS or cloud instance). Listens on ports assigned by the hub, accepts raw TCP connections, and multiplexes them over a single TLS tunnel to the hub. Ports are hot-reloadable — the hub can change them at runtime. |
|
| **RemoteIngressEdge** | Deployed at the network edge (VPS, cloud instance). Listens on TCP and UDP ports assigned by the hub, accepts connections/datagrams, and tunnels them to the hub. Ports are hot-reloadable at runtime. |
|
||||||
| **RemoteIngressHub** | Deployed alongside DcRouter/SmartProxy in a private cluster. Accepts edge connections, demuxes streams, and forwards each to SmartProxy with a PROXY protocol v1 header so the real client IP is preserved. Controls which ports each edge listens on. |
|
| **RemoteIngressHub** | Deployed alongside DcRouter/SmartProxy in a private cluster. Accepts edge connections, demuxes streams/datagrams, and forwards each to SmartProxy with PROXY protocol headers so the real client IP is preserved. |
|
||||||
| **Rust Binary** (`remoteingress-bin`) | The performance-critical networking core. Managed via `@push.rocks/smartrust` RustBridge IPC — you never interact with it directly. Cross-compiled for `linux/amd64` and `linux/arm64`. |
|
| **Rust Binary** (`remoteingress-bin`) | The performance-critical networking core. Managed via `@push.rocks/smartrust` RustBridge IPC — you never interact with it directly. Cross-compiled for `linux/amd64` and `linux/arm64`. |
|
||||||
|
|
||||||
### ✨ Key Features
|
### ✨ Key Features
|
||||||
|
|
||||||
- 🔒 **TLS-encrypted tunnel** between edge and hub (auto-generated self-signed cert or bring your own)
|
- 🔒 **Dual transport** — choose between TCP+TLS (frame-multiplexed) or QUIC (native stream multiplexing, zero head-of-line blocking)
|
||||||
- 🔀 **Multiplexed streams** — thousands of client connections flow over a single tunnel
|
- 🌐 **TCP + UDP tunneling** — tunnel any TCP connection or UDP datagram through the same edge/hub pair
|
||||||
- 🌐 **PROXY protocol v1** — SmartProxy sees the real client IP, not the tunnel IP
|
- 📋 **PROXY protocol v1 & v2** — SmartProxy sees the real client IP for both TCP (v1 text) and UDP (v2 binary)
|
||||||
|
- 🔀 **Multiplexed streams** — thousands of concurrent TCP connections over a single tunnel
|
||||||
|
- ⚡ **QUIC datagrams** — UDP traffic forwarded via QUIC unreliable datagrams for lowest possible latency
|
||||||
- 🔑 **Shared-secret authentication** — edges must present valid credentials to connect
|
- 🔑 **Shared-secret authentication** — edges must present valid credentials to connect
|
||||||
- 🎫 **Connection tokens** — encode all connection details into a single opaque string
|
- 🎫 **Connection tokens** — encode all connection details into a single opaque base64url string
|
||||||
- 📡 **STUN-based public IP discovery** — the edge automatically discovers its public IP via Cloudflare STUN
|
- 📡 **STUN-based public IP discovery** — edges automatically discover their public IP via Cloudflare STUN
|
||||||
- 🔄 **Auto-reconnect** with exponential backoff if the tunnel drops
|
- 🔄 **Auto-reconnect** with exponential backoff if the tunnel drops
|
||||||
- 🎛️ **Dynamic port configuration** — the hub assigns listen ports per edge and can hot-reload them at runtime via `FRAME_CONFIG` frames
|
- 🎛️ **Dynamic port configuration** — the hub assigns TCP and UDP listen ports per edge, hot-reloadable at runtime
|
||||||
- 📣 **Event-driven** — both Hub and Edge extend `EventEmitter` for real-time monitoring
|
- 📣 **Event-driven** — both Hub and Edge extend `EventEmitter` for real-time monitoring
|
||||||
- ⚡ **Rust core** — all frame encoding, TLS, and TCP proxying happen in native code for maximum throughput
|
|
||||||
- 🎚️ **3-tier QoS** — control frames, normal data, and sustained (elephant flow) traffic each get their own priority queue
|
- 🎚️ **3-tier QoS** — control frames, normal data, and sustained (elephant flow) traffic each get their own priority queue
|
||||||
- 📊 **Adaptive flow control** — per-stream windows scale with active stream count to prevent memory overuse
|
- 📊 **Adaptive flow control** — per-stream windows scale with active stream count to prevent memory overuse
|
||||||
|
- 🕒 **UDP session management** — automatic session tracking with 60s idle timeout and cleanup
|
||||||
|
|
||||||
## 🚀 Usage
|
## 🚀 Usage
|
||||||
|
|
||||||
Both classes are imported from the package and communicate with the Rust binary under the hood. All you need to do is configure and start them.
|
Both classes are imported from the package and communicate with the Rust binary under the hood.
|
||||||
|
|
||||||
### Setting Up the Hub (Private Cluster Side)
|
### Setting Up the Hub (Private Cluster Side)
|
||||||
|
|
||||||
@@ -63,32 +66,25 @@ import { RemoteIngressHub } from '@serve.zone/remoteingress';
|
|||||||
const hub = new RemoteIngressHub();
|
const hub = new RemoteIngressHub();
|
||||||
|
|
||||||
// Listen for events
|
// Listen for events
|
||||||
hub.on('edgeConnected', ({ edgeId }) => {
|
hub.on('edgeConnected', ({ edgeId }) => console.log(`Edge ${edgeId} connected`));
|
||||||
console.log(`Edge ${edgeId} connected`);
|
hub.on('edgeDisconnected', ({ edgeId }) => console.log(`Edge ${edgeId} disconnected`));
|
||||||
});
|
hub.on('streamOpened', ({ edgeId, streamId }) => console.log(`Stream ${streamId} from ${edgeId}`));
|
||||||
hub.on('edgeDisconnected', ({ edgeId }) => {
|
hub.on('streamClosed', ({ edgeId, streamId }) => console.log(`Stream ${streamId} closed`));
|
||||||
console.log(`Edge ${edgeId} disconnected`);
|
|
||||||
});
|
|
||||||
hub.on('streamOpened', ({ edgeId, streamId }) => {
|
|
||||||
console.log(`Stream ${streamId} opened from edge ${edgeId}`);
|
|
||||||
});
|
|
||||||
hub.on('streamClosed', ({ edgeId, streamId }) => {
|
|
||||||
console.log(`Stream ${streamId} closed from edge ${edgeId}`);
|
|
||||||
});
|
|
||||||
|
|
||||||
// Start the hub — it will listen for incoming edge TLS connections
|
// Start the hub — listens for edge connections on both TCP and QUIC (same port)
|
||||||
await hub.start({
|
await hub.start({
|
||||||
tunnelPort: 8443, // port edges connect to (default: 8443)
|
tunnelPort: 8443, // port edges connect to (default: 8443)
|
||||||
targetHost: '127.0.0.1', // SmartProxy host to forward streams to (default: 127.0.0.1)
|
targetHost: '127.0.0.1', // SmartProxy host to forward traffic to
|
||||||
});
|
});
|
||||||
|
|
||||||
// Register which edges are allowed to connect, including their listen ports
|
// Register allowed edges with TCP and UDP listen ports
|
||||||
await hub.updateAllowedEdges([
|
await hub.updateAllowedEdges([
|
||||||
{
|
{
|
||||||
id: 'edge-nyc-01',
|
id: 'edge-nyc-01',
|
||||||
secret: 'supersecrettoken1',
|
secret: 'supersecrettoken1',
|
||||||
listenPorts: [80, 443], // ports the edge should listen on
|
listenPorts: [80, 443], // TCP ports the edge should listen on
|
||||||
stunIntervalSecs: 300, // STUN discovery interval (default: 300)
|
listenPortsUdp: [53, 51820], // UDP ports (e.g., DNS, WireGuard)
|
||||||
|
stunIntervalSecs: 300,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
id: 'edge-fra-02',
|
id: 'edge-fra-02',
|
||||||
@@ -97,38 +93,29 @@ await hub.updateAllowedEdges([
|
|||||||
},
|
},
|
||||||
]);
|
]);
|
||||||
|
|
||||||
// Dynamically update ports for a connected edge — changes are pushed instantly
|
// Dynamically update ports — changes are pushed instantly to connected edges
|
||||||
await hub.updateAllowedEdges([
|
await hub.updateAllowedEdges([
|
||||||
{
|
{
|
||||||
id: 'edge-nyc-01',
|
id: 'edge-nyc-01',
|
||||||
secret: 'supersecrettoken1',
|
secret: 'supersecrettoken1',
|
||||||
listenPorts: [80, 443, 8443], // added port 8443 — edge picks it up in real time
|
listenPorts: [80, 443, 8443], // added TCP port 8443
|
||||||
|
listenPortsUdp: [53], // removed WireGuard UDP port
|
||||||
},
|
},
|
||||||
]);
|
]);
|
||||||
|
|
||||||
// Check status at any time
|
// Check status
|
||||||
const status = await hub.getStatus();
|
const status = await hub.getStatus();
|
||||||
console.log(status);
|
// { running: true, tunnelPort: 8443, connectedEdges: [...] }
|
||||||
// {
|
|
||||||
// running: true,
|
|
||||||
// tunnelPort: 8443,
|
|
||||||
// connectedEdges: [
|
|
||||||
// { edgeId: 'edge-nyc-01', connectedAt: 1700000000, activeStreams: 12 }
|
|
||||||
// ]
|
|
||||||
// }
|
|
||||||
|
|
||||||
// Graceful shutdown
|
|
||||||
await hub.stop();
|
await hub.stop();
|
||||||
```
|
```
|
||||||
|
|
||||||
### Setting Up the Edge (Network Edge Side)
|
### Setting Up the Edge (Network Edge Side)
|
||||||
|
|
||||||
The edge can be configured in two ways: with an **opaque connection token** (recommended) or with explicit config fields.
|
The edge can connect via **TCP+TLS** (default) or **QUIC** transport.
|
||||||
|
|
||||||
#### Option A: Connection Token (Recommended)
|
#### Option A: Connection Token (Recommended)
|
||||||
|
|
||||||
A single token encodes all connection details — ideal for provisioning edges at scale:
|
|
||||||
|
|
||||||
```typescript
|
```typescript
|
||||||
import { RemoteIngressEdge } from '@serve.zone/remoteingress';
|
import { RemoteIngressEdge } from '@serve.zone/remoteingress';
|
||||||
|
|
||||||
@@ -137,79 +124,64 @@ const edge = new RemoteIngressEdge();
|
|||||||
edge.on('tunnelConnected', () => console.log('Tunnel established'));
|
edge.on('tunnelConnected', () => console.log('Tunnel established'));
|
||||||
edge.on('tunnelDisconnected', () => console.log('Tunnel lost — will auto-reconnect'));
|
edge.on('tunnelDisconnected', () => console.log('Tunnel lost — will auto-reconnect'));
|
||||||
edge.on('publicIpDiscovered', ({ ip }) => console.log(`Public IP: ${ip}`));
|
edge.on('publicIpDiscovered', ({ ip }) => console.log(`Public IP: ${ip}`));
|
||||||
edge.on('portsAssigned', ({ listenPorts }) => console.log(`Listening on ports: ${listenPorts}`));
|
edge.on('portsAssigned', ({ listenPorts }) => console.log(`TCP ports: ${listenPorts}`));
|
||||||
edge.on('portsUpdated', ({ listenPorts }) => console.log(`Ports updated: ${listenPorts}`));
|
|
||||||
|
|
||||||
// Single token contains hubHost, hubPort, edgeId, and secret
|
|
||||||
await edge.start({
|
await edge.start({
|
||||||
token: 'eyJoIjoiaHViLmV4YW1wbGUuY29tIiwicCI6ODQ0MywiZSI6ImVkZ2UtbnljLTAxIiwicyI6InN1cGVyc2VjcmV0dG9rZW4xIn0',
|
token: 'eyJoIjoiaHViLmV4YW1wbGUuY29tIiwi...',
|
||||||
});
|
});
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Option B: Explicit Config
|
#### Option B: Explicit Config with QUIC Transport
|
||||||
|
|
||||||
```typescript
|
```typescript
|
||||||
import { RemoteIngressEdge } from '@serve.zone/remoteingress';
|
import { RemoteIngressEdge } from '@serve.zone/remoteingress';
|
||||||
|
|
||||||
const edge = new RemoteIngressEdge();
|
const edge = new RemoteIngressEdge();
|
||||||
|
|
||||||
edge.on('tunnelConnected', () => console.log('Tunnel established'));
|
|
||||||
edge.on('tunnelDisconnected', () => console.log('Tunnel lost — will auto-reconnect'));
|
|
||||||
edge.on('publicIpDiscovered', ({ ip }) => console.log(`Public IP: ${ip}`));
|
|
||||||
edge.on('portsAssigned', ({ listenPorts }) => console.log(`Listening on ports: ${listenPorts}`));
|
|
||||||
edge.on('portsUpdated', ({ listenPorts }) => console.log(`Ports updated: ${listenPorts}`));
|
|
||||||
|
|
||||||
await edge.start({
|
await edge.start({
|
||||||
hubHost: 'hub.example.com', // hostname or IP of the hub
|
hubHost: 'hub.example.com',
|
||||||
hubPort: 8443, // must match hub's tunnelPort (default: 8443)
|
hubPort: 8443,
|
||||||
edgeId: 'edge-nyc-01', // unique edge identifier
|
edgeId: 'edge-nyc-01',
|
||||||
secret: 'supersecrettoken1', // must match the hub's allowed edge secret
|
secret: 'supersecrettoken1',
|
||||||
|
transportMode: 'quic', // 'tcpTls' (default) | 'quic' | 'quicWithFallback'
|
||||||
});
|
});
|
||||||
|
|
||||||
// Check status at any time
|
|
||||||
const edgeStatus = await edge.getStatus();
|
const edgeStatus = await edge.getStatus();
|
||||||
console.log(edgeStatus);
|
// { running: true, connected: true, publicIp: '203.0.113.42', activeStreams: 5, listenPorts: [80, 443] }
|
||||||
// {
|
|
||||||
// running: true,
|
|
||||||
// connected: true,
|
|
||||||
// publicIp: '203.0.113.42',
|
|
||||||
// activeStreams: 5,
|
|
||||||
// listenPorts: [80, 443]
|
|
||||||
// }
|
|
||||||
|
|
||||||
// Graceful shutdown
|
|
||||||
await edge.stop();
|
await edge.stop();
|
||||||
```
|
```
|
||||||
|
|
||||||
|
#### Transport Modes
|
||||||
|
|
||||||
|
| Mode | Description |
|
||||||
|
|------|-------------|
|
||||||
|
| `'tcpTls'` | **Default.** Single TLS connection with frame-based multiplexing. Universal compatibility. |
|
||||||
|
| `'quic'` | QUIC with native stream multiplexing. Eliminates head-of-line blocking. Uses QUIC datagrams for UDP traffic. |
|
||||||
|
| `'quicWithFallback'` | Tries QUIC first (5s timeout), falls back to TCP+TLS if UDP is blocked by the network. |
|
||||||
|
|
||||||
### 🎫 Connection Tokens
|
### 🎫 Connection Tokens
|
||||||
|
|
||||||
Connection tokens let you distribute a single opaque string instead of four separate config values. The hub operator generates the token; the edge operator just pastes it in.
|
Encode all connection details into a single opaque string for easy distribution:
|
||||||
|
|
||||||
```typescript
|
```typescript
|
||||||
import { encodeConnectionToken, decodeConnectionToken } from '@serve.zone/remoteingress';
|
import { encodeConnectionToken, decodeConnectionToken } from '@serve.zone/remoteingress';
|
||||||
|
|
||||||
// Hub side: generate a token for a new edge
|
// Hub operator generates a token
|
||||||
const token = encodeConnectionToken({
|
const token = encodeConnectionToken({
|
||||||
hubHost: 'hub.example.com',
|
hubHost: 'hub.example.com',
|
||||||
hubPort: 8443,
|
hubPort: 8443,
|
||||||
edgeId: 'edge-nyc-01',
|
edgeId: 'edge-nyc-01',
|
||||||
secret: 'supersecrettoken1',
|
secret: 'supersecrettoken1',
|
||||||
});
|
});
|
||||||
console.log(token);
|
|
||||||
// => 'eyJoIjoiaHViLmV4YW1wbGUuY29tIiwi...'
|
// => 'eyJoIjoiaHViLmV4YW1wbGUuY29tIiwi...'
|
||||||
|
|
||||||
// Edge side: inspect a token (optional — start() does this automatically)
|
// Edge operator decodes (optional — start() does this automatically)
|
||||||
const data = decodeConnectionToken(token);
|
const data = decodeConnectionToken(token);
|
||||||
console.log(data);
|
// { hubHost: 'hub.example.com', hubPort: 8443, edgeId: 'edge-nyc-01', secret: '...' }
|
||||||
// {
|
|
||||||
// hubHost: 'hub.example.com',
|
|
||||||
// hubPort: 8443,
|
|
||||||
// edgeId: 'edge-nyc-01',
|
|
||||||
// secret: 'supersecrettoken1'
|
|
||||||
// }
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Tokens are base64url-encoded (URL-safe, no padding) — safe to pass as environment variables, CLI arguments, or store in config files.
|
Tokens are base64url-encoded — safe for environment variables, CLI arguments, and config files.
|
||||||
|
|
||||||
## 📖 API Reference
|
## 📖 API Reference
|
||||||
|
|
||||||
@@ -217,10 +189,10 @@ Tokens are base64url-encoded (URL-safe, no padding) — safe to pass as environm
|
|||||||
|
|
||||||
| Method / Property | Description |
|
| Method / Property | Description |
|
||||||
|-------------------|-------------|
|
|-------------------|-------------|
|
||||||
| `start(config?)` | Spawns the Rust binary and starts the tunnel listener. Config: `{ tunnelPort?: number, targetHost?: string }` |
|
| `start(config?)` | Start the hub. Config: `{ tunnelPort?, targetHost?, tls?: { certPem?, keyPem? } }`. Listens on both TCP and UDP (QUIC) on the tunnel port. |
|
||||||
| `stop()` | Gracefully shuts down the hub and kills the Rust process. |
|
| `stop()` | Graceful shutdown. |
|
||||||
| `updateAllowedEdges(edges)` | Dynamically update which edges are authorized and what ports they listen on. Each edge: `{ id: string, secret: string, listenPorts?: number[], stunIntervalSecs?: number }`. If ports change for a connected edge, the update is pushed immediately via a `FRAME_CONFIG` frame. |
|
| `updateAllowedEdges(edges)` | Set authorized edges. Each: `{ id, secret, listenPorts?, listenPortsUdp?, stunIntervalSecs? }`. Port changes are pushed to connected edges in real time. |
|
||||||
| `getStatus()` | Returns current hub status including connected edges and active stream counts. |
|
| `getStatus()` | Returns `{ running, tunnelPort, connectedEdges: [...] }`. |
|
||||||
| `running` | `boolean` — whether the Rust binary is alive. |
|
| `running` | `boolean` — whether the Rust binary is alive. |
|
||||||
|
|
||||||
**Events:** `edgeConnected`, `edgeDisconnected`, `streamOpened`, `streamClosed`
|
**Events:** `edgeConnected`, `edgeDisconnected`, `streamOpened`, `streamClosed`
|
||||||
@@ -229,9 +201,9 @@ Tokens are base64url-encoded (URL-safe, no padding) — safe to pass as environm
|
|||||||
|
|
||||||
| Method / Property | Description |
|
| Method / Property | Description |
|
||||||
|-------------------|-------------|
|
|-------------------|-------------|
|
||||||
| `start(config)` | Spawns the Rust binary and connects to the hub. Accepts `{ token: string }` or `IEdgeConfig`. Listen ports are received from the hub during handshake. |
|
| `start(config)` | Connect to hub. Accepts `{ token }` or `{ hubHost, hubPort, edgeId, secret, transportMode? }`. |
|
||||||
| `stop()` | Gracefully shuts down the edge and kills the Rust process. |
|
| `stop()` | Graceful shutdown. |
|
||||||
| `getStatus()` | Returns current edge status including connection state, public IP, listen ports, and active streams. |
|
| `getStatus()` | Returns `{ running, connected, publicIp, activeStreams, listenPorts }`. |
|
||||||
| `running` | `boolean` — whether the Rust binary is alive. |
|
| `running` | `boolean` — whether the Rust binary is alive. |
|
||||||
|
|
||||||
**Events:** `tunnelConnected`, `tunnelDisconnected`, `publicIpDiscovered`, `portsAssigned`, `portsUpdated`
|
**Events:** `tunnelConnected`, `tunnelDisconnected`, `publicIpDiscovered`, `portsAssigned`, `portsUpdated`
|
||||||
@@ -240,8 +212,8 @@ Tokens are base64url-encoded (URL-safe, no padding) — safe to pass as environm
|
|||||||
|
|
||||||
| Function | Description |
|
| Function | Description |
|
||||||
|----------|-------------|
|
|----------|-------------|
|
||||||
| `encodeConnectionToken(data)` | Encodes `IConnectionTokenData` into a base64url token string. |
|
| `encodeConnectionToken(data)` | Encodes connection info into a base64url token. |
|
||||||
| `decodeConnectionToken(token)` | Decodes a token back into `IConnectionTokenData`. Throws on malformed or incomplete tokens. |
|
| `decodeConnectionToken(token)` | Decodes a token. Throws on malformed input. |
|
||||||
|
|
||||||
### Interfaces
|
### Interfaces
|
||||||
|
|
||||||
@@ -249,6 +221,10 @@ Tokens are base64url-encoded (URL-safe, no padding) — safe to pass as environm
|
|||||||
interface IHubConfig {
|
interface IHubConfig {
|
||||||
tunnelPort?: number; // default: 8443
|
tunnelPort?: number; // default: 8443
|
||||||
targetHost?: string; // default: '127.0.0.1'
|
targetHost?: string; // default: '127.0.0.1'
|
||||||
|
tls?: {
|
||||||
|
certPem?: string; // PEM-encoded TLS certificate
|
||||||
|
keyPem?: string; // PEM-encoded TLS private key
|
||||||
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
interface IEdgeConfig {
|
interface IEdgeConfig {
|
||||||
@@ -256,6 +232,8 @@ interface IEdgeConfig {
|
|||||||
hubPort?: number; // default: 8443
|
hubPort?: number; // default: 8443
|
||||||
edgeId: string;
|
edgeId: string;
|
||||||
secret: string;
|
secret: string;
|
||||||
|
bindAddress?: string;
|
||||||
|
transportMode?: 'tcpTls' | 'quic' | 'quicWithFallback';
|
||||||
}
|
}
|
||||||
|
|
||||||
interface IConnectionTokenData {
|
interface IConnectionTokenData {
|
||||||
@@ -268,7 +246,9 @@ interface IConnectionTokenData {
|
|||||||
|
|
||||||
## 🔌 Wire Protocol
|
## 🔌 Wire Protocol
|
||||||
|
|
||||||
The tunnel uses a custom binary frame protocol over TLS:
|
### TCP+TLS Transport (Frame Protocol)
|
||||||
|
|
||||||
|
The tunnel uses a custom binary frame protocol over a single TLS connection:
|
||||||
|
|
||||||
```
|
```
|
||||||
[stream_id: 4 bytes BE][type: 1 byte][length: 4 bytes BE][payload: N bytes]
|
[stream_id: 4 bytes BE][type: 1 byte][length: 4 bytes BE][payload: N bytes]
|
||||||
@@ -276,116 +256,127 @@ The tunnel uses a custom binary frame protocol over TLS:
|
|||||||
|
|
||||||
| Frame Type | Value | Direction | Purpose |
|
| Frame Type | Value | Direction | Purpose |
|
||||||
|------------|-------|-----------|---------|
|
|------------|-------|-----------|---------|
|
||||||
| `OPEN` | `0x01` | Edge → Hub | Open a new stream; payload is PROXY v1 header |
|
| `OPEN` | `0x01` | Edge → Hub | Open TCP stream; payload is PROXY v1 header |
|
||||||
| `DATA` | `0x02` | Edge → Hub | Client data flowing upstream |
|
| `DATA` | `0x02` | Edge → Hub | Client data (upload) |
|
||||||
| `CLOSE` | `0x03` | Edge → Hub | Client closed the connection |
|
| `CLOSE` | `0x03` | Edge → Hub | Client closed connection |
|
||||||
| `DATA_BACK` | `0x04` | Hub → Edge | Response data flowing downstream |
|
| `DATA_BACK` | `0x04` | Hub → Edge | Response data (download) |
|
||||||
| `CLOSE_BACK` | `0x05` | Hub → Edge | Upstream (SmartProxy) closed the connection |
|
| `CLOSE_BACK` | `0x05` | Hub → Edge | Upstream closed connection |
|
||||||
| `CONFIG` | `0x06` | Hub → Edge | Runtime configuration update (e.g. port changes); payload is JSON |
|
| `CONFIG` | `0x06` | Hub → Edge | Runtime config update (JSON payload) |
|
||||||
| `PING` | `0x07` | Hub → Edge | Heartbeat probe (sent every 15s) |
|
| `PING` | `0x07` | Hub → Edge | Heartbeat probe (every 15s) |
|
||||||
| `PONG` | `0x08` | Edge → Hub | Heartbeat response |
|
| `PONG` | `0x08` | Edge → Hub | Heartbeat response |
|
||||||
| `WINDOW_UPDATE` | `0x09` | Edge → Hub | Per-stream flow control: edge consumed N bytes, hub can send more |
|
| `WINDOW_UPDATE` | `0x09` | Edge → Hub | Flow control: edge consumed N bytes |
|
||||||
| `WINDOW_UPDATE_BACK` | `0x0A` | Hub → Edge | Per-stream flow control: hub consumed N bytes, edge can send more |
|
| `WINDOW_UPDATE_BACK` | `0x0A` | Hub → Edge | Flow control: hub consumed N bytes |
|
||||||
|
| `UDP_OPEN` | `0x0B` | Edge → Hub | Open UDP session; payload is PROXY v2 header |
|
||||||
|
| `UDP_DATA` | `0x0C` | Edge → Hub | UDP datagram (upload) |
|
||||||
|
| `UDP_DATA_BACK` | `0x0D` | Hub → Edge | UDP datagram (download) |
|
||||||
|
| `UDP_CLOSE` | `0x0E` | Either | Close UDP session |
|
||||||
|
|
||||||
Max payload size per frame: **16 MB**. Stream IDs are 32-bit unsigned integers.
|
### QUIC Transport
|
||||||
|
|
||||||
|
When using QUIC, the frame protocol is replaced by native QUIC primitives:
|
||||||
|
|
||||||
|
- **TCP connections:** Each tunneled TCP connection gets its own QUIC bidirectional stream. No framing overhead.
|
||||||
|
- **UDP datagrams:** Forwarded via QUIC unreliable datagrams (RFC 9221). Format: `[session_id: 4 bytes][payload]`. Session open uses magic byte `0xFF`: `[session_id: 4][0xFF][PROXY v2 header]`.
|
||||||
|
- **Control channel:** First QUIC bidirectional stream carries auth handshake + config updates using `[type: 1][length: 4][payload]` format.
|
||||||
|
|
||||||
### Handshake Sequence
|
### Handshake Sequence
|
||||||
|
|
||||||
1. Edge opens a TLS connection to the hub
|
1. Edge opens a TLS or QUIC connection to the hub
|
||||||
2. Edge sends: `EDGE <edgeId> <secret>\n`
|
2. Edge sends: `EDGE <edgeId> <secret>\n`
|
||||||
3. Hub verifies credentials (constant-time comparison) and responds with JSON: `{"listenPorts":[...],"stunIntervalSecs":300}\n`
|
3. Hub verifies credentials (constant-time comparison) and responds with JSON:
|
||||||
4. Edge starts TCP listeners on the assigned ports
|
`{"listenPorts":[...],"listenPortsUdp":[...],"stunIntervalSecs":300}\n`
|
||||||
5. Frame protocol begins — `OPEN`/`DATA`/`CLOSE` frames flow in both directions
|
4. Edge starts TCP and UDP listeners on the assigned ports
|
||||||
6. Hub can push `CONFIG` frames at any time to update the edge's listen ports
|
5. Data flows — TCP frames/QUIC streams for TCP traffic, UDP frames/QUIC datagrams for UDP traffic
|
||||||
|
|
||||||
## 🎚️ QoS & Flow Control
|
## 🎚️ QoS & Flow Control
|
||||||
|
|
||||||
The tunnel multiplexer uses a **3-tier priority system** and **per-stream flow control** to ensure fair bandwidth sharing across thousands of concurrent streams.
|
### Priority Tiers (TCP+TLS Transport)
|
||||||
|
|
||||||
### Priority Tiers
|
| Tier | Frames | Behavior |
|
||||||
|
|------|--------|----------|
|
||||||
All outbound frames are queued into one of three priority levels:
|
| 🔴 **Control** | PING, PONG, WINDOW_UPDATE, OPEN, CLOSE, CONFIG | Always drained first. Never delayed. |
|
||||||
|
| 🟡 **Data** | DATA/DATA_BACK from normal streams, UDP frames | Drained when control queue is empty. |
|
||||||
| Tier | Queue | Frames | Behavior |
|
| 🟢 **Sustained** | DATA/DATA_BACK from elephant flows | Lowest priority with guaranteed **1 MB/s** drain rate. |
|
||||||
|------|-------|--------|----------|
|
|
||||||
| 🔴 **Control** (highest) | `ctrl_queue` | PING, PONG, WINDOW_UPDATE, OPEN, CLOSE, CONFIG | Always drained first. Never delayed. |
|
|
||||||
| 🟡 **Data** (normal) | `data_queue` | DATA, DATA_BACK from normal streams | Drained when ctrl is empty. Gated at 64 buffered items for backpressure. |
|
|
||||||
| 🟢 **Sustained** (lowest) | `sustained_queue` | DATA, DATA_BACK from elephant flows | Drained freely when ctrl+data are empty. Otherwise guaranteed **1 MB/s** via forced drain every second. |
|
|
||||||
|
|
||||||
This prevents large bulk transfers (e.g. git clones, file downloads) from starving interactive traffic and ensures `WINDOW_UPDATE` frames are never delayed — which would cause flow control deadlocks.
|
|
||||||
|
|
||||||
### Sustained Stream Classification
|
### Sustained Stream Classification
|
||||||
|
|
||||||
A stream is automatically classified as **sustained** (elephant flow) when:
|
A TCP stream is classified as **sustained** (elephant flow) when:
|
||||||
- It has been active for **>10 seconds**, AND
|
- Active for **>10 seconds**, AND
|
||||||
- Its average throughput exceeds **20 Mbit/s** (2.5 MB/s)
|
- Average throughput exceeds **20 Mbit/s** (2.5 MB/s)
|
||||||
|
|
||||||
Once classified, the stream's flow control window is locked to the **1 MB floor** and its data frames move to the lowest-priority queue. Classification is one-way — a stream never gets promoted back to normal.
|
Once classified, its flow control window locks to 1 MB and data frames move to the lowest-priority queue.
|
||||||
|
|
||||||
### Adaptive Per-Stream Windows
|
### Adaptive Per-Stream Windows
|
||||||
|
|
||||||
Each stream has a send window that limits bytes-in-flight. The window size adapts to the number of active streams using a shared **200 MB memory budget**:
|
Each TCP stream has a send window from a shared **200 MB budget**:
|
||||||
|
|
||||||
| Active Streams | Window per Stream |
|
| Active Streams | Window per Stream |
|
||||||
|---|---|
|
|---|---|
|
||||||
| 1–50 | 4 MB (maximum) |
|
| 1–50 | 4 MB (maximum) |
|
||||||
| 51–100 | Scales down (4 MB → 2 MB) |
|
| 51–200 | Scales down (4 MB → 1 MB) |
|
||||||
| 200+ | 1 MB (floor) |
|
| 200+ | 1 MB (floor) |
|
||||||
|
|
||||||
The consumer sends `WINDOW_UPDATE` frames after processing data, allowing the producer to send more. This prevents any single stream from consuming unbounded memory and provides natural backpressure.
|
UDP traffic uses no flow control — datagrams are fire-and-forget, matching UDP semantics.
|
||||||
|
|
||||||
## 💡 Example Scenarios
|
## 💡 Example Scenarios
|
||||||
|
|
||||||
### 1. Expose a Private Kubernetes Cluster to the Internet
|
### 1. Expose a Private Cluster to the Internet
|
||||||
|
|
||||||
Deploy an Edge on a public VPS, point your DNS to the VPS IP. The Edge tunnels all traffic to the Hub running inside the cluster, which hands it off to SmartProxy/DcRouter. Your cluster stays fully private — no public-facing ports needed.
|
Deploy an Edge on a public VPS, point DNS to its IP. The Edge tunnels all TCP and UDP traffic to the Hub running inside your private cluster. No public ports needed on the cluster.
|
||||||
|
|
||||||
### 2. Multi-Region Edge Ingress
|
### 2. Multi-Region Edge Ingress
|
||||||
|
|
||||||
Run multiple Edges in different geographic regions (NYC, Frankfurt, Tokyo) all connecting to a single Hub. Use GeoDNS to route users to their nearest Edge. The Hub sees the real client IPs via PROXY protocol regardless of which edge they connected through.
|
Run Edges in NYC, Frankfurt, and Tokyo — all connecting to a single Hub. Use GeoDNS to route users to their nearest Edge. PROXY protocol ensures the Hub sees real client IPs regardless of which Edge they entered through.
|
||||||
|
|
||||||
### 3. Secure API Exposure
|
### 3. UDP Forwarding (DNS, Gaming, VoIP)
|
||||||
|
|
||||||
Your backend runs on a private network with no direct internet access. An Edge on a minimal cloud instance acts as the only public entry point. TLS tunnel + shared-secret auth ensure only your authorized Edge can forward traffic.
|
Configure UDP listen ports alongside TCP ports. DNS queries, game server traffic, or VoIP packets are tunneled through the same edge/hub connection and forwarded to SmartProxy with a PROXY v2 binary header preserving the client's real IP.
|
||||||
|
|
||||||
### 4. Token-Based Edge Provisioning
|
```typescript
|
||||||
|
await hub.updateAllowedEdges([
|
||||||
Generate connection tokens on the hub side and distribute them to edge operators. Each edge only needs a single token string to connect — no manual configuration of host, port, ID, and secret.
|
{
|
||||||
|
id: 'edge-nyc-01',
|
||||||
|
secret: 'secret',
|
||||||
|
listenPorts: [80, 443], // TCP
|
||||||
|
listenPortsUdp: [53, 27015], // DNS + game server
|
||||||
|
},
|
||||||
|
]);
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. QUIC Transport for Low-Latency
|
||||||
|
|
||||||
|
Use QUIC transport to eliminate head-of-line blocking — a lost packet on one stream doesn't stall others. QUIC also enables 0-RTT reconnection and connection migration.
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
await edge.start({
|
||||||
|
hubHost: 'hub.example.com',
|
||||||
|
hubPort: 8443,
|
||||||
|
edgeId: 'edge-01',
|
||||||
|
secret: 'secret',
|
||||||
|
transportMode: 'quicWithFallback', // try QUIC, fall back to TLS if UDP blocked
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
### 5. Token-Based Edge Provisioning
|
||||||
|
|
||||||
|
Generate connection tokens on the hub side and distribute them to edge operators:
|
||||||
|
|
||||||
```typescript
|
```typescript
|
||||||
// Hub operator generates token
|
|
||||||
const token = encodeConnectionToken({
|
const token = encodeConnectionToken({
|
||||||
hubHost: 'hub.prod.example.com',
|
hubHost: 'hub.prod.example.com',
|
||||||
hubPort: 8443,
|
hubPort: 8443,
|
||||||
edgeId: 'edge-tokyo-01',
|
edgeId: 'edge-tokyo-01',
|
||||||
secret: 'generated-secret-abc123',
|
secret: 'generated-secret-abc123',
|
||||||
});
|
});
|
||||||
// Send `token` to the edge operator via secure channel
|
// Send `token` to the edge operator — a single string is all they need
|
||||||
|
|
||||||
// Edge operator starts with just the token
|
|
||||||
const edge = new RemoteIngressEdge();
|
const edge = new RemoteIngressEdge();
|
||||||
await edge.start({ token });
|
await edge.start({ token });
|
||||||
```
|
```
|
||||||
|
|
||||||
### 5. Dynamic Port Management
|
|
||||||
|
|
||||||
The hub controls which ports each edge listens on. Ports can be changed at runtime without restarting the edge — the hub pushes a `CONFIG` frame and the edge hot-reloads its TCP listeners.
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// Initially assign ports 80 and 443
|
|
||||||
await hub.updateAllowedEdges([
|
|
||||||
{ id: 'edge-nyc-01', secret: 'secret', listenPorts: [80, 443] },
|
|
||||||
]);
|
|
||||||
|
|
||||||
// Later, add port 8080 — the connected edge picks it up instantly
|
|
||||||
await hub.updateAllowedEdges([
|
|
||||||
{ id: 'edge-nyc-01', secret: 'secret', listenPorts: [80, 443, 8080] },
|
|
||||||
]);
|
|
||||||
```
|
|
||||||
|
|
||||||
## License and Legal Information
|
## License and Legal Information
|
||||||
|
|
||||||
This repository contains open-source code licensed under the MIT License. A copy of the license can be found in the [LICENSE](./LICENSE) file.
|
This repository contains open-source code licensed under the MIT License. A copy of the license can be found in the [license](./license.md) file.
|
||||||
|
|
||||||
**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.
|
**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
|||||||
@@ -36,6 +36,9 @@ struct EdgeStreamState {
|
|||||||
send_window: Arc<AtomicU32>,
|
send_window: Arc<AtomicU32>,
|
||||||
/// Notifier to wake the client reader when the window opens.
|
/// Notifier to wake the client reader when the window opens.
|
||||||
window_notify: Arc<Notify>,
|
window_notify: Arc<Notify>,
|
||||||
|
/// Per-stream cancellation token — cancelled on FRAME_CLOSE_BACK to promptly
|
||||||
|
/// terminate the upload loop instead of waiting for the window stall timeout.
|
||||||
|
cancel_token: CancellationToken,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Edge configuration (hub-host + credentials only; ports come from hub).
|
/// Edge configuration (hub-host + credentials only; ports come from hub).
|
||||||
@@ -220,7 +223,7 @@ async fn edge_main_loop(
|
|||||||
let mut backoff_ms: u64 = 1000;
|
let mut backoff_ms: u64 = 1000;
|
||||||
let max_backoff_ms: u64 = 30000;
|
let max_backoff_ms: u64 = 30000;
|
||||||
|
|
||||||
let transport_mode = config.transport_mode.unwrap_or(TransportMode::TcpTls);
|
let transport_mode = config.transport_mode.unwrap_or(TransportMode::QuicWithFallback);
|
||||||
|
|
||||||
// Build TLS config ONCE outside the reconnect loop — preserves session
|
// Build TLS config ONCE outside the reconnect loop — preserves session
|
||||||
// cache across reconnections for TLS session resumption (saves 1 RTT).
|
// cache across reconnections for TLS session resumption (saves 1 RTT).
|
||||||
@@ -399,7 +402,11 @@ async fn handle_edge_frame(
|
|||||||
}
|
}
|
||||||
FRAME_CLOSE_BACK => {
|
FRAME_CLOSE_BACK => {
|
||||||
let mut writers = client_writers.lock().await;
|
let mut writers = client_writers.lock().await;
|
||||||
writers.remove(&frame.stream_id);
|
if let Some(state) = writers.remove(&frame.stream_id) {
|
||||||
|
// Cancel the stream's token so the upload loop exits promptly
|
||||||
|
// instead of waiting for the window stall timeout.
|
||||||
|
state.cancel_token.cancel();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
FRAME_CONFIG => {
|
FRAME_CONFIG => {
|
||||||
if let Ok(update) = serde_json::from_slice::<ConfigUpdate>(&frame.payload) {
|
if let Ok(update) = serde_json::from_slice::<ConfigUpdate>(&frame.payload) {
|
||||||
@@ -642,8 +649,23 @@ async fn connect_to_hub_and_run(
|
|||||||
let liveness_timeout_dur = Duration::from_secs(45);
|
let liveness_timeout_dur = Duration::from_secs(45);
|
||||||
let mut last_activity = Instant::now();
|
let mut last_activity = Instant::now();
|
||||||
let mut liveness_deadline = Box::pin(sleep_until(last_activity + liveness_timeout_dur));
|
let mut liveness_deadline = Box::pin(sleep_until(last_activity + liveness_timeout_dur));
|
||||||
|
let mut next_udp_expiry = Instant::now() + Duration::from_secs(30);
|
||||||
|
|
||||||
let result = 'io_loop: loop {
|
let result = 'io_loop: loop {
|
||||||
|
// Expire idle UDP sessions periodically
|
||||||
|
if Instant::now() >= next_udp_expiry {
|
||||||
|
let mut sessions = udp_sessions.lock().await;
|
||||||
|
let expired = sessions.expire_idle();
|
||||||
|
for sid in &expired {
|
||||||
|
let close_frame = encode_frame(*sid, FRAME_UDP_CLOSE, &[]);
|
||||||
|
let _ = tunnel_data_tx.try_send(close_frame);
|
||||||
|
}
|
||||||
|
if !expired.is_empty() {
|
||||||
|
log::debug!("Expired {} idle UDP sessions", expired.len());
|
||||||
|
}
|
||||||
|
next_udp_expiry = Instant::now() + Duration::from_secs(30);
|
||||||
|
}
|
||||||
|
|
||||||
// Drain any buffered frames
|
// Drain any buffered frames
|
||||||
loop {
|
loop {
|
||||||
let frame = match tunnel_io.try_parse_frame() {
|
let frame = match tunnel_io.try_parse_frame() {
|
||||||
@@ -997,6 +1019,7 @@ async fn handle_client_connection(
|
|||||||
back_tx,
|
back_tx,
|
||||||
send_window: Arc::clone(&send_window),
|
send_window: Arc::clone(&send_window),
|
||||||
window_notify: Arc::clone(&window_notify),
|
window_notify: Arc::clone(&window_notify),
|
||||||
|
cancel_token: client_token.clone(),
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1078,8 +1101,8 @@ async fn handle_client_connection(
|
|||||||
tokio::select! {
|
tokio::select! {
|
||||||
_ = notified => continue,
|
_ = notified => continue,
|
||||||
_ = client_token.cancelled() => break,
|
_ = client_token.cancelled() => break,
|
||||||
_ = tokio::time::sleep(Duration::from_secs(120)) => {
|
_ = tokio::time::sleep(Duration::from_secs(55)) => {
|
||||||
log::warn!("Stream {} upload stalled (window empty for 120s)", stream_id);
|
log::warn!("Stream {} upload stalled (window empty for 55s)", stream_id);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1346,7 +1369,18 @@ async fn connect_to_hub_and_run_quic_with_connection(
|
|||||||
);
|
);
|
||||||
|
|
||||||
// Monitor control stream for config updates, connection health, and QUIC datagrams.
|
// Monitor control stream for config updates, connection health, and QUIC datagrams.
|
||||||
|
let mut next_udp_expiry_quic = Instant::now() + Duration::from_secs(30);
|
||||||
let result = 'quic_loop: loop {
|
let result = 'quic_loop: loop {
|
||||||
|
// Expire idle UDP sessions periodically
|
||||||
|
if Instant::now() >= next_udp_expiry_quic {
|
||||||
|
let mut sessions = udp_sessions_quic.lock().await;
|
||||||
|
let expired = sessions.expire_idle();
|
||||||
|
if !expired.is_empty() {
|
||||||
|
log::debug!("Expired {} idle QUIC UDP sessions", expired.len());
|
||||||
|
}
|
||||||
|
next_udp_expiry_quic = Instant::now() + Duration::from_secs(30);
|
||||||
|
}
|
||||||
|
|
||||||
tokio::select! {
|
tokio::select! {
|
||||||
// Read control messages from hub
|
// Read control messages from hub
|
||||||
ctrl_msg = quic_transport::read_ctrl_message(&mut ctrl_recv) => {
|
ctrl_msg = quic_transport::read_ctrl_message(&mut ctrl_recv) => {
|
||||||
@@ -1370,6 +1404,16 @@ async fn connect_to_hub_and_run_quic_with_connection(
|
|||||||
connection_token,
|
connection_token,
|
||||||
bind_address,
|
bind_address,
|
||||||
);
|
);
|
||||||
|
apply_udp_port_config_quic(
|
||||||
|
&update.listen_ports_udp,
|
||||||
|
&mut udp_listeners_quic,
|
||||||
|
&quic_conn,
|
||||||
|
&udp_sessions_quic,
|
||||||
|
&udp_sockets_quic,
|
||||||
|
next_stream_id,
|
||||||
|
connection_token,
|
||||||
|
bind_address,
|
||||||
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
quic_transport::CTRL_PING => {
|
quic_transport::CTRL_PING => {
|
||||||
|
|||||||
@@ -475,6 +475,12 @@ async fn handle_hub_frame(
|
|||||||
})??;
|
})??;
|
||||||
|
|
||||||
upstream.set_nodelay(true)?;
|
upstream.set_nodelay(true)?;
|
||||||
|
// TCP keepalive detects silent failures on the hub→SmartProxy connection
|
||||||
|
let ka = socket2::TcpKeepalive::new()
|
||||||
|
.with_time(Duration::from_secs(30));
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
let ka = ka.with_interval(Duration::from_secs(10));
|
||||||
|
let _ = socket2::SockRef::from(&upstream).set_tcp_keepalive(&ka);
|
||||||
upstream.write_all(proxy_header.as_bytes()).await?;
|
upstream.write_all(proxy_header.as_bytes()).await?;
|
||||||
|
|
||||||
let (mut up_read, mut up_write) =
|
let (mut up_read, mut up_write) =
|
||||||
@@ -485,7 +491,7 @@ async fn handle_hub_frame(
|
|||||||
let writer_token = stream_token.clone();
|
let writer_token = stream_token.clone();
|
||||||
let wub_tx = writer_tx.clone();
|
let wub_tx = writer_tx.clone();
|
||||||
let stream_counter_w = Arc::clone(&stream_counter);
|
let stream_counter_w = Arc::clone(&stream_counter);
|
||||||
let writer_for_edge_data = tokio::spawn(async move {
|
let mut writer_for_edge_data = tokio::spawn(async move {
|
||||||
let mut consumed_since_update: u32 = 0;
|
let mut consumed_since_update: u32 = 0;
|
||||||
loop {
|
loop {
|
||||||
tokio::select! {
|
tokio::select! {
|
||||||
@@ -569,8 +575,8 @@ async fn handle_hub_frame(
|
|||||||
tokio::select! {
|
tokio::select! {
|
||||||
_ = notified => continue,
|
_ = notified => continue,
|
||||||
_ = stream_token.cancelled() => break,
|
_ = stream_token.cancelled() => break,
|
||||||
_ = tokio::time::sleep(Duration::from_secs(120)) => {
|
_ = tokio::time::sleep(Duration::from_secs(55)) => {
|
||||||
log::warn!("Stream {} download stalled (window empty for 120s)", stream_id);
|
log::warn!("Stream {} download stalled (window empty for 55s)", stream_id);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -633,7 +639,11 @@ async fn handle_hub_frame(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
writer_for_edge_data.abort();
|
// Give the writer task 2s to shut down gracefully (sends TCP FIN
|
||||||
|
// via up_write.shutdown()) before force-aborting (which causes RST).
|
||||||
|
if tokio::time::timeout(Duration::from_secs(2), &mut writer_for_edge_data).await.is_err() {
|
||||||
|
writer_for_edge_data.abort();
|
||||||
|
}
|
||||||
Ok::<(), Box<dyn std::error::Error + Send + Sync>>(())
|
Ok::<(), Box<dyn std::error::Error + Send + Sync>>(())
|
||||||
}
|
}
|
||||||
.await;
|
.await;
|
||||||
@@ -706,6 +716,7 @@ async fn handle_hub_frame(
|
|||||||
let data_writer_tx = data_tx.clone();
|
let data_writer_tx = data_tx.clone();
|
||||||
let session_token = edge_token.child_token();
|
let session_token = edge_token.child_token();
|
||||||
let edge_id_str = edge_id.to_string();
|
let edge_id_str = edge_id.to_string();
|
||||||
|
let proxy_v2_header = frame.payload.clone();
|
||||||
|
|
||||||
// Channel for forwarding datagrams from edge to upstream
|
// Channel for forwarding datagrams from edge to upstream
|
||||||
let (udp_tx, mut udp_rx) = mpsc::channel::<Bytes>(256);
|
let (udp_tx, mut udp_rx) = mpsc::channel::<Bytes>(256);
|
||||||
@@ -728,6 +739,12 @@ async fn handle_hub_frame(
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Send PROXY v2 header as first datagram so SmartProxy knows the original client
|
||||||
|
if let Err(e) = upstream.send(&proxy_v2_header).await {
|
||||||
|
log::error!("UDP session {} failed to send PROXY v2 header: {}", stream_id, e);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
// Task: upstream -> edge (return datagrams)
|
// Task: upstream -> edge (return datagrams)
|
||||||
let upstream_recv = Arc::new(upstream);
|
let upstream_recv = Arc::new(upstream);
|
||||||
let upstream_send = upstream_recv.clone();
|
let upstream_send = upstream_recv.clone();
|
||||||
@@ -1066,7 +1083,11 @@ async fn handle_edge_connection(
|
|||||||
).await;
|
).await;
|
||||||
{
|
{
|
||||||
let mut edges = connected.lock().await;
|
let mut edges = connected.lock().await;
|
||||||
edges.remove(&edge_id);
|
// Only remove if the entry is still ours (not replaced by a reconnection).
|
||||||
|
// A replaced entry has a fresh non-cancelled token from the new handler.
|
||||||
|
if edges.get(&edge_id).map_or(false, |e| e.cancel_token.is_cancelled()) {
|
||||||
|
edges.remove(&edge_id);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
let _ = event_tx.try_send(HubEvent::EdgeDisconnected {
|
let _ = event_tx.try_send(HubEvent::EdgeDisconnected {
|
||||||
edge_id: edge_id.clone(),
|
edge_id: edge_id.clone(),
|
||||||
@@ -1367,6 +1388,8 @@ async fn handle_edge_connection_quic(
|
|||||||
let sessions = dgram_sessions.clone();
|
let sessions = dgram_sessions.clone();
|
||||||
let session_token = dgram_token.child_token();
|
let session_token = dgram_token.child_token();
|
||||||
let (tx, mut rx) = mpsc::channel::<Bytes>(256);
|
let (tx, mut rx) = mpsc::channel::<Bytes>(256);
|
||||||
|
let proxy_v2_data: Vec<u8> = proxy_data.to_vec();
|
||||||
|
let cleanup_sessions = sessions.clone();
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut s = sessions.lock().await;
|
let mut s = sessions.lock().await;
|
||||||
@@ -1378,11 +1401,20 @@ async fn handle_edge_connection_quic(
|
|||||||
Ok(s) => Arc::new(s),
|
Ok(s) => Arc::new(s),
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
log::error!("QUIC UDP session {} bind failed: {}", session_id, e);
|
log::error!("QUIC UDP session {} bind failed: {}", session_id, e);
|
||||||
|
cleanup_sessions.lock().await.remove(&session_id);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
if let Err(e) = upstream.connect((target.as_str(), dest_port)).await {
|
if let Err(e) = upstream.connect((target.as_str(), dest_port)).await {
|
||||||
log::error!("QUIC UDP session {} connect failed: {}", session_id, e);
|
log::error!("QUIC UDP session {} connect failed: {}", session_id, e);
|
||||||
|
cleanup_sessions.lock().await.remove(&session_id);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send PROXY v2 header as first datagram so SmartProxy knows the original client
|
||||||
|
if let Err(e) = upstream.send(&proxy_v2_data).await {
|
||||||
|
log::error!("QUIC UDP session {} failed to send PROXY v2 header: {}", session_id, e);
|
||||||
|
cleanup_sessions.lock().await.remove(&session_id);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1425,6 +1457,8 @@ async fn handle_edge_connection_quic(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
recv_handle.abort();
|
recv_handle.abort();
|
||||||
|
// Clean up session entry to prevent memory leak
|
||||||
|
cleanup_sessions.lock().await.remove(&session_id);
|
||||||
});
|
});
|
||||||
|
|
||||||
continue;
|
continue;
|
||||||
@@ -1520,7 +1554,11 @@ async fn handle_edge_connection_quic(
|
|||||||
|
|
||||||
{
|
{
|
||||||
let mut edges = connected.lock().await;
|
let mut edges = connected.lock().await;
|
||||||
edges.remove(&edge_id);
|
// Only remove if the entry is still ours (not replaced by a reconnection).
|
||||||
|
// A replaced entry has a fresh non-cancelled token from the new handler.
|
||||||
|
if edges.get(&edge_id).map_or(false, |e| e.cancel_token.is_cancelled()) {
|
||||||
|
edges.remove(&edge_id);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
let _ = event_tx.try_send(HubEvent::EdgeDisconnected {
|
let _ = event_tx.try_send(HubEvent::EdgeDisconnected {
|
||||||
edge_id,
|
edge_id,
|
||||||
@@ -1568,6 +1606,12 @@ async fn handle_quic_stream(
|
|||||||
};
|
};
|
||||||
|
|
||||||
let _ = upstream.set_nodelay(true);
|
let _ = upstream.set_nodelay(true);
|
||||||
|
// TCP keepalive detects silent failures on the hub→SmartProxy connection
|
||||||
|
let ka = socket2::TcpKeepalive::new()
|
||||||
|
.with_time(Duration::from_secs(30));
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
let ka = ka.with_interval(Duration::from_secs(10));
|
||||||
|
let _ = socket2::SockRef::from(&upstream).set_tcp_keepalive(&ka);
|
||||||
// Send PROXY header to SmartProxy
|
// Send PROXY header to SmartProxy
|
||||||
if let Err(e) = upstream.write_all(proxy_header.as_bytes()).await {
|
if let Err(e) = upstream.write_all(proxy_header.as_bytes()).await {
|
||||||
log::error!("QUIC stream {} failed to write PROXY header to upstream: {}", stream_id, e);
|
log::error!("QUIC stream {} failed to write PROXY header to upstream: {}", stream_id, e);
|
||||||
@@ -1578,7 +1622,7 @@ async fn handle_quic_stream(
|
|||||||
|
|
||||||
// Task: QUIC -> upstream (edge data to SmartProxy)
|
// Task: QUIC -> upstream (edge data to SmartProxy)
|
||||||
let writer_token = stream_token.clone();
|
let writer_token = stream_token.clone();
|
||||||
let writer_task = tokio::spawn(async move {
|
let mut writer_task = tokio::spawn(async move {
|
||||||
let mut buf = vec![0u8; 32768];
|
let mut buf = vec![0u8; 32768];
|
||||||
loop {
|
loop {
|
||||||
tokio::select! {
|
tokio::select! {
|
||||||
@@ -1629,7 +1673,11 @@ async fn handle_quic_stream(
|
|||||||
|
|
||||||
// Gracefully close the QUIC send stream
|
// Gracefully close the QUIC send stream
|
||||||
let _ = quic_send.finish();
|
let _ = quic_send.finish();
|
||||||
writer_task.abort();
|
// Give the writer task 2s to shut down gracefully (sends TCP FIN
|
||||||
|
// via up_write.shutdown()) before force-aborting (which causes RST).
|
||||||
|
if tokio::time::timeout(Duration::from_secs(2), &mut writer_task).await.is_err() {
|
||||||
|
writer_task.abort();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
|
|||||||
228
test/test.quic-stability.node.ts
Normal file
228
test/test.quic-stability.node.ts
Normal file
@@ -0,0 +1,228 @@
|
|||||||
|
import { expect, tap } from '@push.rocks/tapbundle';
|
||||||
|
import * as net from 'net';
|
||||||
|
import * as crypto from 'crypto';
|
||||||
|
import { RemoteIngressHub, RemoteIngressEdge } from '../ts/index.js';
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
// Helpers (same patterns as test.quic.node.ts)
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
async function findFreePorts(count: number): Promise<number[]> {
|
||||||
|
const servers: net.Server[] = [];
|
||||||
|
const ports: number[] = [];
|
||||||
|
for (let i = 0; i < count; i++) {
|
||||||
|
const server = net.createServer();
|
||||||
|
await new Promise<void>((resolve) => server.listen(0, '127.0.0.1', resolve));
|
||||||
|
ports.push((server.address() as net.AddressInfo).port);
|
||||||
|
servers.push(server);
|
||||||
|
}
|
||||||
|
await Promise.all(servers.map((s) => new Promise<void>((resolve) => s.close(() => resolve()))));
|
||||||
|
return ports;
|
||||||
|
}
|
||||||
|
|
||||||
|
type TrackingServer = net.Server & { destroyAll: () => void };
|
||||||
|
|
||||||
|
function startEchoServer(port: number, host: string): Promise<TrackingServer> {
|
||||||
|
return new Promise((resolve, reject) => {
|
||||||
|
const connections = new Set<net.Socket>();
|
||||||
|
const server = net.createServer((socket) => {
|
||||||
|
connections.add(socket);
|
||||||
|
socket.on('close', () => connections.delete(socket));
|
||||||
|
let proxyHeaderParsed = false;
|
||||||
|
let pendingBuf = Buffer.alloc(0);
|
||||||
|
socket.on('data', (data: Buffer) => {
|
||||||
|
if (!proxyHeaderParsed) {
|
||||||
|
pendingBuf = Buffer.concat([pendingBuf, data]);
|
||||||
|
const idx = pendingBuf.indexOf('\r\n');
|
||||||
|
if (idx !== -1) {
|
||||||
|
proxyHeaderParsed = true;
|
||||||
|
const remainder = pendingBuf.subarray(idx + 2);
|
||||||
|
if (remainder.length > 0) socket.write(remainder);
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
socket.write(data);
|
||||||
|
});
|
||||||
|
socket.on('error', () => {});
|
||||||
|
}) as TrackingServer;
|
||||||
|
server.destroyAll = () => {
|
||||||
|
for (const conn of connections) conn.destroy();
|
||||||
|
connections.clear();
|
||||||
|
};
|
||||||
|
server.on('error', reject);
|
||||||
|
server.listen(port, host, () => resolve(server));
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
async function forceCloseServer(server: TrackingServer): Promise<void> {
|
||||||
|
server.destroyAll();
|
||||||
|
await new Promise<void>((resolve) => server.close(() => resolve()));
|
||||||
|
}
|
||||||
|
|
||||||
|
function sendAndReceive(port: number, data: Buffer, timeoutMs = 30000): Promise<Buffer> {
|
||||||
|
return new Promise((resolve, reject) => {
|
||||||
|
const chunks: Buffer[] = [];
|
||||||
|
let totalReceived = 0;
|
||||||
|
const expectedLength = data.length;
|
||||||
|
let settled = false;
|
||||||
|
|
||||||
|
const client = net.createConnection({ host: '127.0.0.1', port }, () => {
|
||||||
|
client.write(data);
|
||||||
|
client.end();
|
||||||
|
});
|
||||||
|
|
||||||
|
const timer = setTimeout(() => {
|
||||||
|
if (!settled) {
|
||||||
|
settled = true;
|
||||||
|
client.destroy();
|
||||||
|
reject(new Error(`Timeout after ${timeoutMs}ms — received ${totalReceived}/${expectedLength} bytes`));
|
||||||
|
}
|
||||||
|
}, timeoutMs);
|
||||||
|
|
||||||
|
client.on('data', (chunk: Buffer) => {
|
||||||
|
chunks.push(chunk);
|
||||||
|
totalReceived += chunk.length;
|
||||||
|
if (totalReceived >= expectedLength && !settled) {
|
||||||
|
settled = true;
|
||||||
|
clearTimeout(timer);
|
||||||
|
client.destroy();
|
||||||
|
resolve(Buffer.concat(chunks));
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
client.on('end', () => {
|
||||||
|
if (!settled) {
|
||||||
|
settled = true;
|
||||||
|
clearTimeout(timer);
|
||||||
|
resolve(Buffer.concat(chunks));
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
client.on('error', (err) => {
|
||||||
|
if (!settled) {
|
||||||
|
settled = true;
|
||||||
|
clearTimeout(timer);
|
||||||
|
reject(err);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
function sha256(buf: Buffer): string {
|
||||||
|
return crypto.createHash('sha256').update(buf).digest('hex');
|
||||||
|
}
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------------------
// QUIC Long-Running Stability Test — 30-second probe run
// ---------------------------------------------------------------------------

// Shared fixtures: created in the setup test, torn down in the final test.
let hub: RemoteIngressHub;
let edge: RemoteIngressEdge;
let echoServer: TrackingServer;
let hubPort: number; // port the hub listens on for edge tunnel connections
let edgePort: number; // port exposed on the edge; also the echo server's port
// Incremented on every 'tunnelDisconnected' event; must remain 0 for the run to pass.
let disconnectCount = 0;
|
// Starts the echo target, hub, and edge, then waits for the QUIC tunnel to
// come up. Also registers a persistent disconnect counter used by later tests.
tap.test('QUIC stability setup: start echo server and QUIC tunnel', async () => {
  [hubPort, edgePort] = await findFreePorts(2);

  // Echo target binds the loopback alias the hub is configured to target.
  echoServer = await startEchoServer(edgePort, '127.0.0.2');

  hub = new RemoteIngressHub();
  edge = new RemoteIngressEdge();

  await hub.start({
    tunnelPort: hubPort,
    targetHost: '127.0.0.2',
  });

  await hub.updateAllowedEdges([
    { id: 'test-edge', secret: 'test-secret', listenPorts: [edgePort] },
  ]);

  // Resolve once the edge reports the tunnel is up; fail after 10s.
  // The listener is registered BEFORE edge.start() so the event cannot be missed.
  const connectedPromise = new Promise<void>((resolve, reject) => {
    const timeout = setTimeout(() => reject(new Error('QUIC edge did not connect within 10s')), 10000);
    edge.once('tunnelConnected', () => {
      clearTimeout(timeout);
      resolve();
    });
  });

  // Track disconnects — any disconnect during the test is a failure signal
  edge.on('tunnelDisconnected', () => {
    disconnectCount++;
    console.log(`[STABILITY] Unexpected tunnel disconnect #${disconnectCount}`);
  });

  await edge.start({
    hubHost: '127.0.0.1',
    hubPort,
    edgeId: 'test-edge',
    secret: 'test-secret',
    bindAddress: '127.0.0.1',
    transportMode: 'quic',
  });

  await connectedPromise;
  // Short grace period so listen ports are fully ready before probing begins.
  await new Promise((resolve) => setTimeout(resolve, 500));

  const status = await edge.getStatus();
  expect(status.connected).toBeTrue();
});
|
tap.test('QUIC stability: tunnel stays alive for 30s with periodic echo probes', async () => {
|
||||||
|
const testDurationMs = 30_000; // 30 seconds
|
||||||
|
const probeIntervalMs = 5_000; // probe every 5 seconds
|
||||||
|
const startTime = Date.now();
|
||||||
|
let probeCount = 0;
|
||||||
|
let failedProbes = 0;
|
||||||
|
|
||||||
|
while (Date.now() - startTime < testDurationMs) {
|
||||||
|
probeCount++;
|
||||||
|
const elapsed = Math.round((Date.now() - startTime) / 1000);
|
||||||
|
|
||||||
|
// Verify edge still reports connected
|
||||||
|
const status = await edge.getStatus();
|
||||||
|
if (!status.connected) {
|
||||||
|
throw new Error(`Tunnel disconnected at ${elapsed}s (probe #${probeCount})`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send a 4KB echo probe through the tunnel
|
||||||
|
const data = crypto.randomBytes(4096);
|
||||||
|
const hash = sha256(data);
|
||||||
|
try {
|
||||||
|
const received = await sendAndReceive(edgePort, data, 10000);
|
||||||
|
if (received.length !== 4096 || sha256(received) !== hash) {
|
||||||
|
failedProbes++;
|
||||||
|
console.log(`[STABILITY] Probe #${probeCount} at ${elapsed}s: data mismatch`);
|
||||||
|
} else {
|
||||||
|
console.log(`[STABILITY] Probe #${probeCount} at ${elapsed}s: OK`);
|
||||||
|
}
|
||||||
|
} catch (err) {
|
||||||
|
failedProbes++;
|
||||||
|
console.log(`[STABILITY] Probe #${probeCount} at ${elapsed}s: FAILED — ${err}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for next probe interval
|
||||||
|
const remaining = testDurationMs - (Date.now() - startTime);
|
||||||
|
if (remaining > 0) {
|
||||||
|
await new Promise((resolve) => setTimeout(resolve, Math.min(probeIntervalMs, remaining)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log(`[STABILITY] Completed: ${probeCount} probes, ${failedProbes} failures, ${disconnectCount} disconnects`);
|
||||||
|
expect(failedProbes).toEqual(0);
|
||||||
|
expect(disconnectCount).toEqual(0);
|
||||||
|
|
||||||
|
// Final status check
|
||||||
|
const finalStatus = await edge.getStatus();
|
||||||
|
expect(finalStatus.connected).toBeTrue();
|
||||||
|
});
|
||||||
|
|
||||||
|
// Shuts everything down. Order matters: stop the edge first so no new streams
// are opened, then the hub, then force-close the echo server (destroying any
// lingering client sockets so the process can exit cleanly).
tap.test('QUIC stability teardown', async () => {
  await edge.stop();
  await hub.stop();
  await forceCloseServer(echoServer);
});
|
export default tap.start();
|
||||||
@@ -29,15 +29,16 @@ async function findFreePorts(count: number): Promise<number[]> {
|
|||||||
function startUdpEchoServer(port: number, host: string): Promise<dgram.Socket> {
|
function startUdpEchoServer(port: number, host: string): Promise<dgram.Socket> {
|
||||||
return new Promise((resolve, reject) => {
|
return new Promise((resolve, reject) => {
|
||||||
const server = dgram.createSocket('udp4');
|
const server = dgram.createSocket('udp4');
|
||||||
let proxyHeaderReceived = false;
|
// Track which source endpoints have sent their PROXY v2 header.
|
||||||
|
// The hub sends a 28-byte PROXY v2 header as the first datagram per session.
|
||||||
|
const seenSources = new Set<string>();
|
||||||
|
|
||||||
server.on('message', (msg, rinfo) => {
|
server.on('message', (msg, rinfo) => {
|
||||||
if (!proxyHeaderReceived) {
|
const sourceKey = `${rinfo.address}:${rinfo.port}`;
|
||||||
// First datagram is the PROXY v2 header (28 bytes for IPv4)
|
if (!seenSources.has(sourceKey)) {
|
||||||
// In the current implementation, the hub connects directly via UDP
|
seenSources.add(sourceKey);
|
||||||
// so the first real datagram is the actual data (no PROXY header yet)
|
// First datagram from this source is the PROXY v2 header — skip it
|
||||||
// For now, just echo everything back
|
return;
|
||||||
proxyHeaderReceived = true;
|
|
||||||
}
|
}
|
||||||
// Echo back
|
// Echo back
|
||||||
server.send(msg, rinfo.port, rinfo.address);
|
server.send(msg, rinfo.port, rinfo.address);
|
||||||
|
|||||||
@@ -3,6 +3,6 @@
|
|||||||
*/
|
*/
|
||||||
export const commitinfo = {
|
export const commitinfo = {
|
||||||
name: '@serve.zone/remoteingress',
|
name: '@serve.zone/remoteingress',
|
||||||
version: '4.12.0',
|
version: '4.14.2',
|
||||||
description: 'Edge ingress tunnel for DcRouter - accepts incoming TCP connections at network edge and tunnels them to DcRouter SmartProxy preserving client IP via PROXY protocol v1.'
|
description: 'Edge ingress tunnel for DcRouter - tunnels TCP and UDP traffic from the network edge to SmartProxy over TLS or QUIC, preserving client IP via PROXY protocol.'
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -79,6 +79,15 @@ export class RemoteIngressEdge extends EventEmitter {
|
|||||||
plugins.path.join(packageDir, 'rust', 'target', 'debug', 'remoteingress-bin'),
|
plugins.path.join(packageDir, 'rust', 'target', 'debug', 'remoteingress-bin'),
|
||||||
],
|
],
|
||||||
searchSystemPath: false,
|
searchSystemPath: false,
|
||||||
|
logger: {
|
||||||
|
log: (level: string, message: string) => {
|
||||||
|
if (level === 'error') {
|
||||||
|
console.error(`[RemoteIngressEdge] ${message}`);
|
||||||
|
} else {
|
||||||
|
console.log(`[RemoteIngressEdge] ${message}`);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
});
|
});
|
||||||
|
|
||||||
// Forward events from Rust binary
|
// Forward events from Rust binary
|
||||||
@@ -130,7 +139,8 @@ export class RemoteIngressEdge extends EventEmitter {
|
|||||||
throw new Error('Failed to spawn remoteingress-bin');
|
throw new Error('Failed to spawn remoteingress-bin');
|
||||||
}
|
}
|
||||||
|
|
||||||
// Register crash recovery handler
|
// Register crash recovery handler (remove first to avoid duplicates)
|
||||||
|
this.bridge.removeListener('exit', this.handleCrashRecovery);
|
||||||
this.bridge.on('exit', this.handleCrashRecovery);
|
this.bridge.on('exit', this.handleCrashRecovery);
|
||||||
|
|
||||||
await this.bridge.sendCommand('startEdge', {
|
await this.bridge.sendCommand('startEdge', {
|
||||||
@@ -180,6 +190,7 @@ export class RemoteIngressEdge extends EventEmitter {
|
|||||||
this.bridge.kill();
|
this.bridge.kill();
|
||||||
this.started = false;
|
this.started = false;
|
||||||
}
|
}
|
||||||
|
this.savedConfig = null;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -211,6 +222,12 @@ export class RemoteIngressEdge extends EventEmitter {
|
|||||||
|
|
||||||
this.started = false;
|
this.started = false;
|
||||||
|
|
||||||
|
// Clear orphaned status interval from previous run
|
||||||
|
if (this.statusInterval) {
|
||||||
|
clearInterval(this.statusInterval);
|
||||||
|
this.statusInterval = undefined;
|
||||||
|
}
|
||||||
|
|
||||||
if (this.restartAttempts >= MAX_RESTART_ATTEMPTS) {
|
if (this.restartAttempts >= MAX_RESTART_ATTEMPTS) {
|
||||||
console.error('[RemoteIngressEdge] Max restart attempts reached, giving up');
|
console.error('[RemoteIngressEdge] Max restart attempts reached, giving up');
|
||||||
this.emit('crashRecoveryFailed');
|
this.emit('crashRecoveryFailed');
|
||||||
@@ -228,6 +245,7 @@ export class RemoteIngressEdge extends EventEmitter {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
this.bridge.removeListener('exit', this.handleCrashRecovery);
|
||||||
this.bridge.on('exit', this.handleCrashRecovery);
|
this.bridge.on('exit', this.handleCrashRecovery);
|
||||||
|
|
||||||
await this.bridge.sendCommand('startEdge', {
|
await this.bridge.sendCommand('startEdge', {
|
||||||
@@ -242,6 +260,21 @@ export class RemoteIngressEdge extends EventEmitter {
|
|||||||
this.started = true;
|
this.started = true;
|
||||||
this.restartAttempts = 0;
|
this.restartAttempts = 0;
|
||||||
this.restartBackoffMs = 1000;
|
this.restartBackoffMs = 1000;
|
||||||
|
|
||||||
|
// Restart periodic status logging
|
||||||
|
this.statusInterval = setInterval(async () => {
|
||||||
|
try {
|
||||||
|
const status = await this.getStatus();
|
||||||
|
console.log(
|
||||||
|
`[RemoteIngressEdge] Status: connected=${status.connected}, ` +
|
||||||
|
`streams=${status.activeStreams}, ports=[${status.listenPorts.join(',')}], ` +
|
||||||
|
`publicIp=${status.publicIp ?? 'unknown'}`
|
||||||
|
);
|
||||||
|
} catch {
|
||||||
|
// Bridge may be shutting down
|
||||||
|
}
|
||||||
|
}, 60_000);
|
||||||
|
|
||||||
console.log('[RemoteIngressEdge] Successfully recovered from crash');
|
console.log('[RemoteIngressEdge] Successfully recovered from crash');
|
||||||
this.emit('crashRecovered');
|
this.emit('crashRecovered');
|
||||||
} catch (err) {
|
} catch (err) {
|
||||||
|
|||||||
@@ -87,6 +87,15 @@ export class RemoteIngressHub extends EventEmitter {
|
|||||||
plugins.path.join(packageDir, 'rust', 'target', 'debug', 'remoteingress-bin'),
|
plugins.path.join(packageDir, 'rust', 'target', 'debug', 'remoteingress-bin'),
|
||||||
],
|
],
|
||||||
searchSystemPath: false,
|
searchSystemPath: false,
|
||||||
|
logger: {
|
||||||
|
log: (level: string, message: string) => {
|
||||||
|
if (level === 'error') {
|
||||||
|
console.error(`[RemoteIngressHub] ${message}`);
|
||||||
|
} else {
|
||||||
|
console.log(`[RemoteIngressHub] ${message}`);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
});
|
});
|
||||||
|
|
||||||
// Forward events from Rust binary
|
// Forward events from Rust binary
|
||||||
@@ -118,7 +127,8 @@ export class RemoteIngressHub extends EventEmitter {
|
|||||||
throw new Error('Failed to spawn remoteingress-bin');
|
throw new Error('Failed to spawn remoteingress-bin');
|
||||||
}
|
}
|
||||||
|
|
||||||
// Register crash recovery handler
|
// Register crash recovery handler (remove first to avoid duplicates)
|
||||||
|
this.bridge.removeListener('exit', this.handleCrashRecovery);
|
||||||
this.bridge.on('exit', this.handleCrashRecovery);
|
this.bridge.on('exit', this.handleCrashRecovery);
|
||||||
|
|
||||||
await this.bridge.sendCommand('startHub', {
|
await this.bridge.sendCommand('startHub', {
|
||||||
@@ -149,6 +159,8 @@ export class RemoteIngressHub extends EventEmitter {
|
|||||||
this.bridge.kill();
|
this.bridge.kill();
|
||||||
this.started = false;
|
this.started = false;
|
||||||
}
|
}
|
||||||
|
this.savedConfig = null;
|
||||||
|
this.savedEdges = [];
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -205,6 +217,7 @@ export class RemoteIngressHub extends EventEmitter {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
this.bridge.removeListener('exit', this.handleCrashRecovery);
|
||||||
this.bridge.on('exit', this.handleCrashRecovery);
|
this.bridge.on('exit', this.handleCrashRecovery);
|
||||||
|
|
||||||
const config = this.savedConfig;
|
const config = this.savedConfig;
|
||||||
|
|||||||
@@ -6,7 +6,8 @@
|
|||||||
"module": "NodeNext",
|
"module": "NodeNext",
|
||||||
"moduleResolution": "NodeNext",
|
"moduleResolution": "NodeNext",
|
||||||
"esModuleInterop": true,
|
"esModuleInterop": true,
|
||||||
"verbatimModuleSyntax": true
|
"verbatimModuleSyntax": true,
|
||||||
|
"types": ["node"]
|
||||||
},
|
},
|
||||||
"exclude": [
|
"exclude": [
|
||||||
"dist_*/**/*.d.ts"
|
"dist_*/**/*.d.ts"
|
||||||
|
|||||||
Reference in New Issue
Block a user