diff --git a/changelog.md b/changelog.md index 68c2ecb..a64deae 100644 --- a/changelog.md +++ b/changelog.md @@ -1,5 +1,13 @@ # Changelog +## 2026-03-16 - 4.7.0 - feat(edge,protocol,test) +add configurable edge bind address and expand flow-control test coverage + +- adds an optional bindAddress configuration for edge TCP listeners, defaulting to 0.0.0.0 when not provided +- passes bindAddress through the TypeScript edge client and Rust edge runtime so local test setups can bind to localhost +- adds protocol unit tests for adaptive stream window sizing and window update frame encoding/decoding +- introduces end-to-end flow-control tests and updates the test script to build before running tests + ## 2026-03-16 - 4.6.1 - fix(remoteingress-core) avoid spurious tunnel disconnect events and increase control channel capacity diff --git a/package.json b/package.json index 73675b7..25507f1 100644 --- a/package.json +++ b/package.json @@ -9,7 +9,7 @@ "author": "Task Venture Capital GmbH", "license": "MIT", "scripts": { - "test": "(tstest test/ --verbose --logfile --timeout 60)", + "test": "(pnpm run build && tstest test/ --verbose --logfile --timeout 60)", "build": "(tsbuild tsfolders --allowimplicitany && tsrust)", "buildDocs": "(tsdoc)" }, diff --git a/rust/crates/remoteingress-core/src/edge.rs b/rust/crates/remoteingress-core/src/edge.rs index 9ddd2aa..81f47ef 100644 --- a/rust/crates/remoteingress-core/src/edge.rs +++ b/rust/crates/remoteingress-core/src/edge.rs @@ -32,6 +32,10 @@ pub struct EdgeConfig { pub hub_port: u16, pub edge_id: String, pub secret: String, + /// Optional bind address for TCP listeners (defaults to "0.0.0.0"). + /// Useful for testing on localhost where edge and upstream share the same machine. + #[serde(default)] + pub bind_address: Option<String>, } /// Handshake config received from hub after authentication. 
@@ -416,6 +420,7 @@ async fn connect_to_hub_and_run( // Start TCP listeners for initial ports (hot-reloadable) let mut port_listeners: HashMap> = HashMap::new(); + let bind_address = config.bind_address.as_deref().unwrap_or("0.0.0.0"); apply_port_config( &handshake.listen_ports, &mut port_listeners, @@ -426,6 +431,7 @@ async fn connect_to_hub_and_run( next_stream_id, &config.edge_id, connection_token, + bind_address, ); // Heartbeat: liveness timeout detects silent hub failures @@ -492,6 +498,7 @@ async fn connect_to_hub_and_run( next_stream_id, &config.edge_id, connection_token, + bind_address, ); } } @@ -557,6 +564,7 @@ fn apply_port_config( next_stream_id: &Arc, edge_id: &str, connection_token: &CancellationToken, + bind_address: &str, ) { let new_set: std::collections::HashSet = new_ports.iter().copied().collect(); let old_set: std::collections::HashSet = port_listeners.keys().copied().collect(); @@ -579,8 +587,9 @@ fn apply_port_config( let edge_id = edge_id.to_string(); let port_token = connection_token.child_token(); + let bind_addr = bind_address.to_string(); let handle = tokio::spawn(async move { - let listener = match TcpListener::bind(("0.0.0.0", port)).await { + let listener = match TcpListener::bind((bind_addr.as_str(), port)).await { Ok(l) => l, Err(e) => { log::error!("Failed to bind port {}: {}", port, e); @@ -840,6 +849,7 @@ mod tests { hub_port: 9999, edge_id: "e1".to_string(), secret: "sec".to_string(), + bind_address: None, }; let json = serde_json::to_string(&config).unwrap(); let back: EdgeConfig = serde_json::from_str(&json).unwrap(); @@ -955,6 +965,7 @@ mod tests { hub_port: 8443, edge_id: "test-edge".to_string(), secret: "test-secret".to_string(), + bind_address: None, }); let status = edge.get_status().await; assert!(!status.running); @@ -971,6 +982,7 @@ mod tests { hub_port: 8443, edge_id: "e".to_string(), secret: "s".to_string(), + bind_address: None, }); let rx1 = edge.take_event_rx().await; assert!(rx1.is_some()); @@ -985,6 +997,7 @@ 
mod tests { hub_port: 8443, edge_id: "e".to_string(), secret: "s".to_string(), + bind_address: None, }); edge.stop().await; // should not panic let status = edge.get_status().await; diff --git a/rust/crates/remoteingress-protocol/Cargo.toml b/rust/crates/remoteingress-protocol/Cargo.toml index 377530f..3d4d8aa 100644 --- a/rust/crates/remoteingress-protocol/Cargo.toml +++ b/rust/crates/remoteingress-protocol/Cargo.toml @@ -5,3 +5,6 @@ edition = "2021" [dependencies] tokio = { version = "1", features = ["io-util"] } + +[dev-dependencies] +tokio = { version = "1", features = ["io-util", "macros", "rt"] } diff --git a/rust/crates/remoteingress-protocol/src/lib.rs b/rust/crates/remoteingress-protocol/src/lib.rs index 1e5cdc9..38b6089 100644 --- a/rust/crates/remoteingress-protocol/src/lib.rs +++ b/rust/crates/remoteingress-protocol/src/lib.rs @@ -345,4 +345,134 @@ mod tests { assert_eq!(&pong[0..4], &0u32.to_be_bytes()); assert_eq!(pong.len(), FRAME_HEADER_SIZE); } + + // --- compute_window_for_stream_count tests --- + + #[test] + fn test_adaptive_window_zero_streams() { + // 0 streams treated as 1: 32MB/1 = 32MB → clamped to 4MB max + assert_eq!(compute_window_for_stream_count(0), INITIAL_STREAM_WINDOW); + } + + #[test] + fn test_adaptive_window_one_stream() { + // 32MB/1 = 32MB → clamped to 4MB max + assert_eq!(compute_window_for_stream_count(1), INITIAL_STREAM_WINDOW); + } + + #[test] + fn test_adaptive_window_at_max_boundary() { + // 32MB/8 = 4MB = exactly INITIAL_STREAM_WINDOW + assert_eq!(compute_window_for_stream_count(8), INITIAL_STREAM_WINDOW); + } + + #[test] + fn test_adaptive_window_just_below_max() { + // 32MB/9 = 3,728,270 — first value below INITIAL_STREAM_WINDOW + let w = compute_window_for_stream_count(9); + assert!(w < INITIAL_STREAM_WINDOW); + assert_eq!(w, (32 * 1024 * 1024u64 / 9) as u32); + } + + #[test] + fn test_adaptive_window_16_streams() { + // 32MB/16 = 2MB + assert_eq!(compute_window_for_stream_count(16), 2 * 1024 * 1024); + } + + #[test] + 
fn test_adaptive_window_100_streams() { + // 32MB/100 = 335,544 bytes (~327KB) + let w = compute_window_for_stream_count(100); + assert_eq!(w, (32 * 1024 * 1024u64 / 100) as u32); + assert!(w > 64 * 1024); // above floor + assert!(w < INITIAL_STREAM_WINDOW as u32); // below ceiling + } + + #[test] + fn test_adaptive_window_200_streams() { + // 32MB/200 = 167,772 bytes (~163KB), above 64KB floor + let w = compute_window_for_stream_count(200); + assert_eq!(w, (32 * 1024 * 1024u64 / 200) as u32); + assert!(w > 64 * 1024); + } + + #[test] + fn test_adaptive_window_500_streams() { + // 32MB/500 = 67,108 bytes (~65.5KB), just above 64KB floor + let w = compute_window_for_stream_count(500); + assert_eq!(w, (32 * 1024 * 1024u64 / 500) as u32); + assert!(w > 64 * 1024); + } + + #[test] + fn test_adaptive_window_at_min_boundary() { + // 32MB/512 = 65,536 = exactly 64KB floor + assert_eq!(compute_window_for_stream_count(512), 64 * 1024); + } + + #[test] + fn test_adaptive_window_below_min_clamped() { + // 32MB/513 = 65,408 → clamped up to 64KB + assert_eq!(compute_window_for_stream_count(513), 64 * 1024); + } + + #[test] + fn test_adaptive_window_1000_streams() { + // 32MB/1000 = 33,554 → clamped to 64KB + assert_eq!(compute_window_for_stream_count(1000), 64 * 1024); + } + + #[test] + fn test_adaptive_window_max_u32() { + // Extreme: u32::MAX streams → tiny value → clamped to 64KB + assert_eq!(compute_window_for_stream_count(u32::MAX), 64 * 1024); + } + + #[test] + fn test_adaptive_window_monotonically_decreasing() { + // Window should decrease (or stay same) as stream count increases + let mut prev = compute_window_for_stream_count(1); + for n in [2, 5, 10, 50, 100, 200, 500, 512, 1000] { + let w = compute_window_for_stream_count(n); + assert!(w <= prev, "window increased from {} to {} at n={}", prev, w, n); + prev = w; + } + } + + #[test] + fn test_adaptive_window_total_budget_bounded() { + // active × per_stream_window should never exceed 32MB (+ clamp overhead for high N) 
+ for n in [1, 10, 50, 100, 200, 500] { + let w = compute_window_for_stream_count(n); + let total = w as u64 * n as u64; + assert!(total <= 32 * 1024 * 1024, "total {}MB exceeds budget at n={}", total / (1024*1024), n); + } + } + + // --- encode/decode window_update roundtrip --- + + #[test] + fn test_window_update_roundtrip() { + for &increment in &[0u32, 1, 64 * 1024, INITIAL_STREAM_WINDOW, MAX_WINDOW_SIZE, u32::MAX] { + let frame = encode_window_update(42, FRAME_WINDOW_UPDATE, increment); + assert_eq!(frame[4], FRAME_WINDOW_UPDATE); + let decoded = decode_window_update(&frame[FRAME_HEADER_SIZE..]); + assert_eq!(decoded, Some(increment)); + } + } + + #[test] + fn test_window_update_back_roundtrip() { + let frame = encode_window_update(7, FRAME_WINDOW_UPDATE_BACK, 1234567); + assert_eq!(frame[4], FRAME_WINDOW_UPDATE_BACK); + assert_eq!(decode_window_update(&frame[FRAME_HEADER_SIZE..]), Some(1234567)); + } + + #[test] + fn test_decode_window_update_malformed() { + assert_eq!(decode_window_update(&[]), None); + assert_eq!(decode_window_update(&[0, 0, 0]), None); + assert_eq!(decode_window_update(&[0, 0, 0, 0, 0]), None); + } } diff --git a/test/test.flowcontrol.node.ts b/test/test.flowcontrol.node.ts new file mode 100644 index 0000000..0125b4a --- /dev/null +++ b/test/test.flowcontrol.node.ts @@ -0,0 +1,475 @@ +import { expect, tap } from '@push.rocks/tapbundle'; +import * as net from 'net'; +import * as crypto from 'crypto'; +import { RemoteIngressHub, RemoteIngressEdge } from '../ts/index.js'; + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +/** Find N free ports by binding to port 0 and collecting OS-assigned ports. 
*/ +async function findFreePorts(count: number): Promise<number[]> { + const servers: net.Server[] = []; + const ports: number[] = []; + for (let i = 0; i < count; i++) { + const server = net.createServer(); + await new Promise<void>((resolve) => server.listen(0, '127.0.0.1', resolve)); + ports.push((server.address() as net.AddressInfo).port); + servers.push(server); + } + await Promise.all(servers.map((s) => new Promise<void>((resolve) => s.close(() => resolve())))); + return ports; +} + +type TrackingServer = net.Server & { destroyAll: () => void }; + +/** Start a TCP echo server that tracks connections for force-close. */ +function startEchoServer(port: number, host: string): Promise<TrackingServer> { + return new Promise((resolve, reject) => { + const connections = new Set<net.Socket>(); + const server = net.createServer((socket) => { + connections.add(socket); + socket.on('close', () => connections.delete(socket)); + + // Skip PROXY protocol v1 header line before echoing + let proxyHeaderParsed = false; + let pendingBuf = Buffer.alloc(0); + socket.on('data', (data: Buffer) => { + if (!proxyHeaderParsed) { + pendingBuf = Buffer.concat([pendingBuf, data]); + const idx = pendingBuf.indexOf('\r\n'); + if (idx !== -1) { + proxyHeaderParsed = true; + const remainder = pendingBuf.subarray(idx + 2); + if (remainder.length > 0) { + socket.write(remainder); + } + } + return; + } + socket.write(data); + }); + socket.on('error', () => {}); + }) as TrackingServer; + + server.destroyAll = () => { + for (const conn of connections) conn.destroy(); + connections.clear(); + }; + + server.on('error', reject); + server.listen(port, host, () => resolve(server)); + }); +} + +/** + * Start a server that sends a large response immediately on first data received. + * Does NOT wait for end (the tunnel protocol has no half-close). + * On receiving first data chunk after PROXY header, sends responseSize bytes then closes. 
+ */ +function startLargeResponseServer(port: number, host: string, responseSize: number): Promise<TrackingServer> { + return new Promise((resolve, reject) => { + const connections = new Set<net.Socket>(); + const server = net.createServer((socket) => { + connections.add(socket); + socket.on('close', () => connections.delete(socket)); + + let proxyHeaderParsed = false; + let pendingBuf = Buffer.alloc(0); + let responseSent = false; + + socket.on('data', (data: Buffer) => { + if (!proxyHeaderParsed) { + pendingBuf = Buffer.concat([pendingBuf, data]); + const idx = pendingBuf.indexOf('\r\n'); + if (idx !== -1) { + proxyHeaderParsed = true; + const remainder = pendingBuf.subarray(idx + 2); + if (remainder.length > 0 && !responseSent) { + responseSent = true; + sendLargeResponse(socket, responseSize); + } + } + return; + } + if (!responseSent) { + responseSent = true; + sendLargeResponse(socket, responseSize); + } + }); + socket.on('error', () => {}); + }) as TrackingServer; + + server.destroyAll = () => { + for (const conn of connections) conn.destroy(); + connections.clear(); + }; + + server.on('error', reject); + server.listen(port, host, () => resolve(server)); + }); +} + +function sendLargeResponse(socket: net.Socket, totalBytes: number) { + const chunkSize = 32 * 1024; + let sent = 0; + const writeChunk = () => { + while (sent < totalBytes) { + const toWrite = Math.min(chunkSize, totalBytes - sent); + // Use a deterministic pattern for verification + const chunk = Buffer.alloc(toWrite, (sent % 256) & 0xff); + const canContinue = socket.write(chunk); + sent += toWrite; + if (!canContinue) { + socket.once('drain', writeChunk); + return; + } + } + socket.end(); + }; + writeChunk(); +} + +/** Force-close a server: destroy all connections, then close. 
*/ +async function forceCloseServer(server: TrackingServer): Promise<void> { + server.destroyAll(); + await new Promise<void>((resolve) => server.close(() => resolve())); +} + +interface TestTunnel { + hub: RemoteIngressHub; + edge: RemoteIngressEdge; + edgePort: number; + cleanup: () => Promise<void>; +} + +/** + * Start a full hub + edge tunnel. + * Edge binds to 127.0.0.1, upstream server binds to 127.0.0.2. + * Hub targetHost = 127.0.0.2 so hub -> upstream doesn't loop back to edge. + */ +async function startTunnel(edgePort: number, hubPort: number): Promise<TestTunnel> { + const hub = new RemoteIngressHub(); + const edge = new RemoteIngressEdge(); + + await hub.start({ + tunnelPort: hubPort, + targetHost: '127.0.0.2', + }); + + await hub.updateAllowedEdges([ + { id: 'test-edge', secret: 'test-secret', listenPorts: [edgePort] }, + ]); + + const connectedPromise = new Promise<void>((resolve, reject) => { + const timeout = setTimeout(() => reject(new Error('Edge did not connect within 10s')), 10000); + edge.once('tunnelConnected', () => { + clearTimeout(timeout); + resolve(); + }); + }); + + await edge.start({ + hubHost: '127.0.0.1', + hubPort, + edgeId: 'test-edge', + secret: 'test-secret', + bindAddress: '127.0.0.1', + }); + + await connectedPromise; + await new Promise((resolve) => setTimeout(resolve, 500)); + + return { + hub, + edge, + edgePort, + cleanup: async () => { + await edge.stop(); + await hub.stop(); + }, + }; +} + +/** + * Send data through the tunnel and collect the echoed response. 
+ */ +function sendAndReceive(port: number, data: Buffer, timeoutMs = 30000): Promise<Buffer> { + return new Promise((resolve, reject) => { + const chunks: Buffer[] = []; + let totalReceived = 0; + const expectedLength = data.length; + let settled = false; + + const client = net.createConnection({ host: '127.0.0.1', port }, () => { + client.write(data); + client.end(); + }); + + const timer = setTimeout(() => { + if (!settled) { + settled = true; + client.destroy(); + reject(new Error(`Timeout after ${timeoutMs}ms — received ${totalReceived}/${expectedLength} bytes`)); + } + }, timeoutMs); + + client.on('data', (chunk: Buffer) => { + chunks.push(chunk); + totalReceived += chunk.length; + if (totalReceived >= expectedLength && !settled) { + settled = true; + clearTimeout(timer); + client.destroy(); + resolve(Buffer.concat(chunks)); + } + }); + + client.on('end', () => { + if (!settled) { + settled = true; + clearTimeout(timer); + resolve(Buffer.concat(chunks)); + } + }); + + client.on('error', (err) => { + if (!settled) { + settled = true; + clearTimeout(timer); + reject(err); + } + }); + }); +} + +/** + * Connect to the tunnel, send a small request, and collect a large response. + * Does NOT call end() — the tunnel has no half-close. + * Instead, collects until expectedResponseSize bytes arrive. 
+ */ +function sendAndReceiveLarge( + port: number, + data: Buffer, + expectedResponseSize: number, + timeoutMs = 60000, +): Promise<Buffer> { + return new Promise((resolve, reject) => { + const chunks: Buffer[] = []; + let totalReceived = 0; + let settled = false; + + const client = net.createConnection({ host: '127.0.0.1', port }, () => { + client.write(data); + // Do NOT call client.end() — the server will respond immediately + // and the tunnel CLOSE will happen when the download finishes + }); + + const timer = setTimeout(() => { + if (!settled) { + settled = true; + client.destroy(); + reject(new Error(`Timeout after ${timeoutMs}ms — received ${totalReceived}/${expectedResponseSize} bytes`)); + } + }, timeoutMs); + + client.on('data', (chunk: Buffer) => { + chunks.push(chunk); + totalReceived += chunk.length; + if (totalReceived >= expectedResponseSize && !settled) { + settled = true; + clearTimeout(timer); + client.destroy(); + resolve(Buffer.concat(chunks)); + } + }); + + client.on('end', () => { + if (!settled) { + settled = true; + clearTimeout(timer); + resolve(Buffer.concat(chunks)); + } + }); + + client.on('error', (err) => { + if (!settled) { + settled = true; + clearTimeout(timer); + reject(err); + } + }); + }); +} + +function sha256(buf: Buffer): string { + return crypto.createHash('sha256').update(buf).digest('hex'); +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +let tunnel: TestTunnel; +let echoServer: TrackingServer; +let hubPort: number; +let edgePort: number; + +tap.test('setup: start echo server and tunnel', async () => { + [hubPort, edgePort] = await findFreePorts(2); + + echoServer = await startEchoServer(edgePort, '127.0.0.2'); + tunnel = await startTunnel(edgePort, hubPort); + + expect(tunnel.hub.running).toBeTrue(); +}); + +tap.test('single stream: 32MB transfer exceeding initial 4MB window', async () => { + const 
size = 32 * 1024 * 1024; + const data = crypto.randomBytes(size); + const expectedHash = sha256(data); + + const received = await sendAndReceive(edgePort, data, 60000); + + expect(received.length).toEqual(size); + expect(sha256(received)).toEqual(expectedHash); +}); + +tap.test('200 concurrent streams with 64KB each', async () => { + const streamCount = 200; + const payloadSize = 64 * 1024; + + const promises = Array.from({ length: streamCount }, () => { + const data = crypto.randomBytes(payloadSize); + const hash = sha256(data); + return sendAndReceive(edgePort, data, 30000).then((received) => ({ + sent: hash, + received: sha256(received), + sizeOk: received.length === payloadSize, + })); + }); + + const results = await Promise.all(promises); + const failures = results.filter((r) => !r.sizeOk || r.sent !== r.received); + + expect(failures.length).toEqual(0); +}); + +tap.test('512 concurrent streams at minimum window boundary (16KB each)', async () => { + const streamCount = 512; + const payloadSize = 16 * 1024; + + const promises = Array.from({ length: streamCount }, () => { + const data = crypto.randomBytes(payloadSize); + const hash = sha256(data); + return sendAndReceive(edgePort, data, 60000).then((received) => ({ + sent: hash, + received: sha256(received), + sizeOk: received.length === payloadSize, + })); + }); + + const results = await Promise.all(promises); + const failures = results.filter((r) => !r.sizeOk || r.sent !== r.received); + + expect(failures.length).toEqual(0); +}); + +tap.test('asymmetric transfer: 4KB request -> 4MB response', async () => { + // Swap to large-response server + await forceCloseServer(echoServer); + const responseSize = 4 * 1024 * 1024; // 4 MB + const largeServer = await startLargeResponseServer(edgePort, '127.0.0.2', responseSize); + + try { + const requestData = crypto.randomBytes(4 * 1024); // 4 KB + const received = await sendAndReceiveLarge(edgePort, requestData, responseSize, 60000); + 
expect(received.length).toEqual(responseSize); + } finally { + // Always restore echo server even on failure + await forceCloseServer(largeServer); + echoServer = await startEchoServer(edgePort, '127.0.0.2'); + } +}); + +tap.test('100 streams x 1MB each (100MB total exceeding 32MB budget)', async () => { + const streamCount = 100; + const payloadSize = 1 * 1024 * 1024; + + const promises = Array.from({ length: streamCount }, () => { + const data = crypto.randomBytes(payloadSize); + const hash = sha256(data); + return sendAndReceive(edgePort, data, 120000).then((received) => ({ + sent: hash, + received: sha256(received), + sizeOk: received.length === payloadSize, + })); + }); + + const results = await Promise.all(promises); + const failures = results.filter((r) => !r.sizeOk || r.sent !== r.received); + + expect(failures.length).toEqual(0); +}); + +tap.test('active stream counter tracks concurrent connections', async () => { + const N = 50; + + // Open N connections and keep them alive (send data but don't close) + const sockets: net.Socket[] = []; + const connectPromises = Array.from({ length: N }, () => { + return new Promise((resolve, reject) => { + const sock = net.createConnection({ host: '127.0.0.1', port: edgePort }, () => { + resolve(sock); + }); + sock.on('error', () => {}); + setTimeout(() => reject(new Error('connect timeout')), 5000); + }); + }); + + const connected = await Promise.all(connectPromises); + sockets.push(...connected); + + // Brief delay for stream registration to propagate + await new Promise((resolve) => setTimeout(resolve, 500)); + + // Verify the edge reports >= N active streams. + // This counter is the input to compute_window_for_stream_count(), + // so its accuracy determines whether adaptive window sizing is correct. 
+ const status = await tunnel.edge.getStatus(); + expect(status.activeStreams).toBeGreaterThanOrEqual(N); + + // Clean up: destroy all sockets (the tunnel's 300s stream timeout will handle cleanup) + for (const sock of sockets) { + sock.destroy(); + } +}); + +tap.test('50 streams x 2MB each (forces multiple window refills per stream)', async () => { + // At 50 concurrent streams: adaptive window = 32MB/50 = 655KB per stream + // Each stream sends 2MB → needs ~3 WINDOW_UPDATE refill cycles per stream + const streamCount = 50; + const payloadSize = 2 * 1024 * 1024; + + const promises = Array.from({ length: streamCount }, () => { + const data = crypto.randomBytes(payloadSize); + const hash = sha256(data); + return sendAndReceive(edgePort, data, 120000).then((received) => ({ + sent: hash, + received: sha256(received), + sizeOk: received.length === payloadSize, + })); + }); + + const results = await Promise.all(promises); + const failures = results.filter((r) => !r.sizeOk || r.sent !== r.received); + + expect(failures.length).toEqual(0); +}); + +tap.test('teardown: stop tunnel and echo server', async () => { + await tunnel.cleanup(); + await forceCloseServer(echoServer); +}); + +export default tap.start(); diff --git a/ts/00_commitinfo_data.ts b/ts/00_commitinfo_data.ts index e2b2e05..e0d8984 100644 --- a/ts/00_commitinfo_data.ts +++ b/ts/00_commitinfo_data.ts @@ -3,6 +3,6 @@ */ export const commitinfo = { name: '@serve.zone/remoteingress', - version: '4.6.1', + version: '4.7.0', description: 'Edge ingress tunnel for DcRouter - accepts incoming TCP connections at network edge and tunnels them to DcRouter SmartProxy preserving client IP via PROXY protocol v1.' 
} diff --git a/ts/classes.remoteingressedge.ts b/ts/classes.remoteingressedge.ts index 77f792e..863e6a8 100644 --- a/ts/classes.remoteingressedge.ts +++ b/ts/classes.remoteingressedge.ts @@ -14,6 +14,7 @@ type TEdgeCommands = { hubPort: number; edgeId: string; secret: string; + bindAddress?: string; }; result: { started: boolean }; }; @@ -38,6 +39,7 @@ export interface IEdgeConfig { hubPort?: number; edgeId: string; secret: string; + bindAddress?: string; } const MAX_RESTART_ATTEMPTS = 10; @@ -132,6 +134,7 @@ export class RemoteIngressEdge extends EventEmitter { hubPort: edgeConfig.hubPort ?? 8443, edgeId: edgeConfig.edgeId, secret: edgeConfig.secret, + ...(edgeConfig.bindAddress ? { bindAddress: edgeConfig.bindAddress } : {}), }); this.started = true; @@ -227,6 +230,7 @@ export class RemoteIngressEdge extends EventEmitter { hubPort: this.savedConfig.hubPort ?? 8443, edgeId: this.savedConfig.edgeId, secret: this.savedConfig.secret, + ...(this.savedConfig.bindAddress ? { bindAddress: this.savedConfig.bindAddress } : {}), }); this.started = true;