Compare commits

...

16 Commits

Author SHA1 Message Date
236d6d16ee v4.8.12 2026-03-17 13:27:26 +00:00
81bbb33016 fix(tunnel): prevent tunnel backpressure buffering from exhausting memory and cancel stream handlers before TLS shutdown 2026-03-17 13:27:26 +00:00
79af6fd425 v4.8.11 2026-03-17 12:57:04 +00:00
f71b2f1876 fix(remoteingress-core): stop data frame send loops promptly when stream cancellation is triggered 2026-03-17 12:57:04 +00:00
0161a2589c v4.8.10 2026-03-17 12:47:03 +00:00
bfd9e58b4f fix(remoteingress-core): guard tunnel frame sends with cancellation to prevent async send deadlocks 2026-03-17 12:47:03 +00:00
9a8760c18d v4.8.9 2026-03-17 12:35:15 +00:00
c77caa89fc fix(repo): no changes to commit 2026-03-17 12:35:15 +00:00
04586aab39 v4.8.8 2026-03-17 12:33:28 +00:00
f9a739858d fix(remoteingress-core): cancel stale edge connections when an edge reconnects 2026-03-17 12:33:28 +00:00
da01fbeecd v4.8.7 2026-03-17 12:04:20 +00:00
264e8eeb97 fix(remoteingress-core): perform graceful TLS shutdown on edge and hub tunnel streams 2026-03-17 12:04:20 +00:00
9922c3b020 v4.8.6 2026-03-17 11:50:22 +00:00
38cde37cff fix(remoteingress-core): initialize disconnect reason only when set in hub loop break paths 2026-03-17 11:50:22 +00:00
64572827e5 v4.8.5 2026-03-17 11:48:44 +00:00
c4e26198b9 fix(repo): no changes to commit 2026-03-17 11:48:44 +00:00
7 changed files with 526 additions and 30 deletions


@@ -1,5 +1,51 @@
# Changelog
## 2026-03-17 - 4.8.12 - fix(tunnel)
prevent tunnel backpressure buffering from exhausting memory and cancel stream handlers before TLS shutdown
- stop self-waking and writing new frames while a flush is pending to avoid unbounded TLS session buffer growth under load
- reorder edge and hub shutdown cleanup so stream cancellation happens before TLS close_notify, preventing handlers from blocking on dead channels
- add load tests covering sustained large transfers, burst traffic, and rapid stream churn to verify tunnel stability
## 2026-03-17 - 4.8.11 - fix(remoteingress-core)
stop data frame send loops promptly when stream cancellation is triggered
- Use cancellation-aware tokio::select! around data channel sends in both edge and hub stream forwarding paths
- Prevent stalled or noisy shutdown behavior when stream or client cancellation happens while awaiting frame delivery
## 2026-03-17 - 4.8.10 - fix(remoteingress-core)
guard tunnel frame sends with cancellation to prevent async send deadlocks
- Wrap OPEN, CLOSE, CLOSE_BACK, WINDOW_UPDATE, and cleanup channel sends in cancellation-aware tokio::select! blocks.
- Avoid indefinite blocking when tunnel, stream, or writer tasks are cancelled while awaiting channel capacity.
- Improve shutdown reliability for edge and hub stream handling under tunnel failure conditions.
## 2026-03-17 - 4.8.9 - fix(repo)
no changes to commit
## 2026-03-17 - 4.8.8 - fix(remoteingress-core)
cancel stale edge connections when an edge reconnects
- Remove any existing edge entry before registering a reconnected edge
- Trigger the previous connection's cancellation token so stale sessions shut down immediately instead of waiting for TCP keepalive
## 2026-03-17 - 4.8.7 - fix(remoteingress-core)
perform graceful TLS shutdown on edge and hub tunnel streams
- Send TLS close_notify before cleanup to avoid peer disconnect warnings on both tunnel endpoints
- Wrap stream shutdown in a 2 second timeout so connection teardown does not block cleanup
## 2026-03-17 - 4.8.6 - fix(remoteingress-core)
initialize disconnect reason only when set in hub loop break paths
- Replace the default "unknown" disconnect reason with an explicitly assigned string and document that all hub loop exits set it before use
- Add an allow attribute for unused assignments to avoid warnings around the deferred initialization pattern
## 2026-03-17 - 4.8.5 - fix(repo)
no changes to commit
## 2026-03-17 - 4.8.4 - fix(remoteingress-core)
prevent stream stalls by guaranteeing flow-control updates and avoiding bounded per-stream channel overflows
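
The 4.8.10 and 4.8.11 entries above describe the same underlying pattern: every await on a bounded tunnel channel is raced against a cancellation token, so a handler can never block forever on a send whose consumer has gone away. A minimal sketch of that pattern, assuming a bounded tokio mpsc channel and a tokio_util CancellationToken (the helper name `send_frame_or_cancel` is illustrative only; the real code inlines the select! at each send site):

```rust
// Minimal sketch of a cancellation-aware channel send. The helper is
// illustrative; the edge and hub code inline this pattern at every send site.
use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;

/// Returns true if the frame was delivered, false if the channel is closed
/// or the token was cancelled while waiting for channel capacity.
async fn send_frame_or_cancel(
    tx: &mpsc::Sender<Vec<u8>>,
    frame: Vec<u8>,
    token: &CancellationToken,
) -> bool {
    tokio::select! {
        // send().await only resolves once capacity is available, so without
        // the cancellation arm a dead consumer can park this task forever.
        result = tx.send(frame) => result.is_ok(),
        _ = token.cancelled() => false,
    }
}
```

A `false` result is treated the same as a closed channel: the caller breaks out of its forwarding loop, which is exactly what the edge and hub hunks below do inline.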


@@ -1,6 +1,6 @@
{
"name": "@serve.zone/remoteingress",
"version": "4.8.4",
"version": "4.8.12",
"private": false,
"description": "Edge ingress tunnel for DcRouter - accepts incoming TCP connections at network edge and tunnels them to DcRouter SmartProxy preserving client IP via PROXY protocol v1.",
"main": "dist_ts/index.js",


@@ -587,13 +587,23 @@ async fn connect_to_hub_and_run(
}
};
// Cleanup
// Cancel stream tokens FIRST so stream handlers exit immediately.
// If we TLS-shutdown first, stream handlers are stuck sending to dead channels
// for up to 2 seconds while the shutdown times out on a dead connection.
connection_token.cancel();
stun_handle.abort();
for (_, h) in port_listeners.drain() {
h.abort();
}
// Graceful TLS shutdown: send close_notify so the hub sees a clean disconnect.
// Stream handlers are already cancelled, so no new data is being produced.
let mut tls_stream = tunnel_io.into_inner();
let _ = tokio::time::timeout(
Duration::from_secs(2),
tls_stream.shutdown(),
).await;
result
}
@@ -731,7 +741,11 @@ async fn handle_client_connection(
// Send OPEN frame with PROXY v1 header via control channel
let proxy_header = build_proxy_v1_header(&client_ip, edge_ip, client_port, dest_port);
let open_frame = encode_frame(stream_id, FRAME_OPEN, proxy_header.as_bytes());
if tunnel_ctrl_tx.send(open_frame).await.is_err() {
let send_ok = tokio::select! {
result = tunnel_ctrl_tx.send(open_frame) => result.is_ok(),
_ = client_token.cancelled() => false,
};
if !send_ok {
return;
}
@@ -806,7 +820,10 @@ async fn handle_client_connection(
// Send final window update for any remaining consumed bytes
if consumed_since_update > 0 {
let frame = encode_window_update(stream_id, FRAME_WINDOW_UPDATE, consumed_since_update);
let _ = wu_tx.send(frame).await;
tokio::select! {
_ = wu_tx.send(frame) => {}
_ = hub_to_client_token.cancelled() => {}
}
}
let _ = client_write.shutdown().await;
});
@@ -858,10 +875,11 @@ async fn handle_client_connection(
send_window.fetch_sub(n as u32, Ordering::Release);
encode_frame_header(&mut buf, stream_id, FRAME_DATA, n);
let data_frame = buf[..FRAME_HEADER_SIZE + n].to_vec();
if tunnel_data_tx.send(data_frame).await.is_err() {
log::warn!("Stream {} data channel closed, closing", stream_id);
break;
}
let sent = tokio::select! {
result = tunnel_data_tx.send(data_frame) => result.is_ok(),
_ = client_token.cancelled() => false,
};
if !sent { break; }
}
Err(_) => break,
}
@@ -882,9 +900,13 @@ async fn handle_client_connection(
).await;
// NOW send CLOSE — the response has been fully delivered (or timed out).
// select! with cancellation guard prevents indefinite blocking if tunnel dies.
if !client_token.is_cancelled() {
let close_frame = encode_frame(stream_id, FRAME_CLOSE, &[]);
let _ = tunnel_data_tx.send(close_frame).await;
tokio::select! {
_ = tunnel_data_tx.send(close_frame) => {}
_ = client_token.cancelled() => {}
}
}
// Clean up
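
Both cleanup paths in this release (the edge side above, the hub side below) follow the ordering spelled out in the comments: cancel the connection token and abort auxiliary tasks first, and only then perform a time-bounded graceful TLS shutdown. A condensed sketch of that ordering over a generic AsyncWrite stream (the `teardown` helper and its parameters are illustrative; the real code operates on the TunnelIo wrapper via `into_inner()`):

```rust
// Condensed sketch of the teardown ordering described in the diff above.
use std::time::Duration;
use tokio::io::{AsyncWrite, AsyncWriteExt};
use tokio_util::sync::CancellationToken;

async fn teardown<S: AsyncWrite + Unpin>(
    connection_token: &CancellationToken,
    aux_handles: Vec<tokio::task::JoinHandle<()>>,
    mut tls_stream: S,
) {
    // 1. Cancel first: stream handlers exit immediately instead of blocking
    //    on sends to channels whose reader is about to disappear.
    connection_token.cancel();
    for h in aux_handles {
        h.abort();
    }
    // 2. Then a graceful, time-bounded TLS shutdown. AsyncWriteExt::shutdown()
    //    flushes and closes the write side (close_notify on a TLS stream);
    //    the timeout caps teardown at 2 seconds when the peer is already dead.
    let _ = tokio::time::timeout(Duration::from_secs(2), tls_stream.shutdown()).await;
}
```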


@@ -136,7 +136,7 @@ struct ConnectedEdgeInfo {
peer_addr: String,
edge_stream_count: Arc<AtomicU32>,
config_tx: mpsc::Sender<EdgeConfigUpdate>,
#[allow(dead_code)] // kept alive for Drop — cancels child tokens when edge is removed
/// Used to cancel the old connection when an edge reconnects.
cancel_token: CancellationToken,
}
@@ -445,7 +445,10 @@ async fn handle_hub_frame(
// Send final window update for remaining consumed bytes
if consumed_since_update > 0 {
let frame = encode_window_update(stream_id, FRAME_WINDOW_UPDATE_BACK, consumed_since_update);
let _ = wub_tx.send(frame).await;
tokio::select! {
_ = wub_tx.send(frame) => {}
_ = writer_token.cancelled() => {}
}
}
let _ = up_write.shutdown().await;
});
@@ -498,10 +501,11 @@ async fn handle_hub_frame(
send_window.fetch_sub(n as u32, Ordering::Release);
encode_frame_header(&mut buf, stream_id, FRAME_DATA_BACK, n);
let frame = buf[..FRAME_HEADER_SIZE + n].to_vec();
if data_writer_tx.send(frame).await.is_err() {
log::warn!("Stream {} data channel closed, closing", stream_id);
break;
}
let sent = tokio::select! {
result = data_writer_tx.send(frame) => result.is_ok(),
_ = stream_token.cancelled() => false,
};
if !sent { break; }
}
Err(_) => break,
}
@@ -511,10 +515,13 @@ async fn handle_hub_frame(
}
// Send CLOSE_BACK via DATA channel (must arrive AFTER last DATA_BACK).
// Use send().await to guarantee delivery (try_send silently drops if full).
// select! with cancellation guard prevents indefinite blocking if tunnel dies.
if !stream_token.is_cancelled() {
let close_frame = encode_frame(stream_id, FRAME_CLOSE_BACK, &[]);
let _ = data_writer_tx.send(close_frame).await;
tokio::select! {
_ = data_writer_tx.send(close_frame) => {}
_ = stream_token.cancelled() => {}
}
}
writer_for_edge_data.abort();
@@ -525,15 +532,21 @@ async fn handle_hub_frame(
if let Err(e) = result {
log::error!("Stream {} error: {}", stream_id, e);
// Send CLOSE_BACK via DATA channel on error (must arrive after any DATA_BACK).
// Use send().await to guarantee delivery.
if !stream_token.is_cancelled() {
let close_frame = encode_frame(stream_id, FRAME_CLOSE_BACK, &[]);
let _ = data_writer_tx.send(close_frame).await;
tokio::select! {
_ = data_writer_tx.send(close_frame) => {}
_ = stream_token.cancelled() => {}
}
}
}
// Signal main loop to remove stream from the map
let _ = cleanup.send(stream_id).await;
// Signal main loop to remove stream from the map.
// Cancellation guard prevents indefinite blocking if cleanup channel is full.
tokio::select! {
_ = cleanup.send(stream_id) => {}
_ = stream_token.cancelled() => {}
}
stream_counter.fetch_sub(1, Ordering::Relaxed);
});
}
@@ -677,6 +690,13 @@ async fn handle_edge_connection(
{
let mut edges = connected.lock().await;
// If this edge already has an active connection (reconnect scenario),
// cancel the old connection so it shuts down immediately instead of
// lingering until TCP keepalive detects the dead socket.
if let Some(old) = edges.remove(&edge_id) {
log::info!("Edge {} reconnected, cancelling old connection", edge_id);
old.cancel_token.cancel();
}
edges.insert(
edge_id.clone(),
ConnectedEdgeInfo {
@@ -735,7 +755,9 @@ async fn handle_edge_connection(
// Single-owner I/O engine — no tokio::io::split, no mutex
let mut tunnel_io = remoteingress_protocol::TunnelIo::new(tls_stream, Vec::new());
let mut disconnect_reason = "unknown".to_string();
// Assigned in every break path of the hub_loop before use at the end.
#[allow(unused_assignments)]
let mut disconnect_reason = String::new();
'hub_loop: loop {
// Drain completed stream cleanups from spawned tasks
@@ -822,9 +844,19 @@ async fn handle_edge_connection(
}
}
// Cleanup: cancel edge token to propagate to all child tasks
// Cancel stream tokens FIRST so stream handlers exit immediately.
// If we TLS-shutdown first, stream handlers are stuck sending to dead channels
// for up to 2 seconds while the shutdown times out on a dead connection.
edge_token.cancel();
config_handle.abort();
// Graceful TLS shutdown: send close_notify so the edge sees a clean disconnect.
// Stream handlers are already cancelled, so no new data is being produced.
let mut tls_stream = tunnel_io.into_inner();
let _ = tokio::time::timeout(
Duration::from_secs(2),
tls_stream.shutdown(),
).await;
{
let mut edges = connected.lock().await;
edges.remove(&edge_id);
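
The registration block above is the substance of the 4.8.8 fix: the shared edge map is keyed by edge id, and registering a reconnected edge first removes and cancels any entry already stored under that id. A reduced sketch of that map handling (fields other than `cancel_token` are omitted and the `register_edge` helper is illustrative):

```rust
// Reduced sketch of the reconnect handling shown above.
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::Mutex;
use tokio_util::sync::CancellationToken;

struct ConnectedEdgeInfo {
    cancel_token: CancellationToken,
    // ... peer_addr, stream counter, config channel, etc.
}

async fn register_edge(
    connected: &Arc<Mutex<HashMap<String, ConnectedEdgeInfo>>>,
    edge_id: &str,
    info: ConnectedEdgeInfo,
) {
    let mut edges = connected.lock().await;
    // Reconnect scenario: drop the stale entry and cancel its token so the
    // old connection's tasks shut down now, not when TCP keepalive fires.
    if let Some(old) = edges.remove(edge_id) {
        old.cancel_token.cancel();
    }
    edges.insert(edge_id.to_string(), info);
}
```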


@@ -312,11 +312,12 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {
cancel_token: &tokio_util::sync::CancellationToken,
) -> Poll<TunnelEvent> {
// 1. WRITE: drain ctrl queue first, then data queue.
// TLS poll_write writes plaintext to session buffer (always Ready).
// Batch up to 16 frames per poll cycle.
// Only write when flush is complete — otherwise the TLS session buffer
// grows without bound (poll_write always returns Ready, buffering plaintext
// in the TLS session even when TCP can't keep up).
// Safe: `self.write` and `self.stream` are disjoint fields.
let mut writes = 0;
while self.write.has_work() && writes < 16 {
while self.write.has_work() && writes < 16 && !self.write.flush_needed {
let from_ctrl = !self.write.ctrl_queue.is_empty();
let frame = if from_ctrl {
self.write.ctrl_queue.front().unwrap()
@@ -424,10 +425,12 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {
return Poll::Ready(TunnelEvent::Cancelled);
}
// 6. SELF-WAKE: only when we have frames AND flush is done.
// 6. SELF-WAKE: only when flush is complete AND we have work.
// If flush is pending, the TCP write-readiness waker will notify us.
// If we got new channel frames, wake to write them.
if got_new || (!self.write.flush_needed && self.write.has_work()) {
// CRITICAL: do NOT self-wake when flush_needed — this causes unbounded
// TLS session buffer growth (poll_write always accepts plaintext, but TCP
// can't drain it fast enough → OOM → process killed → ECONNRESET).
if !self.write.flush_needed && (got_new || self.write.has_work()) {
cx.waker().wake_by_ref();
}
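
The two hunks above carry the core of the 4.8.12 backpressure fix: while a flush is pending, the I/O engine neither writes new frames nor self-wakes, because a TLS poll_write happily buffers plaintext even when the underlying TCP socket cannot drain it. A simplified sketch of that flush gate over a generic AsyncWrite (partial writes, the read path, and cancellation are elided; `WriteState` and `pump_writes` are illustrative names, not the crate's API):

```rust
// Simplified sketch of the flush-gated write loop described above.
use std::collections::VecDeque;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::AsyncWrite;

/// Queued outbound frames plus the flush gate. Illustrative reduction of the
/// write-side state; partial-write bookkeeping is omitted for brevity.
struct WriteState {
    ctrl_queue: VecDeque<Vec<u8>>,
    data_queue: VecDeque<Vec<u8>>,
    flush_needed: bool,
}

impl WriteState {
    fn has_work(&self) -> bool {
        !self.ctrl_queue.is_empty() || !self.data_queue.is_empty()
    }
}

fn pump_writes<S: AsyncWrite + Unpin>(
    stream: &mut S,
    write: &mut WriteState,
    cx: &mut Context<'_>,
) {
    // Write at most 16 frames per poll, and only while no flush is pending.
    // On a TLS stream, poll_write just buffers plaintext and returns Ready,
    // so the flush gate is what actually bounds memory under backpressure.
    let mut writes = 0;
    while write.has_work() && writes < 16 && !write.flush_needed {
        // Control frames take priority over data frames (cloned for brevity).
        let frame = if !write.ctrl_queue.is_empty() {
            write.ctrl_queue.front().unwrap().clone()
        } else {
            write.data_queue.front().unwrap().clone()
        };
        match Pin::new(&mut *stream).poll_write(cx, &frame) {
            Poll::Ready(Ok(_)) => {
                if !write.ctrl_queue.is_empty() {
                    write.ctrl_queue.pop_front();
                } else {
                    write.data_queue.pop_front();
                }
                writes += 1;
            }
            Poll::Ready(Err(_)) | Poll::Pending => break,
        }
    }
    // After a batch of writes, a flush must complete before writing more.
    if writes > 0 {
        write.flush_needed = true;
    }
    // Drive the flush; only clear the gate once it actually completes.
    if write.flush_needed {
        if let Poll::Ready(Ok(())) = Pin::new(&mut *stream).poll_flush(cx) {
            write.flush_needed = false;
        }
    }
    // Self-wake only when the flush is done and frames are still queued.
    // Never self-wake while flush_needed: that loop is what let the TLS
    // session buffer grow without bound and eventually OOM the process.
    if !write.flush_needed && write.has_work() {
        cx.waker().wake_by_ref();
    }
}
```

With the gate in place, backpressure propagates back to the channel senders (which is why the cancellation-guarded sends above matter) instead of inflating the TLS session buffer.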

test/test.loadtest.node.ts (new file, 393 lines)

@@ -0,0 +1,393 @@
import { expect, tap } from '@push.rocks/tapbundle';
import * as net from 'net';
import * as stream from 'stream';
import * as crypto from 'crypto';
import { RemoteIngressHub, RemoteIngressEdge } from '../ts/index.js';
// ---------------------------------------------------------------------------
// Helpers (self-contained — same patterns as test.flowcontrol.node.ts)
// ---------------------------------------------------------------------------
async function findFreePorts(count: number): Promise<number[]> {
const servers: net.Server[] = [];
const ports: number[] = [];
for (let i = 0; i < count; i++) {
const server = net.createServer();
await new Promise<void>((resolve) => server.listen(0, '127.0.0.1', resolve));
ports.push((server.address() as net.AddressInfo).port);
servers.push(server);
}
await Promise.all(servers.map((s) => new Promise<void>((resolve) => s.close(() => resolve()))));
return ports;
}
type TrackingServer = net.Server & { destroyAll: () => void };
function startEchoServer(port: number, host: string): Promise<TrackingServer> {
return new Promise((resolve, reject) => {
const connections = new Set<net.Socket>();
const server = net.createServer((socket) => {
connections.add(socket);
socket.on('close', () => connections.delete(socket));
let proxyHeaderParsed = false;
let pendingBuf = Buffer.alloc(0);
socket.on('data', (data: Buffer) => {
if (!proxyHeaderParsed) {
pendingBuf = Buffer.concat([pendingBuf, data]);
const idx = pendingBuf.indexOf('\r\n');
if (idx !== -1) {
proxyHeaderParsed = true;
const remainder = pendingBuf.subarray(idx + 2);
if (remainder.length > 0) socket.write(remainder);
}
return;
}
socket.write(data);
});
socket.on('error', () => {});
}) as TrackingServer;
server.destroyAll = () => {
for (const conn of connections) conn.destroy();
connections.clear();
};
server.on('error', reject);
server.listen(port, host, () => resolve(server));
});
}
function sendAndReceive(port: number, data: Buffer, timeoutMs = 30000): Promise<Buffer> {
return new Promise((resolve, reject) => {
const chunks: Buffer[] = [];
let totalReceived = 0;
const expectedLength = data.length;
let settled = false;
const client = net.createConnection({ host: '127.0.0.1', port }, () => {
client.write(data);
client.end();
});
const timer = setTimeout(() => {
if (!settled) {
settled = true;
client.destroy();
reject(new Error(`Timeout after ${timeoutMs}ms — received ${totalReceived}/${expectedLength} bytes`));
}
}, timeoutMs);
client.on('data', (chunk: Buffer) => {
chunks.push(chunk);
totalReceived += chunk.length;
if (totalReceived >= expectedLength && !settled) {
settled = true;
clearTimeout(timer);
client.destroy();
resolve(Buffer.concat(chunks));
}
});
client.on('end', () => {
if (!settled) {
settled = true;
clearTimeout(timer);
resolve(Buffer.concat(chunks));
}
});
client.on('error', (err) => {
if (!settled) {
settled = true;
clearTimeout(timer);
reject(err);
}
});
});
}
function sha256(buf: Buffer): string {
return crypto.createHash('sha256').update(buf).digest('hex');
}
// ---------------------------------------------------------------------------
// Throttle Proxy: rate-limits TCP traffic between edge and hub
// ---------------------------------------------------------------------------
class ThrottleTransform extends stream.Transform {
private bytesPerSec: number;
private bucket: number;
private lastRefill: number;
private destroyed_: boolean = false;
constructor(bytesPerSecond: number) {
super();
this.bytesPerSec = bytesPerSecond;
this.bucket = bytesPerSecond;
this.lastRefill = Date.now();
}
_transform(chunk: Buffer, _encoding: BufferEncoding, callback: stream.TransformCallback) {
if (this.destroyed_) return;
const now = Date.now();
const elapsed = (now - this.lastRefill) / 1000;
this.bucket = Math.min(this.bytesPerSec, this.bucket + elapsed * this.bytesPerSec);
this.lastRefill = now;
if (chunk.length <= this.bucket) {
this.bucket -= chunk.length;
callback(null, chunk);
} else {
// Not enough budget — delay the entire chunk (don't split)
const deficit = chunk.length - this.bucket;
this.bucket = 0;
const delayMs = Math.min((deficit / this.bytesPerSec) * 1000, 1000);
setTimeout(() => {
if (this.destroyed_) return;
this.lastRefill = Date.now();
this.bucket = 0;
callback(null, chunk);
}, delayMs);
}
}
_destroy(err: Error | null, callback: (error: Error | null) => void) {
this.destroyed_ = true;
callback(err);
}
}
interface ThrottleProxy {
server: net.Server;
close: () => Promise<void>;
}
async function startThrottleProxy(
listenPort: number,
targetHost: string,
targetPort: number,
bytesPerSecond: number,
): Promise<ThrottleProxy> {
const connections = new Set<net.Socket>();
const server = net.createServer((clientSock) => {
connections.add(clientSock);
const upstream = net.createConnection({ host: targetHost, port: targetPort });
connections.add(upstream);
const throttleUp = new ThrottleTransform(bytesPerSecond);
const throttleDown = new ThrottleTransform(bytesPerSecond);
clientSock.pipe(throttleUp).pipe(upstream);
upstream.pipe(throttleDown).pipe(clientSock);
const cleanup = () => {
throttleUp.destroy();
throttleDown.destroy();
clientSock.destroy();
upstream.destroy();
connections.delete(clientSock);
connections.delete(upstream);
};
clientSock.on('error', cleanup);
upstream.on('error', cleanup);
throttleUp.on('error', cleanup);
throttleDown.on('error', cleanup);
clientSock.on('close', cleanup);
upstream.on('close', cleanup);
});
await new Promise<void>((resolve) => server.listen(listenPort, '127.0.0.1', resolve));
return {
server,
close: async () => {
for (const c of connections) c.destroy();
connections.clear();
await new Promise<void>((resolve) => server.close(() => resolve()));
},
};
}
// ---------------------------------------------------------------------------
// Test state
// ---------------------------------------------------------------------------
let hub: RemoteIngressHub;
let edge: RemoteIngressEdge;
let echoServer: TrackingServer;
let throttle: ThrottleProxy;
let hubPort: number;
let proxyPort: number;
let edgePort: number;
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
tap.test('setup: start throttled tunnel (20 Mbit/s)', async () => {
[hubPort, proxyPort, edgePort] = await findFreePorts(3);
echoServer = await startEchoServer(edgePort, '127.0.0.2');
// Throttle proxy: edge → proxy → hub at 20 Mbit/s (2.5 MB/s)
throttle = await startThrottleProxy(proxyPort, '127.0.0.1', hubPort, 2.5 * 1024 * 1024);
hub = new RemoteIngressHub();
edge = new RemoteIngressEdge();
await hub.start({ tunnelPort: hubPort, targetHost: '127.0.0.2' });
await hub.updateAllowedEdges([
{ id: 'test-edge', secret: 'test-secret', listenPorts: [edgePort] },
]);
const connectedPromise = new Promise<void>((resolve, reject) => {
const timeout = setTimeout(() => reject(new Error('Edge did not connect within 10s')), 10000);
edge.once('tunnelConnected', () => {
clearTimeout(timeout);
resolve();
});
});
// Edge connects to proxy, not hub directly
await edge.start({
hubHost: '127.0.0.1',
hubPort: proxyPort,
edgeId: 'test-edge',
secret: 'test-secret',
bindAddress: '127.0.0.1',
});
await connectedPromise;
await new Promise((resolve) => setTimeout(resolve, 500));
const status = await edge.getStatus();
expect(status.connected).toBeTrue();
});
tap.test('throttled: 10 streams x 50MB each through 2.5 MB/s tunnel', async () => {
const streamCount = 10;
const payloadSize = 50 * 1024 * 1024; // 50MB per stream = 500MB total round-trip
const promises = Array.from({ length: streamCount }, () => {
const data = crypto.randomBytes(payloadSize);
const hash = sha256(data);
return sendAndReceive(edgePort, data, 300000).then((received) => ({
sent: hash,
received: sha256(received),
sizeOk: received.length === payloadSize,
}));
});
const results = await Promise.all(promises);
const failures = results.filter((r) => !r.sizeOk || r.sent !== r.received);
expect(failures.length).toEqual(0);
const status = await edge.getStatus();
expect(status.connected).toBeTrue();
});
tap.test('throttled: slow consumer with 50MB does not kill other streams', async () => {
// Open a connection that creates massive download-direction backpressure:
// send 50MB but DON'T read the response — client TCP receive buffer fills
const slowSock = net.createConnection({ host: '127.0.0.1', port: edgePort });
await new Promise<void>((resolve) => slowSock.on('connect', resolve));
const slowData = crypto.randomBytes(50 * 1024 * 1024);
slowSock.write(slowData);
slowSock.end();
// Don't read — backpressure builds on the download path
// Wait for backpressure to develop
await new Promise((r) => setTimeout(r, 3000));
// Meanwhile, 10 normal echo streams with 50MB each must complete
const payload = crypto.randomBytes(50 * 1024 * 1024);
const hash = sha256(payload);
const promises = Array.from({ length: 10 }, () =>
sendAndReceive(edgePort, payload, 300000).then((r) => ({
hash: sha256(r),
sizeOk: r.length === payload.length,
}))
);
const results = await Promise.all(promises);
const failures = results.filter((r) => !r.sizeOk || r.hash !== hash);
expect(failures.length).toEqual(0);
// Tunnel still alive
const status = await edge.getStatus();
expect(status.connected).toBeTrue();
slowSock.destroy();
});
tap.test('throttled: rapid churn — 5 x 50MB long + 200 x 1MB short streams', async () => {
// 5 long streams (50MB each) running alongside 200 short streams (1MB each)
const longPayload = crypto.randomBytes(50 * 1024 * 1024);
const longHash = sha256(longPayload);
const longPromises = Array.from({ length: 5 }, () =>
sendAndReceive(edgePort, longPayload, 300000).then((r) => ({
hash: sha256(r),
sizeOk: r.length === longPayload.length,
}))
);
const shortPayload = crypto.randomBytes(1024 * 1024);
const shortHash = sha256(shortPayload);
const shortPromises = Array.from({ length: 200 }, () =>
sendAndReceive(edgePort, shortPayload, 300000).then((r) => ({
hash: sha256(r),
sizeOk: r.length === shortPayload.length,
}))
);
const [longResults, shortResults] = await Promise.all([
Promise.all(longPromises),
Promise.all(shortPromises),
]);
const longFails = longResults.filter((r) => !r.sizeOk || r.hash !== longHash);
const shortFails = shortResults.filter((r) => !r.sizeOk || r.hash !== shortHash);
expect(longFails.length).toEqual(0);
expect(shortFails.length).toEqual(0);
const status = await edge.getStatus();
expect(status.connected).toBeTrue();
});
tap.test('throttled: 5 burst waves of 20 streams x 50MB each', async () => {
for (let wave = 0; wave < 5; wave++) {
const streamCount = 20;
const payloadSize = 50 * 1024 * 1024; // 50MB per stream = 1GB per wave
const promises = Array.from({ length: streamCount }, () => {
const data = crypto.randomBytes(payloadSize);
return sendAndReceive(edgePort, data, 300000).then((r) => r.length === payloadSize);
});
const results = await Promise.all(promises);
const ok = results.filter(Boolean).length;
expect(ok).toEqual(streamCount);
// Brief pause between waves
await new Promise((r) => setTimeout(r, 500));
const status = await edge.getStatus();
expect(status.connected).toBeTrue();
}
});
tap.test('throttled: tunnel still works after all load tests', async () => {
const data = crypto.randomBytes(1024);
const hash = sha256(data);
const received = await sendAndReceive(edgePort, data, 30000);
expect(sha256(received)).toEqual(hash);
const status = await edge.getStatus();
expect(status.connected).toBeTrue();
});
tap.test('teardown: stop tunnel', async () => {
await edge.stop();
await hub.stop();
if (throttle) await throttle.close();
await new Promise<void>((resolve) => echoServer.close(() => resolve()));
});
export default tap.start();


@@ -3,6 +3,6 @@
*/
export const commitinfo = {
name: '@serve.zone/remoteingress',
version: '4.8.4',
version: '4.8.12',
description: 'Edge ingress tunnel for DcRouter - accepts incoming TCP connections at network edge and tunnels them to DcRouter SmartProxy preserving client IP via PROXY protocol v1.'
}