Compare commits

17 Commits

| SHA1 |
|---|
| e8d429f117 |
| 3c2299430a |
| 8b5df9a0b7 |
| 236d6d16ee |
| 81bbb33016 |
| 79af6fd425 |
| f71b2f1876 |
| 0161a2589c |
| bfd9e58b4f |
| 9a8760c18d |
| c77caa89fc |
| 04586aab39 |
| f9a739858d |
| da01fbeecd |
| 264e8eeb97 |
| 9922c3b020 |
| 38cde37cff |
49  changelog.md
@@ -1,5 +1,54 @@
# Changelog

## 2026-03-17 - 4.8.13 - fix(remoteingress-protocol)
require a flush after each written frame to bound TLS buffer growth

- Remove the unflushed byte threshold and stop queueing additional writes while a flush is pending
- Simplify write and flush error logging after dropping unflushed byte tracking
- Update tunnel I/O comments to reflect the stricter flush behavior that avoids OOM and connection resets

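A minimal sketch of the discipline this entry describes, written against plain `tokio` I/O rather than the crate's poll-based `TunnelIo` engine; `write_frame` is an illustrative name, not the crate's API:

```rust
use tokio::io::{AsyncWrite, AsyncWriteExt};

/// Write a single frame, then flush before the caller may submit the next one.
async fn write_frame<W: AsyncWrite + Unpin>(w: &mut W, frame: &[u8]) -> std::io::Result<()> {
    w.write_all(frame).await?;
    // With TLS, write_all only moves plaintext into the in-memory session
    // buffer; without this flush that buffer can grow without bound under load.
    w.flush().await
}
```
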
## 2026-03-17 - 4.8.12 - fix(tunnel)
prevent tunnel backpressure buffering from exhausting memory and cancel stream handlers before TLS shutdown

- stop self-waking and writing new frames while a flush is pending to avoid unbounded TLS session buffer growth under load
- reorder edge and hub shutdown cleanup so stream cancellation happens before TLS close_notify, preventing handlers from blocking on dead channels
- add load tests covering sustained large transfers, burst traffic, and rapid stream churn to verify tunnel stability

## 2026-03-17 - 4.8.11 - fix(remoteingress-core)
stop data frame send loops promptly when stream cancellation is triggered

- Use cancellation-aware tokio::select! around data channel sends in both edge and hub stream forwarding paths
- Prevent stalled or noisy shutdown behavior when stream or client cancellation happens while awaiting frame delivery

## 2026-03-17 - 4.8.10 - fix(remoteingress-core)
guard tunnel frame sends with cancellation to prevent async send deadlocks

- Wrap OPEN, CLOSE, CLOSE_BACK, WINDOW_UPDATE, and cleanup channel sends in cancellation-aware tokio::select! blocks.
- Avoid indefinite blocking when tunnel, stream, or writer tasks are cancelled while awaiting channel capacity.
- Improve shutdown reliability for edge and hub stream handling under tunnel failure conditions.

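The pattern shared by this entry and 4.8.11, distilled into a hedged sketch; `send_or_cancel` is an illustrative helper, not the crate's API:

```rust
use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;

/// Await channel capacity, but bail out if the task is cancelled first.
/// Returns false when the send failed or cancellation won the race.
async fn send_or_cancel<T>(tx: &mpsc::Sender<T>, frame: T, token: &CancellationToken) -> bool {
    tokio::select! {
        result = tx.send(frame) => result.is_ok(),
        _ = token.cancelled() => false,
    }
}
```
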
## 2026-03-17 - 4.8.9 - fix(repo)
no changes to commit

## 2026-03-17 - 4.8.8 - fix(remoteingress-core)
cancel stale edge connections when an edge reconnects

- Remove any existing edge entry before registering a reconnected edge
- Trigger the previous connection's cancellation token so stale sessions shut down immediately instead of waiting for TCP keepalive

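A distilled sketch of the reconnect handling, where the `EdgeEntry` struct stands in for the crate's `ConnectedEdgeInfo`:

```rust
use std::collections::HashMap;
use tokio_util::sync::CancellationToken;

struct EdgeEntry {
    cancel_token: CancellationToken,
}

/// Replace any stale entry for `edge_id`, cancelling its token so the old
/// session shuts down immediately instead of waiting for TCP keepalive.
fn register_edge(edges: &mut HashMap<String, EdgeEntry>, edge_id: String, entry: EdgeEntry) {
    if let Some(old) = edges.remove(&edge_id) {
        old.cancel_token.cancel();
    }
    edges.insert(edge_id, entry);
}
```
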
## 2026-03-17 - 4.8.7 - fix(remoteingress-core)
perform graceful TLS shutdown on edge and hub tunnel streams

- Send TLS close_notify before cleanup to avoid peer disconnect warnings on both tunnel endpoints
- Wrap stream shutdown in a 2 second timeout so connection teardown does not block cleanup

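The teardown in a hedged, generic form; with tokio-rustls streams, `shutdown()` is what drives the TLS close_notify:

```rust
use std::time::Duration;
use tokio::io::{AsyncWrite, AsyncWriteExt};

/// Graceful shutdown bounded to 2 seconds so teardown never blocks cleanup;
/// the result is ignored because a dead peer simply times out.
async fn shutdown_with_timeout<S: AsyncWrite + Unpin>(stream: &mut S) {
    let _ = tokio::time::timeout(Duration::from_secs(2), stream.shutdown()).await;
}
```
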
## 2026-03-17 - 4.8.6 - fix(remoteingress-core)
initialize disconnect reason only when set in hub loop break paths

- Replace the default "unknown" disconnect reason with an explicitly assigned string and document that all hub loop exits set it before use
- Add an allow attribute for unused assignments to avoid warnings around the deferred initialization pattern

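A toy example of the deferred-initialization pattern this entry settles on; the loop body here is hypothetical:

```rust
/// Every break path assigns the reason before the loop exits, so the
/// initializer is intentionally never read; the allow silences that warning.
#[allow(unused_assignments)]
fn disconnect_demo() -> String {
    let mut disconnect_reason = String::new();
    loop {
        disconnect_reason = "peer closed".to_string();
        break;
    }
    disconnect_reason
}
```
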
## 2026-03-17 - 4.8.5 - fix(repo)
no changes to commit

package.json

@@ -1,6 +1,6 @@
 {
   "name": "@serve.zone/remoteingress",
-  "version": "4.8.5",
+  "version": "4.8.13",
   "private": false,
   "description": "Edge ingress tunnel for DcRouter - accepts incoming TCP connections at network edge and tunnels them to DcRouter SmartProxy preserving client IP via PROXY protocol v1.",
   "main": "dist_ts/index.js",
@@ -519,6 +519,7 @@ async fn connect_to_hub_and_run(
    // Single-owner I/O engine — no tokio::io::split, no mutex
    let mut tunnel_io = remoteingress_protocol::TunnelIo::new(tls_stream, Vec::new());

    let liveness_timeout_dur = Duration::from_secs(45);
    let mut last_activity = Instant::now();
    let mut liveness_deadline = Box::pin(sleep_until(last_activity + liveness_timeout_dur));
@@ -587,13 +588,23 @@ async fn connect_to_hub_and_run(
        }
    };

-   // Cleanup
+   // Cancel stream tokens FIRST so stream handlers exit immediately.
+   // If we TLS-shutdown first, stream handlers are stuck sending to dead channels
+   // for up to 2 seconds while the shutdown times out on a dead connection.
    connection_token.cancel();
    stun_handle.abort();
    for (_, h) in port_listeners.drain() {
        h.abort();
    }

+   // Graceful TLS shutdown: send close_notify so the hub sees a clean disconnect.
+   // Stream handlers are already cancelled, so no new data is being produced.
+   let mut tls_stream = tunnel_io.into_inner();
+   let _ = tokio::time::timeout(
+       Duration::from_secs(2),
+       tls_stream.shutdown(),
+   ).await;

    result
}
@@ -731,7 +742,11 @@ async fn handle_client_connection(
    // Send OPEN frame with PROXY v1 header via control channel
    let proxy_header = build_proxy_v1_header(&client_ip, edge_ip, client_port, dest_port);
    let open_frame = encode_frame(stream_id, FRAME_OPEN, proxy_header.as_bytes());
-   if tunnel_ctrl_tx.send(open_frame).await.is_err() {
+   let send_ok = tokio::select! {
+       result = tunnel_ctrl_tx.send(open_frame) => result.is_ok(),
+       _ = client_token.cancelled() => false,
+   };
+   if !send_ok {
        return;
    }
@@ -806,7 +821,10 @@ async fn handle_client_connection(
        // Send final window update for any remaining consumed bytes
        if consumed_since_update > 0 {
            let frame = encode_window_update(stream_id, FRAME_WINDOW_UPDATE, consumed_since_update);
-           let _ = wu_tx.send(frame).await;
+           tokio::select! {
+               _ = wu_tx.send(frame) => {}
+               _ = hub_to_client_token.cancelled() => {}
+           }
        }
        let _ = client_write.shutdown().await;
    });
@@ -858,10 +876,11 @@ async fn handle_client_connection(
                send_window.fetch_sub(n as u32, Ordering::Release);
                encode_frame_header(&mut buf, stream_id, FRAME_DATA, n);
                let data_frame = buf[..FRAME_HEADER_SIZE + n].to_vec();
-               if tunnel_data_tx.send(data_frame).await.is_err() {
-                   log::warn!("Stream {} data channel closed, closing", stream_id);
-                   break;
-               }
+               let sent = tokio::select! {
+                   result = tunnel_data_tx.send(data_frame) => result.is_ok(),
+                   _ = client_token.cancelled() => false,
+               };
+               if !sent { break; }
            }
            Err(_) => break,
        }
@@ -882,9 +901,13 @@ async fn handle_client_connection(
    ).await;

    // NOW send CLOSE — the response has been fully delivered (or timed out).
+   // select! with cancellation guard prevents indefinite blocking if tunnel dies.
    if !client_token.is_cancelled() {
        let close_frame = encode_frame(stream_id, FRAME_CLOSE, &[]);
-       let _ = tunnel_data_tx.send(close_frame).await;
+       tokio::select! {
+           _ = tunnel_data_tx.send(close_frame) => {}
+           _ = client_token.cancelled() => {}
+       }
    }

    // Clean up

@@ -136,7 +136,7 @@ struct ConnectedEdgeInfo {
    peer_addr: String,
    edge_stream_count: Arc<AtomicU32>,
    config_tx: mpsc::Sender<EdgeConfigUpdate>,
-   #[allow(dead_code)] // kept alive for Drop — cancels child tokens when edge is removed
+   /// Used to cancel the old connection when an edge reconnects.
    cancel_token: CancellationToken,
}

@@ -445,7 +445,10 @@ async fn handle_hub_frame(
        // Send final window update for remaining consumed bytes
        if consumed_since_update > 0 {
            let frame = encode_window_update(stream_id, FRAME_WINDOW_UPDATE_BACK, consumed_since_update);
-           let _ = wub_tx.send(frame).await;
+           tokio::select! {
+               _ = wub_tx.send(frame) => {}
+               _ = writer_token.cancelled() => {}
+           }
        }
        let _ = up_write.shutdown().await;
    });
@@ -498,10 +501,11 @@ async fn handle_hub_frame(
                send_window.fetch_sub(n as u32, Ordering::Release);
                encode_frame_header(&mut buf, stream_id, FRAME_DATA_BACK, n);
                let frame = buf[..FRAME_HEADER_SIZE + n].to_vec();
-               if data_writer_tx.send(frame).await.is_err() {
-                   log::warn!("Stream {} data channel closed, closing", stream_id);
-                   break;
-               }
+               let sent = tokio::select! {
+                   result = data_writer_tx.send(frame) => result.is_ok(),
+                   _ = stream_token.cancelled() => false,
+               };
+               if !sent { break; }
            }
            Err(_) => break,
        }
@@ -511,10 +515,13 @@ async fn handle_hub_frame(
    }

    // Send CLOSE_BACK via DATA channel (must arrive AFTER last DATA_BACK).
    // Use send().await to guarantee delivery (try_send silently drops if full).
+   // select! with cancellation guard prevents indefinite blocking if tunnel dies.
    if !stream_token.is_cancelled() {
        let close_frame = encode_frame(stream_id, FRAME_CLOSE_BACK, &[]);
-       let _ = data_writer_tx.send(close_frame).await;
+       tokio::select! {
+           _ = data_writer_tx.send(close_frame) => {}
+           _ = stream_token.cancelled() => {}
+       }
    }

    writer_for_edge_data.abort();
@@ -525,15 +532,21 @@ async fn handle_hub_frame(
    if let Err(e) = result {
        log::error!("Stream {} error: {}", stream_id, e);
        // Send CLOSE_BACK via DATA channel on error (must arrive after any DATA_BACK).
        // Use send().await to guarantee delivery.
        if !stream_token.is_cancelled() {
            let close_frame = encode_frame(stream_id, FRAME_CLOSE_BACK, &[]);
-           let _ = data_writer_tx.send(close_frame).await;
+           tokio::select! {
+               _ = data_writer_tx.send(close_frame) => {}
+               _ = stream_token.cancelled() => {}
+           }
        }
    }

-   // Signal main loop to remove stream from the map
-   let _ = cleanup.send(stream_id).await;
+   // Signal main loop to remove stream from the map.
+   // Cancellation guard prevents indefinite blocking if cleanup channel is full.
+   tokio::select! {
+       _ = cleanup.send(stream_id) => {}
+       _ = stream_token.cancelled() => {}
+   }
    stream_counter.fetch_sub(1, Ordering::Relaxed);
});
}
@@ -677,6 +690,13 @@ async fn handle_edge_connection(

    {
        let mut edges = connected.lock().await;
+       // If this edge already has an active connection (reconnect scenario),
+       // cancel the old connection so it shuts down immediately instead of
+       // lingering until TCP keepalive detects the dead socket.
+       if let Some(old) = edges.remove(&edge_id) {
+           log::info!("Edge {} reconnected, cancelling old connection", edge_id);
+           old.cancel_token.cancel();
+       }
        edges.insert(
            edge_id.clone(),
            ConnectedEdgeInfo {
@@ -735,7 +755,10 @@ async fn handle_edge_connection(
    // Single-owner I/O engine — no tokio::io::split, no mutex
    let mut tunnel_io = remoteingress_protocol::TunnelIo::new(tls_stream, Vec::new());

-   let mut disconnect_reason = "unknown".to_string();
+   // Assigned in every break path of the hub_loop before use at the end.
+   #[allow(unused_assignments)]
+   let mut disconnect_reason = String::new();

    'hub_loop: loop {
        // Drain completed stream cleanups from spawned tasks
@@ -822,9 +845,19 @@ async fn handle_edge_connection(
            }
        }

-   // Cleanup: cancel edge token to propagate to all child tasks
+   // Cancel stream tokens FIRST so stream handlers exit immediately.
+   // If we TLS-shutdown first, stream handlers are stuck sending to dead channels
+   // for up to 2 seconds while the shutdown times out on a dead connection.
    edge_token.cancel();
    config_handle.abort();

+   // Graceful TLS shutdown: send close_notify so the edge sees a clean disconnect.
+   // Stream handlers are already cancelled, so no new data is being produced.
+   let mut tls_stream = tunnel_io.into_inner();
+   let _ = tokio::time::timeout(
+       Duration::from_secs(2),
+       tls_stream.shutdown(),
+   ).await;
    {
        let mut edges = connected.lock().await;
        edges.remove(&edge_id);

@@ -312,11 +312,11 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {
        cancel_token: &tokio_util::sync::CancellationToken,
    ) -> Poll<TunnelEvent> {
        // 1. WRITE: drain ctrl queue first, then data queue.
-       // TLS poll_write writes plaintext to session buffer (always Ready).
-       // Batch up to 16 frames per poll cycle.
+       // Write one frame, set flush_needed, then flush must complete before
+       // writing more. This prevents unbounded TLS session buffer growth.
        // Safe: `self.write` and `self.stream` are disjoint fields.
        let mut writes = 0;
-       while self.write.has_work() && writes < 16 {
+       while self.write.has_work() && writes < 16 && !self.write.flush_needed {
            let from_ctrl = !self.write.ctrl_queue.is_empty();
            let frame = if from_ctrl {
                self.write.ctrl_queue.front().unwrap()
@@ -327,6 +327,8 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {
            match Pin::new(&mut self.stream).poll_write(cx, remaining) {
                Poll::Ready(Ok(0)) => {
+                   log::error!("TunnelIo: poll_write returned 0 (write zero), ctrl_q={} data_q={}",
+                       self.write.ctrl_queue.len(), self.write.data_queue.len());
                    return Poll::Ready(TunnelEvent::WriteError(
                        std::io::Error::new(std::io::ErrorKind::WriteZero, "write zero"),
                    ));
@@ -341,7 +343,11 @@
                        writes += 1;
                    }
                }
-               Poll::Ready(Err(e)) => return Poll::Ready(TunnelEvent::WriteError(e)),
+               Poll::Ready(Err(e)) => {
+                   log::error!("TunnelIo: poll_write error: {} (ctrl_q={} data_q={})",
+                       e, self.write.ctrl_queue.len(), self.write.data_queue.len());
+                   return Poll::Ready(TunnelEvent::WriteError(e));
+               }
                Poll::Pending => break,
            }
        }
@@ -349,8 +355,13 @@
        // 2. FLUSH: push encrypted data from TLS session to TCP.
        if self.write.flush_needed {
            match Pin::new(&mut self.stream).poll_flush(cx) {
-               Poll::Ready(Ok(())) => self.write.flush_needed = false,
-               Poll::Ready(Err(e)) => return Poll::Ready(TunnelEvent::WriteError(e)),
+               Poll::Ready(Ok(())) => {
+                   self.write.flush_needed = false;
+               }
+               Poll::Ready(Err(e)) => {
+                   log::error!("TunnelIo: poll_flush error: {}", e);
+                   return Poll::Ready(TunnelEvent::WriteError(e));
+               }
                Poll::Pending => {} // TCP waker will notify us
            }
        }
@@ -386,12 +397,19 @@
                    // Partial data — loop to call poll_read again so the TCP
                    // waker is re-registered when it finally returns Pending.
                }
-               Poll::Ready(Err(e)) => return Poll::Ready(TunnelEvent::ReadError(e)),
+               Poll::Ready(Err(e)) => {
+                   log::error!("TunnelIo: poll_read error: {}", e);
+                   return Poll::Ready(TunnelEvent::ReadError(e));
+               }
                Poll::Pending => break,
            }
        }

-       // 4. CHANNELS: drain ctrl into ctrl_queue, data into data_queue.
+       // 4. CHANNELS: drain ctrl (always — priority), data (only if queue is small).
+       // Ctrl frames must never be delayed — always drain fully.
+       // Data frames are gated: keep data in the bounded channel for proper
+       // backpressure when TLS writes are slow. Without this gate, the internal
+       // data_queue (unbounded VecDeque) grows to hundreds of MB under throttle → OOM.
        let mut got_new = false;
        loop {
            match ctrl_rx.poll_recv(cx) {
@@ -404,15 +422,17 @@
                Poll::Pending => break,
            }
        }
-       loop {
-           match data_rx.poll_recv(cx) {
-               Poll::Ready(Some(frame)) => { self.write.data_queue.push_back(frame); got_new = true; }
-               Poll::Ready(None) => {
-                   return Poll::Ready(TunnelEvent::WriteError(
-                       std::io::Error::new(std::io::ErrorKind::BrokenPipe, "data channel closed"),
-                   ));
-               }
-               Poll::Pending => break,
-           }
-       }
+       if self.write.data_queue.len() < 64 {
+           loop {
+               match data_rx.poll_recv(cx) {
+                   Poll::Ready(Some(frame)) => { self.write.data_queue.push_back(frame); got_new = true; }
+                   Poll::Ready(None) => {
+                       return Poll::Ready(TunnelEvent::WriteError(
+                           std::io::Error::new(std::io::ErrorKind::BrokenPipe, "data channel closed"),
+                       ));
+                   }
+                   Poll::Pending => break,
+               }
+           }
+       }
@@ -424,10 +444,12 @@
            return Poll::Ready(TunnelEvent::Cancelled);
        }

-       // 6. SELF-WAKE: only when we have frames AND flush is done.
-       // If flush is pending, the TCP write-readiness waker will notify us.
-       // If we got new channel frames, wake to write them.
-       if got_new || (!self.write.flush_needed && self.write.has_work()) {
+       // 6. SELF-WAKE: only when flush is complete AND we have work.
+       // When flush is Pending, the TCP write-readiness waker will notify us.
+       // CRITICAL: do NOT self-wake when flush_needed — poll_write always returns
+       // Ready (TLS buffers in-memory), so self-waking causes a tight spin loop
+       // that fills the TLS session buffer unboundedly -> OOM -> ECONNRESET.
+       if !self.write.flush_needed && (got_new || self.write.has_work()) {
            cx.waker().wake_by_ref();
        }

402  test/test.loadtest.node.ts  Normal file
@@ -0,0 +1,402 @@
import { expect, tap } from '@push.rocks/tapbundle';
import * as net from 'net';
import * as stream from 'stream';
import * as crypto from 'crypto';
import { RemoteIngressHub, RemoteIngressEdge } from '../ts/index.js';

// ---------------------------------------------------------------------------
// Helpers (self-contained — same patterns as test.flowcontrol.node.ts)
// ---------------------------------------------------------------------------

async function findFreePorts(count: number): Promise<number[]> {
  const servers: net.Server[] = [];
  const ports: number[] = [];
  for (let i = 0; i < count; i++) {
    const server = net.createServer();
    await new Promise<void>((resolve) => server.listen(0, '127.0.0.1', resolve));
    ports.push((server.address() as net.AddressInfo).port);
    servers.push(server);
  }
  await Promise.all(servers.map((s) => new Promise<void>((resolve) => s.close(() => resolve()))));
  return ports;
}

type TrackingServer = net.Server & { destroyAll: () => void };

function startEchoServer(port: number, host: string): Promise<TrackingServer> {
  return new Promise((resolve, reject) => {
    const connections = new Set<net.Socket>();
    const server = net.createServer((socket) => {
      connections.add(socket);
      socket.on('close', () => connections.delete(socket));
      let proxyHeaderParsed = false;
      let pendingBuf = Buffer.alloc(0);
      socket.on('data', (data: Buffer) => {
        if (!proxyHeaderParsed) {
          pendingBuf = Buffer.concat([pendingBuf, data]);
          const idx = pendingBuf.indexOf('\r\n');
          if (idx !== -1) {
            proxyHeaderParsed = true;
            const remainder = pendingBuf.subarray(idx + 2);
            if (remainder.length > 0) socket.write(remainder);
          }
          return;
        }
        socket.write(data);
      });
      socket.on('error', () => {});
    }) as TrackingServer;
    server.destroyAll = () => {
      for (const conn of connections) conn.destroy();
      connections.clear();
    };
    server.on('error', reject);
    server.listen(port, host, () => resolve(server));
  });
}

function sendAndReceive(port: number, data: Buffer, timeoutMs = 30000): Promise<Buffer> {
  return new Promise((resolve, reject) => {
    const chunks: Buffer[] = [];
    let totalReceived = 0;
    const expectedLength = data.length;
    let settled = false;

    const client = net.createConnection({ host: '127.0.0.1', port }, () => {
      client.write(data);
      client.end();
    });

    const timer = setTimeout(() => {
      if (!settled) {
        settled = true;
        client.destroy();
        reject(new Error(`Timeout after ${timeoutMs}ms — received ${totalReceived}/${expectedLength} bytes`));
      }
    }, timeoutMs);

    client.on('data', (chunk: Buffer) => {
      chunks.push(chunk);
      totalReceived += chunk.length;
      if (totalReceived >= expectedLength && !settled) {
        settled = true;
        clearTimeout(timer);
        client.destroy();
        resolve(Buffer.concat(chunks));
      }
    });

    client.on('end', () => {
      if (!settled) {
        settled = true;
        clearTimeout(timer);
        resolve(Buffer.concat(chunks));
      }
    });

    client.on('error', (err) => {
      if (!settled) {
        settled = true;
        clearTimeout(timer);
        reject(err);
      }
    });
  });
}

function sha256(buf: Buffer): string {
  return crypto.createHash('sha256').update(buf).digest('hex');
}

// ---------------------------------------------------------------------------
// Throttle Proxy: rate-limits TCP traffic between edge and hub
// ---------------------------------------------------------------------------

class ThrottleTransform extends stream.Transform {
  private bytesPerSec: number;
  private bucket: number;
  private lastRefill: number;
  private destroyed_: boolean = false;

  constructor(bytesPerSecond: number) {
    super();
    this.bytesPerSec = bytesPerSecond;
    this.bucket = bytesPerSecond;
    this.lastRefill = Date.now();
  }

  _transform(chunk: Buffer, _encoding: BufferEncoding, callback: stream.TransformCallback) {
    if (this.destroyed_) return;

    const now = Date.now();
    const elapsed = (now - this.lastRefill) / 1000;
    this.bucket = Math.min(this.bytesPerSec, this.bucket + elapsed * this.bytesPerSec);
    this.lastRefill = now;

    if (chunk.length <= this.bucket) {
      this.bucket -= chunk.length;
      callback(null, chunk);
    } else {
      // Not enough budget — delay the entire chunk (don't split)
      const deficit = chunk.length - this.bucket;
      this.bucket = 0;
      const delayMs = Math.min((deficit / this.bytesPerSec) * 1000, 1000);
      setTimeout(() => {
        if (this.destroyed_) { callback(); return; }
        this.lastRefill = Date.now();
        this.bucket = 0;
        callback(null, chunk);
      }, delayMs);
    }
  }

  _destroy(err: Error | null, callback: (error: Error | null) => void) {
    this.destroyed_ = true;
    callback(err);
  }
}

interface ThrottleProxy {
  server: net.Server;
  close: () => Promise<void>;
}

async function startThrottleProxy(
  listenPort: number,
  targetHost: string,
  targetPort: number,
  bytesPerSecond: number,
): Promise<ThrottleProxy> {
  const connections = new Set<net.Socket>();
  const server = net.createServer((clientSock) => {
    connections.add(clientSock);
    const upstream = net.createConnection({ host: targetHost, port: targetPort });
    connections.add(upstream);

    const throttleUp = new ThrottleTransform(bytesPerSecond);
    const throttleDown = new ThrottleTransform(bytesPerSecond);

    clientSock.pipe(throttleUp).pipe(upstream);
    upstream.pipe(throttleDown).pipe(clientSock);

    let cleaned = false;
    const cleanup = (source: string, err?: Error) => {
      if (cleaned) return;
      cleaned = true;
      if (err) {
        console.error(`[ThrottleProxy] cleanup triggered by ${source}: ${err.message}`);
      } else {
        console.error(`[ThrottleProxy] cleanup triggered by ${source} (no error)`);
      }
      console.error(`[ThrottleProxy] stack:`, new Error().stack);
      throttleUp.destroy();
      throttleDown.destroy();
      clientSock.destroy();
      upstream.destroy();
      connections.delete(clientSock);
      connections.delete(upstream);
    };
    clientSock.on('error', (e) => cleanup('clientSock.error', e));
    upstream.on('error', (e) => cleanup('upstream.error', e));
    throttleUp.on('error', (e) => cleanup('throttleUp.error', e));
    throttleDown.on('error', (e) => cleanup('throttleDown.error', e));
    clientSock.on('close', () => cleanup('clientSock.close'));
    upstream.on('close', () => cleanup('upstream.close'));
  });

  await new Promise<void>((resolve) => server.listen(listenPort, '127.0.0.1', resolve));
  return {
    server,
    close: async () => {
      for (const c of connections) c.destroy();
      connections.clear();
      await new Promise<void>((resolve) => server.close(() => resolve()));
    },
  };
}

// ---------------------------------------------------------------------------
// Test state
// ---------------------------------------------------------------------------

let hub: RemoteIngressHub;
let edge: RemoteIngressEdge;
let echoServer: TrackingServer;
let throttle: ThrottleProxy;
let hubPort: number;
let proxyPort: number;
let edgePort: number;

// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------

tap.test('setup: start throttled tunnel (100 Mbit/s)', async () => {
  [hubPort, proxyPort, edgePort] = await findFreePorts(3);

  echoServer = await startEchoServer(edgePort, '127.0.0.2');

  // Throttle proxy: edge → proxy → hub at 100 Mbit/s (12.5 MB/s)
  throttle = await startThrottleProxy(proxyPort, '127.0.0.1', hubPort, 12.5 * 1024 * 1024);

  hub = new RemoteIngressHub();
  edge = new RemoteIngressEdge();

  await hub.start({ tunnelPort: hubPort, targetHost: '127.0.0.2' });
  await hub.updateAllowedEdges([
    { id: 'test-edge', secret: 'test-secret', listenPorts: [edgePort] },
  ]);

  const connectedPromise = new Promise<void>((resolve, reject) => {
    const timeout = setTimeout(() => reject(new Error('Edge did not connect within 10s')), 10000);
    edge.once('tunnelConnected', () => {
      clearTimeout(timeout);
      resolve();
    });
  });

  // Edge connects through throttle proxy
  await edge.start({
    hubHost: '127.0.0.1',
    hubPort: proxyPort,
    edgeId: 'test-edge',
    secret: 'test-secret',
    bindAddress: '127.0.0.1',
  });

  await connectedPromise;
  await new Promise((resolve) => setTimeout(resolve, 500));

  const status = await edge.getStatus();
  expect(status.connected).toBeTrue();
});

tap.test('throttled: 5 streams x 20MB each through 100Mbit tunnel', async () => {
  const streamCount = 5;
  const payloadSize = 20 * 1024 * 1024; // 20MB per stream = 100MB total round-trip

  const payloads = Array.from({ length: streamCount }, () => crypto.randomBytes(payloadSize));
  const promises = payloads.map((data) => {
    const hash = sha256(data);
    return sendAndReceive(edgePort, data, 300000).then((received) => ({
      sent: hash,
      received: sha256(received),
      sizeOk: received.length === payloadSize,
    }));
  });

  const results = await Promise.all(promises);
  const failures = results.filter((r) => !r.sizeOk || r.sent !== r.received);
  expect(failures.length).toEqual(0);

  const status = await edge.getStatus();
  expect(status.connected).toBeTrue();
});

tap.test('throttled: slow consumer with 20MB does not kill other streams', async () => {
  // Open a connection that creates download-direction backpressure:
  // send 20MB but DON'T read the response — client TCP receive buffer fills
  const slowSock = net.createConnection({ host: '127.0.0.1', port: edgePort });
  await new Promise<void>((resolve) => slowSock.on('connect', resolve));
  const slowData = crypto.randomBytes(20 * 1024 * 1024);
  slowSock.write(slowData);
  slowSock.end();
  // Don't read — backpressure builds on the download path

  // Wait for backpressure to develop
  await new Promise((r) => setTimeout(r, 2000));

  // Meanwhile, 5 normal echo streams with 20MB each must complete
  const payload = crypto.randomBytes(20 * 1024 * 1024);
  const hash = sha256(payload);
  const promises = Array.from({ length: 5 }, () =>
    sendAndReceive(edgePort, payload, 300000).then((r) => ({
      hash: sha256(r),
      sizeOk: r.length === payload.length,
    }))
  );
  const results = await Promise.all(promises);
  const failures = results.filter((r) => !r.sizeOk || r.hash !== hash);
  expect(failures.length).toEqual(0);

  // Tunnel still alive
  const status = await edge.getStatus();
  expect(status.connected).toBeTrue();

  slowSock.destroy();
});

tap.test('throttled: rapid churn — 3 x 20MB long + 50 x 1MB short streams', async () => {
  // 3 long streams (20MB each) running alongside 50 short streams (1MB each)
  const longPayload = crypto.randomBytes(20 * 1024 * 1024);
  const longHash = sha256(longPayload);
  const longPromises = Array.from({ length: 3 }, () =>
    sendAndReceive(edgePort, longPayload, 300000).then((r) => ({
      hash: sha256(r),
      sizeOk: r.length === longPayload.length,
    }))
  );

  const shortPayload = crypto.randomBytes(1024 * 1024);
  const shortHash = sha256(shortPayload);
  const shortPromises = Array.from({ length: 50 }, () =>
    sendAndReceive(edgePort, shortPayload, 300000).then((r) => ({
      hash: sha256(r),
      sizeOk: r.length === shortPayload.length,
    }))
  );

  const [longResults, shortResults] = await Promise.all([
    Promise.all(longPromises),
    Promise.all(shortPromises),
  ]);

  const longFails = longResults.filter((r) => !r.sizeOk || r.hash !== longHash);
  const shortFails = shortResults.filter((r) => !r.sizeOk || r.hash !== shortHash);
  expect(longFails.length).toEqual(0);
  expect(shortFails.length).toEqual(0);

  const status = await edge.getStatus();
  expect(status.connected).toBeTrue();
});

tap.test('throttled: 3 burst waves of 5 streams x 20MB each', async () => {
  for (let wave = 0; wave < 3; wave++) {
    const streamCount = 5;
    const payloadSize = 20 * 1024 * 1024; // 20MB per stream = 100MB per wave

    const promises = Array.from({ length: streamCount }, () => {
      const data = crypto.randomBytes(payloadSize);
      return sendAndReceive(edgePort, data, 300000).then((r) => r.length === payloadSize);
    });

    const results = await Promise.all(promises);
    const ok = results.filter(Boolean).length;
    expect(ok).toEqual(streamCount);

    // Brief pause between waves
    await new Promise((r) => setTimeout(r, 500));

    const status = await edge.getStatus();
    expect(status.connected).toBeTrue();
  }
});

tap.test('throttled: tunnel still works after all load tests', async () => {
  const data = crypto.randomBytes(1024);
  const hash = sha256(data);
  const received = await sendAndReceive(edgePort, data, 30000);
  expect(sha256(received)).toEqual(hash);

  const status = await edge.getStatus();
  expect(status.connected).toBeTrue();
});

tap.test('teardown: stop tunnel', async () => {
  await edge.stop();
  await hub.stop();
  if (throttle) await throttle.close();
  await new Promise<void>((resolve) => echoServer.close(() => resolve()));
});

export default tap.start();

@@ -3,6 +3,6 @@
  */
 export const commitinfo = {
   name: '@serve.zone/remoteingress',
-  version: '4.8.5',
+  version: '4.8.13',
   description: 'Edge ingress tunnel for DcRouter - accepts incoming TCP connections at network edge and tunnels them to DcRouter SmartProxy preserving client IP via PROXY protocol v1.'
 }