Compare commits

5 Commits

| Author | SHA1 | Date |
|---|---|---|
| | e8d429f117 | |
| | 3c2299430a | |
| | 8b5df9a0b7 | |
| | 236d6d16ee | |
| | 81bbb33016 | |
changelog.md (+14)

```diff
@@ -1,5 +1,19 @@
 # Changelog
 
+## 2026-03-17 - 4.8.13 - fix(remoteingress-protocol)
+require a flush after each written frame to bound TLS buffer growth
+
+- Remove the unflushed byte threshold and stop queueing additional writes while a flush is pending
+- Simplify write and flush error logging after dropping unflushed byte tracking
+- Update tunnel I/O comments to reflect the stricter flush behavior that avoids OOM and connection resets
+
+## 2026-03-17 - 4.8.12 - fix(tunnel)
+prevent tunnel backpressure buffering from exhausting memory and cancel stream handlers before TLS shutdown
+
+- stop self-waking and writing new frames while a flush is pending to avoid unbounded TLS session buffer growth under load
+- reorder edge and hub shutdown cleanup so stream cancellation happens before TLS close_notify, preventing handlers from blocking on dead channels
+- add load tests covering sustained large transfers, burst traffic, and rapid stream churn to verify tunnel stability
+
 ## 2026-03-17 - 4.8.11 - fix(remoteingress-core)
 stop data frame send loops promptly when stream cancellation is triggered
 
```
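The 4.8.13 entry describes a strict write/flush discipline: write at most one frame into the TLS session, mark a flush as pending, and write nothing further until that flush reaches the socket. A minimal Rust sketch of that discipline, with illustrative names only (the real `TunnelIo` in the diffs below also tracks ctrl/data queues, wakers, and the TLS stream):

```rust
use std::collections::VecDeque;

// Illustrative sketch, not the crate's API: one frame per completed flush.
struct GatedWriter {
    queue: VecDeque<Vec<u8>>,
    flush_needed: bool, // set after every write; cleared only by a completed flush
}

impl GatedWriter {
    /// Yields the next frame to write, or None while a flush is outstanding.
    /// Because poll_write on a TLS stream is always Ready (it only copies
    /// plaintext into the session buffer), gating on the flush is what bounds
    /// in-memory buffer growth.
    fn next_frame(&mut self) -> Option<Vec<u8>> {
        if self.flush_needed {
            return None; // encrypted bytes must reach the TCP socket first
        }
        let frame = self.queue.pop_front()?;
        self.flush_needed = true;
        Some(frame)
    }

    /// Call when poll_flush returns Ready(Ok(())).
    fn flush_completed(&mut self) {
        self.flush_needed = false;
    }
}
```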
package.json

```diff
@@ -1,6 +1,6 @@
 {
   "name": "@serve.zone/remoteingress",
-  "version": "4.8.11",
+  "version": "4.8.13",
   "private": false,
   "description": "Edge ingress tunnel for DcRouter - accepts incoming TCP connections at network edge and tunnels them to DcRouter SmartProxy preserving client IP via PROXY protocol v1.",
   "main": "dist_ts/index.js",
```
```diff
@@ -519,6 +519,7 @@ async fn connect_to_hub_and_run(
     // Single-owner I/O engine — no tokio::io::split, no mutex
     let mut tunnel_io = remoteingress_protocol::TunnelIo::new(tls_stream, Vec::new());
 
+
     let liveness_timeout_dur = Duration::from_secs(45);
     let mut last_activity = Instant::now();
     let mut liveness_deadline = Box::pin(sleep_until(last_activity + liveness_timeout_dur));
```
```diff
@@ -587,21 +588,23 @@ async fn connect_to_hub_and_run(
        }
    };

-    // Graceful TLS shutdown: send close_notify so the hub sees a clean disconnect
-    // instead of "peer closed connection without sending TLS close_notify".
-    let mut tls_stream = tunnel_io.into_inner();
-    let _ = tokio::time::timeout(
-        Duration::from_secs(2),
-        tls_stream.shutdown(),
-    ).await;
-
-    // Cleanup
+    // Cancel stream tokens FIRST so stream handlers exit immediately.
+    // If we TLS-shutdown first, stream handlers are stuck sending to dead channels
+    // for up to 2 seconds while the shutdown times out on a dead connection.
    connection_token.cancel();
    stun_handle.abort();
    for (_, h) in port_listeners.drain() {
        h.abort();
    }

+    // Graceful TLS shutdown: send close_notify so the hub sees a clean disconnect.
+    // Stream handlers are already cancelled, so no new data is being produced.
+    let mut tls_stream = tunnel_io.into_inner();
+    let _ = tokio::time::timeout(
+        Duration::from_secs(2),
+        tls_stream.shutdown(),
+    ).await;
+
    result
}

```
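Both shutdown paths in this change follow the same ordering: cancel the stream handlers first, then send TLS close_notify under a timeout. A condensed sketch of that ordering; `shutdown_tunnel` and `stream_token` are assumed names standing in for the crate's real handles:

```rust
use std::time::Duration;
use tokio::io::{AsyncWrite, AsyncWriteExt};
use tokio_util::sync::CancellationToken;

// Sketch only: the cancel-then-close ordering from the diff above.
async fn shutdown_tunnel<S: AsyncWrite + Unpin>(
    mut tls_stream: S,
    stream_token: CancellationToken,
) {
    // 1. Cancel first: handlers blocked on channels observe the token and exit
    //    immediately, instead of waiting out the shutdown timeout below.
    stream_token.cancel();

    // 2. Then close_notify: nothing is producing data anymore, so the peer sees
    //    a clean TLS shutdown rather than an abrupt TCP close. The timeout keeps
    //    a dead connection from stalling teardown for more than 2 seconds.
    let _ = tokio::time::timeout(Duration::from_secs(2), tls_stream.shutdown()).await;
}
```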
```diff
@@ -755,6 +755,7 @@ async fn handle_edge_connection(
    // Single-owner I/O engine — no tokio::io::split, no mutex
    let mut tunnel_io = remoteingress_protocol::TunnelIo::new(tls_stream, Vec::new());

+
    // Assigned in every break path of the hub_loop before use at the end.
    #[allow(unused_assignments)]
    let mut disconnect_reason = String::new();
```
```diff
@@ -844,17 +845,19 @@ async fn handle_edge_connection(
        }
    }

-    // Graceful TLS shutdown: send close_notify so the edge sees a clean disconnect
-    // instead of "peer closed connection without sending TLS close_notify".
+    // Cancel stream tokens FIRST so stream handlers exit immediately.
+    // If we TLS-shutdown first, stream handlers are stuck sending to dead channels
+    // for up to 2 seconds while the shutdown times out on a dead connection.
+    edge_token.cancel();
+    config_handle.abort();
+
+    // Graceful TLS shutdown: send close_notify so the edge sees a clean disconnect.
+    // Stream handlers are already cancelled, so no new data is being produced.
    let mut tls_stream = tunnel_io.into_inner();
    let _ = tokio::time::timeout(
        Duration::from_secs(2),
        tls_stream.shutdown(),
    ).await;

-    // Cleanup: cancel edge token to propagate to all child tasks
-    edge_token.cancel();
-    config_handle.abort();
    {
        let mut edges = connected.lock().await;
        edges.remove(&edge_id);
```
```diff
@@ -312,11 +312,11 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {
        cancel_token: &tokio_util::sync::CancellationToken,
    ) -> Poll<TunnelEvent> {
        // 1. WRITE: drain ctrl queue first, then data queue.
-        // TLS poll_write writes plaintext to session buffer (always Ready).
-        // Batch up to 16 frames per poll cycle.
+        // Write one frame, set flush_needed, then flush must complete before
+        // writing more. This prevents unbounded TLS session buffer growth.
        // Safe: `self.write` and `self.stream` are disjoint fields.
        let mut writes = 0;
-        while self.write.has_work() && writes < 16 {
+        while self.write.has_work() && writes < 16 && !self.write.flush_needed {
            let from_ctrl = !self.write.ctrl_queue.is_empty();
            let frame = if from_ctrl {
                self.write.ctrl_queue.front().unwrap()
```
```diff
@@ -327,6 +327,8 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {

            match Pin::new(&mut self.stream).poll_write(cx, remaining) {
                Poll::Ready(Ok(0)) => {
+                    log::error!("TunnelIo: poll_write returned 0 (write zero), ctrl_q={} data_q={}",
+                        self.write.ctrl_queue.len(), self.write.data_queue.len());
                    return Poll::Ready(TunnelEvent::WriteError(
                        std::io::Error::new(std::io::ErrorKind::WriteZero, "write zero"),
                    ));
@@ -341,7 +343,11 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {
                        writes += 1;
                    }
                }
-                Poll::Ready(Err(e)) => return Poll::Ready(TunnelEvent::WriteError(e)),
+                Poll::Ready(Err(e)) => {
+                    log::error!("TunnelIo: poll_write error: {} (ctrl_q={} data_q={})",
+                        e, self.write.ctrl_queue.len(), self.write.data_queue.len());
+                    return Poll::Ready(TunnelEvent::WriteError(e));
+                }
                Poll::Pending => break,
            }
        }
```
```diff
@@ -349,8 +355,13 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {
        // 2. FLUSH: push encrypted data from TLS session to TCP.
        if self.write.flush_needed {
            match Pin::new(&mut self.stream).poll_flush(cx) {
-                Poll::Ready(Ok(())) => self.write.flush_needed = false,
-                Poll::Ready(Err(e)) => return Poll::Ready(TunnelEvent::WriteError(e)),
+                Poll::Ready(Ok(())) => {
+                    self.write.flush_needed = false;
+                }
+                Poll::Ready(Err(e)) => {
+                    log::error!("TunnelIo: poll_flush error: {}", e);
+                    return Poll::Ready(TunnelEvent::WriteError(e));
+                }
                Poll::Pending => {} // TCP waker will notify us
            }
        }
```
```diff
@@ -386,12 +397,19 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {
                    // Partial data — loop to call poll_read again so the TCP
                    // waker is re-registered when it finally returns Pending.
                }
-                Poll::Ready(Err(e)) => return Poll::Ready(TunnelEvent::ReadError(e)),
+                Poll::Ready(Err(e)) => {
+                    log::error!("TunnelIo: poll_read error: {}", e);
+                    return Poll::Ready(TunnelEvent::ReadError(e));
+                }
                Poll::Pending => break,
            }
        }

-        // 4. CHANNELS: drain ctrl into ctrl_queue, data into data_queue.
+        // 4. CHANNELS: drain ctrl (always — priority), data (only if queue is small).
+        // Ctrl frames must never be delayed — always drain fully.
+        // Data frames are gated: keep data in the bounded channel for proper
+        // backpressure when TLS writes are slow. Without this gate, the internal
+        // data_queue (unbounded VecDeque) grows to hundreds of MB under throttle → OOM.
        let mut got_new = false;
        loop {
            match ctrl_rx.poll_recv(cx) {
```
```diff
@@ -404,15 +422,17 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {
                Poll::Pending => break,
            }
        }
-        loop {
-            match data_rx.poll_recv(cx) {
-                Poll::Ready(Some(frame)) => { self.write.data_queue.push_back(frame); got_new = true; }
-                Poll::Ready(None) => {
-                    return Poll::Ready(TunnelEvent::WriteError(
-                        std::io::Error::new(std::io::ErrorKind::BrokenPipe, "data channel closed"),
-                    ));
-                }
-                Poll::Pending => break,
-            }
+        if self.write.data_queue.len() < 64 {
+            loop {
+                match data_rx.poll_recv(cx) {
+                    Poll::Ready(Some(frame)) => { self.write.data_queue.push_back(frame); got_new = true; }
+                    Poll::Ready(None) => {
+                        return Poll::Ready(TunnelEvent::WriteError(
+                            std::io::Error::new(std::io::ErrorKind::BrokenPipe, "data channel closed"),
+                        ));
+                    }
+                    Poll::Pending => break,
+                }
+            }
        }

```
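The `data_queue.len() < 64` gate works because a bounded tokio mpsc channel only exerts backpressure while frames stay inside it; once eagerly drained into an unbounded `VecDeque`, `send().await` always succeeds and the deque can grow without limit. A standalone sketch of that property (the frame type and sizes are assumptions):

```rust
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    // Bounded channel: at most 64 frames may be in flight.
    let (tx, mut rx) = mpsc::channel::<Vec<u8>>(64);

    tokio::spawn(async move {
        for _ in 0..1_000u32 {
            // Suspends here once 64 frames are queued: the producer slows to
            // match the consumer instead of buffering in process memory.
            tx.send(vec![0u8; 1024]).await.expect("receiver alive");
        }
    });

    // A deliberately slow consumer: one frame per loop iteration.
    while let Some(frame) = rx.recv().await {
        let _ = frame.len();
    }
}
```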
```diff
@@ -424,10 +444,12 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {
            return Poll::Ready(TunnelEvent::Cancelled);
        }

-        // 6. SELF-WAKE: only when we have frames AND flush is done.
-        // If flush is pending, the TCP write-readiness waker will notify us.
-        // If we got new channel frames, wake to write them.
-        if got_new || (!self.write.flush_needed && self.write.has_work()) {
+        // 6. SELF-WAKE: only when flush is complete AND we have work.
+        // When flush is Pending, the TCP write-readiness waker will notify us.
+        // CRITICAL: do NOT self-wake when flush_needed — poll_write always returns
+        // Ready (TLS buffers in-memory), so self-waking causes a tight spin loop
+        // that fills the TLS session buffer unboundedly -> OOM -> ECONNRESET.
+        if !self.write.flush_needed && (got_new || self.write.has_work()) {
            cx.waker().wake_by_ref();
        }

```
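The revised self-wake predicate is the heart of the spin-loop fix. Reduced to a pure function (an assumed shape, not the real poll body), the before/after difference is where `got_new` sits relative to the flush check:

```rust
// Before: `got_new || (!flush_needed && has_work)` could schedule an immediate
// re-poll even while a flush was still pending. Since TLS poll_write is always
// Ready, each re-poll pushed another frame into the session buffer that the
// pending flush had not yet drained.
fn should_self_wake(flush_needed: bool, got_new: bool, has_work: bool) -> bool {
    // After: never self-wake while a flush is outstanding; the TCP
    // write-readiness waker re-polls us once the socket drains.
    !flush_needed && (got_new || has_work)
}
```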
test/test.loadtest.node.ts (new file, 402 lines)

```typescript
import { expect, tap } from '@push.rocks/tapbundle';
import * as net from 'net';
import * as stream from 'stream';
import * as crypto from 'crypto';
import { RemoteIngressHub, RemoteIngressEdge } from '../ts/index.js';

// ---------------------------------------------------------------------------
// Helpers (self-contained — same patterns as test.flowcontrol.node.ts)
// ---------------------------------------------------------------------------

async function findFreePorts(count: number): Promise<number[]> {
  const servers: net.Server[] = [];
  const ports: number[] = [];
  for (let i = 0; i < count; i++) {
    const server = net.createServer();
    await new Promise<void>((resolve) => server.listen(0, '127.0.0.1', resolve));
    ports.push((server.address() as net.AddressInfo).port);
    servers.push(server);
  }
  await Promise.all(servers.map((s) => new Promise<void>((resolve) => s.close(() => resolve()))));
  return ports;
}

type TrackingServer = net.Server & { destroyAll: () => void };

function startEchoServer(port: number, host: string): Promise<TrackingServer> {
  return new Promise((resolve, reject) => {
    const connections = new Set<net.Socket>();
    const server = net.createServer((socket) => {
      connections.add(socket);
      socket.on('close', () => connections.delete(socket));
      let proxyHeaderParsed = false;
      let pendingBuf = Buffer.alloc(0);
      socket.on('data', (data: Buffer) => {
        if (!proxyHeaderParsed) {
          pendingBuf = Buffer.concat([pendingBuf, data]);
          const idx = pendingBuf.indexOf('\r\n');
          if (idx !== -1) {
            proxyHeaderParsed = true;
            const remainder = pendingBuf.subarray(idx + 2);
            if (remainder.length > 0) socket.write(remainder);
          }
          return;
        }
        socket.write(data);
      });
      socket.on('error', () => {});
    }) as TrackingServer;
    server.destroyAll = () => {
      for (const conn of connections) conn.destroy();
      connections.clear();
    };
    server.on('error', reject);
    server.listen(port, host, () => resolve(server));
  });
}

function sendAndReceive(port: number, data: Buffer, timeoutMs = 30000): Promise<Buffer> {
  return new Promise((resolve, reject) => {
    const chunks: Buffer[] = [];
    let totalReceived = 0;
    const expectedLength = data.length;
    let settled = false;

    const client = net.createConnection({ host: '127.0.0.1', port }, () => {
      client.write(data);
      client.end();
    });

    const timer = setTimeout(() => {
      if (!settled) {
        settled = true;
        client.destroy();
        reject(new Error(`Timeout after ${timeoutMs}ms — received ${totalReceived}/${expectedLength} bytes`));
      }
    }, timeoutMs);

    client.on('data', (chunk: Buffer) => {
      chunks.push(chunk);
      totalReceived += chunk.length;
      if (totalReceived >= expectedLength && !settled) {
        settled = true;
        clearTimeout(timer);
        client.destroy();
        resolve(Buffer.concat(chunks));
      }
    });

    client.on('end', () => {
      if (!settled) {
        settled = true;
        clearTimeout(timer);
        resolve(Buffer.concat(chunks));
      }
    });

    client.on('error', (err) => {
      if (!settled) {
        settled = true;
        clearTimeout(timer);
        reject(err);
      }
    });
  });
}

function sha256(buf: Buffer): string {
  return crypto.createHash('sha256').update(buf).digest('hex');
}

// ---------------------------------------------------------------------------
// Throttle Proxy: rate-limits TCP traffic between edge and hub
// ---------------------------------------------------------------------------

class ThrottleTransform extends stream.Transform {
  private bytesPerSec: number;
  private bucket: number;
  private lastRefill: number;
  private destroyed_: boolean = false;

  constructor(bytesPerSecond: number) {
    super();
    this.bytesPerSec = bytesPerSecond;
    this.bucket = bytesPerSecond;
    this.lastRefill = Date.now();
  }

  _transform(chunk: Buffer, _encoding: BufferEncoding, callback: stream.TransformCallback) {
    if (this.destroyed_) return;

    const now = Date.now();
    const elapsed = (now - this.lastRefill) / 1000;
    this.bucket = Math.min(this.bytesPerSec, this.bucket + elapsed * this.bytesPerSec);
    this.lastRefill = now;

    if (chunk.length <= this.bucket) {
      this.bucket -= chunk.length;
      callback(null, chunk);
    } else {
      // Not enough budget — delay the entire chunk (don't split)
      const deficit = chunk.length - this.bucket;
      this.bucket = 0;
      const delayMs = Math.min((deficit / this.bytesPerSec) * 1000, 1000);
      setTimeout(() => {
        if (this.destroyed_) { callback(); return; }
        this.lastRefill = Date.now();
        this.bucket = 0;
        callback(null, chunk);
      }, delayMs);
    }
  }

  _destroy(err: Error | null, callback: (error: Error | null) => void) {
    this.destroyed_ = true;
    callback(err);
  }
}

interface ThrottleProxy {
  server: net.Server;
  close: () => Promise<void>;
}

async function startThrottleProxy(
  listenPort: number,
  targetHost: string,
  targetPort: number,
  bytesPerSecond: number,
): Promise<ThrottleProxy> {
  const connections = new Set<net.Socket>();
  const server = net.createServer((clientSock) => {
    connections.add(clientSock);
    const upstream = net.createConnection({ host: targetHost, port: targetPort });
    connections.add(upstream);

    const throttleUp = new ThrottleTransform(bytesPerSecond);
    const throttleDown = new ThrottleTransform(bytesPerSecond);

    clientSock.pipe(throttleUp).pipe(upstream);
    upstream.pipe(throttleDown).pipe(clientSock);

    let cleaned = false;
    const cleanup = (source: string, err?: Error) => {
      if (cleaned) return;
      cleaned = true;
      if (err) {
        console.error(`[ThrottleProxy] cleanup triggered by ${source}: ${err.message}`);
      } else {
        console.error(`[ThrottleProxy] cleanup triggered by ${source} (no error)`);
      }
      console.error(`[ThrottleProxy] stack:`, new Error().stack);
      throttleUp.destroy();
      throttleDown.destroy();
      clientSock.destroy();
      upstream.destroy();
      connections.delete(clientSock);
      connections.delete(upstream);
    };
    clientSock.on('error', (e) => cleanup('clientSock.error', e));
    upstream.on('error', (e) => cleanup('upstream.error', e));
    throttleUp.on('error', (e) => cleanup('throttleUp.error', e));
    throttleDown.on('error', (e) => cleanup('throttleDown.error', e));
    clientSock.on('close', () => cleanup('clientSock.close'));
    upstream.on('close', () => cleanup('upstream.close'));
  });

  await new Promise<void>((resolve) => server.listen(listenPort, '127.0.0.1', resolve));
  return {
    server,
    close: async () => {
      for (const c of connections) c.destroy();
      connections.clear();
      await new Promise<void>((resolve) => server.close(() => resolve()));
    },
  };
}

// ---------------------------------------------------------------------------
// Test state
// ---------------------------------------------------------------------------

let hub: RemoteIngressHub;
let edge: RemoteIngressEdge;
let echoServer: TrackingServer;
let throttle: ThrottleProxy;
let hubPort: number;
let proxyPort: number;
let edgePort: number;

// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------

tap.test('setup: start throttled tunnel (100 Mbit/s)', async () => {
  [hubPort, proxyPort, edgePort] = await findFreePorts(3);

  echoServer = await startEchoServer(edgePort, '127.0.0.2');

  // Throttle proxy: edge → proxy → hub at 100 Mbit/s (12.5 MB/s)
  throttle = await startThrottleProxy(proxyPort, '127.0.0.1', hubPort, 12.5 * 1024 * 1024);

  hub = new RemoteIngressHub();
  edge = new RemoteIngressEdge();

  await hub.start({ tunnelPort: hubPort, targetHost: '127.0.0.2' });
  await hub.updateAllowedEdges([
    { id: 'test-edge', secret: 'test-secret', listenPorts: [edgePort] },
  ]);

  const connectedPromise = new Promise<void>((resolve, reject) => {
    const timeout = setTimeout(() => reject(new Error('Edge did not connect within 10s')), 10000);
    edge.once('tunnelConnected', () => {
      clearTimeout(timeout);
      resolve();
    });
  });

  // Edge connects through throttle proxy
  await edge.start({
    hubHost: '127.0.0.1',
    hubPort: proxyPort,
    edgeId: 'test-edge',
    secret: 'test-secret',
    bindAddress: '127.0.0.1',
  });

  await connectedPromise;
  await new Promise((resolve) => setTimeout(resolve, 500));

  const status = await edge.getStatus();
  expect(status.connected).toBeTrue();
});

tap.test('throttled: 5 streams x 20MB each through 100Mbit tunnel', async () => {
  const streamCount = 5;
  const payloadSize = 20 * 1024 * 1024; // 20MB per stream = 100MB total round-trip

  const payloads = Array.from({ length: streamCount }, () => crypto.randomBytes(payloadSize));
  const promises = payloads.map((data) => {
    const hash = sha256(data);
    return sendAndReceive(edgePort, data, 300000).then((received) => ({
      sent: hash,
      received: sha256(received),
      sizeOk: received.length === payloadSize,
    }));
  });

  const results = await Promise.all(promises);
  const failures = results.filter((r) => !r.sizeOk || r.sent !== r.received);
  expect(failures.length).toEqual(0);

  const status = await edge.getStatus();
  expect(status.connected).toBeTrue();
});

tap.test('throttled: slow consumer with 20MB does not kill other streams', async () => {
  // Open a connection that creates download-direction backpressure:
  // send 20MB but DON'T read the response — client TCP receive buffer fills
  const slowSock = net.createConnection({ host: '127.0.0.1', port: edgePort });
  await new Promise<void>((resolve) => slowSock.on('connect', resolve));
  const slowData = crypto.randomBytes(20 * 1024 * 1024);
  slowSock.write(slowData);
  slowSock.end();
  // Don't read — backpressure builds on the download path

  // Wait for backpressure to develop
  await new Promise((r) => setTimeout(r, 2000));

  // Meanwhile, 5 normal echo streams with 20MB each must complete
  const payload = crypto.randomBytes(20 * 1024 * 1024);
  const hash = sha256(payload);
  const promises = Array.from({ length: 5 }, () =>
    sendAndReceive(edgePort, payload, 300000).then((r) => ({
      hash: sha256(r),
      sizeOk: r.length === payload.length,
    }))
  );
  const results = await Promise.all(promises);
  const failures = results.filter((r) => !r.sizeOk || r.hash !== hash);
  expect(failures.length).toEqual(0);

  // Tunnel still alive
  const status = await edge.getStatus();
  expect(status.connected).toBeTrue();

  slowSock.destroy();
});

tap.test('throttled: rapid churn — 3 x 20MB long + 50 x 1MB short streams', async () => {
  // 3 long streams (20MB each) running alongside 50 short streams (1MB each)
  const longPayload = crypto.randomBytes(20 * 1024 * 1024);
  const longHash = sha256(longPayload);
  const longPromises = Array.from({ length: 3 }, () =>
    sendAndReceive(edgePort, longPayload, 300000).then((r) => ({
      hash: sha256(r),
      sizeOk: r.length === longPayload.length,
    }))
  );

  const shortPayload = crypto.randomBytes(1024 * 1024);
  const shortHash = sha256(shortPayload);
  const shortPromises = Array.from({ length: 50 }, () =>
    sendAndReceive(edgePort, shortPayload, 300000).then((r) => ({
      hash: sha256(r),
      sizeOk: r.length === shortPayload.length,
    }))
  );

  const [longResults, shortResults] = await Promise.all([
    Promise.all(longPromises),
    Promise.all(shortPromises),
  ]);

  const longFails = longResults.filter((r) => !r.sizeOk || r.hash !== longHash);
  const shortFails = shortResults.filter((r) => !r.sizeOk || r.hash !== shortHash);
  expect(longFails.length).toEqual(0);
  expect(shortFails.length).toEqual(0);

  const status = await edge.getStatus();
  expect(status.connected).toBeTrue();
});

tap.test('throttled: 3 burst waves of 5 streams x 20MB each', async () => {
  for (let wave = 0; wave < 3; wave++) {
    const streamCount = 5;
    const payloadSize = 20 * 1024 * 1024; // 20MB per stream = 100MB per wave

    const promises = Array.from({ length: streamCount }, () => {
      const data = crypto.randomBytes(payloadSize);
      return sendAndReceive(edgePort, data, 300000).then((r) => r.length === payloadSize);
    });

    const results = await Promise.all(promises);
    const ok = results.filter(Boolean).length;
    expect(ok).toEqual(streamCount);

    // Brief pause between waves
    await new Promise((r) => setTimeout(r, 500));

    const status = await edge.getStatus();
    expect(status.connected).toBeTrue();
  }
});

tap.test('throttled: tunnel still works after all load tests', async () => {
  const data = crypto.randomBytes(1024);
  const hash = sha256(data);
  const received = await sendAndReceive(edgePort, data, 30000);
  expect(sha256(received)).toEqual(hash);

  const status = await edge.getStatus();
  expect(status.connected).toBeTrue();
});

tap.test('teardown: stop tunnel', async () => {
  await edge.stop();
  await hub.stop();
  if (throttle) await throttle.close();
  await new Promise<void>((resolve) => echoServer.close(() => resolve()));
});

export default tap.start();
```
```diff
@@ -3,6 +3,6 @@
  */
 export const commitinfo = {
   name: '@serve.zone/remoteingress',
-  version: '4.8.11',
+  version: '4.8.13',
   description: 'Edge ingress tunnel for DcRouter - accepts incoming TCP connections at network edge and tunnels them to DcRouter SmartProxy preserving client IP via PROXY protocol v1.'
 }
```