Compare commits

25 Commits

- 0b8420aac9
- afd193336a
- e8d429f117
- 3c2299430a
- 8b5df9a0b7
- 236d6d16ee
- 81bbb33016
- 79af6fd425
- f71b2f1876
- 0161a2589c
- bfd9e58b4f
- 9a8760c18d
- c77caa89fc
- 04586aab39
- f9a739858d
- da01fbeecd
- 264e8eeb97
- 9922c3b020
- 38cde37cff
- 64572827e5
- c4e26198b9
- 0b5d72de28
- e8431c0174
- d57d6395dd
- 2e5ceeaf5c
changelog.md (72 changed lines)
@@ -1,5 +1,77 @@

# Changelog

## 2026-03-17 - 4.8.16 - fix(release)
bump package version to 4.8.15

- Updates the package.json version field from 4.8.13 to 4.8.15.

## 2026-03-17 - 4.8.13 - fix(remoteingress-protocol)
require a flush after each written frame to bound TLS buffer growth

- Remove the unflushed byte threshold and stop queueing additional writes while a flush is pending
- Simplify write and flush error logging after dropping unflushed byte tracking
- Update tunnel I/O comments to reflect the stricter flush behavior that avoids OOM and connection resets

## 2026-03-17 - 4.8.12 - fix(tunnel)
prevent tunnel backpressure buffering from exhausting memory and cancel stream handlers before TLS shutdown

- stop self-waking and writing new frames while a flush is pending to avoid unbounded TLS session buffer growth under load
- reorder edge and hub shutdown cleanup so stream cancellation happens before TLS close_notify, preventing handlers from blocking on dead channels
- add load tests covering sustained large transfers, burst traffic, and rapid stream churn to verify tunnel stability

## 2026-03-17 - 4.8.11 - fix(remoteingress-core)
stop data frame send loops promptly when stream cancellation is triggered

- Use cancellation-aware tokio::select! around data channel sends in both edge and hub stream forwarding paths
- Prevent stalled or noisy shutdown behavior when stream or client cancellation happens while awaiting frame delivery

## 2026-03-17 - 4.8.10 - fix(remoteingress-core)
guard tunnel frame sends with cancellation to prevent async send deadlocks

- Wrap OPEN, CLOSE, CLOSE_BACK, WINDOW_UPDATE, and cleanup channel sends in cancellation-aware tokio::select! blocks.
- Avoid indefinite blocking when tunnel, stream, or writer tasks are cancelled while awaiting channel capacity.
- Improve shutdown reliability for edge and hub stream handling under tunnel failure conditions.

## 2026-03-17 - 4.8.9 - fix(repo)
no changes to commit

## 2026-03-17 - 4.8.8 - fix(remoteingress-core)
cancel stale edge connections when an edge reconnects

- Remove any existing edge entry before registering a reconnected edge
- Trigger the previous connection's cancellation token so stale sessions shut down immediately instead of waiting for TCP keepalive

## 2026-03-17 - 4.8.7 - fix(remoteingress-core)
perform graceful TLS shutdown on edge and hub tunnel streams

- Send TLS close_notify before cleanup to avoid peer disconnect warnings on both tunnel endpoints
- Wrap stream shutdown in a 2 second timeout so connection teardown does not block cleanup

## 2026-03-17 - 4.8.6 - fix(remoteingress-core)
initialize disconnect reason only when set in hub loop break paths

- Replace the default "unknown" disconnect reason with an explicitly assigned string and document that all hub loop exits set it before use
- Add an allow attribute for unused assignments to avoid warnings around the deferred initialization pattern

## 2026-03-17 - 4.8.5 - fix(repo)
no changes to commit

## 2026-03-17 - 4.8.4 - fix(remoteingress-core)
prevent stream stalls by guaranteeing flow-control updates and avoiding bounded per-stream channel overflows

- Replace bounded per-stream data channels with unbounded channels on edge and hub, relying on existing WINDOW_UPDATE flow control to limit bytes in flight
- Use awaited sends for FRAME_WINDOW_UPDATE and FRAME_WINDOW_UPDATE_BACK so updates are not dropped and streams do not deadlock under backpressure
- Clean up stream state when channel receivers have already exited instead of closing active streams because a bounded queue filled

## 2026-03-17 - 4.8.3 - fix(protocol,edge)
optimize tunnel frame handling and zero-copy uploads in edge I/O

- extract hub frame processing into a shared edge handler to remove duplicated tunnel logic
- add zero-copy frame header encoding and read payloads directly into framed buffers for client-to-hub uploads
- refactor TunnelIo read/write state to avoid unsafe queue access and reduce buffer churn with incremental parsing

## 2026-03-17 - 4.8.2 - fix(rust-edge)
refactor tunnel I/O to preserve TLS state and prioritize control frames
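The 4.8.10 and 4.8.11 entries above describe wrapping channel sends in cancellation-aware `tokio::select!` blocks. A minimal, self-contained sketch of that pattern, with illustrative names that are not part of this repository's API:

```rust
// Minimal sketch of a cancellation-aware channel send, as described in the
// 4.8.10 / 4.8.11 changelog entries. Names here are illustrative only.
use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;

/// Send `frame` unless the connection is cancelled first.
/// Returns false when the send did not happen (cancelled or receiver gone),
/// so the caller can break out of its loop instead of blocking forever on a
/// full channel whose receiver has died.
async fn send_or_cancel(
    tx: &mpsc::Sender<Vec<u8>>,
    token: &CancellationToken,
    frame: Vec<u8>,
) -> bool {
    tokio::select! {
        result = tx.send(frame) => result.is_ok(),
        _ = token.cancelled() => false,
    }
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<Vec<u8>>(4);
    let token = CancellationToken::new();

    assert!(send_or_cancel(&tx, &token, vec![1, 2, 3]).await);
    assert_eq!(rx.recv().await.unwrap(), vec![1, 2, 3]);

    // Once the receiver is gone or the token is cancelled, the helper reports
    // failure instead of blocking or panicking.
    drop(rx);
    token.cancel();
    assert!(!send_or_cancel(&tx, &token, vec![4]).await);
}
```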
@@ -1,6 +1,6 @@
 {
   "name": "@serve.zone/remoteingress",
-  "version": "4.8.2",
+  "version": "4.8.16",
   "private": false,
   "description": "Edge ingress tunnel for DcRouter - accepts incoming TCP connections at network edge and tunnels them to DcRouter SmartProxy preserving client IP via PROXY protocol v1.",
   "main": "dist_ts/index.js",
@@ -13,10 +13,20 @@ use serde::{Deserialize, Serialize};
 use remoteingress_protocol::*;
 
+type EdgeTlsStream = tokio_rustls::client::TlsStream<TcpStream>;
+
+/// Result of processing a frame (shared with hub.rs pattern).
+#[allow(dead_code)]
+enum EdgeFrameAction {
+    Continue,
+    Disconnect(String),
+}
+
 /// Per-stream state tracked in the edge's client_writers map.
 struct EdgeStreamState {
-    /// Channel to deliver FRAME_DATA_BACK payloads to the hub_to_client task.
-    back_tx: mpsc::Sender<Vec<u8>>,
+    /// Unbounded channel to deliver FRAME_DATA_BACK payloads to the hub_to_client task.
+    /// Unbounded because flow control (WINDOW_UPDATE) already limits bytes-in-flight.
+    back_tx: mpsc::UnboundedSender<Vec<u8>>,
     /// Send window for FRAME_DATA (upload direction).
     /// Decremented by the client reader, incremented by FRAME_WINDOW_UPDATE_BACK from hub.
     send_window: Arc<AtomicU32>,
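The new `EdgeStreamState` comment asserts that an unbounded channel is safe because WINDOW_UPDATE flow control already caps bytes in flight. A stand-alone sketch of that invariant, with a plain atomic credit counter standing in for the protocol's window (the constants are assumptions, not the crate's values):

```rust
// Illustrative, self-contained sketch (not the crate's API) of why the
// unbounded per-stream channel stays small in practice: the producer never
// puts more than the granted window of bytes into the channel, and credit is
// only returned after the consumer has drained the data, so the window, not
// the channel type, bounds memory.
use std::sync::Arc;
use std::sync::atomic::{AtomicU32, Ordering};
use tokio::sync::mpsc;

const WINDOW: u32 = 64 * 1024; // maximum un-acknowledged bytes in flight
const TOTAL: u32 = 10 * WINDOW;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::unbounded_channel::<Vec<u8>>();
    let credit = Arc::new(AtomicU32::new(WINDOW));

    let producer_credit = credit.clone();
    let producer = tokio::spawn(async move {
        let mut sent = 0u32;
        while sent < TOTAL {
            let allowed = producer_credit.load(Ordering::Acquire);
            let chunk = allowed.min(4096).min(TOTAL - sent);
            if chunk == 0 {
                // Window exhausted: wait for the consumer to return credit.
                tokio::task::yield_now().await;
                continue;
            }
            producer_credit.fetch_sub(chunk, Ordering::AcqRel);
            tx.send(vec![0u8; chunk as usize]).unwrap();
            sent += chunk;
        }
        // tx is dropped here, which ends the consumer loop below.
    });

    // The consumer drains the channel and returns credit, the role that
    // FRAME_WINDOW_UPDATE plays in the real protocol.
    let mut received = 0u32;
    while let Some(buf) = rx.recv().await {
        received += buf.len() as u32;
        credit.fetch_add(buf.len() as u32, Ordering::AcqRel);
    }
    producer.await.unwrap();
    assert_eq!(received, TOTAL);
}
```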
@@ -272,6 +282,86 @@ enum EdgeLoopResult {
     Reconnect(String), // reason for disconnection
 }
 
+/// Process a single frame received from the hub side of the tunnel.
+/// Handles FRAME_DATA_BACK, FRAME_WINDOW_UPDATE_BACK, FRAME_CLOSE_BACK, FRAME_CONFIG, FRAME_PING.
+async fn handle_edge_frame(
+    frame: Frame,
+    tunnel_io: &mut remoteingress_protocol::TunnelIo<EdgeTlsStream>,
+    client_writers: &Arc<Mutex<HashMap<u32, EdgeStreamState>>>,
+    listen_ports: &Arc<RwLock<Vec<u16>>>,
+    event_tx: &mpsc::Sender<EdgeEvent>,
+    tunnel_writer_tx: &mpsc::Sender<Vec<u8>>,
+    tunnel_data_tx: &mpsc::Sender<Vec<u8>>,
+    port_listeners: &mut HashMap<u16, JoinHandle<()>>,
+    active_streams: &Arc<AtomicU32>,
+    next_stream_id: &Arc<AtomicU32>,
+    edge_id: &str,
+    connection_token: &CancellationToken,
+    bind_address: &str,
+) -> EdgeFrameAction {
+    match frame.frame_type {
+        FRAME_DATA_BACK => {
+            // Dispatch to per-stream unbounded channel. Flow control (WINDOW_UPDATE)
+            // limits bytes-in-flight, so the channel won't grow unbounded. send() only
+            // fails if the receiver is dropped (hub_to_client task already exited).
+            let mut writers = client_writers.lock().await;
+            if let Some(state) = writers.get(&frame.stream_id) {
+                if state.back_tx.send(frame.payload).is_err() {
+                    // Receiver dropped — hub_to_client task already exited, clean up
+                    writers.remove(&frame.stream_id);
+                }
+            }
+        }
+        FRAME_WINDOW_UPDATE_BACK => {
+            if let Some(increment) = decode_window_update(&frame.payload) {
+                if increment > 0 {
+                    let writers = client_writers.lock().await;
+                    if let Some(state) = writers.get(&frame.stream_id) {
+                        let prev = state.send_window.fetch_add(increment, Ordering::Release);
+                        if prev + increment > MAX_WINDOW_SIZE {
+                            state.send_window.store(MAX_WINDOW_SIZE, Ordering::Release);
+                        }
+                        state.window_notify.notify_one();
+                    }
+                }
+            }
+        }
+        FRAME_CLOSE_BACK => {
+            let mut writers = client_writers.lock().await;
+            writers.remove(&frame.stream_id);
+        }
+        FRAME_CONFIG => {
+            if let Ok(update) = serde_json::from_slice::<ConfigUpdate>(&frame.payload) {
+                log::info!("Config update from hub: ports {:?}", update.listen_ports);
+                *listen_ports.write().await = update.listen_ports.clone();
+                let _ = event_tx.try_send(EdgeEvent::PortsUpdated {
+                    listen_ports: update.listen_ports.clone(),
+                });
+                apply_port_config(
+                    &update.listen_ports,
+                    port_listeners,
+                    tunnel_writer_tx,
+                    tunnel_data_tx,
+                    client_writers,
+                    active_streams,
+                    next_stream_id,
+                    edge_id,
+                    connection_token,
+                    bind_address,
+                );
+            }
+        }
+        FRAME_PING => {
+            // Queue PONG directly — no channel round-trip, guaranteed delivery
+            tunnel_io.queue_ctrl(encode_frame(0, FRAME_PONG, &[]));
+        }
+        _ => {
+            log::warn!("Unexpected frame type {} from hub", frame.frame_type);
+        }
+    }
+    EdgeFrameAction::Continue
+}
+
 async fn connect_to_hub_and_run(
     config: &EdgeConfig,
     connected: &Arc<RwLock<bool>>,
@@ -429,6 +519,7 @@ async fn connect_to_hub_and_run(
     // Single-owner I/O engine — no tokio::io::split, no mutex
     let mut tunnel_io = remoteingress_protocol::TunnelIo::new(tls_stream, Vec::new());
+
     let liveness_timeout_dur = Duration::from_secs(45);
     let mut last_activity = Instant::now();
     let mut liveness_deadline = Box::pin(sleep_until(last_activity + liveness_timeout_dur));
@@ -436,73 +527,22 @@ async fn connect_to_hub_and_run(
     let result = 'io_loop: loop {
         // Drain any buffered frames
         loop {
-            match tunnel_io.try_parse_frame() {
-                Some(Ok(frame)) => {
-                    last_activity = Instant::now();
-                    liveness_deadline.as_mut().reset(last_activity + liveness_timeout_dur);
-                    match frame.frame_type {
-                        /* … inline FRAME_DATA_BACK / FRAME_WINDOW_UPDATE_BACK / FRAME_CLOSE_BACK /
-                           FRAME_CONFIG / FRAME_PING handling, now extracted into handle_edge_frame
-                           (the old FRAME_DATA_BACK path used back_tx.try_send and closed the stream
-                           when the back-channel was full) … */
-                    }
-                }
+            let frame = match tunnel_io.try_parse_frame() {
+                Some(Ok(f)) => f,
                 Some(Err(e)) => {
                     log::error!("Hub frame error: {}", e);
                     break 'io_loop EdgeLoopResult::Reconnect(format!("hub_frame_error: {}", e));
                 }
                 None => break,
-            }
+            };
+            last_activity = Instant::now();
+            liveness_deadline.as_mut().reset(last_activity + liveness_timeout_dur);
+            if let EdgeFrameAction::Disconnect(reason) = handle_edge_frame(
+                frame, &mut tunnel_io, &client_writers, listen_ports, event_tx,
+                &tunnel_writer_tx, &tunnel_data_tx, &mut port_listeners,
+                active_streams, next_stream_id, &config.edge_id, connection_token, bind_address,
+            ).await {
+                break 'io_loop EdgeLoopResult::Reconnect(reason);
+            }
         }
@@ -515,61 +555,12 @@ async fn connect_to_hub_and_run(
                 remoteingress_protocol::TunnelEvent::Frame(frame) => {
                     last_activity = Instant::now();
                     liveness_deadline.as_mut().reset(last_activity + liveness_timeout_dur);
-                    match frame.frame_type {
-                        /* … same inline frame handling as the drain loop above,
-                           now extracted into handle_edge_frame … */
-                    }
+                    if let EdgeFrameAction::Disconnect(reason) = handle_edge_frame(
+                        frame, &mut tunnel_io, &client_writers, listen_ports, event_tx,
+                        &tunnel_writer_tx, &tunnel_data_tx, &mut port_listeners,
+                        active_streams, next_stream_id, &config.edge_id, connection_token, bind_address,
+                    ).await {
+                        break EdgeLoopResult::Reconnect(reason);
+                    }
                 }
                 remoteingress_protocol::TunnelEvent::Eof => {
@@ -597,13 +588,23 @@ async fn connect_to_hub_and_run(
         }
     };
 
-    // Cleanup
+    // Cancel stream tokens FIRST so stream handlers exit immediately.
+    // If we TLS-shutdown first, stream handlers are stuck sending to dead channels
+    // for up to 2 seconds while the shutdown times out on a dead connection.
     connection_token.cancel();
     stun_handle.abort();
     for (_, h) in port_listeners.drain() {
         h.abort();
     }
 
+    // Graceful TLS shutdown: send close_notify so the hub sees a clean disconnect.
+    // Stream handlers are already cancelled, so no new data is being produced.
+    let mut tls_stream = tunnel_io.into_inner();
+    let _ = tokio::time::timeout(
+        Duration::from_secs(2),
+        tls_stream.shutdown(),
+    ).await;
+
     result
 }
@@ -741,12 +742,18 @@ async fn handle_client_connection(
     // Send OPEN frame with PROXY v1 header via control channel
     let proxy_header = build_proxy_v1_header(&client_ip, edge_ip, client_port, dest_port);
     let open_frame = encode_frame(stream_id, FRAME_OPEN, proxy_header.as_bytes());
-    if tunnel_ctrl_tx.send(open_frame).await.is_err() {
+    let send_ok = tokio::select! {
+        result = tunnel_ctrl_tx.send(open_frame) => result.is_ok(),
+        _ = client_token.cancelled() => false,
+    };
+    if !send_ok {
         return;
     }
 
-    // Set up channel for data coming back from hub (capacity 16 is sufficient with flow control)
-    let (back_tx, mut back_rx) = mpsc::channel::<Vec<u8>>(1024);
+    // Per-stream unbounded back-channel. Flow control (WINDOW_UPDATE) limits
+    // bytes-in-flight, so this won't grow unbounded. Unbounded avoids killing
+    // streams due to channel overflow — backpressure slows streams, never kills them.
+    let (back_tx, mut back_rx) = mpsc::unbounded_channel::<Vec<u8>>();
     // Adaptive initial window: scale with current stream count to keep total in-flight
     // data within the 32MB budget. Prevents burst flooding when many streams open.
     let initial_window = remoteingress_protocol::compute_window_for_stream_count(
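The adaptive-window comment above relies on `compute_window_for_stream_count`, whose body is not part of this diff. One plausible shape of such a function, shown purely as an assumption to illustrate the "divide a fixed budget by the stream count and clamp" idea:

```rust
// Hypothetical sketch only: the real compute_window_for_stream_count is not
// shown in this diff, and the constants and clamping here are assumptions,
// not the crate's values. It illustrates keeping total in-flight data within
// a fixed budget as the number of concurrent streams grows.
const TOTAL_BUDGET: u32 = 32 * 1024 * 1024; // 32MB across all streams
const MIN_WINDOW: u32 = 64 * 1024;          // floor so a stream can always make progress
const MAX_WINDOW: u32 = 4 * 1024 * 1024;    // ceiling for a lightly loaded tunnel

fn compute_window_for_stream_count(active_streams: u32) -> u32 {
    let share = TOTAL_BUDGET / active_streams.max(1);
    share.clamp(MIN_WINDOW, MAX_WINDOW)
}

fn main() {
    assert_eq!(compute_window_for_stream_count(0), MAX_WINDOW);
    assert_eq!(compute_window_for_stream_count(8), MAX_WINDOW);
    assert_eq!(compute_window_for_stream_count(64), 512 * 1024);
    // With very many streams every stream still gets the minimum window,
    // so each one can keep moving even under heavy churn.
    assert_eq!(compute_window_for_stream_count(1024), MIN_WINDOW);
}
```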
@@ -793,10 +800,16 @@ async fn handle_client_connection(
                     if consumed_since_update >= threshold {
                         let increment = consumed_since_update.min(adaptive_window);
                         let frame = encode_window_update(stream_id, FRAME_WINDOW_UPDATE, increment);
-                        if wu_tx.try_send(frame).is_ok() {
-                            consumed_since_update -= increment;
+                        // Use send().await for guaranteed delivery — dropping WINDOW_UPDATEs
+                        // causes permanent flow stalls. Safe: runs in per-stream task, not main loop.
+                        tokio::select! {
+                            result = wu_tx.send(frame) => {
+                                if result.is_ok() {
+                                    consumed_since_update -= increment;
+                                }
+                            }
+                            _ = hub_to_client_token.cancelled() => break,
                         }
-                        // If try_send fails, keep accumulating — retry on next threshold
                     }
                 }
                 None => break,
@@ -808,20 +821,29 @@ async fn handle_client_connection(
         // Send final window update for any remaining consumed bytes
         if consumed_since_update > 0 {
             let frame = encode_window_update(stream_id, FRAME_WINDOW_UPDATE, consumed_since_update);
-            let _ = wu_tx.try_send(frame);
+            tokio::select! {
+                _ = wu_tx.send(frame) => {}
+                _ = hub_to_client_token.cancelled() => {}
+            }
         }
         let _ = client_write.shutdown().await;
     });
 
-    // Task: client -> hub (upload direction) with per-stream flow control
-    let mut buf = vec![0u8; 32768];
+    // Task: client -> hub (upload direction) with per-stream flow control.
+    // Zero-copy: read payload directly after the header, then prepend header.
+    let mut buf = vec![0u8; FRAME_HEADER_SIZE + 32768];
     loop {
-        // Wait for send window to have capacity (with stall timeout)
+        // Wait for send window to have capacity (with stall timeout).
+        // Safe pattern: register notified BEFORE checking the condition
+        // to avoid missing a notify_one that fires between load and select.
         loop {
+            let notified = window_notify.notified();
+            tokio::pin!(notified);
+            notified.as_mut().enable();
             let w = send_window.load(Ordering::Acquire);
             if w > 0 { break; }
             tokio::select! {
-                _ = window_notify.notified() => continue,
+                _ = notified => continue,
                 _ = client_token.cancelled() => break,
                 _ = tokio::time::sleep(Duration::from_secs(120)) => {
                     log::warn!("Stream {} upload stalled (window empty for 120s)", stream_id);
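The register-before-check comments above describe a standard lost-wakeup guard around `tokio::sync::Notify`. A minimal, self-contained sketch of the same pattern outside the tunnel code:

```rust
// Minimal sketch of the "register before checking" pattern used above with
// tokio::sync::Notify. Creating the Notified future and calling enable()
// *before* re-checking the condition guarantees that a notify_one() which
// fires between the check and the await is not lost.
use std::sync::Arc;
use std::sync::atomic::{AtomicU32, Ordering};
use tokio::sync::Notify;

#[tokio::main]
async fn main() {
    let window = Arc::new(AtomicU32::new(0));
    let notify = Arc::new(Notify::new());

    let w = window.clone();
    let n = notify.clone();
    tokio::spawn(async move {
        // Some other task grants capacity and signals the waiter.
        tokio::time::sleep(std::time::Duration::from_millis(50)).await;
        w.store(1024, Ordering::Release);
        n.notify_one();
    });

    loop {
        let notified = notify.notified();
        tokio::pin!(notified);
        notified.as_mut().enable(); // register interest first
        if window.load(Ordering::Acquire) > 0 {
            break; // capacity already there, no need to wait
        }
        notified.await; // cannot miss a notify_one sent after enable()
    }
    println!("window = {}", window.load(Ordering::Acquire));
}
```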
@@ -844,19 +866,21 @@ async fn handle_client_connection(
         let adaptive_cap = remoteingress_protocol::compute_window_for_stream_count(
             active_streams.load(Ordering::Relaxed),
         ) as usize;
-        let max_read = w.min(buf.len()).min(adaptive_cap);
+        let max_read = w.min(32768).min(adaptive_cap);
 
         tokio::select! {
-            read_result = client_read.read(&mut buf[..max_read]) => {
+            read_result = client_read.read(&mut buf[FRAME_HEADER_SIZE..FRAME_HEADER_SIZE + max_read]) => {
                 match read_result {
                     Ok(0) => break,
                     Ok(n) => {
                         send_window.fetch_sub(n as u32, Ordering::Release);
-                        let data_frame = encode_frame(stream_id, FRAME_DATA, &buf[..n]);
-                        if tunnel_data_tx.send(data_frame).await.is_err() {
-                            log::warn!("Stream {} data channel closed, closing", stream_id);
-                            break;
-                        }
+                        encode_frame_header(&mut buf, stream_id, FRAME_DATA, n);
+                        let data_frame = buf[..FRAME_HEADER_SIZE + n].to_vec();
+                        let sent = tokio::select! {
+                            result = tunnel_data_tx.send(data_frame) => result.is_ok(),
+                            _ = client_token.cancelled() => false,
+                        };
+                        if !sent { break; }
                     }
                     Err(_) => break,
                 }
@@ -877,9 +901,13 @@ async fn handle_client_connection(
     ).await;
 
     // NOW send CLOSE — the response has been fully delivered (or timed out).
+    // select! with cancellation guard prevents indefinite blocking if tunnel dies.
     if !client_token.is_cancelled() {
         let close_frame = encode_frame(stream_id, FRAME_CLOSE, &[]);
-        let _ = tunnel_data_tx.send(close_frame).await;
+        tokio::select! {
+            _ = tunnel_data_tx.send(close_frame) => {}
+            _ = client_token.cancelled() => {}
+        }
     }
 
     // Clean up
(File diff suppressed because it is too large.)
@@ -72,6 +72,16 @@ pub fn encode_frame(stream_id: u32, frame_type: u8, payload: &[u8]) -> Vec<u8> {
     buf
 }
 
+/// Write a frame header into `buf[0..FRAME_HEADER_SIZE]`.
+/// The caller must ensure payload is already at `buf[FRAME_HEADER_SIZE..FRAME_HEADER_SIZE + payload_len]`.
+/// This enables zero-copy encoding: read directly into `buf[FRAME_HEADER_SIZE..]`, then
+/// prepend the header without copying the payload.
+pub fn encode_frame_header(buf: &mut [u8], stream_id: u32, frame_type: u8, payload_len: usize) {
+    buf[0..4].copy_from_slice(&stream_id.to_be_bytes());
+    buf[4] = frame_type;
+    buf[5..9].copy_from_slice(&(payload_len as u32).to_be_bytes());
+}
+
 /// Build a PROXY protocol v1 header line.
 /// Format: `PROXY TCP4 <client_ip> <edge_ip> <client_port> <dest_port>\r\n`
 pub fn build_proxy_v1_header(
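The doc comment on `encode_frame_header` spells out the zero-copy contract. A self-contained usage sketch of that contract (the 9-byte header layout is copied from the hunk above; the `FRAME_DATA` value and the in-memory `Read` source are illustrative stand-ins):

```rust
// Self-contained illustration of the zero-copy path enabled by
// encode_frame_header: the payload is read directly into the region after the
// 9-byte header slot, then the header is written in place, so the payload is
// never copied into a second frame buffer. A &[u8] Read source stands in for
// the client socket.
use std::io::Read;

const FRAME_HEADER_SIZE: usize = 9;
const FRAME_DATA: u8 = 0x02; // illustrative value, not the crate's constant

fn encode_frame_header(buf: &mut [u8], stream_id: u32, frame_type: u8, payload_len: usize) {
    buf[0..4].copy_from_slice(&stream_id.to_be_bytes());
    buf[4] = frame_type;
    buf[5..9].copy_from_slice(&(payload_len as u32).to_be_bytes());
}

fn main() -> std::io::Result<()> {
    let mut source: &[u8] = b"payload bytes from the client socket";
    let mut buf = vec![0u8; FRAME_HEADER_SIZE + 32 * 1024];

    // Read straight into the payload region...
    let n = source.read(&mut buf[FRAME_HEADER_SIZE..])?;
    // ...then prepend the header in place and ship header + payload together.
    encode_frame_header(&mut buf, 7, FRAME_DATA, n);
    let frame = &buf[..FRAME_HEADER_SIZE + n];

    assert_eq!(&frame[FRAME_HEADER_SIZE..], &b"payload bytes from the client socket"[..]);
    assert_eq!(u32::from_be_bytes([frame[5], frame[6], frame[7], frame[8]]) as usize, n);
    Ok(())
}
```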
@@ -173,6 +183,21 @@ pub enum TunnelEvent {
     Cancelled,
 }
 
+/// Write state extracted into a sub-struct so the borrow checker can see
+/// disjoint field access between `self.write` and `self.stream`.
+struct WriteState {
+    ctrl_queue: VecDeque<Vec<u8>>, // PONG, WINDOW_UPDATE, CLOSE, OPEN — always first
+    data_queue: VecDeque<Vec<u8>>, // DATA, DATA_BACK — only when ctrl is empty
+    offset: usize,                 // progress within current frame being written
+    flush_needed: bool,
+}
+
+impl WriteState {
+    fn has_work(&self) -> bool {
+        !self.ctrl_queue.is_empty() || !self.data_queue.is_empty()
+    }
+}
+
 /// Single-owner I/O engine for the tunnel TLS connection.
 ///
 /// Owns the TLS stream directly — no `tokio::io::split()`, no mutex.
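The `WriteState` doc comment motivates the sub-struct with disjoint field borrows, which is what the old raw-pointer workaround (removed further down) was avoiding. A tiny stand-alone example of that borrow-checker behaviour, with placeholder types rather than the real TLS stream:

```rust
// Minimal sketch of the borrow-checker motivation stated above, outside the
// tunnel code. Field accesses like `self.write.queue` and `self.stream` are
// seen as disjoint, so a reference into the queue sub-struct can stay alive
// while the stream field is mutated. Types here are stand-ins, not the crate's.
use std::collections::VecDeque;

struct WriteState {
    queue: VecDeque<Vec<u8>>,
    offset: usize,
}

struct Engine {
    stream: Vec<u8>, // stands in for the TLS stream
    write: WriteState,
}

impl Engine {
    fn step(&mut self) {
        // Borrow a frame from self.write while mutably using self.stream:
        // fine, because the compiler sees two different fields of `self`.
        if let Some(frame) = self.write.queue.front() {
            let remaining = &frame[self.write.offset..];
            self.stream.extend_from_slice(remaining); // plays the role of poll_write
            self.write.offset = 0;
        }
        self.write.queue.pop_front();
    }
}

fn main() {
    let mut engine = Engine {
        stream: Vec::new(),
        write: WriteState { queue: VecDeque::from([b"frame".to_vec()]), offset: 0 },
    };
    engine.step();
    assert_eq!(engine.stream, b"frame".to_vec());
}
```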
@@ -184,11 +209,9 @@ pub struct TunnelIo<S> {
     // Read state: accumulate bytes, parse frames incrementally
     read_buf: Vec<u8>,
     read_pos: usize,
-    // Write state: dual priority queues
-    ctrl_queue: VecDeque<Vec<u8>>, // PONG, WINDOW_UPDATE, CLOSE, OPEN — always first
-    data_queue: VecDeque<Vec<u8>>, // DATA, DATA_BACK — only when ctrl is empty
-    write_offset: usize, // progress within current frame being written
-    flush_needed: bool,
+    parse_pos: usize,
+    // Write state: extracted sub-struct for safe disjoint borrows
+    write: WriteState,
 }
 
 impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {
@@ -202,42 +225,52 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {
             stream,
             read_buf,
             read_pos,
-            ctrl_queue: VecDeque::new(),
-            data_queue: VecDeque::new(),
-            write_offset: 0,
-            flush_needed: false,
+            parse_pos: 0,
+            write: WriteState {
+                ctrl_queue: VecDeque::new(),
+                data_queue: VecDeque::new(),
+                offset: 0,
+                flush_needed: false,
+            },
         }
     }
 
     /// Queue a high-priority control frame (PONG, WINDOW_UPDATE, CLOSE, OPEN).
     pub fn queue_ctrl(&mut self, frame: Vec<u8>) {
-        self.ctrl_queue.push_back(frame);
+        self.write.ctrl_queue.push_back(frame);
    }
 
     /// Queue a lower-priority data frame (DATA, DATA_BACK).
     pub fn queue_data(&mut self, frame: Vec<u8>) {
-        self.data_queue.push_back(frame);
+        self.write.data_queue.push_back(frame);
    }
 
     /// Try to parse a complete frame from the read buffer.
+    /// Uses a parse_pos cursor to avoid drain() on every frame.
     pub fn try_parse_frame(&mut self) -> Option<Result<Frame, std::io::Error>> {
-        if self.read_pos < FRAME_HEADER_SIZE {
+        let available = self.read_pos - self.parse_pos;
+        if available < FRAME_HEADER_SIZE {
             return None;
         }
 
+        let base = self.parse_pos;
         let stream_id = u32::from_be_bytes([
-            self.read_buf[0], self.read_buf[1], self.read_buf[2], self.read_buf[3],
+            self.read_buf[base], self.read_buf[base + 1],
+            self.read_buf[base + 2], self.read_buf[base + 3],
         ]);
-        let frame_type = self.read_buf[4];
+        let frame_type = self.read_buf[base + 4];
         let length = u32::from_be_bytes([
-            self.read_buf[5], self.read_buf[6], self.read_buf[7], self.read_buf[8],
+            self.read_buf[base + 5], self.read_buf[base + 6],
+            self.read_buf[base + 7], self.read_buf[base + 8],
        ]);
 
         if length > MAX_PAYLOAD_SIZE {
             let header = [
-                self.read_buf[0], self.read_buf[1], self.read_buf[2], self.read_buf[3],
-                self.read_buf[4], self.read_buf[5], self.read_buf[6], self.read_buf[7],
-                self.read_buf[8],
+                self.read_buf[base], self.read_buf[base + 1],
+                self.read_buf[base + 2], self.read_buf[base + 3],
+                self.read_buf[base + 4], self.read_buf[base + 5],
+                self.read_buf[base + 6], self.read_buf[base + 7],
+                self.read_buf[base + 8],
             ];
             log::error!(
                 "CORRUPT FRAME HEADER: raw={:02x?} stream_id={} type=0x{:02x} length={}",
@@ -250,21 +283,23 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {
         }
 
         let total_frame_size = FRAME_HEADER_SIZE + length as usize;
-        if self.read_pos < total_frame_size {
+        if available < total_frame_size {
             return None;
         }
 
-        let payload = self.read_buf[FRAME_HEADER_SIZE..total_frame_size].to_vec();
-        self.read_buf.drain(..total_frame_size);
-        self.read_pos -= total_frame_size;
+        let payload = self.read_buf[base + FRAME_HEADER_SIZE..base + total_frame_size].to_vec();
+        self.parse_pos += total_frame_size;
+
+        // Compact when parse_pos > half the data to reclaim memory
+        if self.parse_pos > self.read_pos / 2 && self.parse_pos > 0 {
+            self.read_buf.drain(..self.parse_pos);
+            self.read_pos -= self.parse_pos;
+            self.parse_pos = 0;
+        }
 
         Some(Ok(Frame { stream_id, frame_type, payload }))
     }
 
-    fn has_write_work(&self) -> bool {
-        !self.ctrl_queue.is_empty() || !self.data_queue.is_empty()
-    }
-
     /// Poll-based I/O step. Returns Ready on events, Pending when idle.
     ///
     /// Order: write(ctrl→data) → flush → read → channels → timers
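`try_parse_frame` now advances a `parse_pos` cursor and only compacts the buffer occasionally, instead of draining it once per frame. A stand-alone sketch of the cursor-plus-amortised-compaction idea on a toy record format (a 1-byte length prefix, not the tunnel's frame layout):

```rust
// Stand-alone sketch of the cursor-plus-compaction idea used by
// try_parse_frame above: consume records by advancing a cursor instead of
// draining the buffer per record, and only memmove the tail once the consumed
// prefix dominates, so the O(n) compaction cost is amortised.
struct Cursor {
    buf: Vec<u8>,
    read_pos: usize,  // bytes filled
    parse_pos: usize, // bytes already consumed
}

impl Cursor {
    fn next_record(&mut self) -> Option<Vec<u8>> {
        let available = self.read_pos - self.parse_pos;
        if available < 1 {
            return None;
        }
        let len = self.buf[self.parse_pos] as usize;
        if available < 1 + len {
            return None; // record not complete yet
        }
        let start = self.parse_pos + 1;
        let record = self.buf[start..start + len].to_vec();
        self.parse_pos += 1 + len;

        // Compact only when the consumed prefix outweighs the live tail.
        if self.parse_pos > self.read_pos / 2 {
            self.buf.drain(..self.parse_pos);
            self.read_pos -= self.parse_pos;
            self.parse_pos = 0;
        }
        Some(record)
    }
}

fn main() {
    let data = vec![3, b'a', b'b', b'c', 2, b'x', b'y'];
    let read_pos = data.len();
    let mut cursor = Cursor { buf: data, read_pos, parse_pos: 0 };
    assert_eq!(cursor.next_record(), Some(b"abc".to_vec()));
    assert_eq!(cursor.next_record(), Some(b"xy".to_vec()));
    assert_eq!(cursor.next_record(), None);
}
```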
@@ -277,49 +312,56 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {
         cancel_token: &tokio_util::sync::CancellationToken,
     ) -> Poll<TunnelEvent> {
         // 1. WRITE: drain ctrl queue first, then data queue.
-        // TLS poll_write writes plaintext to session buffer (always Ready).
-        // Batch up to 16 frames per poll cycle.
+        // Write one frame, set flush_needed, then flush must complete before
+        // writing more. This prevents unbounded TLS session buffer growth.
+        // Safe: `self.write` and `self.stream` are disjoint fields.
         let mut writes = 0;
-        while self.has_write_work() && writes < 16 {
-            // Determine which queue to write from and the frame data.
-            // We access the queues via raw pointers to avoid borrow conflicts with self.stream.
-            let from_ctrl = !self.ctrl_queue.is_empty();
-            let frame_ptr: *const Vec<u8> = if from_ctrl {
-                self.ctrl_queue.front().unwrap()
+        while self.write.has_work() && writes < 16 && !self.write.flush_needed {
+            let from_ctrl = !self.write.ctrl_queue.is_empty();
+            let frame = if from_ctrl {
+                self.write.ctrl_queue.front().unwrap()
             } else {
-                self.data_queue.front().unwrap()
+                self.write.data_queue.front().unwrap()
             };
-            // SAFETY: the frame is not modified while we hold the pointer — poll_write
-            // only writes to self.stream, and advance_write only runs after poll_write returns.
-            let frame = unsafe { &*frame_ptr };
-            let remaining = &frame[self.write_offset..];
+            let remaining = &frame[self.write.offset..];
 
             match Pin::new(&mut self.stream).poll_write(cx, remaining) {
                 Poll::Ready(Ok(0)) => {
+                    log::error!("TunnelIo: poll_write returned 0 (write zero), ctrl_q={} data_q={}",
+                        self.write.ctrl_queue.len(), self.write.data_queue.len());
                     return Poll::Ready(TunnelEvent::WriteError(
                         std::io::Error::new(std::io::ErrorKind::WriteZero, "write zero"),
                     ));
                 }
                 Poll::Ready(Ok(n)) => {
-                    self.write_offset += n;
-                    self.flush_needed = true;
-                    if self.write_offset >= frame.len() {
-                        if from_ctrl { self.ctrl_queue.pop_front(); }
-                        else { self.data_queue.pop_front(); }
-                        self.write_offset = 0;
+                    self.write.offset += n;
+                    self.write.flush_needed = true;
+                    if self.write.offset >= frame.len() {
+                        if from_ctrl { self.write.ctrl_queue.pop_front(); }
+                        else { self.write.data_queue.pop_front(); }
+                        self.write.offset = 0;
                         writes += 1;
                     }
                 }
-                Poll::Ready(Err(e)) => return Poll::Ready(TunnelEvent::WriteError(e)),
+                Poll::Ready(Err(e)) => {
+                    log::error!("TunnelIo: poll_write error: {} (ctrl_q={} data_q={})",
+                        e, self.write.ctrl_queue.len(), self.write.data_queue.len());
+                    return Poll::Ready(TunnelEvent::WriteError(e));
+                }
                 Poll::Pending => break,
             }
         }
 
         // 2. FLUSH: push encrypted data from TLS session to TCP.
-        if self.flush_needed {
+        if self.write.flush_needed {
             match Pin::new(&mut self.stream).poll_flush(cx) {
-                Poll::Ready(Ok(())) => self.flush_needed = false,
-                Poll::Ready(Err(e)) => return Poll::Ready(TunnelEvent::WriteError(e)),
+                Poll::Ready(Ok(())) => {
+                    self.write.flush_needed = false;
+                }
+                Poll::Ready(Err(e)) => {
+                    log::error!("TunnelIo: poll_flush error: {}", e);
+                    return Poll::Ready(TunnelEvent::WriteError(e));
+                }
                 Poll::Pending => {} // TCP waker will notify us
             }
         }
@@ -329,6 +371,12 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {
         // the waker without re-registering it, causing the task to sleep until a
         // timer or channel wakes it (potentially 15+ seconds of lost reads).
         loop {
+            // Compact if needed to make room for reads
+            if self.parse_pos > 0 && self.read_buf.len() - self.read_pos < 32768 {
+                self.read_buf.drain(..self.parse_pos);
+                self.read_pos -= self.parse_pos;
+                self.parse_pos = 0;
+            }
             if self.read_buf.len() < self.read_pos + 32768 {
                 self.read_buf.resize(self.read_pos + 32768, 0);
             }
@@ -349,16 +397,23 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {
                     // Partial data — loop to call poll_read again so the TCP
                     // waker is re-registered when it finally returns Pending.
                 }
-                Poll::Ready(Err(e)) => return Poll::Ready(TunnelEvent::ReadError(e)),
+                Poll::Ready(Err(e)) => {
+                    log::error!("TunnelIo: poll_read error: {}", e);
+                    return Poll::Ready(TunnelEvent::ReadError(e));
+                }
                 Poll::Pending => break,
             }
         }
 
-        // 4. CHANNELS: drain ctrl into ctrl_queue, data into data_queue.
+        // 4. CHANNELS: drain ctrl (always — priority), data (only if queue is small).
+        // Ctrl frames must never be delayed — always drain fully.
+        // Data frames are gated: keep data in the bounded channel for proper
+        // backpressure when TLS writes are slow. Without this gate, the internal
+        // data_queue (unbounded VecDeque) grows to hundreds of MB under throttle → OOM.
         let mut got_new = false;
         loop {
             match ctrl_rx.poll_recv(cx) {
-                Poll::Ready(Some(frame)) => { self.ctrl_queue.push_back(frame); got_new = true; }
+                Poll::Ready(Some(frame)) => { self.write.ctrl_queue.push_back(frame); got_new = true; }
                 Poll::Ready(None) => {
                     return Poll::Ready(TunnelEvent::WriteError(
                         std::io::Error::new(std::io::ErrorKind::BrokenPipe, "ctrl channel closed"),
@@ -367,15 +422,17 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {
                 Poll::Pending => break,
             }
         }
-        loop {
-            match data_rx.poll_recv(cx) {
-                Poll::Ready(Some(frame)) => { self.data_queue.push_back(frame); got_new = true; }
-                Poll::Ready(None) => {
-                    return Poll::Ready(TunnelEvent::WriteError(
-                        std::io::Error::new(std::io::ErrorKind::BrokenPipe, "data channel closed"),
-                    ));
-                }
-                Poll::Pending => break,
-            }
-        }
+        if self.write.data_queue.len() < 64 {
+            loop {
+                match data_rx.poll_recv(cx) {
+                    Poll::Ready(Some(frame)) => { self.write.data_queue.push_back(frame); got_new = true; }
+                    Poll::Ready(None) => {
+                        return Poll::Ready(TunnelEvent::WriteError(
+                            std::io::Error::new(std::io::ErrorKind::BrokenPipe, "data channel closed"),
+                        ));
+                    }
+                    Poll::Pending => break,
+                }
+            }
+        }
@@ -387,10 +444,12 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {
             return Poll::Ready(TunnelEvent::Cancelled);
         }
 
-        // 6. SELF-WAKE: only when we have frames AND flush is done.
-        // If flush is pending, the TCP write-readiness waker will notify us.
-        // If we got new channel frames, wake to write them.
-        if got_new || (!self.flush_needed && self.has_write_work()) {
+        // 6. SELF-WAKE: only when flush is complete AND we have work.
+        // When flush is Pending, the TCP write-readiness waker will notify us.
+        // CRITICAL: do NOT self-wake when flush_needed — poll_write always returns
+        // Ready (TLS buffers in-memory), so self-waking causes a tight spin loop
+        // that fills the TLS session buffer unboundedly -> OOM -> ECONNRESET.
+        if !self.write.flush_needed && (got_new || self.write.has_work()) {
             cx.waker().wake_by_ref();
         }
@@ -406,6 +465,22 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {
 mod tests {
     use super::*;
 
+    #[test]
+    fn test_encode_frame_header() {
+        let payload = b"hello";
+        let mut buf = vec![0u8; FRAME_HEADER_SIZE + payload.len()];
+        buf[FRAME_HEADER_SIZE..].copy_from_slice(payload);
+        encode_frame_header(&mut buf, 42, FRAME_DATA, payload.len());
+        assert_eq!(buf, encode_frame(42, FRAME_DATA, payload));
+    }
+
+    #[test]
+    fn test_encode_frame_header_empty_payload() {
+        let mut buf = vec![0u8; FRAME_HEADER_SIZE];
+        encode_frame_header(&mut buf, 99, FRAME_CLOSE, 0);
+        assert_eq!(buf, encode_frame(99, FRAME_CLOSE, &[]));
+    }
+
     #[test]
     fn test_encode_frame() {
         let data = b"hello";
test/test.loadtest.node.ts (new file, 402 lines)
import { expect, tap } from '@push.rocks/tapbundle';
import * as net from 'net';
import * as stream from 'stream';
import * as crypto from 'crypto';
import { RemoteIngressHub, RemoteIngressEdge } from '../ts/index.js';

// ---------------------------------------------------------------------------
// Helpers (self-contained — same patterns as test.flowcontrol.node.ts)
// ---------------------------------------------------------------------------

async function findFreePorts(count: number): Promise<number[]> {
  const servers: net.Server[] = [];
  const ports: number[] = [];
  for (let i = 0; i < count; i++) {
    const server = net.createServer();
    await new Promise<void>((resolve) => server.listen(0, '127.0.0.1', resolve));
    ports.push((server.address() as net.AddressInfo).port);
    servers.push(server);
  }
  await Promise.all(servers.map((s) => new Promise<void>((resolve) => s.close(() => resolve()))));
  return ports;
}

type TrackingServer = net.Server & { destroyAll: () => void };

function startEchoServer(port: number, host: string): Promise<TrackingServer> {
  return new Promise((resolve, reject) => {
    const connections = new Set<net.Socket>();
    const server = net.createServer((socket) => {
      connections.add(socket);
      socket.on('close', () => connections.delete(socket));
      let proxyHeaderParsed = false;
      let pendingBuf = Buffer.alloc(0);
      socket.on('data', (data: Buffer) => {
        if (!proxyHeaderParsed) {
          pendingBuf = Buffer.concat([pendingBuf, data]);
          const idx = pendingBuf.indexOf('\r\n');
          if (idx !== -1) {
            proxyHeaderParsed = true;
            const remainder = pendingBuf.subarray(idx + 2);
            if (remainder.length > 0) socket.write(remainder);
          }
          return;
        }
        socket.write(data);
      });
      socket.on('error', () => {});
    }) as TrackingServer;
    server.destroyAll = () => {
      for (const conn of connections) conn.destroy();
      connections.clear();
    };
    server.on('error', reject);
    server.listen(port, host, () => resolve(server));
  });
}

function sendAndReceive(port: number, data: Buffer, timeoutMs = 30000): Promise<Buffer> {
  return new Promise((resolve, reject) => {
    const chunks: Buffer[] = [];
    let totalReceived = 0;
    const expectedLength = data.length;
    let settled = false;

    const client = net.createConnection({ host: '127.0.0.1', port }, () => {
      client.write(data);
      client.end();
    });

    const timer = setTimeout(() => {
      if (!settled) {
        settled = true;
        client.destroy();
        reject(new Error(`Timeout after ${timeoutMs}ms — received ${totalReceived}/${expectedLength} bytes`));
      }
    }, timeoutMs);

    client.on('data', (chunk: Buffer) => {
      chunks.push(chunk);
      totalReceived += chunk.length;
      if (totalReceived >= expectedLength && !settled) {
        settled = true;
        clearTimeout(timer);
        client.destroy();
        resolve(Buffer.concat(chunks));
      }
    });

    client.on('end', () => {
      if (!settled) {
        settled = true;
        clearTimeout(timer);
        resolve(Buffer.concat(chunks));
      }
    });

    client.on('error', (err) => {
      if (!settled) {
        settled = true;
        clearTimeout(timer);
        reject(err);
      }
    });
  });
}

function sha256(buf: Buffer): string {
  return crypto.createHash('sha256').update(buf).digest('hex');
}

// ---------------------------------------------------------------------------
// Throttle Proxy: rate-limits TCP traffic between edge and hub
// ---------------------------------------------------------------------------

class ThrottleTransform extends stream.Transform {
  private bytesPerSec: number;
  private bucket: number;
  private lastRefill: number;
  private destroyed_: boolean = false;

  constructor(bytesPerSecond: number) {
    super();
    this.bytesPerSec = bytesPerSecond;
    this.bucket = bytesPerSecond;
    this.lastRefill = Date.now();
  }

  _transform(chunk: Buffer, _encoding: BufferEncoding, callback: stream.TransformCallback) {
    if (this.destroyed_) return;

    const now = Date.now();
    const elapsed = (now - this.lastRefill) / 1000;
    this.bucket = Math.min(this.bytesPerSec, this.bucket + elapsed * this.bytesPerSec);
    this.lastRefill = now;

    if (chunk.length <= this.bucket) {
      this.bucket -= chunk.length;
      callback(null, chunk);
    } else {
      // Not enough budget — delay the entire chunk (don't split)
      const deficit = chunk.length - this.bucket;
      this.bucket = 0;
      const delayMs = Math.min((deficit / this.bytesPerSec) * 1000, 1000);
      setTimeout(() => {
        if (this.destroyed_) { callback(); return; }
        this.lastRefill = Date.now();
        this.bucket = 0;
        callback(null, chunk);
      }, delayMs);
    }
  }

  _destroy(err: Error | null, callback: (error: Error | null) => void) {
    this.destroyed_ = true;
    callback(err);
  }
}

interface ThrottleProxy {
  server: net.Server;
  close: () => Promise<void>;
}

async function startThrottleProxy(
  listenPort: number,
  targetHost: string,
  targetPort: number,
  bytesPerSecond: number,
): Promise<ThrottleProxy> {
  const connections = new Set<net.Socket>();
  const server = net.createServer((clientSock) => {
    connections.add(clientSock);
    const upstream = net.createConnection({ host: targetHost, port: targetPort });
    connections.add(upstream);

    const throttleUp = new ThrottleTransform(bytesPerSecond);
    const throttleDown = new ThrottleTransform(bytesPerSecond);

    clientSock.pipe(throttleUp).pipe(upstream);
    upstream.pipe(throttleDown).pipe(clientSock);

    let cleaned = false;
    const cleanup = (source: string, err?: Error) => {
      if (cleaned) return;
      cleaned = true;
      if (err) {
        console.error(`[ThrottleProxy] cleanup triggered by ${source}: ${err.message}`);
      } else {
        console.error(`[ThrottleProxy] cleanup triggered by ${source} (no error)`);
      }
      console.error(`[ThrottleProxy] stack:`, new Error().stack);
      throttleUp.destroy();
      throttleDown.destroy();
      clientSock.destroy();
      upstream.destroy();
      connections.delete(clientSock);
      connections.delete(upstream);
    };
    clientSock.on('error', (e) => cleanup('clientSock.error', e));
    upstream.on('error', (e) => cleanup('upstream.error', e));
    throttleUp.on('error', (e) => cleanup('throttleUp.error', e));
    throttleDown.on('error', (e) => cleanup('throttleDown.error', e));
    clientSock.on('close', () => cleanup('clientSock.close'));
    upstream.on('close', () => cleanup('upstream.close'));
  });

  await new Promise<void>((resolve) => server.listen(listenPort, '127.0.0.1', resolve));
  return {
    server,
    close: async () => {
      for (const c of connections) c.destroy();
      connections.clear();
      await new Promise<void>((resolve) => server.close(() => resolve()));
    },
  };
}

// ---------------------------------------------------------------------------
// Test state
// ---------------------------------------------------------------------------

let hub: RemoteIngressHub;
let edge: RemoteIngressEdge;
let echoServer: TrackingServer;
let throttle: ThrottleProxy;
let hubPort: number;
let proxyPort: number;
let edgePort: number;

// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------

tap.test('setup: start throttled tunnel (100 Mbit/s)', async () => {
  [hubPort, proxyPort, edgePort] = await findFreePorts(3);

  echoServer = await startEchoServer(edgePort, '127.0.0.2');

  // Throttle proxy: edge → proxy → hub at 100 Mbit/s (12.5 MB/s)
  throttle = await startThrottleProxy(proxyPort, '127.0.0.1', hubPort, 12.5 * 1024 * 1024);

  hub = new RemoteIngressHub();
  edge = new RemoteIngressEdge();

  await hub.start({ tunnelPort: hubPort, targetHost: '127.0.0.2' });
  await hub.updateAllowedEdges([
    { id: 'test-edge', secret: 'test-secret', listenPorts: [edgePort] },
  ]);

  const connectedPromise = new Promise<void>((resolve, reject) => {
    const timeout = setTimeout(() => reject(new Error('Edge did not connect within 10s')), 10000);
    edge.once('tunnelConnected', () => {
      clearTimeout(timeout);
      resolve();
    });
  });

  // Edge connects through throttle proxy
  await edge.start({
    hubHost: '127.0.0.1',
    hubPort: proxyPort,
    edgeId: 'test-edge',
    secret: 'test-secret',
    bindAddress: '127.0.0.1',
  });

  await connectedPromise;
  await new Promise((resolve) => setTimeout(resolve, 500));

  const status = await edge.getStatus();
  expect(status.connected).toBeTrue();
});

tap.test('throttled: 5 streams x 20MB each through 100Mbit tunnel', async () => {
  const streamCount = 5;
  const payloadSize = 20 * 1024 * 1024; // 20MB per stream = 100MB total round-trip

  const payloads = Array.from({ length: streamCount }, () => crypto.randomBytes(payloadSize));
  const promises = payloads.map((data) => {
    const hash = sha256(data);
    return sendAndReceive(edgePort, data, 300000).then((received) => ({
      sent: hash,
      received: sha256(received),
      sizeOk: received.length === payloadSize,
    }));
  });

  const results = await Promise.all(promises);
  const failures = results.filter((r) => !r.sizeOk || r.sent !== r.received);
  expect(failures.length).toEqual(0);

  const status = await edge.getStatus();
  expect(status.connected).toBeTrue();
});

tap.test('throttled: slow consumer with 20MB does not kill other streams', async () => {
  // Open a connection that creates download-direction backpressure:
  // send 20MB but DON'T read the response — client TCP receive buffer fills
  const slowSock = net.createConnection({ host: '127.0.0.1', port: edgePort });
  await new Promise<void>((resolve) => slowSock.on('connect', resolve));
  const slowData = crypto.randomBytes(20 * 1024 * 1024);
  slowSock.write(slowData);
  slowSock.end();
  // Don't read — backpressure builds on the download path

  // Wait for backpressure to develop
  await new Promise((r) => setTimeout(r, 2000));

  // Meanwhile, 5 normal echo streams with 20MB each must complete
  const payload = crypto.randomBytes(20 * 1024 * 1024);
  const hash = sha256(payload);
  const promises = Array.from({ length: 5 }, () =>
    sendAndReceive(edgePort, payload, 300000).then((r) => ({
      hash: sha256(r),
      sizeOk: r.length === payload.length,
    }))
  );
  const results = await Promise.all(promises);
  const failures = results.filter((r) => !r.sizeOk || r.hash !== hash);
  expect(failures.length).toEqual(0);

  // Tunnel still alive
  const status = await edge.getStatus();
  expect(status.connected).toBeTrue();

  slowSock.destroy();
});

tap.test('throttled: rapid churn — 3 x 20MB long + 50 x 1MB short streams', async () => {
  // 3 long streams (20MB each) running alongside 50 short streams (1MB each)
  const longPayload = crypto.randomBytes(20 * 1024 * 1024);
  const longHash = sha256(longPayload);
  const longPromises = Array.from({ length: 3 }, () =>
    sendAndReceive(edgePort, longPayload, 300000).then((r) => ({
      hash: sha256(r),
      sizeOk: r.length === longPayload.length,
    }))
  );

  const shortPayload = crypto.randomBytes(1024 * 1024);
  const shortHash = sha256(shortPayload);
  const shortPromises = Array.from({ length: 50 }, () =>
    sendAndReceive(edgePort, shortPayload, 300000).then((r) => ({
      hash: sha256(r),
      sizeOk: r.length === shortPayload.length,
    }))
  );

  const [longResults, shortResults] = await Promise.all([
    Promise.all(longPromises),
    Promise.all(shortPromises),
  ]);

  const longFails = longResults.filter((r) => !r.sizeOk || r.hash !== longHash);
  const shortFails = shortResults.filter((r) => !r.sizeOk || r.hash !== shortHash);
  expect(longFails.length).toEqual(0);
  expect(shortFails.length).toEqual(0);

  const status = await edge.getStatus();
  expect(status.connected).toBeTrue();
});

tap.test('throttled: 3 burst waves of 5 streams x 20MB each', async () => {
  for (let wave = 0; wave < 3; wave++) {
    const streamCount = 5;
    const payloadSize = 20 * 1024 * 1024; // 20MB per stream = 100MB per wave

    const promises = Array.from({ length: streamCount }, () => {
      const data = crypto.randomBytes(payloadSize);
      return sendAndReceive(edgePort, data, 300000).then((r) => r.length === payloadSize);
    });

    const results = await Promise.all(promises);
    const ok = results.filter(Boolean).length;
    expect(ok).toEqual(streamCount);

    // Brief pause between waves
    await new Promise((r) => setTimeout(r, 500));

    const status = await edge.getStatus();
    expect(status.connected).toBeTrue();
  }
});

tap.test('throttled: tunnel still works after all load tests', async () => {
  const data = crypto.randomBytes(1024);
  const hash = sha256(data);
  const received = await sendAndReceive(edgePort, data, 30000);
  expect(sha256(received)).toEqual(hash);

  const status = await edge.getStatus();
  expect(status.connected).toBeTrue();
});

tap.test('teardown: stop tunnel', async () => {
  await edge.stop();
  await hub.stop();
  if (throttle) await throttle.close();
  await new Promise<void>((resolve) => echoServer.close(() => resolve()));
});

export default tap.start();
@@ -3,6 +3,6 @@
  */
 export const commitinfo = {
   name: '@serve.zone/remoteingress',
-  version: '4.8.2',
+  version: '4.8.16',
   description: 'Edge ingress tunnel for DcRouter - accepts incoming TCP connections at network edge and tunnels them to DcRouter SmartProxy preserving client IP via PROXY protocol v1.'
 }