Compare commits
25 Commits
- d89d1cfbbf
- 6cbe8bee5e
- a63247af3e
- 28a0c769d9
- ce7ccd83dc
- 93578d7034
- 4cfc518301
- 124df129ec
- 0b8420aac9
- afd193336a
- e8d429f117
- 3c2299430a
- 8b5df9a0b7
- 236d6d16ee
- 81bbb33016
- 79af6fd425
- f71b2f1876
- 0161a2589c
- bfd9e58b4f
- 9a8760c18d
- c77caa89fc
- 04586aab39
- f9a739858d
- da01fbeecd
- 264e8eeb97
changelog.md (+76 lines) — the following entries are prepended under the existing "# Changelog" heading:
## 2026-03-18 - 4.9.0 - feat(protocol)

add sustained-stream tunnel scheduling to isolate high-throughput traffic

- Introduce a third low-priority sustained queue in TunnelIo with a forced drain budget to prevent long-lived high-bandwidth streams from starving control and normal data frames.
- Classify upload and download streams as sustained after exceeding the throughput threshold for the minimum duration, and route their DATA and CLOSE frames through the sustained channel.
- Wire the new sustained channel through edge and hub stream handling so sustained traffic is scheduled consistently on both sides of the tunnel.
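The scheduling scheme is easy to model in isolation. Below is a minimal, self-contained sketch of the three-tier queue pick; the queue names follow the TunnelIo diff further down, but note the real engine writes the front frame incrementally via `poll_write` instead of popping eagerly, and adds a forced-drain budget so the sustained tier is never fully starved.

```rust
use std::collections::VecDeque;

/// Minimal model of the three-tier write scheduling: control frames always
/// drain first, then normal data, then sustained (elephant-flow) data.
struct WriteState {
    ctrl_queue: VecDeque<&'static str>,
    data_queue: VecDeque<&'static str>,
    sustained_queue: VecDeque<&'static str>,
}

impl WriteState {
    /// Pick the next frame in strict priority order: ctrl > data > sustained.
    fn next(&mut self) -> Option<&'static str> {
        self.ctrl_queue
            .pop_front()
            .or_else(|| self.data_queue.pop_front())
            .or_else(|| self.sustained_queue.pop_front())
    }
}

fn main() {
    let mut w = WriteState {
        ctrl_queue: VecDeque::from(["WINDOW_UPDATE"]),
        data_queue: VecDeque::from(["DATA"]),
        sustained_queue: VecDeque::from(["DATA (sustained)"]),
    };
    // Drains in priority order regardless of arrival order.
    assert_eq!(w.next(), Some("WINDOW_UPDATE"));
    assert_eq!(w.next(), Some("DATA"));
    assert_eq!(w.next(), Some("DATA (sustained)"));
}
```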
## 2026-03-18 - 4.8.19 - fix(remoteingress-protocol)
|
||||||
|
reduce per-stream flow control windows and increase control channel buffering
|
||||||
|
|
||||||
|
- Lower the initial and maximum per-stream window from 16MB to 4MB and scale adaptive windows against a 200MB total budget with a 1MB minimum.
|
||||||
|
- Increase edge and hub control frame channel capacity from 256 to 512 to better handle prioritized control traffic.
|
||||||
|
- Update flow-control tests and comments to reflect the new window sizing and budget behavior.
|
||||||
|
|
||||||
|
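Condensed from the protocol diff below, the new sizing divides a ~200MB budget across active streams and clamps the result between the 1MB floor and the 4MB per-stream maximum; the assertions sketch the boundary behavior (up to 50 streams get the full window, 200+ hit the floor):

```rust
const INITIAL_STREAM_WINDOW: u32 = 4 * 1024 * 1024;

/// Adaptive per-stream window sizing as changed in this release
/// (mirrors compute_window_for_stream_count in the protocol diff below).
fn compute_window_for_stream_count(active: u32) -> u32 {
    let per_stream = (200 * 1024 * 1024u64) / (active.max(1) as u64);
    per_stream.clamp(1024 * 1024, INITIAL_STREAM_WINDOW as u64) as u32
}

fn main() {
    assert_eq!(compute_window_for_stream_count(1), 4 * 1024 * 1024);   // capped at 4MB
    assert_eq!(compute_window_for_stream_count(50), 4 * 1024 * 1024);  // 200MB / 50 = 4MB
    assert_eq!(compute_window_for_stream_count(100), 2 * 1024 * 1024); // scales down
    assert_eq!(compute_window_for_stream_count(400), 1024 * 1024);     // 1MB floor
}
```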
## 2026-03-17 - 4.8.18 - fix(rust-protocol)

switch tunnel frame buffers from Vec&lt;u8&gt; to Bytes to reduce copying and memory overhead

- Add the bytes crate to core and protocol crates
- Update frame encoding, reader payloads, channel queues, and stream backchannels to use Bytes
- Adjust edge and hub data/control paths to send framed payloads as Bytes
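The shape of the change shows in the frame encoder, reproduced from the protocol diff below as a self-contained snippet (it requires the `bytes` crate this release adds). A `Bytes` handle is immutable and reference-counted, so queuing or cloning a frame no longer copies the payload:

```rust
use bytes::{BufMut, Bytes, BytesMut};

const FRAME_HEADER_SIZE: usize = 9;

/// Frame encoding after the Bytes migration: build once in a BytesMut,
/// then freeze() into an immutable, cheaply-cloneable Bytes handle.
/// Layout: [stream_id:4][type:1][length:4][payload]
pub fn encode_frame(stream_id: u32, frame_type: u8, payload: &[u8]) -> Bytes {
    let len = payload.len() as u32;
    let mut buf = BytesMut::with_capacity(FRAME_HEADER_SIZE + payload.len());
    buf.put_slice(&stream_id.to_be_bytes());
    buf.put_u8(frame_type);
    buf.put_slice(&len.to_be_bytes());
    buf.put_slice(payload);
    buf.freeze()
}

fn main() {
    let f = encode_frame(7, 0x03, b"hi");
    assert_eq!(f.len(), FRAME_HEADER_SIZE + 2);
    assert_eq!(&f[..4], &7u32.to_be_bytes()[..]); // stream_id, big-endian
    assert_eq!(f[4], 0x03);                       // frame type
    // Cloning a Bytes bumps a refcount; the payload is not copied.
    let f2 = f.clone();
    assert_eq!(f, f2);
}
```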
## 2026-03-17 - 4.8.17 - fix(protocol)

increase per-stream flow control windows and remove adaptive read caps

- Raise the initial per-stream window from 4MB to 16MB and expand the adaptive window budget to 800MB with a 4MB floor
- Stop limiting edge and hub reads by the adaptive per-stream target window, keeping reads capped only by the current window and 32KB chunk size
- Update protocol tests to match the new adaptive window scaling and budget boundaries
## 2026-03-17 - 4.8.16 - fix(release)

bump package version to 4.8.15

- Updates the package.json version field from 4.8.13 to 4.8.15.
## 2026-03-17 - 4.8.13 - fix(remoteingress-protocol)

require a flush after each written frame to bound TLS buffer growth

- Remove the unflushed byte threshold and stop queueing additional writes while a flush is pending
- Simplify write and flush error logging after dropping unflushed byte tracking
- Update tunnel I/O comments to reflect the stricter flush behavior that avoids OOM and connection resets
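A minimal synchronous model of the flush gate (in the real code the gate is the same `flush_needed` flag checked inside `TunnelIo::poll_step`, and the flush is `poll_flush` on the TLS stream):

```rust
use std::collections::VecDeque;

/// Model of the stricter flush discipline: after writing one frame, no
/// further frames are accepted until a flush completes. This bounds how
/// much plaintext the TLS session buffer can accumulate.
struct Writer {
    queue: VecDeque<Vec<u8>>,
    flush_needed: bool,
}

impl Writer {
    /// Write at most one queued frame; refuses while a flush is pending.
    fn try_write(&mut self, wire: &mut Vec<u8>) -> bool {
        if self.flush_needed {
            return false; // gate: previous frame not yet flushed
        }
        if let Some(frame) = self.queue.pop_front() {
            wire.extend_from_slice(&frame);
            self.flush_needed = true;
            return true;
        }
        false
    }

    /// Complete the pending flush, re-opening the write gate.
    fn flush(&mut self) {
        self.flush_needed = false;
    }
}

fn main() {
    let mut w = Writer {
        queue: VecDeque::from([vec![1u8, 2], vec![3]]),
        flush_needed: false,
    };
    let mut wire = Vec::new();
    assert!(w.try_write(&mut wire));  // first frame goes out
    assert!(!w.try_write(&mut wire)); // blocked until flush
    w.flush();
    assert!(w.try_write(&mut wire));  // now the second frame
    assert_eq!(wire, vec![1, 2, 3]);
}
```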
## 2026-03-17 - 4.8.12 - fix(tunnel)

prevent tunnel backpressure buffering from exhausting memory and cancel stream handlers before TLS shutdown

- stop self-waking and writing new frames while a flush is pending to avoid unbounded TLS session buffer growth under load
- reorder edge and hub shutdown cleanup so stream cancellation happens before TLS close_notify, preventing handlers from blocking on dead channels
- add load tests covering sustained large transfers, burst traffic, and rapid stream churn to verify tunnel stability
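The teardown ordering can be sketched against an in-memory duplex stream standing in for the TLS tunnel (tokio and tokio-util assumed, as the crates already depend on them; the real code calls `shutdown()` on the TLS stream to emit close_notify):

```rust
use std::time::Duration;
use tokio::io::AsyncWriteExt;
use tokio_util::sync::CancellationToken;

#[tokio::main]
async fn main() {
    let connection_token = CancellationToken::new();
    let (mut stream, _peer) = tokio::io::duplex(64);

    // 1. Cancel FIRST: handlers observing this token exit immediately,
    //    so none of them are left sending into dead channels.
    connection_token.cancel();

    // 2. THEN a graceful shutdown, bounded by a timeout so teardown on a
    //    dead connection can never block cleanup for long.
    let _ = tokio::time::timeout(Duration::from_secs(2), stream.shutdown()).await;
}
```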
## 2026-03-17 - 4.8.11 - fix(remoteingress-core)

stop data frame send loops promptly when stream cancellation is triggered

- Use cancellation-aware tokio::select! around data channel sends in both edge and hub stream forwarding paths
- Prevent stalled or noisy shutdown behavior when stream or client cancellation happens while awaiting frame delivery
## 2026-03-17 - 4.8.10 - fix(remoteingress-core)

guard tunnel frame sends with cancellation to prevent async send deadlocks

- Wrap OPEN, CLOSE, CLOSE_BACK, WINDOW_UPDATE, and cleanup channel sends in cancellation-aware tokio::select! blocks.
- Avoid indefinite blocking when tunnel, stream, or writer tasks are cancelled while awaiting channel capacity.
- Improve shutdown reliability for edge and hub stream handling under tunnel failure conditions.
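Both of these fixes use the same guarded-send pattern, shown here as a runnable sketch: the channel send races the cancellation token, so a full channel can never block a cancelled task indefinitely (frame type simplified to `Vec<u8>` for the example):

```rust
use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;

/// Cancellation-aware send: returns true only if the frame was delivered.
async fn send_or_cancel(
    tx: &mpsc::Sender<Vec<u8>>,
    token: &CancellationToken,
    frame: Vec<u8>,
) -> bool {
    tokio::select! {
        result = tx.send(frame) => result.is_ok(),
        _ = token.cancelled() => false,
    }
}

#[tokio::main]
async fn main() {
    let (tx, _rx) = mpsc::channel::<Vec<u8>>(1);
    let token = CancellationToken::new();

    // Empty channel: the send completes normally.
    assert!(send_or_cancel(&tx, &token, vec![1]).await);

    // Channel now full; after cancellation the guarded send returns
    // promptly instead of waiting forever for capacity.
    token.cancel();
    assert!(!send_or_cancel(&tx, &token, vec![2]).await);
}
```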
## 2026-03-17 - 4.8.9 - fix(repo)

no changes to commit
## 2026-03-17 - 4.8.8 - fix(remoteingress-core)

cancel stale edge connections when an edge reconnects

- Remove any existing edge entry before registering a reconnected edge
- Trigger the previous connection's cancellation token so stale sessions shut down immediately instead of waiting for TCP keepalive
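A reduced sketch of the eviction logic (map values simplified to bare cancellation tokens; the real map stores a `ConnectedEdgeInfo` that holds the token):

```rust
use std::collections::HashMap;
use tokio_util::sync::CancellationToken;

/// Registering an edge first evicts and cancels any previous entry for the
/// same edge_id, so a stale session tears down immediately instead of
/// lingering until TCP keepalive notices the dead socket.
fn register_edge(
    edges: &mut HashMap<String, CancellationToken>,
    edge_id: &str,
) -> CancellationToken {
    if let Some(old) = edges.remove(edge_id) {
        old.cancel(); // old connection shuts down now
    }
    let token = CancellationToken::new();
    edges.insert(edge_id.to_string(), token.clone());
    token
}

fn main() {
    let mut edges = HashMap::new();
    let first = register_edge(&mut edges, "edge-1");
    let _second = register_edge(&mut edges, "edge-1"); // reconnect
    assert!(first.is_cancelled());
}
```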
## 2026-03-17 - 4.8.7 - fix(remoteingress-core)

perform graceful TLS shutdown on edge and hub tunnel streams

- Send TLS close_notify before cleanup to avoid peer disconnect warnings on both tunnel endpoints
- Wrap stream shutdown in a 2 second timeout so connection teardown does not block cleanup
## 2026-03-17 - 4.8.6 - fix(remoteingress-core)

initialize disconnect reason only when set in hub loop break paths
package.json

@@ -1,6 +1,6 @@
 {
   "name": "@serve.zone/remoteingress",
-  "version": "4.8.6",
+  "version": "4.9.0",
   "private": false,
   "description": "Edge ingress tunnel for DcRouter - accepts incoming TCP connections at network edge and tunnels them to DcRouter SmartProxy preserving client IP via PROXY protocol v1.",
   "main": "dist_ts/index.js",
rust/Cargo.lock (generated; 2 lines added)

@@ -551,6 +551,7 @@ dependencies = [
 name = "remoteingress-core"
 version = "2.0.0"
 dependencies = [
+ "bytes",
  "log",
  "rcgen",
  "remoteingress-protocol",
@@ -568,6 +569,7 @@ dependencies = [
 name = "remoteingress-protocol"
 version = "2.0.0"
 dependencies = [
+ "bytes",
  "log",
  "tokio",
  "tokio-util",
remoteingress-core Cargo.toml

@@ -7,6 +7,7 @@ edition = "2021"
 remoteingress-protocol = { path = "../remoteingress-protocol" }
 tokio = { version = "1", features = ["full"] }
 tokio-rustls = "0.26"
+bytes = "1"
 rustls = { version = "0.23", default-features = false, features = ["ring", "logging", "std", "tls12"] }
 rcgen = "0.13"
 serde = { version = "1", features = ["derive"] }
remoteingress-core — edge

@@ -11,6 +11,7 @@ use tokio_rustls::TlsConnector;
 use tokio_util::sync::CancellationToken;
 use serde::{Deserialize, Serialize};
+use bytes::Bytes;
 use remoteingress_protocol::*;

 type EdgeTlsStream = tokio_rustls::client::TlsStream<TcpStream>;
@@ -26,7 +27,7 @@ enum EdgeFrameAction {
 struct EdgeStreamState {
     /// Unbounded channel to deliver FRAME_DATA_BACK payloads to the hub_to_client task.
     /// Unbounded because flow control (WINDOW_UPDATE) already limits bytes-in-flight.
-    back_tx: mpsc::UnboundedSender<Vec<u8>>,
+    back_tx: mpsc::UnboundedSender<Bytes>,
     /// Send window for FRAME_DATA (upload direction).
     /// Decremented by the client reader, incremented by FRAME_WINDOW_UPDATE_BACK from hub.
     send_window: Arc<AtomicU32>,
@@ -290,8 +291,9 @@ async fn handle_edge_frame(
     client_writers: &Arc<Mutex<HashMap<u32, EdgeStreamState>>>,
     listen_ports: &Arc<RwLock<Vec<u16>>>,
     event_tx: &mpsc::Sender<EdgeEvent>,
-    tunnel_writer_tx: &mpsc::Sender<Vec<u8>>,
-    tunnel_data_tx: &mpsc::Sender<Vec<u8>>,
+    tunnel_writer_tx: &mpsc::Sender<Bytes>,
+    tunnel_data_tx: &mpsc::Sender<Bytes>,
+    tunnel_sustained_tx: &mpsc::Sender<Bytes>,
     port_listeners: &mut HashMap<u16, JoinHandle<()>>,
     active_streams: &Arc<AtomicU32>,
     next_stream_id: &Arc<AtomicU32>,
@@ -342,6 +344,7 @@ async fn handle_edge_frame(
                 port_listeners,
                 tunnel_writer_tx,
                 tunnel_data_tx,
+                tunnel_sustained_tx,
                 client_writers,
                 active_streams,
                 next_stream_id,
@@ -496,8 +499,9 @@ async fn connect_to_hub_and_run(
     // QoS dual-channel: ctrl frames have priority over data frames.
     // Stream handlers send through these channels → TunnelIo drains them.
-    let (tunnel_ctrl_tx, mut tunnel_ctrl_rx) = mpsc::channel::<Vec<u8>>(256);
-    let (tunnel_data_tx, mut tunnel_data_rx) = mpsc::channel::<Vec<u8>>(4096);
+    let (tunnel_ctrl_tx, mut tunnel_ctrl_rx) = mpsc::channel::<Bytes>(512);
+    let (tunnel_data_tx, mut tunnel_data_rx) = mpsc::channel::<Bytes>(4096);
+    let (tunnel_sustained_tx, mut tunnel_sustained_rx) = mpsc::channel::<Bytes>(4096);
     let tunnel_writer_tx = tunnel_ctrl_tx.clone();

     // Start TCP listeners for initial ports
@@ -508,6 +512,7 @@ async fn connect_to_hub_and_run(
         &mut port_listeners,
         &tunnel_writer_tx,
         &tunnel_data_tx,
+        &tunnel_sustained_tx,
         &client_writers,
         active_streams,
         next_stream_id,
@@ -519,6 +524,7 @@ async fn connect_to_hub_and_run(
     // Single-owner I/O engine — no tokio::io::split, no mutex
     let mut tunnel_io = remoteingress_protocol::TunnelIo::new(tls_stream, Vec::new());

+
     let liveness_timeout_dur = Duration::from_secs(45);
     let mut last_activity = Instant::now();
     let mut liveness_deadline = Box::pin(sleep_until(last_activity + liveness_timeout_dur));
@@ -538,7 +544,7 @@
                 liveness_deadline.as_mut().reset(last_activity + liveness_timeout_dur);
                 if let EdgeFrameAction::Disconnect(reason) = handle_edge_frame(
                     frame, &mut tunnel_io, &client_writers, listen_ports, event_tx,
-                    &tunnel_writer_tx, &tunnel_data_tx, &mut port_listeners,
+                    &tunnel_writer_tx, &tunnel_data_tx, &tunnel_sustained_tx, &mut port_listeners,
                     active_streams, next_stream_id, &config.edge_id, connection_token, bind_address,
                 ).await {
                     break 'io_loop EdgeLoopResult::Reconnect(reason);
@@ -547,7 +553,7 @@
             // Poll I/O: write(ctrl→data), flush, read, channels, timers
             let event = std::future::poll_fn(|cx| {
-                tunnel_io.poll_step(cx, &mut tunnel_ctrl_rx, &mut tunnel_data_rx, &mut liveness_deadline, connection_token)
+                tunnel_io.poll_step(cx, &mut tunnel_ctrl_rx, &mut tunnel_data_rx, &mut tunnel_sustained_rx, &mut liveness_deadline, connection_token)
             }).await;

             match event {
@@ -556,7 +562,7 @@
                     liveness_deadline.as_mut().reset(last_activity + liveness_timeout_dur);
                     if let EdgeFrameAction::Disconnect(reason) = handle_edge_frame(
                         frame, &mut tunnel_io, &client_writers, listen_ports, event_tx,
-                        &tunnel_writer_tx, &tunnel_data_tx, &mut port_listeners,
+                        &tunnel_writer_tx, &tunnel_data_tx, &tunnel_sustained_tx, &mut port_listeners,
                         active_streams, next_stream_id, &config.edge_id, connection_token, bind_address,
                     ).await {
                         break EdgeLoopResult::Reconnect(reason);
@@ -587,13 +593,23 @@
         }
     };

-    // Cleanup
+    // Cancel stream tokens FIRST so stream handlers exit immediately.
+    // If we TLS-shutdown first, stream handlers are stuck sending to dead channels
+    // for up to 2 seconds while the shutdown times out on a dead connection.
     connection_token.cancel();
     stun_handle.abort();
     for (_, h) in port_listeners.drain() {
         h.abort();
     }

+    // Graceful TLS shutdown: send close_notify so the hub sees a clean disconnect.
+    // Stream handlers are already cancelled, so no new data is being produced.
+    let mut tls_stream = tunnel_io.into_inner();
+    let _ = tokio::time::timeout(
+        Duration::from_secs(2),
+        tls_stream.shutdown(),
+    ).await;
+
     result
 }
@@ -601,8 +617,9 @@
 fn apply_port_config(
     new_ports: &[u16],
     port_listeners: &mut HashMap<u16, JoinHandle<()>>,
-    tunnel_ctrl_tx: &mpsc::Sender<Vec<u8>>,
-    tunnel_data_tx: &mpsc::Sender<Vec<u8>>,
+    tunnel_ctrl_tx: &mpsc::Sender<Bytes>,
+    tunnel_data_tx: &mpsc::Sender<Bytes>,
+    tunnel_sustained_tx: &mpsc::Sender<Bytes>,
     client_writers: &Arc<Mutex<HashMap<u32, EdgeStreamState>>>,
     active_streams: &Arc<AtomicU32>,
     next_stream_id: &Arc<AtomicU32>,
@@ -625,6 +642,7 @@ fn apply_port_config(
     for &port in new_set.difference(&old_set) {
         let tunnel_ctrl_tx = tunnel_ctrl_tx.clone();
         let tunnel_data_tx = tunnel_data_tx.clone();
+        let tunnel_sustained_tx = tunnel_sustained_tx.clone();
         let client_writers = client_writers.clone();
         let active_streams = active_streams.clone();
         let next_stream_id = next_stream_id.clone();
@@ -659,6 +677,7 @@ fn apply_port_config(
                 let stream_id = next_stream_id.fetch_add(1, Ordering::Relaxed);
                 let tunnel_ctrl_tx = tunnel_ctrl_tx.clone();
                 let tunnel_data_tx = tunnel_data_tx.clone();
+                let tunnel_sustained_tx = tunnel_sustained_tx.clone();
                 let client_writers = client_writers.clone();
                 let active_streams = active_streams.clone();
                 let edge_id = edge_id.clone();
@@ -675,6 +694,7 @@ fn apply_port_config(
                         &edge_id,
                         tunnel_ctrl_tx,
                         tunnel_data_tx,
+                        tunnel_sustained_tx,
                         client_writers,
                         client_token,
                         Arc::clone(&active_streams),
@@ -716,8 +736,9 @@ async fn handle_client_connection(
     stream_id: u32,
     dest_port: u16,
     edge_id: &str,
-    tunnel_ctrl_tx: mpsc::Sender<Vec<u8>>,
-    tunnel_data_tx: mpsc::Sender<Vec<u8>>,
+    tunnel_ctrl_tx: mpsc::Sender<Bytes>,
+    tunnel_data_tx: mpsc::Sender<Bytes>,
+    tunnel_sustained_tx: mpsc::Sender<Bytes>,
     client_writers: Arc<Mutex<HashMap<u32, EdgeStreamState>>>,
     client_token: CancellationToken,
     active_streams: Arc<AtomicU32>,
@@ -731,16 +752,20 @@ async fn handle_client_connection(
     // Send OPEN frame with PROXY v1 header via control channel
     let proxy_header = build_proxy_v1_header(&client_ip, edge_ip, client_port, dest_port);
     let open_frame = encode_frame(stream_id, FRAME_OPEN, proxy_header.as_bytes());
-    if tunnel_ctrl_tx.send(open_frame).await.is_err() {
+    let send_ok = tokio::select! {
+        result = tunnel_ctrl_tx.send(open_frame) => result.is_ok(),
+        _ = client_token.cancelled() => false,
+    };
+    if !send_ok {
         return;
     }

     // Per-stream unbounded back-channel. Flow control (WINDOW_UPDATE) limits
     // bytes-in-flight, so this won't grow unbounded. Unbounded avoids killing
     // streams due to channel overflow — backpressure slows streams, never kills them.
-    let (back_tx, mut back_rx) = mpsc::unbounded_channel::<Vec<u8>>();
+    let (back_tx, mut back_rx) = mpsc::unbounded_channel::<Bytes>();
     // Adaptive initial window: scale with current stream count to keep total in-flight
-    // data within the 32MB budget. Prevents burst flooding when many streams open.
+    // data within the 200MB budget. Prevents burst flooding when many streams open.
     let initial_window = remoteingress_protocol::compute_window_for_stream_count(
         active_streams.load(Ordering::Relaxed),
     );
@@ -806,7 +831,10 @@
         // Send final window update for any remaining consumed bytes
         if consumed_since_update > 0 {
             let frame = encode_window_update(stream_id, FRAME_WINDOW_UPDATE, consumed_since_update);
-            let _ = wu_tx.send(frame).await;
+            tokio::select! {
+                _ = wu_tx.send(frame) => {}
+                _ = hub_to_client_token.cancelled() => {}
+            }
         }
         let _ = client_write.shutdown().await;
     });
@@ -814,6 +842,9 @@
     // Task: client -> hub (upload direction) with per-stream flow control.
     // Zero-copy: read payload directly after the header, then prepend header.
     let mut buf = vec![0u8; FRAME_HEADER_SIZE + 32768];
+    let mut stream_bytes_sent: u64 = 0;
+    let stream_start = tokio::time::Instant::now();
+    let mut is_sustained = false;
     loop {
         // Wait for send window to have capacity (with stall timeout).
         // Safe pattern: register notified BEFORE checking the condition
@@ -844,11 +875,7 @@
             log::warn!("Stream {} upload: window still 0 after stall timeout, closing", stream_id);
             break;
         }
-        // Adaptive: cap read to current per-stream target window
-        let adaptive_cap = remoteingress_protocol::compute_window_for_stream_count(
-            active_streams.load(Ordering::Relaxed),
-        ) as usize;
-        let max_read = w.min(32768).min(adaptive_cap);
+        let max_read = w.min(32768);

         tokio::select! {
             read_result = client_read.read(&mut buf[FRAME_HEADER_SIZE..FRAME_HEADER_SIZE + max_read]) => {
@@ -857,11 +884,25 @@
                     Ok(n) => {
                         send_window.fetch_sub(n as u32, Ordering::Release);
                         encode_frame_header(&mut buf, stream_id, FRAME_DATA, n);
-                        let data_frame = buf[..FRAME_HEADER_SIZE + n].to_vec();
-                        if tunnel_data_tx.send(data_frame).await.is_err() {
-                            log::warn!("Stream {} data channel closed, closing", stream_id);
-                            break;
+                        let data_frame = Bytes::copy_from_slice(&buf[..FRAME_HEADER_SIZE + n]);
+                        // Sustained classification: >2.5 MB/s for >10 seconds
+                        stream_bytes_sent += n as u64;
+                        if !is_sustained {
+                            let elapsed = stream_start.elapsed().as_secs();
+                            if elapsed >= remoteingress_protocol::SUSTAINED_MIN_DURATION_SECS
+                                && stream_bytes_sent / elapsed >= remoteingress_protocol::SUSTAINED_THRESHOLD_BPS
+                            {
+                                is_sustained = true;
+                                log::debug!("Stream {} classified as sustained (upload, {} bytes in {}s)",
+                                    stream_id, stream_bytes_sent, elapsed);
+                            }
                         }
+                        let tx = if is_sustained { &tunnel_sustained_tx } else { &tunnel_data_tx };
+                        let sent = tokio::select! {
+                            result = tx.send(data_frame) => result.is_ok(),
+                            _ = client_token.cancelled() => false,
+                        };
+                        if !sent { break; }
                     }
                     Err(_) => break,
                 }
@@ -882,9 +923,14 @@
     ).await;

     // NOW send CLOSE — the response has been fully delivered (or timed out).
+    // select! with cancellation guard prevents indefinite blocking if tunnel dies.
     if !client_token.is_cancelled() {
         let close_frame = encode_frame(stream_id, FRAME_CLOSE, &[]);
-        let _ = tunnel_data_tx.send(close_frame).await;
+        let tx = if is_sustained { &tunnel_sustained_tx } else { &tunnel_data_tx };
+        tokio::select! {
+            _ = tx.send(close_frame) => {}
+            _ = client_token.cancelled() => {}
+        }
     }

     // Clean up
remoteingress-core — hub

@@ -10,6 +10,7 @@ use tokio_rustls::TlsAcceptor;
 use tokio_util::sync::CancellationToken;
 use serde::{Deserialize, Serialize};
+use bytes::Bytes;
 use remoteingress_protocol::*;

 type HubTlsStream = tokio_rustls::server::TlsStream<TcpStream>;
@@ -26,7 +27,7 @@ struct HubStreamState {
     /// Unbounded channel to deliver FRAME_DATA payloads to the upstream writer task.
     /// Unbounded because flow control (WINDOW_UPDATE) already limits bytes-in-flight.
     /// A bounded channel would kill streams instead of applying backpressure.
-    data_tx: mpsc::UnboundedSender<Vec<u8>>,
+    data_tx: mpsc::UnboundedSender<Bytes>,
     /// Cancellation token for this stream.
     cancel_token: CancellationToken,
     /// Send window for FRAME_DATA_BACK (download direction).
@@ -136,7 +137,7 @@ struct ConnectedEdgeInfo {
     peer_addr: String,
     edge_stream_count: Arc<AtomicU32>,
     config_tx: mpsc::Sender<EdgeConfigUpdate>,
-    #[allow(dead_code)] // kept alive for Drop — cancels child tokens when edge is removed
+    /// Used to cancel the old connection when an edge reconnects.
     cancel_token: CancellationToken,
 }
@@ -307,8 +308,9 @@ async fn handle_hub_frame(
     edge_stream_count: &Arc<AtomicU32>,
     edge_id: &str,
     event_tx: &mpsc::Sender<HubEvent>,
-    ctrl_tx: &mpsc::Sender<Vec<u8>>,
-    data_tx: &mpsc::Sender<Vec<u8>>,
+    ctrl_tx: &mpsc::Sender<Bytes>,
+    data_tx: &mpsc::Sender<Bytes>,
+    sustained_tx: &mpsc::Sender<Bytes>,
     target_host: &str,
     edge_token: &CancellationToken,
     cleanup_tx: &mpsc::Sender<u32>,
@@ -337,6 +339,7 @@ async fn handle_hub_frame(
             let cleanup = cleanup_tx.clone();
             let writer_tx = ctrl_tx.clone(); // control: CLOSE_BACK, WINDOW_UPDATE_BACK
             let data_writer_tx = data_tx.clone(); // data: DATA_BACK
+            let sustained_writer_tx = sustained_tx.clone(); // sustained: DATA_BACK from elephant flows
             let target = target_host.to_string();
             let stream_token = edge_token.child_token();
@@ -346,9 +349,9 @@
             });

             // Create channel for data from edge to this stream
-            let (stream_data_tx, mut stream_data_rx) = mpsc::unbounded_channel::<Vec<u8>>();
+            let (stream_data_tx, mut stream_data_rx) = mpsc::unbounded_channel::<Bytes>();
             // Adaptive initial window: scale with current stream count
-            // to keep total in-flight data within the 32MB budget.
+            // to keep total in-flight data within the 200MB budget.
             let initial_window = compute_window_for_stream_count(
                 edge_stream_count.load(Ordering::Relaxed),
             );
@@ -445,7 +448,10 @@
                 // Send final window update for remaining consumed bytes
                 if consumed_since_update > 0 {
                     let frame = encode_window_update(stream_id, FRAME_WINDOW_UPDATE_BACK, consumed_since_update);
-                    let _ = wub_tx.send(frame).await;
+                    tokio::select! {
+                        _ = wub_tx.send(frame) => {}
+                        _ = writer_token.cancelled() => {}
+                    }
                 }
                 let _ = up_write.shutdown().await;
             });
@@ -454,6 +460,9 @@
             // with per-stream flow control (check send_window before reading).
             // Zero-copy: read payload directly after the header, then prepend header.
             let mut buf = vec![0u8; FRAME_HEADER_SIZE + 32768];
+            let mut dl_bytes_sent: u64 = 0;
+            let dl_start = tokio::time::Instant::now();
+            let mut is_sustained = false;
             loop {
                 // Wait for send window to have capacity (with stall timeout).
                 // Safe pattern: register notified BEFORE checking the condition
@@ -484,11 +493,7 @@
                     log::warn!("Stream {} download: window still 0 after stall timeout, closing", stream_id);
                     break;
                 }
-                // Adaptive: cap read to current per-stream target window
-                let adaptive_cap = remoteingress_protocol::compute_window_for_stream_count(
-                    stream_counter.load(Ordering::Relaxed),
-                ) as usize;
-                let max_read = w.min(32768).min(adaptive_cap);
+                let max_read = w.min(32768);

                 tokio::select! {
                     read_result = up_read.read(&mut buf[FRAME_HEADER_SIZE..FRAME_HEADER_SIZE + max_read]) => {
@@ -497,11 +502,25 @@
                             Ok(n) => {
                                 send_window.fetch_sub(n as u32, Ordering::Release);
                                 encode_frame_header(&mut buf, stream_id, FRAME_DATA_BACK, n);
-                                let frame = buf[..FRAME_HEADER_SIZE + n].to_vec();
-                                if data_writer_tx.send(frame).await.is_err() {
-                                    log::warn!("Stream {} data channel closed, closing", stream_id);
-                                    break;
+                                let frame = Bytes::copy_from_slice(&buf[..FRAME_HEADER_SIZE + n]);
+                                // Sustained classification: >2.5 MB/s for >10 seconds
+                                dl_bytes_sent += n as u64;
+                                if !is_sustained {
+                                    let elapsed = dl_start.elapsed().as_secs();
+                                    if elapsed >= remoteingress_protocol::SUSTAINED_MIN_DURATION_SECS
+                                        && dl_bytes_sent / elapsed >= remoteingress_protocol::SUSTAINED_THRESHOLD_BPS
+                                    {
+                                        is_sustained = true;
+                                        log::debug!("Stream {} classified as sustained (download, {} bytes in {}s)",
+                                            stream_id, dl_bytes_sent, elapsed);
+                                    }
                                 }
+                                let tx = if is_sustained { &sustained_writer_tx } else { &data_writer_tx };
+                                let sent = tokio::select! {
+                                    result = tx.send(frame) => result.is_ok(),
+                                    _ = stream_token.cancelled() => false,
+                                };
+                                if !sent { break; }
                             }
                             Err(_) => break,
                         }
@@ -510,11 +529,15 @@
                 }
             }

-            // Send CLOSE_BACK via DATA channel (must arrive AFTER last DATA_BACK).
-            // Use send().await to guarantee delivery (try_send silently drops if full).
+            // Send CLOSE_BACK via same channel as DATA_BACK (must arrive AFTER last DATA_BACK).
+            // select! with cancellation guard prevents indefinite blocking if tunnel dies.
             if !stream_token.is_cancelled() {
                 let close_frame = encode_frame(stream_id, FRAME_CLOSE_BACK, &[]);
-                let _ = data_writer_tx.send(close_frame).await;
+                let tx = if is_sustained { &sustained_writer_tx } else { &data_writer_tx };
+                tokio::select! {
+                    _ = tx.send(close_frame) => {}
+                    _ = stream_token.cancelled() => {}
+                }
             }

             writer_for_edge_data.abort();
@@ -524,16 +547,24 @@
             if let Err(e) = result {
                 log::error!("Stream {} error: {}", stream_id, e);
-                // Send CLOSE_BACK via DATA channel on error (must arrive after any DATA_BACK).
-                // Use send().await to guarantee delivery.
+                // Send CLOSE_BACK on error (must arrive after any DATA_BACK).
+                // Error path: is_sustained not available here, use data channel (safe —
+                // if error occurs before classification, no sustained frames were sent).
                 if !stream_token.is_cancelled() {
                     let close_frame = encode_frame(stream_id, FRAME_CLOSE_BACK, &[]);
-                    let _ = data_writer_tx.send(close_frame).await;
+                    tokio::select! {
+                        _ = data_writer_tx.send(close_frame) => {}
+                        _ = stream_token.cancelled() => {}
+                    }
                 }
             }

-            // Signal main loop to remove stream from the map
-            let _ = cleanup.send(stream_id).await;
+            // Signal main loop to remove stream from the map.
+            // Cancellation guard prevents indefinite blocking if cleanup channel is full.
+            tokio::select! {
+                _ = cleanup.send(stream_id) => {}
+                _ = stream_token.cancelled() => {}
+            }
             stream_counter.fetch_sub(1, Ordering::Relaxed);
         });
     }
@@ -677,6 +708,13 @@ async fn handle_edge_connection(
     {
         let mut edges = connected.lock().await;
+        // If this edge already has an active connection (reconnect scenario),
+        // cancel the old connection so it shuts down immediately instead of
+        // lingering until TCP keepalive detects the dead socket.
+        if let Some(old) = edges.remove(&edge_id) {
+            log::info!("Edge {} reconnected, cancelling old connection", edge_id);
+            old.cancel_token.cancel();
+        }
         edges.insert(
             edge_id.clone(),
             ConnectedEdgeInfo {
@@ -691,8 +729,9 @@ async fn handle_edge_connection(
     // QoS dual-channel: ctrl frames have priority over data frames.
     // Stream handlers send through these channels -> TunnelIo drains them.
-    let (ctrl_tx, mut ctrl_rx) = mpsc::channel::<Vec<u8>>(256);
-    let (data_tx, mut data_rx) = mpsc::channel::<Vec<u8>>(4096);
+    let (ctrl_tx, mut ctrl_rx) = mpsc::channel::<Bytes>(512);
+    let (data_tx, mut data_rx) = mpsc::channel::<Bytes>(4096);
+    let (sustained_tx, mut sustained_rx) = mpsc::channel::<Bytes>(4096);

     // Spawn task to forward config updates as FRAME_CONFIG frames
     let config_writer_tx = ctrl_tx.clone();
@@ -735,6 +774,7 @@
     // Single-owner I/O engine — no tokio::io::split, no mutex
     let mut tunnel_io = remoteingress_protocol::TunnelIo::new(tls_stream, Vec::new());

+
     // Assigned in every break path of the hub_loop before use at the end.
     #[allow(unused_assignments)]
     let mut disconnect_reason = String::new();
@@ -765,7 +805,7 @@
                 liveness_deadline.as_mut().reset(last_activity + liveness_timeout_dur);
                 if let FrameAction::Disconnect(reason) = handle_hub_frame(
                     frame, &mut tunnel_io, &mut streams, &stream_semaphore, &edge_stream_count,
-                    &edge_id, &event_tx, &ctrl_tx, &data_tx, &target_host, &edge_token,
+                    &edge_id, &event_tx, &ctrl_tx, &data_tx, &sustained_tx, &target_host, &edge_token,
                     &cleanup_tx,
                 ).await {
                     disconnect_reason = reason;
@@ -779,7 +819,7 @@
                 if ping_ticker.poll_tick(cx).is_ready() {
                     tunnel_io.queue_ctrl(encode_frame(0, FRAME_PING, &[]));
                 }
-                tunnel_io.poll_step(cx, &mut ctrl_rx, &mut data_rx, &mut liveness_deadline, &edge_token)
+                tunnel_io.poll_step(cx, &mut ctrl_rx, &mut data_rx, &mut sustained_rx, &mut liveness_deadline, &edge_token)
             }).await;

             match event {
@@ -788,7 +828,7 @@
                     liveness_deadline.as_mut().reset(last_activity + liveness_timeout_dur);
                     if let FrameAction::Disconnect(reason) = handle_hub_frame(
                         frame, &mut tunnel_io, &mut streams, &stream_semaphore, &edge_stream_count,
-                        &edge_id, &event_tx, &ctrl_tx, &data_tx, &target_host, &edge_token,
+                        &edge_id, &event_tx, &ctrl_tx, &data_tx, &sustained_tx, &target_host, &edge_token,
                         &cleanup_tx,
                     ).await {
                         disconnect_reason = reason;
@@ -824,9 +864,19 @@
         }
     }

-    // Cleanup: cancel edge token to propagate to all child tasks
+    // Cancel stream tokens FIRST so stream handlers exit immediately.
+    // If we TLS-shutdown first, stream handlers are stuck sending to dead channels
+    // for up to 2 seconds while the shutdown times out on a dead connection.
     edge_token.cancel();
     config_handle.abort();

+    // Graceful TLS shutdown: send close_notify so the edge sees a clean disconnect.
+    // Stream handlers are already cancelled, so no new data is being produced.
+    let mut tls_stream = tunnel_io.into_inner();
+    let _ = tokio::time::timeout(
+        Duration::from_secs(2),
+        tls_stream.shutdown(),
+    ).await;
+
     {
         let mut edges = connected.lock().await;
         edges.remove(&edge_id);
remoteingress-protocol Cargo.toml

@@ -6,6 +6,7 @@ edition = "2021"
 [dependencies]
 tokio = { version = "1", features = ["io-util", "sync", "time"] }
 tokio-util = "0.7"
+bytes = "1"
 log = "0.4"

 [dev-dependencies]
@@ -2,7 +2,10 @@ use std::collections::VecDeque;
|
|||||||
use std::future::Future;
|
use std::future::Future;
|
||||||
use std::pin::Pin;
|
use std::pin::Pin;
|
||||||
use std::task::{Context, Poll};
|
use std::task::{Context, Poll};
|
||||||
|
use std::time::Duration;
|
||||||
|
use bytes::{Bytes, BytesMut, BufMut};
|
||||||
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, ReadBuf};
|
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, ReadBuf};
|
||||||
|
use tokio::time::Instant;
|
||||||
|
|
||||||
// Frame type constants
|
// Frame type constants
|
||||||
pub const FRAME_OPEN: u8 = 0x01;
|
pub const FRAME_OPEN: u8 = 0x01;
|
||||||
@@ -23,26 +26,34 @@ pub const FRAME_HEADER_SIZE: usize = 9;
|
|||||||
pub const MAX_PAYLOAD_SIZE: u32 = 16 * 1024 * 1024;
|
pub const MAX_PAYLOAD_SIZE: u32 = 16 * 1024 * 1024;
|
||||||
|
|
||||||
// Per-stream flow control constants
|
// Per-stream flow control constants
|
||||||
/// Initial per-stream window size (4 MB). Sized for full throughput at high RTT:
|
/// Initial (and maximum) per-stream window size (4 MB).
|
||||||
/// at 100ms RTT, this sustains ~40 MB/s per stream.
|
|
||||||
pub const INITIAL_STREAM_WINDOW: u32 = 4 * 1024 * 1024;
|
pub const INITIAL_STREAM_WINDOW: u32 = 4 * 1024 * 1024;
|
||||||
/// Send WINDOW_UPDATE after consuming this many bytes (half the initial window).
|
/// Send WINDOW_UPDATE after consuming this many bytes (half the initial window).
|
||||||
pub const WINDOW_UPDATE_THRESHOLD: u32 = INITIAL_STREAM_WINDOW / 2;
|
pub const WINDOW_UPDATE_THRESHOLD: u32 = INITIAL_STREAM_WINDOW / 2;
|
||||||
/// Maximum window size to prevent overflow.
|
/// Maximum window size to prevent overflow.
|
||||||
pub const MAX_WINDOW_SIZE: u32 = 16 * 1024 * 1024;
|
pub const MAX_WINDOW_SIZE: u32 = 4 * 1024 * 1024;
|
||||||
|
|
||||||
|
// Sustained stream classification constants
|
||||||
|
/// Throughput threshold for sustained classification (2.5 MB/s = 20 Mbit/s).
|
||||||
|
pub const SUSTAINED_THRESHOLD_BPS: u64 = 2_500_000;
|
||||||
|
/// Minimum duration before a stream can be classified as sustained.
|
||||||
|
pub const SUSTAINED_MIN_DURATION_SECS: u64 = 10;
|
||||||
|
/// Fixed window for sustained streams (1 MB — the floor).
|
||||||
|
pub const SUSTAINED_WINDOW: u32 = 1 * 1024 * 1024;
|
||||||
|
/// Maximum bytes written from sustained queue per forced drain (1 MB/s guarantee).
|
||||||
|
pub const SUSTAINED_FORCED_DRAIN_CAP: usize = 1_048_576;
|
||||||
|
|
||||||
/// Encode a WINDOW_UPDATE frame for a specific stream.
|
/// Encode a WINDOW_UPDATE frame for a specific stream.
|
||||||
pub fn encode_window_update(stream_id: u32, frame_type: u8, increment: u32) -> Vec<u8> {
|
pub fn encode_window_update(stream_id: u32, frame_type: u8, increment: u32) -> Bytes {
|
||||||
encode_frame(stream_id, frame_type, &increment.to_be_bytes())
|
encode_frame(stream_id, frame_type, &increment.to_be_bytes())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Compute the target per-stream window size based on the number of active streams.
|
/// Compute the target per-stream window size based on the number of active streams.
|
||||||
/// Total memory budget is ~32MB shared across all streams. As more streams are active,
|
/// Total memory budget is ~200MB shared across all streams. Up to 50 streams get the
|
||||||
/// each gets a smaller window. This adapts to current demand — few streams get high
|
/// full 4MB window; above that the window scales down to a 1MB floor at 200+ streams.
|
||||||
/// throughput, many streams save memory and reduce control frame pressure.
|
|
||||||
pub fn compute_window_for_stream_count(active: u32) -> u32 {
|
pub fn compute_window_for_stream_count(active: u32) -> u32 {
|
||||||
let per_stream = (32 * 1024 * 1024u64) / (active.max(1) as u64);
|
let per_stream = (200 * 1024 * 1024u64) / (active.max(1) as u64);
|
||||||
per_stream.clamp(64 * 1024, INITIAL_STREAM_WINDOW as u64) as u32
|
per_stream.clamp(1 * 1024 * 1024, INITIAL_STREAM_WINDOW as u64) as u32
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Decode a WINDOW_UPDATE payload into a byte increment. Returns None if payload is malformed.
|
/// Decode a WINDOW_UPDATE payload into a byte increment. Returns None if payload is malformed.
|
||||||
@@ -58,18 +69,18 @@ pub fn decode_window_update(payload: &[u8]) -> Option<u32> {
|
|||||||
pub struct Frame {
|
pub struct Frame {
|
||||||
pub stream_id: u32,
|
pub stream_id: u32,
|
||||||
pub frame_type: u8,
|
pub frame_type: u8,
|
||||||
pub payload: Vec<u8>,
|
pub payload: Bytes,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Encode a frame into bytes: [stream_id:4][type:1][length:4][payload]
|
/// Encode a frame into bytes: [stream_id:4][type:1][length:4][payload]
|
||||||
pub fn encode_frame(stream_id: u32, frame_type: u8, payload: &[u8]) -> Vec<u8> {
|
pub fn encode_frame(stream_id: u32, frame_type: u8, payload: &[u8]) -> Bytes {
|
||||||
let len = payload.len() as u32;
|
let len = payload.len() as u32;
|
||||||
let mut buf = Vec::with_capacity(FRAME_HEADER_SIZE + payload.len());
|
let mut buf = BytesMut::with_capacity(FRAME_HEADER_SIZE + payload.len());
|
||||||
buf.extend_from_slice(&stream_id.to_be_bytes());
|
buf.put_slice(&stream_id.to_be_bytes());
|
||||||
buf.push(frame_type);
|
buf.put_u8(frame_type);
|
||||||
buf.extend_from_slice(&len.to_be_bytes());
|
buf.put_slice(&len.to_be_bytes());
|
||||||
buf.extend_from_slice(payload);
|
buf.put_slice(payload);
|
||||||
buf
|
buf.freeze()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Write a frame header into `buf[0..FRAME_HEADER_SIZE]`.
|
/// Write a frame header into `buf[0..FRAME_HEADER_SIZE]`.
|
||||||
@@ -144,7 +155,7 @@ impl<R: AsyncRead + Unpin> FrameReader<R> {
|
|||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut payload = vec![0u8; length as usize];
|
let mut payload = BytesMut::zeroed(length as usize);
|
||||||
if length > 0 {
|
if length > 0 {
|
||||||
self.reader.read_exact(&mut payload).await?;
|
self.reader.read_exact(&mut payload).await?;
|
||||||
}
|
}
|
||||||
@@ -152,7 +163,7 @@ impl<R: AsyncRead + Unpin> FrameReader<R> {
|
|||||||
Ok(Some(Frame {
|
Ok(Some(Frame {
|
||||||
stream_id,
|
stream_id,
|
||||||
frame_type,
|
frame_type,
|
||||||
payload,
|
payload: payload.freeze(),
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -186,24 +197,30 @@ pub enum TunnelEvent {
|
|||||||
/// Write state extracted into a sub-struct so the borrow checker can see
|
/// Write state extracted into a sub-struct so the borrow checker can see
|
||||||
/// disjoint field access between `self.write` and `self.stream`.
|
/// disjoint field access between `self.write` and `self.stream`.
|
||||||
struct WriteState {
|
struct WriteState {
|
||||||
ctrl_queue: VecDeque<Vec<u8>>, // PONG, WINDOW_UPDATE, CLOSE, OPEN — always first
|
ctrl_queue: VecDeque<Bytes>, // PONG, WINDOW_UPDATE, CLOSE, OPEN — always first
|
||||||
data_queue: VecDeque<Vec<u8>>, // DATA, DATA_BACK — only when ctrl is empty
|
data_queue: VecDeque<Bytes>, // DATA, DATA_BACK — only when ctrl is empty
|
||||||
offset: usize, // progress within current frame being written
|
sustained_queue: VecDeque<Bytes>, // DATA, DATA_BACK from sustained streams — lowest priority
|
||||||
|
offset: usize, // progress within current frame being written
|
||||||
flush_needed: bool,
|
flush_needed: bool,
|
||||||
|
// Sustained starvation prevention: guaranteed 1 MB/s drain
|
||||||
|
sustained_last_drain: Instant,
|
||||||
|
sustained_bytes_this_period: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl WriteState {
|
impl WriteState {
|
||||||
fn has_work(&self) -> bool {
|
fn has_work(&self) -> bool {
|
||||||
!self.ctrl_queue.is_empty() || !self.data_queue.is_empty()
|
!self.ctrl_queue.is_empty() || !self.data_queue.is_empty() || !self.sustained_queue.is_empty()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Single-owner I/O engine for the tunnel TLS connection.
|
/// Single-owner I/O engine for the tunnel TLS connection.
|
||||||
///
|
///
|
||||||
/// Owns the TLS stream directly — no `tokio::io::split()`, no mutex.
|
/// Owns the TLS stream directly — no `tokio::io::split()`, no mutex.
|
||||||
/// Uses two priority write queues: ctrl frames (PONG, WINDOW_UPDATE, CLOSE, OPEN)
|
/// Uses three priority write queues:
|
||||||
/// are ALWAYS written before data frames (DATA, DATA_BACK). This prevents
|
/// 1. ctrl (PONG, WINDOW_UPDATE, CLOSE, OPEN) — always first
|
||||||
/// WINDOW_UPDATE starvation that causes flow control deadlocks.
|
/// 2. data (DATA, DATA_BACK from normal streams) — when ctrl empty
|
||||||
|
/// 3. sustained (DATA, DATA_BACK from sustained streams) — lowest priority,
|
||||||
|
/// drained freely when ctrl+data empty, or forced 1MB/s when they're not
|
||||||
pub struct TunnelIo<S> {
|
pub struct TunnelIo<S> {
|
||||||
stream: S,
|
stream: S,
|
||||||
// Read state: accumulate bytes, parse frames incrementally
|
// Read state: accumulate bytes, parse frames incrementally
|
||||||
@@ -229,22 +246,30 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {
|
|||||||
write: WriteState {
|
write: WriteState {
|
||||||
ctrl_queue: VecDeque::new(),
|
ctrl_queue: VecDeque::new(),
|
||||||
data_queue: VecDeque::new(),
|
data_queue: VecDeque::new(),
|
||||||
|
sustained_queue: VecDeque::new(),
|
||||||
offset: 0,
|
offset: 0,
|
||||||
flush_needed: false,
|
flush_needed: false,
|
||||||
|
sustained_last_drain: Instant::now(),
|
||||||
|
sustained_bytes_this_period: 0,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Queue a high-priority control frame (PONG, WINDOW_UPDATE, CLOSE, OPEN).
|
/// Queue a high-priority control frame (PONG, WINDOW_UPDATE, CLOSE, OPEN).
|
||||||
pub fn queue_ctrl(&mut self, frame: Vec<u8>) {
|
pub fn queue_ctrl(&mut self, frame: Bytes) {
|
||||||
self.write.ctrl_queue.push_back(frame);
|
self.write.ctrl_queue.push_back(frame);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Queue a lower-priority data frame (DATA, DATA_BACK).
|
/// Queue a lower-priority data frame (DATA, DATA_BACK).
|
||||||
pub fn queue_data(&mut self, frame: Vec<u8>) {
|
pub fn queue_data(&mut self, frame: Bytes) {
|
||||||
self.write.data_queue.push_back(frame);
|
self.write.data_queue.push_back(frame);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Queue a lowest-priority sustained data frame.
|
||||||
|
pub fn queue_sustained(&mut self, frame: Bytes) {
|
||||||
|
self.write.sustained_queue.push_back(frame);
|
||||||
|
}
|
||||||
|
|
||||||
/// Try to parse a complete frame from the read buffer.
|
/// Try to parse a complete frame from the read buffer.
|
||||||
/// Uses a parse_pos cursor to avoid drain() on every frame.
|
/// Uses a parse_pos cursor to avoid drain() on every frame.
|
||||||
pub fn try_parse_frame(&mut self) -> Option<Result<Frame, std::io::Error>> {
|
pub fn try_parse_frame(&mut self) -> Option<Result<Frame, std::io::Error>> {
|
||||||
@@ -287,7 +312,9 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {
|
|||||||
return None;
|
return None;
|
||||||
}
|
}
|
||||||
|
|
||||||
let payload = self.read_buf[base + FRAME_HEADER_SIZE..base + total_frame_size].to_vec();
|
let payload = Bytes::copy_from_slice(
|
||||||
|
&self.read_buf[base + FRAME_HEADER_SIZE..base + total_frame_size],
|
||||||
|
);
|
||||||
self.parse_pos += total_frame_size;
|
self.parse_pos += total_frame_size;
|
||||||
|
|
||||||
// Compact when parse_pos > half the data to reclaim memory
|
// Compact when parse_pos > half the data to reclaim memory
|
||||||
@@ -302,31 +329,42 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {

     /// Poll-based I/O step. Returns Ready on events, Pending when idle.
     ///
-    /// Order: write(ctrl→data) → flush → read → channels → timers
+    /// Order: write(ctrl->data->sustained) -> flush -> read -> channels -> timers
     pub fn poll_step(
         &mut self,
         cx: &mut Context<'_>,
-        ctrl_rx: &mut tokio::sync::mpsc::Receiver<Vec<u8>>,
-        data_rx: &mut tokio::sync::mpsc::Receiver<Vec<u8>>,
+        ctrl_rx: &mut tokio::sync::mpsc::Receiver<Bytes>,
+        data_rx: &mut tokio::sync::mpsc::Receiver<Bytes>,
+        sustained_rx: &mut tokio::sync::mpsc::Receiver<Bytes>,
         liveness_deadline: &mut Pin<Box<tokio::time::Sleep>>,
         cancel_token: &tokio_util::sync::CancellationToken,
     ) -> Poll<TunnelEvent> {
-        // 1. WRITE: drain ctrl queue first, then data queue.
-        // TLS poll_write writes plaintext to session buffer (always Ready).
-        // Batch up to 16 frames per poll cycle.
+        // 1. WRITE: 3-tier priority — ctrl first, then data, then sustained.
+        // Sustained drains freely when ctrl+data are empty.
+        // Write one frame, set flush_needed, then flush must complete before
+        // writing more. This prevents unbounded TLS session buffer growth.
         // Safe: `self.write` and `self.stream` are disjoint fields.
         let mut writes = 0;
-        while self.write.has_work() && writes < 16 {
-            let from_ctrl = !self.write.ctrl_queue.is_empty();
-            let frame = if from_ctrl {
-                self.write.ctrl_queue.front().unwrap()
-            } else {
-                self.write.data_queue.front().unwrap()
-            };
+        while self.write.has_work() && writes < 16 && !self.write.flush_needed {
+            // Pick queue: ctrl > data > sustained
+            let queue_id = if !self.write.ctrl_queue.is_empty() {
+                0 // ctrl
+            } else if !self.write.data_queue.is_empty() {
+                1 // data
+            } else {
+                2 // sustained
+            };
+            let frame = match queue_id {
+                0 => self.write.ctrl_queue.front().unwrap(),
+                1 => self.write.data_queue.front().unwrap(),
+                _ => self.write.sustained_queue.front().unwrap(),
+            };
             let remaining = &frame[self.write.offset..];

             match Pin::new(&mut self.stream).poll_write(cx, remaining) {
                 Poll::Ready(Ok(0)) => {
+                    log::error!("TunnelIo: poll_write returned 0 (write zero), ctrl_q={} data_q={} sustained_q={}",
+                        self.write.ctrl_queue.len(), self.write.data_queue.len(), self.write.sustained_queue.len());
                     return Poll::Ready(TunnelEvent::WriteError(
                         std::io::Error::new(std::io::ErrorKind::WriteZero, "write zero"),
                     ));
@@ -335,22 +373,80 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {
                     self.write.offset += n;
                     self.write.flush_needed = true;
                     if self.write.offset >= frame.len() {
-                        if from_ctrl { self.write.ctrl_queue.pop_front(); }
-                        else { self.write.data_queue.pop_front(); }
+                        match queue_id {
+                            0 => { self.write.ctrl_queue.pop_front(); }
+                            1 => { self.write.data_queue.pop_front(); }
+                            _ => {
+                                self.write.sustained_queue.pop_front();
+                                self.write.sustained_last_drain = Instant::now();
+                                self.write.sustained_bytes_this_period = 0;
+                            }
+                        }
                         self.write.offset = 0;
                         writes += 1;
                     }
                 }
-                Poll::Ready(Err(e)) => return Poll::Ready(TunnelEvent::WriteError(e)),
+                Poll::Ready(Err(e)) => {
+                    log::error!("TunnelIo: poll_write error: {} (ctrl_q={} data_q={} sustained_q={})",
+                        e, self.write.ctrl_queue.len(), self.write.data_queue.len(), self.write.sustained_queue.len());
+                    return Poll::Ready(TunnelEvent::WriteError(e));
+                }
                 Poll::Pending => break,
             }
         }

+        // 1b. FORCED SUSTAINED DRAIN: when ctrl/data have work but sustained is waiting,
+        // guarantee at least 1 MB/s by draining up to SUSTAINED_FORCED_DRAIN_CAP
+        // once per second.
+        if !self.write.sustained_queue.is_empty()
+            && (!self.write.ctrl_queue.is_empty() || !self.write.data_queue.is_empty())
+            && !self.write.flush_needed
+        {
+            let now = Instant::now();
+            if now.duration_since(self.write.sustained_last_drain) >= Duration::from_secs(1) {
+                self.write.sustained_bytes_this_period = 0;
+                self.write.sustained_last_drain = now;
+
+                while !self.write.sustained_queue.is_empty()
+                    && self.write.sustained_bytes_this_period < SUSTAINED_FORCED_DRAIN_CAP
+                    && !self.write.flush_needed
+                {
+                    let frame = self.write.sustained_queue.front().unwrap();
+                    let remaining = &frame[self.write.offset..];
+                    match Pin::new(&mut self.stream).poll_write(cx, remaining) {
+                        Poll::Ready(Ok(0)) => {
+                            return Poll::Ready(TunnelEvent::WriteError(
+                                std::io::Error::new(std::io::ErrorKind::WriteZero, "write zero"),
+                            ));
+                        }
+                        Poll::Ready(Ok(n)) => {
+                            self.write.offset += n;
+                            self.write.flush_needed = true;
+                            self.write.sustained_bytes_this_period += n;
+                            if self.write.offset >= frame.len() {
+                                self.write.sustained_queue.pop_front();
+                                self.write.offset = 0;
+                            }
+                        }
+                        Poll::Ready(Err(e)) => {
+                            return Poll::Ready(TunnelEvent::WriteError(e));
+                        }
+                        Poll::Pending => break,
+                    }
+                }
+            }
+        }

         // 2. FLUSH: push encrypted data from TLS session to TCP.
         if self.write.flush_needed {
             match Pin::new(&mut self.stream).poll_flush(cx) {
-                Poll::Ready(Ok(())) => self.write.flush_needed = false,
-                Poll::Ready(Err(e)) => return Poll::Ready(TunnelEvent::WriteError(e)),
+                Poll::Ready(Ok(())) => {
+                    self.write.flush_needed = false;
+                }
+                Poll::Ready(Err(e)) => {
+                    log::error!("TunnelIo: poll_flush error: {}", e);
+                    return Poll::Ready(TunnelEvent::WriteError(e));
+                }
                 Poll::Pending => {} // TCP waker will notify us
             }
         }
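The forced-drain arithmetic is simple: the budget opens once per second and allows up to SUSTAINED_FORCED_DRAIN_CAP bytes, so the sustained tier is guaranteed roughly cap divided by period even while ctrl and data stay busy. A standalone sketch of that budget follows; the constant's value is assumed here (its definition lies outside this hunk), with 1 MiB per one-second period matching the "at least 1 MB/s" comment above.

```rust
use std::time::{Duration, Instant};

// Assumed value: SUSTAINED_FORCED_DRAIN_CAP is referenced in the diff but
// defined outside this hunk. 1 MiB per 1-second period yields the 1 MB/s floor.
const SUSTAINED_FORCED_DRAIN_CAP: usize = 1024 * 1024;

struct DrainBudget {
    last_drain: Instant,
    bytes_this_period: usize,
}

impl DrainBudget {
    /// Bytes the sustained tier may force-drain right now. The budget opens
    /// once per second; between openings it is zero (the sustained queue
    /// still drains normally whenever ctrl and data are idle).
    fn open(&mut self, now: Instant) -> usize {
        if now.duration_since(self.last_drain) < Duration::from_secs(1) {
            return 0;
        }
        self.last_drain = now;
        self.bytes_this_period = 0;
        SUSTAINED_FORCED_DRAIN_CAP
    }
}

fn main() {
    let mut budget = DrainBudget {
        last_drain: Instant::now() - Duration::from_secs(2),
        bytes_this_period: 0,
    };
    let now = Instant::now();
    assert_eq!(budget.open(now), SUSTAINED_FORCED_DRAIN_CAP); // period elapsed: full cap
    assert_eq!(budget.open(now), 0); // same period: nothing extra
}
```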
@@ -386,12 +482,19 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {
                     // Partial data — loop to call poll_read again so the TCP
                     // waker is re-registered when it finally returns Pending.
                 }
-                Poll::Ready(Err(e)) => return Poll::Ready(TunnelEvent::ReadError(e)),
+                Poll::Ready(Err(e)) => {
+                    log::error!("TunnelIo: poll_read error: {}", e);
+                    return Poll::Ready(TunnelEvent::ReadError(e));
+                }
                 Poll::Pending => break,
             }
         }

-        // 4. CHANNELS: drain ctrl into ctrl_queue, data into data_queue.
+        // 4. CHANNELS: drain ctrl (always — priority), data (only if queue is small).
+        // Ctrl frames must never be delayed — always drain fully.
+        // Data frames are gated: keep data in the bounded channel for proper
+        // backpressure when TLS writes are slow. Without this gate, the internal
+        // data_queue (unbounded VecDeque) grows to hundreds of MB under throttle -> OOM.
         let mut got_new = false;
         loop {
             match ctrl_rx.poll_recv(cx) {
@@ -404,15 +507,27 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {
                 Poll::Pending => break,
             }
         }
-        loop {
-            match data_rx.poll_recv(cx) {
-                Poll::Ready(Some(frame)) => { self.write.data_queue.push_back(frame); got_new = true; }
-                Poll::Ready(None) => {
-                    return Poll::Ready(TunnelEvent::WriteError(
-                        std::io::Error::new(std::io::ErrorKind::BrokenPipe, "data channel closed"),
-                    ));
-                }
-                Poll::Pending => break,
-            }
-        }
+        if self.write.data_queue.len() < 64 {
+            loop {
+                match data_rx.poll_recv(cx) {
+                    Poll::Ready(Some(frame)) => { self.write.data_queue.push_back(frame); got_new = true; }
+                    Poll::Ready(None) => {
+                        return Poll::Ready(TunnelEvent::WriteError(
+                            std::io::Error::new(std::io::ErrorKind::BrokenPipe, "data channel closed"),
+                        ));
+                    }
+                    Poll::Pending => break,
+                }
+            }
+        }
+        // Sustained channel: drain when sustained_queue is small (same backpressure pattern).
+        // Channel close is non-fatal — not all connections have sustained streams.
+        if self.write.sustained_queue.len() < 64 {
+            loop {
+                match sustained_rx.poll_recv(cx) {
+                    Poll::Ready(Some(frame)) => { self.write.sustained_queue.push_back(frame); got_new = true; }
+                    Poll::Ready(None) | Poll::Pending => break,
+                }
+            }
+        }

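The gate works together with bounded mpsc channels: frames left in the channel keep its slots occupied, so upstream senders suspend on `send().await` instead of growing an unbounded in-process queue. A minimal sketch of the pattern, where the 512-slot capacity follows the 4.8.19 changelog entry and the 64-frame gate mirrors this hunk:

```rust
use std::collections::VecDeque;
use bytes::Bytes;
use tokio::sync::mpsc;

/// Drain the bounded channel into the internal queue only while the queue
/// is small. Frames that stay in the channel occupy its capacity, so
/// upstream `send().await` calls suspend: that is the backpressure.
fn drain_gated(rx: &mut mpsc::Receiver<Bytes>, queue: &mut VecDeque<Bytes>) {
    while queue.len() < 64 {
        match rx.try_recv() {
            Ok(frame) => queue.push_back(frame),
            Err(_) => break, // channel empty or closed
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<Bytes>(512);
    for _ in 0..100 {
        tx.send(Bytes::from_static(b"frame")).await.unwrap();
    }
    let mut queue = VecDeque::new();
    drain_gated(&mut rx, &mut queue);
    assert_eq!(queue.len(), 64); // the other 36 frames stay buffered upstream
    println!("drained {} frames, rest held in the bounded channel", queue.len());
}
```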
@@ -424,10 +539,12 @@ impl<S: AsyncRead + AsyncWrite + Unpin> TunnelIo<S> {
             return Poll::Ready(TunnelEvent::Cancelled);
         }

-        // 6. SELF-WAKE: only when we have frames AND flush is done.
-        // If flush is pending, the TCP write-readiness waker will notify us.
-        // If we got new channel frames, wake to write them.
-        if got_new || (!self.write.flush_needed && self.write.has_work()) {
+        // 6. SELF-WAKE: only when flush is complete AND we have work.
+        // When flush is Pending, the TCP write-readiness waker will notify us.
+        // CRITICAL: do NOT self-wake when flush_needed — poll_write always returns
+        // Ready (TLS buffers in-memory), so self-waking causes a tight spin loop
+        // that fills the TLS session buffer unboundedly -> OOM -> ECONNRESET.
+        if !self.write.flush_needed && (got_new || self.write.has_work()) {
             cx.waker().wake_by_ref();
         }

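A toy demonstration (not tunnel code) of why that gate matters: a task that self-wakes unconditionally is re-polled in a tight loop even though nothing has progressed, which is exactly the spin the new condition breaks.

```rust
use std::task::Poll;

#[tokio::main]
async fn main() {
    let mut polls = 0u32;
    std::future::poll_fn(|cx| {
        polls += 1;
        if polls < 1_000 {
            // Unconditional self-wake: the runtime re-polls this task
            // immediately even though no I/O progressed, a busy loop.
            // Gating the wake on `!flush_needed` breaks exactly this cycle.
            cx.waker().wake_by_ref();
            Poll::Pending
        } else {
            Poll::Ready(())
        }
    })
    .await;
    println!("task was polled {polls} times without any I/O progress");
}
```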
@@ -449,14 +566,14 @@ mod tests {
         let mut buf = vec![0u8; FRAME_HEADER_SIZE + payload.len()];
         buf[FRAME_HEADER_SIZE..].copy_from_slice(payload);
         encode_frame_header(&mut buf, 42, FRAME_DATA, payload.len());
-        assert_eq!(buf, encode_frame(42, FRAME_DATA, payload));
+        assert_eq!(buf, &encode_frame(42, FRAME_DATA, payload)[..]);
     }

     #[test]
     fn test_encode_frame_header_empty_payload() {
         let mut buf = vec![0u8; FRAME_HEADER_SIZE];
         encode_frame_header(&mut buf, 99, FRAME_CLOSE, 0);
-        assert_eq!(buf, encode_frame(99, FRAME_CLOSE, &[]));
+        assert_eq!(buf, &encode_frame(99, FRAME_CLOSE, &[])[..]);
     }

     #[test]
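These assertions pin the `encode_frame`/`encode_frame_header` pair (and the `[..]` on the right-hand side shows `encode_frame` now returns `Bytes`), but the wire layout itself is not in this diff. A plausible sketch assuming a 9-byte header of stream_id (u32 BE), frame_type (u8) and payload length (u32 BE); the header size, field order, and discriminant values are assumptions, not taken from the source:

```rust
use bytes::Bytes;

// Assumed layout: 4-byte stream_id, 1-byte frame type, 4-byte payload length.
const FRAME_HEADER_SIZE: usize = 9;
const FRAME_DATA: u8 = 0x01; // assumed discriminant value

fn encode_frame_header(buf: &mut [u8], stream_id: u32, frame_type: u8, payload_len: usize) {
    buf[0..4].copy_from_slice(&stream_id.to_be_bytes());
    buf[4] = frame_type;
    buf[5..9].copy_from_slice(&(payload_len as u32).to_be_bytes());
}

fn encode_frame(stream_id: u32, frame_type: u8, payload: &[u8]) -> Bytes {
    let mut buf = vec![0u8; FRAME_HEADER_SIZE + payload.len()];
    encode_frame_header(&mut buf, stream_id, frame_type, payload.len());
    buf[FRAME_HEADER_SIZE..].copy_from_slice(payload);
    Bytes::from(buf) // one allocation, handed off without copying
}

fn main() {
    let frame = encode_frame(42, FRAME_DATA, b"hello");
    assert_eq!(frame.len(), FRAME_HEADER_SIZE + 5);
    assert_eq!(&frame[..4], &42u32.to_be_bytes()[..]);
}
```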
@@ -624,7 +741,7 @@ mod tests {
         let frame = reader.next_frame().await.unwrap().unwrap();
         assert_eq!(frame.stream_id, i as u32);
         assert_eq!(frame.frame_type, ft);
-        assert_eq!(frame.payload, format!("payload_{}", i).as_bytes());
+        assert_eq!(&frame.payload[..], format!("payload_{}", i).as_bytes());
     }

     assert!(reader.next_frame().await.unwrap().is_none());
@@ -633,7 +750,7 @@ mod tests {
     #[tokio::test]
     async fn test_frame_reader_zero_length_payload() {
         let data = encode_frame(42, FRAME_CLOSE, &[]);
-        let cursor = std::io::Cursor::new(data);
+        let cursor = std::io::Cursor::new(data.to_vec());
         let mut reader = FrameReader::new(cursor);

         let frame = reader.next_frame().await.unwrap().unwrap();
@@ -661,90 +778,57 @@ mod tests {

     #[test]
     fn test_adaptive_window_zero_streams() {
-        // 0 streams treated as 1: 32MB/1 = 32MB → clamped to 4MB max
+        // 0 streams treated as 1: 200MB/1 -> clamped to 4MB max
         assert_eq!(compute_window_for_stream_count(0), INITIAL_STREAM_WINDOW);
     }

     #[test]
     fn test_adaptive_window_one_stream() {
-        // 32MB/1 = 32MB → clamped to 4MB max
         assert_eq!(compute_window_for_stream_count(1), INITIAL_STREAM_WINDOW);
     }

     #[test]
-    fn test_adaptive_window_at_max_boundary() {
-        // 32MB/8 = 4MB = exactly INITIAL_STREAM_WINDOW
-        assert_eq!(compute_window_for_stream_count(8), INITIAL_STREAM_WINDOW);
+    fn test_adaptive_window_50_streams_full() {
+        // 200MB/50 = 4MB = exactly INITIAL_STREAM_WINDOW
+        assert_eq!(compute_window_for_stream_count(50), INITIAL_STREAM_WINDOW);
     }

     #[test]
-    fn test_adaptive_window_just_below_max() {
-        // 32MB/9 = 3,728,270 — first value below INITIAL_STREAM_WINDOW
-        let w = compute_window_for_stream_count(9);
+    fn test_adaptive_window_51_streams_starts_scaling() {
+        // 200MB/51 < 4MB — first value below max
+        let w = compute_window_for_stream_count(51);
         assert!(w < INITIAL_STREAM_WINDOW);
-        assert_eq!(w, (32 * 1024 * 1024u64 / 9) as u32);
-    }
-
-    #[test]
-    fn test_adaptive_window_16_streams() {
-        // 32MB/16 = 2MB
-        assert_eq!(compute_window_for_stream_count(16), 2 * 1024 * 1024);
+        assert_eq!(w, (200 * 1024 * 1024u64 / 51) as u32);
     }

     #[test]
     fn test_adaptive_window_100_streams() {
-        // 32MB/100 = 335,544 bytes (~327KB)
-        let w = compute_window_for_stream_count(100);
-        assert_eq!(w, (32 * 1024 * 1024u64 / 100) as u32);
-        assert!(w > 64 * 1024); // above floor
-        assert!(w < INITIAL_STREAM_WINDOW as u32); // below ceiling
+        // 200MB/100 = 2MB
+        assert_eq!(compute_window_for_stream_count(100), 2 * 1024 * 1024);
     }

     #[test]
-    fn test_adaptive_window_200_streams() {
-        // 32MB/200 = 167,772 bytes (~163KB), above 64KB floor
-        let w = compute_window_for_stream_count(200);
-        assert_eq!(w, (32 * 1024 * 1024u64 / 200) as u32);
-        assert!(w > 64 * 1024);
+    fn test_adaptive_window_200_streams_at_floor() {
+        // 200MB/200 = 1MB = exactly the floor
+        assert_eq!(compute_window_for_stream_count(200), 1 * 1024 * 1024);
     }

     #[test]
-    fn test_adaptive_window_500_streams() {
-        // 32MB/500 = 67,108 bytes (~65.5KB), just above 64KB floor
-        let w = compute_window_for_stream_count(500);
-        assert_eq!(w, (32 * 1024 * 1024u64 / 500) as u32);
-        assert!(w > 64 * 1024);
-    }
-
-    #[test]
-    fn test_adaptive_window_at_min_boundary() {
-        // 32MB/512 = 65,536 = exactly 64KB floor
-        assert_eq!(compute_window_for_stream_count(512), 64 * 1024);
-    }
-
-    #[test]
-    fn test_adaptive_window_below_min_clamped() {
-        // 32MB/513 = 65,408 → clamped up to 64KB
-        assert_eq!(compute_window_for_stream_count(513), 64 * 1024);
-    }
-
-    #[test]
-    fn test_adaptive_window_1000_streams() {
-        // 32MB/1000 = 33,554 → clamped to 64KB
-        assert_eq!(compute_window_for_stream_count(1000), 64 * 1024);
+    fn test_adaptive_window_500_streams_clamped() {
+        // 200MB/500 = 0.4MB -> clamped up to 1MB floor
+        assert_eq!(compute_window_for_stream_count(500), 1 * 1024 * 1024);
     }

     #[test]
     fn test_adaptive_window_max_u32() {
-        // Extreme: u32::MAX streams → tiny value → clamped to 64KB
-        assert_eq!(compute_window_for_stream_count(u32::MAX), 64 * 1024);
+        // Extreme: u32::MAX streams -> tiny value -> clamped to 1MB
+        assert_eq!(compute_window_for_stream_count(u32::MAX), 1 * 1024 * 1024);
     }

     #[test]
     fn test_adaptive_window_monotonically_decreasing() {
-        // Window should decrease (or stay same) as stream count increases
         let mut prev = compute_window_for_stream_count(1);
-        for n in [2, 5, 10, 50, 100, 200, 500, 512, 1000] {
+        for n in [2, 10, 50, 51, 100, 200, 500, 1000] {
             let w = compute_window_for_stream_count(n);
             assert!(w <= prev, "window increased from {} to {} at n={}", prev, w, n);
             prev = w;
@@ -753,11 +837,11 @@ mod tests {

     #[test]
     fn test_adaptive_window_total_budget_bounded() {
-        // active × per_stream_window should never exceed 32MB (+ clamp overhead for high N)
-        for n in [1, 10, 50, 100, 200, 500] {
+        // active x per_stream_window should never exceed 200MB (+ clamp overhead for high N)
+        for n in [1, 10, 50, 100, 200] {
             let w = compute_window_for_stream_count(n);
             let total = w as u64 * n as u64;
-            assert!(total <= 32 * 1024 * 1024, "total {}MB exceeds budget at n={}", total / (1024*1024), n);
+            assert!(total <= 200 * 1024 * 1024, "total {}MB exceeds budget at n={}", total / (1024*1024), n);
         }
     }

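Taken together, these assertions fully determine the window formula: a 200 MB total budget divided by the active stream count, clamped between a 1 MB floor and the 4 MB INITIAL_STREAM_WINDOW. A minimal implementation consistent with them (the constant names other than INITIAL_STREAM_WINDOW are assumed; the shipped function lives outside this diff):

```rust
const INITIAL_STREAM_WINDOW: u32 = 4 * 1024 * 1024; // 4MB max per stream
const TOTAL_WINDOW_BUDGET: u64 = 200 * 1024 * 1024; // 200MB across all streams
const MIN_STREAM_WINDOW: u32 = 1024 * 1024;         // 1MB floor

/// Sketch consistent with the tests above: budget / streams, clamped.
fn compute_window_for_stream_count(active: u32) -> u32 {
    let n = active.max(1) as u64; // 0 streams treated as 1
    let per_stream = (TOTAL_WINDOW_BUDGET / n) as u32;
    per_stream.clamp(MIN_STREAM_WINDOW, INITIAL_STREAM_WINDOW)
}

fn main() {
    assert_eq!(compute_window_for_stream_count(50), INITIAL_STREAM_WINDOW); // 4MB
    assert_eq!(compute_window_for_stream_count(100), 2 * 1024 * 1024);      // 2MB
    assert_eq!(compute_window_for_stream_count(500), MIN_STREAM_WINDOW);    // clamped up
}
```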
@@ -324,7 +324,7 @@ tap.test('setup: start echo server and tunnel', async () => {
   expect(tunnel.hub.running).toBeTrue();
 });

-tap.test('single stream: 32MB transfer exceeding initial 4MB window', async () => {
+tap.test('single stream: 32MB transfer exceeding initial 4MB window (multiple refills)', async () => {
   const size = 32 * 1024 * 1024;
   const data = crypto.randomBytes(size);
   const expectedHash = sha256(data);
@@ -392,7 +392,7 @@ tap.test('asymmetric transfer: 4KB request -> 4MB response', async () => {
   }
 });

-tap.test('100 streams x 1MB each (100MB total exceeding 32MB budget)', async () => {
+tap.test('100 streams x 1MB each (100MB total within the 200MB budget)', async () => {
   const streamCount = 100;
   const payloadSize = 1 * 1024 * 1024;

@@ -446,7 +446,7 @@ tap.test('active stream counter tracks concurrent connections', async () => {
 });

 tap.test('50 streams x 2MB each (forces multiple window refills per stream)', async () => {
-  // At 50 concurrent streams: adaptive window = 32MB/50 = 655KB per stream
-  // Each stream sends 2MB → needs ~3 WINDOW_UPDATE refill cycles per stream
+  // At 50 concurrent streams: adaptive window = 200MB/50 = 4MB per stream
+  // Each stream sends 2MB, which now fits within a single 4MB window grant
   const streamCount = 50;
   const payloadSize = 2 * 1024 * 1024;
test/test.loadtest.node.ts (new file, 402 lines)
@@ -0,0 +1,402 @@
import { expect, tap } from '@push.rocks/tapbundle';
import * as net from 'net';
import * as stream from 'stream';
import * as crypto from 'crypto';
import { RemoteIngressHub, RemoteIngressEdge } from '../ts/index.js';

// ---------------------------------------------------------------------------
// Helpers (self-contained — same patterns as test.flowcontrol.node.ts)
// ---------------------------------------------------------------------------

async function findFreePorts(count: number): Promise<number[]> {
  const servers: net.Server[] = [];
  const ports: number[] = [];
  for (let i = 0; i < count; i++) {
    const server = net.createServer();
    await new Promise<void>((resolve) => server.listen(0, '127.0.0.1', resolve));
    ports.push((server.address() as net.AddressInfo).port);
    servers.push(server);
  }
  await Promise.all(servers.map((s) => new Promise<void>((resolve) => s.close(() => resolve()))));
  return ports;
}

type TrackingServer = net.Server & { destroyAll: () => void };

function startEchoServer(port: number, host: string): Promise<TrackingServer> {
  return new Promise((resolve, reject) => {
    const connections = new Set<net.Socket>();
    const server = net.createServer((socket) => {
      connections.add(socket);
      socket.on('close', () => connections.delete(socket));
      let proxyHeaderParsed = false;
      let pendingBuf = Buffer.alloc(0);
      socket.on('data', (data: Buffer) => {
        if (!proxyHeaderParsed) {
          pendingBuf = Buffer.concat([pendingBuf, data]);
          const idx = pendingBuf.indexOf('\r\n');
          if (idx !== -1) {
            proxyHeaderParsed = true;
            const remainder = pendingBuf.subarray(idx + 2);
            if (remainder.length > 0) socket.write(remainder);
          }
          return;
        }
        socket.write(data);
      });
      socket.on('error', () => {});
    }) as TrackingServer;
    server.destroyAll = () => {
      for (const conn of connections) conn.destroy();
      connections.clear();
    };
    server.on('error', reject);
    server.listen(port, host, () => resolve(server));
  });
}

function sendAndReceive(port: number, data: Buffer, timeoutMs = 30000): Promise<Buffer> {
  return new Promise((resolve, reject) => {
    const chunks: Buffer[] = [];
    let totalReceived = 0;
    const expectedLength = data.length;
    let settled = false;

    const client = net.createConnection({ host: '127.0.0.1', port }, () => {
      client.write(data);
      client.end();
    });

    const timer = setTimeout(() => {
      if (!settled) {
        settled = true;
        client.destroy();
        reject(new Error(`Timeout after ${timeoutMs}ms — received ${totalReceived}/${expectedLength} bytes`));
      }
    }, timeoutMs);

    client.on('data', (chunk: Buffer) => {
      chunks.push(chunk);
      totalReceived += chunk.length;
      if (totalReceived >= expectedLength && !settled) {
        settled = true;
        clearTimeout(timer);
        client.destroy();
        resolve(Buffer.concat(chunks));
      }
    });

    client.on('end', () => {
      if (!settled) {
        settled = true;
        clearTimeout(timer);
        resolve(Buffer.concat(chunks));
      }
    });

    client.on('error', (err) => {
      if (!settled) {
        settled = true;
        clearTimeout(timer);
        reject(err);
      }
    });
  });
}

function sha256(buf: Buffer): string {
  return crypto.createHash('sha256').update(buf).digest('hex');
}

// ---------------------------------------------------------------------------
// Throttle Proxy: rate-limits TCP traffic between edge and hub
// ---------------------------------------------------------------------------

class ThrottleTransform extends stream.Transform {
  private bytesPerSec: number;
  private bucket: number;
  private lastRefill: number;
  private destroyed_: boolean = false;

  constructor(bytesPerSecond: number) {
    super();
    this.bytesPerSec = bytesPerSecond;
    this.bucket = bytesPerSecond;
    this.lastRefill = Date.now();
  }

  _transform(chunk: Buffer, _encoding: BufferEncoding, callback: stream.TransformCallback) {
    if (this.destroyed_) return;

    const now = Date.now();
    const elapsed = (now - this.lastRefill) / 1000;
    this.bucket = Math.min(this.bytesPerSec, this.bucket + elapsed * this.bytesPerSec);
    this.lastRefill = now;

    if (chunk.length <= this.bucket) {
      this.bucket -= chunk.length;
      callback(null, chunk);
    } else {
      // Not enough budget — delay the entire chunk (don't split)
      const deficit = chunk.length - this.bucket;
      this.bucket = 0;
      const delayMs = Math.min((deficit / this.bytesPerSec) * 1000, 1000);
      setTimeout(() => {
        if (this.destroyed_) { callback(); return; }
        this.lastRefill = Date.now();
        this.bucket = 0;
        callback(null, chunk);
      }, delayMs);
    }
  }

  _destroy(err: Error | null, callback: (error: Error | null) => void) {
    this.destroyed_ = true;
    callback(err);
  }
}

interface ThrottleProxy {
  server: net.Server;
  close: () => Promise<void>;
}

async function startThrottleProxy(
  listenPort: number,
  targetHost: string,
  targetPort: number,
  bytesPerSecond: number,
): Promise<ThrottleProxy> {
  const connections = new Set<net.Socket>();
  const server = net.createServer((clientSock) => {
    connections.add(clientSock);
    const upstream = net.createConnection({ host: targetHost, port: targetPort });
    connections.add(upstream);

    const throttleUp = new ThrottleTransform(bytesPerSecond);
    const throttleDown = new ThrottleTransform(bytesPerSecond);

    clientSock.pipe(throttleUp).pipe(upstream);
    upstream.pipe(throttleDown).pipe(clientSock);

    let cleaned = false;
    const cleanup = (source: string, err?: Error) => {
      if (cleaned) return;
      cleaned = true;
      if (err) {
        console.error(`[ThrottleProxy] cleanup triggered by ${source}: ${err.message}`);
      } else {
        console.error(`[ThrottleProxy] cleanup triggered by ${source} (no error)`);
      }
      console.error(`[ThrottleProxy] stack:`, new Error().stack);
      throttleUp.destroy();
      throttleDown.destroy();
      clientSock.destroy();
      upstream.destroy();
      connections.delete(clientSock);
      connections.delete(upstream);
    };
    clientSock.on('error', (e) => cleanup('clientSock.error', e));
    upstream.on('error', (e) => cleanup('upstream.error', e));
    throttleUp.on('error', (e) => cleanup('throttleUp.error', e));
    throttleDown.on('error', (e) => cleanup('throttleDown.error', e));
    clientSock.on('close', () => cleanup('clientSock.close'));
    upstream.on('close', () => cleanup('upstream.close'));
  });

  await new Promise<void>((resolve) => server.listen(listenPort, '127.0.0.1', resolve));
  return {
    server,
    close: async () => {
      for (const c of connections) c.destroy();
      connections.clear();
      await new Promise<void>((resolve) => server.close(() => resolve()));
    },
  };
}

// ---------------------------------------------------------------------------
// Test state
// ---------------------------------------------------------------------------

let hub: RemoteIngressHub;
let edge: RemoteIngressEdge;
let echoServer: TrackingServer;
let throttle: ThrottleProxy;
let hubPort: number;
let proxyPort: number;
let edgePort: number;

// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------

tap.test('setup: start throttled tunnel (100 Mbit/s)', async () => {
  [hubPort, proxyPort, edgePort] = await findFreePorts(3);

  echoServer = await startEchoServer(edgePort, '127.0.0.2');

  // Throttle proxy: edge → proxy → hub at 100 Mbit/s (12.5 MB/s)
  throttle = await startThrottleProxy(proxyPort, '127.0.0.1', hubPort, 12.5 * 1024 * 1024);

  hub = new RemoteIngressHub();
  edge = new RemoteIngressEdge();

  await hub.start({ tunnelPort: hubPort, targetHost: '127.0.0.2' });
  await hub.updateAllowedEdges([
    { id: 'test-edge', secret: 'test-secret', listenPorts: [edgePort] },
  ]);

  const connectedPromise = new Promise<void>((resolve, reject) => {
    const timeout = setTimeout(() => reject(new Error('Edge did not connect within 10s')), 10000);
    edge.once('tunnelConnected', () => {
      clearTimeout(timeout);
      resolve();
    });
  });

  // Edge connects through throttle proxy
  await edge.start({
    hubHost: '127.0.0.1',
    hubPort: proxyPort,
    edgeId: 'test-edge',
    secret: 'test-secret',
    bindAddress: '127.0.0.1',
  });

  await connectedPromise;
  await new Promise((resolve) => setTimeout(resolve, 500));

  const status = await edge.getStatus();
  expect(status.connected).toBeTrue();
});

tap.test('throttled: 5 streams x 20MB each through 100Mbit tunnel', async () => {
  const streamCount = 5;
  const payloadSize = 20 * 1024 * 1024; // 20MB per stream = 100MB total round-trip

  const payloads = Array.from({ length: streamCount }, () => crypto.randomBytes(payloadSize));
  const promises = payloads.map((data) => {
    const hash = sha256(data);
    return sendAndReceive(edgePort, data, 300000).then((received) => ({
      sent: hash,
      received: sha256(received),
      sizeOk: received.length === payloadSize,
    }));
  });

  const results = await Promise.all(promises);
  const failures = results.filter((r) => !r.sizeOk || r.sent !== r.received);
  expect(failures.length).toEqual(0);

  const status = await edge.getStatus();
  expect(status.connected).toBeTrue();
});

tap.test('throttled: slow consumer with 20MB does not kill other streams', async () => {
  // Open a connection that creates download-direction backpressure:
  // send 20MB but DON'T read the response — client TCP receive buffer fills
  const slowSock = net.createConnection({ host: '127.0.0.1', port: edgePort });
  await new Promise<void>((resolve) => slowSock.on('connect', resolve));
  const slowData = crypto.randomBytes(20 * 1024 * 1024);
  slowSock.write(slowData);
  slowSock.end();
  // Don't read — backpressure builds on the download path

  // Wait for backpressure to develop
  await new Promise((r) => setTimeout(r, 2000));

  // Meanwhile, 5 normal echo streams with 20MB each must complete
  const payload = crypto.randomBytes(20 * 1024 * 1024);
  const hash = sha256(payload);
  const promises = Array.from({ length: 5 }, () =>
    sendAndReceive(edgePort, payload, 300000).then((r) => ({
      hash: sha256(r),
      sizeOk: r.length === payload.length,
    }))
  );
  const results = await Promise.all(promises);
  const failures = results.filter((r) => !r.sizeOk || r.hash !== hash);
  expect(failures.length).toEqual(0);

  // Tunnel still alive
  const status = await edge.getStatus();
  expect(status.connected).toBeTrue();

  slowSock.destroy();
});

tap.test('throttled: rapid churn — 3 x 20MB long + 50 x 1MB short streams', async () => {
  // 3 long streams (20MB each) running alongside 50 short streams (1MB each)
  const longPayload = crypto.randomBytes(20 * 1024 * 1024);
  const longHash = sha256(longPayload);
  const longPromises = Array.from({ length: 3 }, () =>
    sendAndReceive(edgePort, longPayload, 300000).then((r) => ({
      hash: sha256(r),
      sizeOk: r.length === longPayload.length,
    }))
  );

  const shortPayload = crypto.randomBytes(1024 * 1024);
  const shortHash = sha256(shortPayload);
  const shortPromises = Array.from({ length: 50 }, () =>
    sendAndReceive(edgePort, shortPayload, 300000).then((r) => ({
      hash: sha256(r),
      sizeOk: r.length === shortPayload.length,
    }))
  );

  const [longResults, shortResults] = await Promise.all([
    Promise.all(longPromises),
    Promise.all(shortPromises),
  ]);

  const longFails = longResults.filter((r) => !r.sizeOk || r.hash !== longHash);
  const shortFails = shortResults.filter((r) => !r.sizeOk || r.hash !== shortHash);
  expect(longFails.length).toEqual(0);
  expect(shortFails.length).toEqual(0);

  const status = await edge.getStatus();
  expect(status.connected).toBeTrue();
});

tap.test('throttled: 3 burst waves of 5 streams x 20MB each', async () => {
  for (let wave = 0; wave < 3; wave++) {
    const streamCount = 5;
    const payloadSize = 20 * 1024 * 1024; // 20MB per stream = 100MB per wave

    const promises = Array.from({ length: streamCount }, () => {
      const data = crypto.randomBytes(payloadSize);
      return sendAndReceive(edgePort, data, 300000).then((r) => r.length === payloadSize);
    });

    const results = await Promise.all(promises);
    const ok = results.filter(Boolean).length;
    expect(ok).toEqual(streamCount);

    // Brief pause between waves
    await new Promise((r) => setTimeout(r, 500));

    const status = await edge.getStatus();
    expect(status.connected).toBeTrue();
  }
});

tap.test('throttled: tunnel still works after all load tests', async () => {
  const data = crypto.randomBytes(1024);
  const hash = sha256(data);
  const received = await sendAndReceive(edgePort, data, 30000);
  expect(sha256(received)).toEqual(hash);

  const status = await edge.getStatus();
  expect(status.connected).toBeTrue();
});

tap.test('teardown: stop tunnel', async () => {
  await edge.stop();
  await hub.stop();
  if (throttle) await throttle.close();
  await new Promise<void>((resolve) => echoServer.close(() => resolve()));
});

export default tap.start();
@@ -3,6 +3,6 @@
  */
 export const commitinfo = {
   name: '@serve.zone/remoteingress',
-  version: '4.8.6',
+  version: '4.9.0',
   description: 'Edge ingress tunnel for DcRouter - accepts incoming TCP connections at network edge and tunnels them to DcRouter SmartProxy preserving client IP via PROXY protocol v1.'
 }