Compare commits
16 Commits
| SHA1 |
|---|
| e0c469147e |
| 0fdcdf566e |
| a808d4c9de |
| f8a0171ef3 |
| 1d59a48648 |
| af2ec11a2d |
| b6e66a7fa6 |
| 1391b39601 |
| e813c2f044 |
| 0b8c1f0b57 |
| a63dbf2502 |
| 4b95a3c999 |
| 51ab32f6c3 |
| ed52520d50 |
| a08011d2da |
| 679b247c8a |
changelog.md (+49)
```diff
@@ -1,5 +1,54 @@
 # Changelog
 
+## 2026-03-16 - 4.6.0 - feat(remoteingress-core)
+add adaptive per-stream flow control based on active stream counts
+
+- Track active stream counts on edge and hub connections to size per-stream flow control windows dynamically.
+- Cap WINDOW_UPDATE increments and read sizes to the adaptive window so bandwidth is shared more evenly across concurrent streams.
+- Apply the adaptive logic to both upload and download paths on edge and hub stream handlers.
+
+## 2026-03-16 - 4.5.12 - fix(remoteingress-core)
+improve tunnel liveness handling and enable TCP keepalive for accepted client sockets
+
+- Avoid disconnecting edges when PING or PONG frames cannot be queued because the control channel is temporarily full.
+- Enable TCP_NODELAY and TCP keepalive on accepted client connections to help detect stale or dropped clients.
+
+## 2026-03-16 - 4.5.11 - fix(repo)
+no changes to commit
+
+
+## 2026-03-16 - 4.5.10 - fix(remoteingress-core)
+guard zero-window reads to avoid false EOF handling on stalled streams
+
+- Prevent upload and download loops from calling read on an empty buffer when flow-control window remains at 0 after stall timeout
+- Log a warning and close the affected stream instead of misinterpreting Ok(0) as end-of-file
+
+## 2026-03-16 - 4.5.9 - fix(remoteingress-core)
+delay stream close until downstream response draining finishes to prevent truncated transfers
+
+- Waits for the hub-to-client download task to finish before sending the stream CLOSE frame
+- Prevents upstream reads from being cancelled mid-response during asymmetric transfers such as git fetch
+- Retains the existing timeout so stalled downloads still clean up safely
+
+## 2026-03-16 - 4.5.8 - fix(remoteingress-core)
+ensure upstream writes cancel promptly and reliably deliver CLOSE_BACK frames
+
+- listen for stream cancellation while waiting on upstream write timeouts so FRAME_CLOSE does not block for up to 60 seconds
+- replace try_send with send().await when emitting CLOSE_BACK frames to avoid silently dropping close notifications when the data channel is full
+
+## 2026-03-16 - 4.5.7 - fix(remoteingress-core)
+improve tunnel reconnect and frame write efficiency
+
+- Reuse the TLS connector across edge reconnections to preserve session resumption state and reduce reconnect latency.
+- Buffer hub and edge frame writes to coalesce small control and data frames into fewer TLS records and syscalls while still flushing each frame promptly.
+
+## 2026-03-16 - 4.5.6 - fix(remoteingress-core)
+disable Nagle's algorithm on edge, hub, and upstream TCP sockets to reduce control-frame latency
+
+- Enable TCP_NODELAY on the edge connection to the hub for faster PING/PONG and WINDOW_UPDATE delivery
+- Apply TCP_NODELAY on accepted hub streams before TLS handling
+- Enable TCP_NODELAY on SmartProxy upstream connections before sending the PROXY header
+
 ## 2026-03-16 - 4.5.5 - fix(remoteingress-core)
 wait for hub-to-client draining before cleanup and reliably send close frames
 
```
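For concreteness, here is what the sizing rule from the 4.6.0 entry works out to numerically. The function body below mirrors `compute_window_for_stream_count` from the protocol-crate hunk near the end of this diff; the `INITIAL_STREAM_WINDOW` constant is not shown anywhere in the diff, so the 1 MiB value here is an assumed placeholder, chosen only to make the arithmetic runnable.

```rust
// Sketch of the adaptive window rule from the protocol diff below.
// ASSUMPTION: INITIAL_STREAM_WINDOW is not visible in this diff; 1 MiB is a
// placeholder picked only to make the numbers concrete.
const INITIAL_STREAM_WINDOW: u32 = 1024 * 1024;

fn compute_window_for_stream_count(active: u32) -> u32 {
    // 32 MiB total budget, divided across active streams, clamped to
    // [64 KiB, INITIAL_STREAM_WINDOW].
    let per_stream = (32 * 1024 * 1024u64) / (active.max(1) as u64);
    per_stream.clamp(64 * 1024, INITIAL_STREAM_WINDOW as u64) as u32
}

fn main() {
    assert_eq!(compute_window_for_stream_count(0), 1024 * 1024);  // idle: full window
    assert_eq!(compute_window_for_stream_count(64), 512 * 1024);  // 32 MiB / 64
    assert_eq!(compute_window_for_stream_count(4096), 64 * 1024); // clamped to the floor
}
```

Whatever the real ceiling is, the shape is the same: aggregate in-flight data stays near the 32 MiB budget, so a single busy stream cannot monopolize buffering once concurrency rises.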
package.json

```diff
@@ -1,6 +1,6 @@
 {
   "name": "@serve.zone/remoteingress",
-  "version": "4.5.5",
+  "version": "4.6.0",
   "private": false,
   "description": "Edge ingress tunnel for DcRouter - accepts incoming TCP connections at network edge and tunnels them to DcRouter SmartProxy preserving client IP via PROXY protocol v1.",
   "main": "dist_ts/index.js",
```
rust/Cargo.lock (generated, 13 changed lines)
```diff
@@ -558,6 +558,7 @@ dependencies = [
  "rustls-pemfile",
  "serde",
  "serde_json",
+ "socket2 0.5.10",
  "tokio",
  "tokio-rustls",
  "tokio-util",
@@ -701,6 +702,16 @@ version = "1.15.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03"
 
 [[package]]
 name = "socket2"
+version = "0.5.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678"
+dependencies = [
+ "libc",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "socket2"
 version = "0.6.2"
@@ -765,7 +776,7 @@ dependencies = [
  "parking_lot",
  "pin-project-lite",
  "signal-hook-registry",
- "socket2",
+ "socket2 0.6.2",
  "tokio-macros",
  "windows-sys 0.61.2",
 ]
```
Cargo.toml

```diff
@@ -14,3 +14,4 @@ serde_json = "1"
 log = "0.4"
 rustls-pemfile = "2"
 tokio-util = "0.7"
+socket2 = "0.5"
```
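The only consumer of the new `socket2` dependency appears further down in this diff: enabling TCP keepalive on accepted client sockets, which `tokio::net::TcpStream` does not expose on its own. A minimal standalone sketch of that pattern follows; the 60-second values mirror the diff, while the bind address is an arbitrary example.

```rust
use std::time::Duration;
use tokio::net::TcpListener;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let listener = TcpListener::bind("127.0.0.1:8080").await?;
    let (stream, _addr) = listener.accept().await?;

    // tokio's TcpStream has set_nodelay(), but no keepalive setters, so we
    // borrow the underlying OS socket via socket2::SockRef.
    stream.set_nodelay(true)?;
    let ka = socket2::TcpKeepalive::new().with_time(Duration::from_secs(60));
    // The probe interval is not configurable on every platform, hence the cfg.
    #[cfg(target_os = "linux")]
    let ka = ka.with_interval(Duration::from_secs(60));
    socket2::SockRef::from(&stream).set_tcp_keepalive(&ka)?;
    Ok(())
}
```

`SockRef` only borrows the file descriptor, so the stream stays owned by tokio; this is why the diff can apply keepalive before handing the connection to the stream handler.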
```diff
@@ -194,6 +194,14 @@ async fn edge_main_loop(
     let mut backoff_ms: u64 = 1000;
     let max_backoff_ms: u64 = 30000;
 
+    // Build TLS config ONCE outside the reconnect loop — preserves session
+    // cache across reconnections for TLS session resumption (saves 1 RTT).
+    let tls_config = rustls::ClientConfig::builder()
+        .dangerous()
+        .with_custom_certificate_verifier(Arc::new(NoCertVerifier))
+        .with_no_client_auth();
+    let connector = TlsConnector::from(Arc::new(tls_config));
+
     loop {
         // Create a per-connection child token
         let connection_token = cancel_token.child_token();
```
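The point of this hunk is where the `ClientConfig` is built, not what it contains: rustls keeps the client session cache inside the config (its `resumption` store), so a config rebuilt on every attempt starts with an empty cache and every reconnect pays a full handshake. A compile-ready sketch of the hoisting pattern, using a plain root-store verifier in place of the project's `NoCertVerifier`:

```rust
use std::sync::Arc;
use tokio_rustls::{rustls, TlsConnector};

// The session cache is owned by ClientConfig, so one Arc<ClientConfig>
// shared across reconnect attempts lets TLS 1.3 tickets from the previous
// connection be replayed; rebuilding the config discards them.
fn connector_built_once() -> TlsConnector {
    let roots = rustls::RootCertStore::empty(); // sketch only; no real roots
    let config = rustls::ClientConfig::builder()
        .with_root_certificates(roots)
        .with_no_client_auth();
    TlsConnector::from(Arc::new(config))
}

fn main() {
    let connector = connector_built_once(); // build ONCE, outside the loop
    for _attempt in 0..3 {
        // Cloning the connector per attempt shares the same session cache.
        let _per_attempt = connector.clone();
    }
}
```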
```diff
@@ -209,6 +217,7 @@ async fn edge_main_loop(
             &listen_ports,
             &mut shutdown_rx,
             &connection_token,
+            &connector,
         )
         .await;
 
@@ -259,18 +268,16 @@ async fn connect_to_hub_and_run(
     listen_ports: &Arc<RwLock<Vec<u16>>>,
     shutdown_rx: &mut mpsc::Receiver<()>,
     connection_token: &CancellationToken,
+    connector: &TlsConnector,
 ) -> EdgeLoopResult {
-    // Build TLS connector that skips cert verification (auth is via secret)
-    let tls_config = rustls::ClientConfig::builder()
-        .dangerous()
-        .with_custom_certificate_verifier(Arc::new(NoCertVerifier))
-        .with_no_client_auth();
-
-    let connector = TlsConnector::from(Arc::new(tls_config));
 
     let addr = format!("{}:{}", config.hub_host, config.hub_port);
     let tcp = match TcpStream::connect(&addr).await {
-        Ok(s) => s,
+        Ok(s) => {
+            // Disable Nagle's algorithm for low-latency control frames (PING/PONG, WINDOW_UPDATE)
+            let _ = s.set_nodelay(true);
+            s
+        }
         Err(e) => {
             log::error!("Failed to connect to hub at {}: {}", addr, e);
             return EdgeLoopResult::Reconnect;
```
```diff
@@ -374,15 +381,17 @@ async fn connect_to_hub_and_run(
     let tunnel_writer_tx = tunnel_ctrl_tx.clone();
     let tw_token = connection_token.clone();
     let tunnel_writer_handle = tokio::spawn(async move {
+        // BufWriter coalesces small writes (frame headers, control frames) into fewer
+        // TLS records and syscalls. Flushed after each frame to avoid holding data.
+        let mut writer = tokio::io::BufWriter::with_capacity(65536, write_half);
         loop {
             tokio::select! {
                 biased; // control frames always take priority over data
                 ctrl = tunnel_ctrl_rx.recv() => {
                     match ctrl {
                         Some(frame_data) => {
-                            if write_half.write_all(&frame_data).await.is_err() {
-                                break;
-                            }
+                            if writer.write_all(&frame_data).await.is_err() { break; }
+                            if writer.flush().await.is_err() { break; }
                         }
                         None => break,
                     }
@@ -390,9 +399,8 @@ async fn connect_to_hub_and_run(
                 data = tunnel_data_rx.recv() => {
                     match data {
                         Some(frame_data) => {
-                            if write_half.write_all(&frame_data).await.is_err() {
-                                break;
-                            }
+                            if writer.write_all(&frame_data).await.is_err() { break; }
+                            if writer.flush().await.is_err() { break; }
                         }
                         None => break,
                     }
```
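The writer change in both hunks above is the classic buffered-writer pattern: each frame is two or more small writes (header, then payload), and without buffering each one becomes its own TLS record and syscall. `BufWriter` merges them, and the explicit `flush()` per frame keeps latency bounded. A reduced sketch of the pattern, using an in-memory sink in place of the TLS write half and a hypothetical 4-byte length-prefix framing:

```rust
use tokio::io::{AsyncWriteExt, BufWriter};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Stand-in for the TLS write half; Vec<u8> implements AsyncWrite.
    let sink: Vec<u8> = Vec::new();
    let mut writer = BufWriter::with_capacity(65536, sink);

    for payload in [&b"ping"[..], &b"window-update"[..], &b"data"[..]] {
        // Header and payload accumulate in the buffer as one contiguous chunk...
        writer.write_all(&(payload.len() as u32).to_be_bytes()).await?;
        writer.write_all(payload).await?;
        // ...and flush() hands the whole frame to the underlying stream in a
        // single write, instead of one write per write_all call.
        writer.flush().await?;
    }

    // 3 headers of 4 bytes each, plus the three payloads.
    assert_eq!(writer.get_ref().len(), 3 * 4 + 4 + 13 + 4);
    Ok(())
}
```

Flushing per frame, rather than letting the buffer fill, is what keeps this from trading latency for throughput: a PONG never sits in the buffer waiting for more data.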
```diff
@@ -486,8 +494,10 @@ async fn connect_to_hub_and_run(
                 FRAME_PING => {
                     let pong_frame = encode_frame(0, FRAME_PONG, &[]);
                     if tunnel_writer_tx.try_send(pong_frame).is_err() {
-                        log::warn!("Failed to send PONG, writer channel full/closed");
-                        break EdgeLoopResult::Reconnect;
+                        // Control channel full (WINDOW_UPDATE burst from many streams).
+                        // DON'T disconnect — the 45s liveness timeout gives margin
+                        // for the channel to drain and the next PONG to succeed.
+                        log::warn!("PONG send failed, control channel full — skipping this cycle");
                     }
                     log::trace!("Received PING from hub, sent PONG");
                 }
```
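Both liveness fixes hinge on the difference between tokio's two mpsc send flavors: `try_send` fails immediately when the channel is at capacity, while `send(...).await` waits for space. Dropping a PING or PONG is harmless (the next tick retries), but dropping a CLOSE_BACK loses a protocol message permanently, which is why the later hunks switch only the close path to the awaiting variant. A self-contained illustration; channel size and values are arbitrary:

```rust
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<&str>(1);

    tx.try_send("frame-1").unwrap(); // fits: capacity 1
    // Channel is now full: try_send returns TrySendError::Full and hands the
    // value back to the caller. Nothing is queued.
    assert!(tx.try_send("frame-2").is_err());

    // send().await instead parks until the reader frees a slot, so the frame
    // is guaranteed to be enqueued (or the channel is found closed).
    let tx2 = tx.clone();
    let producer = tokio::spawn(async move {
        tx2.send("frame-3").await.unwrap();
    });

    assert_eq!(rx.recv().await, Some("frame-1")); // frees a slot
    producer.await.unwrap();
    assert_eq!(rx.recv().await, Some("frame-3"));
}
```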
```diff
@@ -580,6 +590,15 @@ fn apply_port_config(
             accept_result = listener.accept() => {
                 match accept_result {
                     Ok((client_stream, client_addr)) => {
+                        // TCP keepalive detects dead clients that disappear without FIN.
+                        // Without this, zombie streams accumulate and never get cleaned up.
+                        let _ = client_stream.set_nodelay(true);
+                        let ka = socket2::TcpKeepalive::new()
+                            .with_time(Duration::from_secs(60));
+                        #[cfg(target_os = "linux")]
+                        let ka = ka.with_interval(Duration::from_secs(60));
+                        let _ = socket2::SockRef::from(&client_stream).set_tcp_keepalive(&ka);
+
                         let stream_id = next_stream_id.fetch_add(1, Ordering::Relaxed);
                         let tunnel_ctrl_tx = tunnel_ctrl_tx.clone();
                         let tunnel_data_tx = tunnel_data_tx.clone();
@@ -601,6 +620,7 @@ fn apply_port_config(
                             tunnel_data_tx,
                             client_writers,
                             client_token,
+                            Arc::clone(&active_streams),
                         )
                         .await;
                         active_streams.fetch_sub(1, Ordering::Relaxed);
@@ -632,6 +652,7 @@ async fn handle_client_connection(
     tunnel_data_tx: mpsc::Sender<Vec<u8>>,
     client_writers: Arc<Mutex<HashMap<u32, EdgeStreamState>>>,
     client_token: CancellationToken,
+    active_streams: Arc<AtomicU32>,
 ) {
     let client_ip = client_addr.ip().to_string();
     let client_port = client_addr.port();
```
```diff
@@ -665,6 +686,7 @@ async fn handle_client_connection(
     // After writing to client TCP, send WINDOW_UPDATE to hub so it can send more
     let hub_to_client_token = client_token.clone();
     let wu_tx = tunnel_ctrl_tx.clone();
+    let active_streams_h2c = Arc::clone(&active_streams);
     let mut hub_to_client = tokio::spawn(async move {
         let mut consumed_since_update: u32 = 0;
         loop {
@@ -676,12 +698,20 @@ async fn handle_client_connection(
                     if client_write.write_all(&data).await.is_err() {
                         break;
                     }
-                    // Track consumption for flow control
+                    // Track consumption for adaptive flow control.
+                    // The increment is capped to the adaptive window so the sender's
+                    // effective window shrinks to match current demand (fewer streams
+                    // = larger window, more streams = smaller window per stream).
                     consumed_since_update += len;
-                    if consumed_since_update >= WINDOW_UPDATE_THRESHOLD {
-                        let frame = encode_window_update(stream_id, FRAME_WINDOW_UPDATE, consumed_since_update);
+                    let adaptive_window = remoteingress_protocol::compute_window_for_stream_count(
+                        active_streams_h2c.load(Ordering::Relaxed),
+                    );
+                    let threshold = adaptive_window / 2;
+                    if consumed_since_update >= threshold {
+                        let increment = consumed_since_update.min(adaptive_window);
+                        let frame = encode_window_update(stream_id, FRAME_WINDOW_UPDATE, increment);
                         if wu_tx.try_send(frame).is_ok() {
-                            consumed_since_update = 0;
+                            consumed_since_update -= increment;
                         }
                         // If try_send fails, keep accumulating — retry on next threshold
                     }
```
```diff
@@ -718,9 +748,20 @@ async fn handle_client_connection(
             }
             if client_token.is_cancelled() { break; }
 
-            // Limit read size to available window
+            // Limit read size to available window.
+            // IMPORTANT: if window is 0 (stall timeout fired), we must NOT
+            // read into an empty buffer — read(&mut buf[..0]) returns Ok(0)
+            // which would be falsely interpreted as EOF.
             let w = send_window.load(Ordering::Acquire) as usize;
-            let max_read = w.min(buf.len());
+            if w == 0 {
+                log::warn!("Stream {} upload: window still 0 after stall timeout, closing", stream_id);
+                break;
+            }
+            // Adaptive: cap read to current per-stream target window
+            let adaptive_cap = remoteingress_protocol::compute_window_for_stream_count(
+                active_streams.load(Ordering::Relaxed),
+            ) as usize;
+            let max_read = w.min(buf.len()).min(adaptive_cap);
 
             tokio::select! {
                 read_result = client_read.read(&mut buf[..max_read]) => {
```
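The guard above exists because of a subtle `AsyncReadExt::read` contract: a read into a zero-length buffer returns `Ok(0)` without touching the source, and `Ok(0)` is also how a closed connection reports EOF. With the window at 0, `buf[..0]` is empty and the loop would "see" EOF on a perfectly healthy stream. A small demonstration with an in-memory reader, where the behavior is guaranteed; on sockets the same `Ok(0)` overload is exactly why the guard is needed:

```rust
use tokio::io::AsyncReadExt;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // A reader with plenty of data still available.
    let mut reader: &[u8] = b"response bytes still in flight";

    let mut buf = [0u8; 16];
    let window = 0usize; // what the flow-control window looks like after a stall

    // read() with an empty slice returns Ok(0) immediately — indistinguishable
    // from EOF if the caller only checks for 0.
    let n = reader.read(&mut buf[..window]).await?;
    assert_eq!(n, 0);
    assert_eq!(reader.len(), 30); // nothing was consumed; this is NOT EOF
    Ok(())
}
```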
```diff
@@ -741,27 +782,24 @@ async fn handle_client_connection(
         }
     }
 
-    // Send CLOSE frame via DATA channel (must arrive AFTER last DATA for this stream).
-    // Use send().await to guarantee delivery (try_send silently drops if channel full).
-    if !client_token.is_cancelled() {
-        let close_frame = encode_frame(stream_id, FRAME_CLOSE, &[]);
-        let _ = tunnel_data_tx.send(close_frame).await;
-    }
-
-    // Wait for the download task (hub → client) to finish draining all buffered
-    // response data. Upload EOF just means the client is done sending; the download
-    // must continue until all response data has been written to the client.
-    // This is critical for asymmetric transfers like git fetch (small request, large response).
-    // The download task will exit when:
-    // - back_rx returns None (back_tx dropped below after await, or hub sent CLOSE_BACK)
-    // - client_write fails (client disconnected)
-    // - client_token is cancelled
+    // Wait for the download task (hub → client) to finish BEFORE sending CLOSE.
+    // Upload EOF (client done sending) does NOT mean the response is done.
+    // For asymmetric transfers like git fetch (small request, large response),
+    // the response is still streaming when the upload finishes.
+    // Sending CLOSE before the response finishes would cause the hub to cancel
+    // the upstream reader mid-response, truncating the data.
     let _ = tokio::time::timeout(
         Duration::from_secs(300), // 5 min max wait for download to finish
         &mut hub_to_client,
     ).await;
 
-    // Now safe to clean up — download has finished or timed out
+    // NOW send CLOSE — the response has been fully delivered (or timed out).
+    if !client_token.is_cancelled() {
+        let close_frame = encode_frame(stream_id, FRAME_CLOSE, &[]);
+        let _ = tunnel_data_tx.send(close_frame).await;
+    }
+
+    // Clean up
     {
         let mut writers = client_writers.lock().await;
         writers.remove(&stream_id);
```
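One detail of the reordered shutdown above is worth calling out: the code awaits `&mut hub_to_client` rather than the `JoinHandle` by value. `JoinHandle` is `Unpin`, so a mutable reference to it is itself a future; awaiting it inside `timeout` does not consume the handle, which stays available for a later abort or join if the timeout fires. A minimal sketch:

```rust
use std::time::Duration;

#[tokio::main]
async fn main() {
    let mut download = tokio::spawn(async {
        tokio::time::sleep(Duration::from_secs(3600)).await; // a slow drain
    });

    // Awaiting through &mut means the handle survives a timeout.
    let waited = tokio::time::timeout(Duration::from_millis(50), &mut download).await;
    assert!(waited.is_err()); // timed out; the task is still running

    // Because we kept the handle, the task can still be cleaned up.
    download.abort();
    assert!(download.await.unwrap_err().is_cancelled());
}
```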
```diff
@@ -298,6 +298,8 @@ async fn handle_edge_connection(
     edge_token: CancellationToken,
     peer_addr: String,
 ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
+    // Disable Nagle's algorithm for low-latency control frames (PING/PONG, WINDOW_UPDATE)
+    stream.set_nodelay(true)?;
     let tls_stream = acceptor.accept(stream).await?;
     let (read_half, mut write_half) = tokio::io::split(tls_stream);
     let mut buf_reader = BufReader::new(read_half);
@@ -371,6 +373,9 @@ async fn handle_edge_connection(
         );
     }
 
+    // Per-edge active stream counter for adaptive flow control
+    let edge_stream_count = Arc::new(AtomicU32::new(0));
+
     // QoS dual-channel tunnel writer: control frames (PING/PONG/WINDOW_UPDATE/CLOSE)
     // have priority over data frames (DATA_BACK). This prevents PING starvation under load.
     let (ctrl_tx, mut ctrl_rx) = mpsc::channel::<Vec<u8>>(64);
```
```diff
@@ -379,15 +384,17 @@ async fn handle_edge_connection(
     let frame_writer_tx = ctrl_tx.clone();
     let writer_token = edge_token.clone();
     let writer_handle = tokio::spawn(async move {
+        // BufWriter coalesces small writes (frame headers, control frames) into fewer
+        // TLS records and syscalls. Flushed after each frame to avoid holding data.
+        let mut writer = tokio::io::BufWriter::with_capacity(65536, write_half);
         loop {
             tokio::select! {
                 biased; // control frames always take priority over data
                 ctrl = ctrl_rx.recv() => {
                     match ctrl {
                         Some(frame_data) => {
-                            if write_half.write_all(&frame_data).await.is_err() {
-                                break;
-                            }
+                            if writer.write_all(&frame_data).await.is_err() { break; }
+                            if writer.flush().await.is_err() { break; }
                         }
                         None => break,
                     }
@@ -395,9 +402,8 @@ async fn handle_edge_connection(
                 data = data_rx.recv() => {
                     match data {
                         Some(frame_data) => {
-                            if write_half.write_all(&frame_data).await.is_err() {
-                                break;
-                            }
+                            if writer.write_all(&frame_data).await.is_err() { break; }
+                            if writer.flush().await.is_err() { break; }
                         }
                         None => break,
                     }
```
```diff
@@ -506,8 +512,10 @@ async fn handle_edge_connection(
                     }
 
                     // Spawn task: connect to SmartProxy, send PROXY header, pipe data
+                    let stream_counter = Arc::clone(&edge_stream_count);
                     tokio::spawn(async move {
                         let _permit = permit; // hold semaphore permit until stream completes
+                        stream_counter.fetch_add(1, Ordering::Relaxed);
 
                         let result = async {
                             // A2: Connect to SmartProxy with timeout
@@ -520,6 +528,7 @@ async fn handle_edge_connection(
                                 format!("connect to SmartProxy {}:{} timed out (10s)", target, dest_port).into()
                             })??;
 
+                            upstream.set_nodelay(true)?;
                             upstream.write_all(proxy_header.as_bytes()).await?;
 
                             let (mut up_read, mut up_write) =
@@ -529,6 +538,7 @@ async fn handle_edge_connection(
                             // After writing to upstream, send WINDOW_UPDATE_BACK to edge
                             let writer_token = stream_token.clone();
                             let wub_tx = writer_tx.clone();
+                            let stream_counter_w = Arc::clone(&stream_counter);
                             let writer_for_edge_data = tokio::spawn(async move {
                                 let mut consumed_since_update: u32 = 0;
                                 loop {
```
```diff
@@ -537,10 +547,16 @@ async fn handle_edge_connection(
                                     match data {
                                         Some(data) => {
                                             let len = data.len() as u32;
-                                            match tokio::time::timeout(
-                                                Duration::from_secs(60),
-                                                up_write.write_all(&data),
-                                            ).await {
+                                            // Check cancellation alongside the write so we respond
+                                            // promptly to FRAME_CLOSE instead of blocking up to 60s.
+                                            let write_result = tokio::select! {
+                                                r = tokio::time::timeout(
+                                                    Duration::from_secs(60),
+                                                    up_write.write_all(&data),
+                                                ) => r,
+                                                _ = writer_token.cancelled() => break,
+                                            };
+                                            match write_result {
                                                 Ok(Ok(())) => {}
                                                 Ok(Err(_)) => break,
                                                 Err(_) => {
@@ -548,12 +564,18 @@ async fn handle_edge_connection(
                                                     break;
                                                 }
                                             }
-                                            // Track consumption for flow control
+                                            // Track consumption for adaptive flow control.
+                                            // Increment capped to adaptive window to limit per-stream in-flight data.
                                             consumed_since_update += len;
-                                            if consumed_since_update >= WINDOW_UPDATE_THRESHOLD {
-                                                let frame = encode_window_update(stream_id, FRAME_WINDOW_UPDATE_BACK, consumed_since_update);
+                                            let adaptive_window = remoteingress_protocol::compute_window_for_stream_count(
+                                                stream_counter_w.load(Ordering::Relaxed),
+                                            );
+                                            let threshold = adaptive_window / 2;
+                                            if consumed_since_update >= threshold {
+                                                let increment = consumed_since_update.min(adaptive_window);
+                                                let frame = encode_window_update(stream_id, FRAME_WINDOW_UPDATE_BACK, increment);
                                                 if wub_tx.try_send(frame).is_ok() {
-                                                    consumed_since_update = 0;
+                                                    consumed_since_update -= increment;
                                                 }
                                                 // If try_send fails, keep accumulating — retry on next threshold
                                             }
```
```diff
@@ -591,9 +613,20 @@ async fn handle_edge_connection(
                                 }
                                 if stream_token.is_cancelled() { break; }
 
-                                // Limit read size to available window
+                                // Limit read size to available window.
+                                // IMPORTANT: if window is 0 (stall timeout fired), we must NOT
+                                // read into an empty buffer — read(&mut buf[..0]) returns Ok(0)
+                                // which would be falsely interpreted as EOF.
                                 let w = send_window.load(Ordering::Acquire) as usize;
-                                let max_read = w.min(buf.len());
+                                if w == 0 {
+                                    log::warn!("Stream {} download: window still 0 after stall timeout, closing", stream_id);
+                                    break;
+                                }
+                                // Adaptive: cap read to current per-stream target window
+                                let adaptive_cap = remoteingress_protocol::compute_window_for_stream_count(
+                                    stream_counter.load(Ordering::Relaxed),
+                                ) as usize;
+                                let max_read = w.min(buf.len()).min(adaptive_cap);
 
                                 tokio::select! {
                                     read_result = up_read.read(&mut buf[..max_read]) => {
```
```diff
@@ -615,10 +648,11 @@ async fn handle_edge_connection(
                                 }
                             }
 
-                            // Send CLOSE_BACK via DATA channel (must arrive AFTER last DATA_BACK)
+                            // Send CLOSE_BACK via DATA channel (must arrive AFTER last DATA_BACK).
+                            // Use send().await to guarantee delivery (try_send silently drops if full).
                             if !stream_token.is_cancelled() {
                                 let close_frame = encode_frame(stream_id, FRAME_CLOSE_BACK, &[]);
-                                let _ = data_writer_tx.try_send(close_frame);
+                                let _ = data_writer_tx.send(close_frame).await;
                             }
 
                             writer_for_edge_data.abort();
@@ -628,10 +662,11 @@ async fn handle_edge_connection(
 
                         if let Err(e) = result {
                             log::error!("Stream {} error: {}", stream_id, e);
-                            // Send CLOSE_BACK via DATA channel on error (must arrive after any DATA_BACK)
+                            // Send CLOSE_BACK via DATA channel on error (must arrive after any DATA_BACK).
+                            // Use send().await to guarantee delivery.
                             if !stream_token.is_cancelled() {
                                 let close_frame = encode_frame(stream_id, FRAME_CLOSE_BACK, &[]);
-                                let _ = data_writer_tx.try_send(close_frame);
+                                let _ = data_writer_tx.send(close_frame).await;
                             }
                         }
@@ -646,6 +681,7 @@ async fn handle_edge_connection(
                                 stream_id,
                             });
                         }
+                        stream_counter.fetch_sub(1, Ordering::Relaxed);
                     });
                 }
                 FRAME_DATA => {
```
```diff
@@ -707,8 +743,9 @@ async fn handle_edge_connection(
             _ = ping_ticker.tick() => {
                 let ping_frame = encode_frame(0, FRAME_PING, &[]);
                 if frame_writer_tx.try_send(ping_frame).is_err() {
-                    log::warn!("Failed to send PING to edge {}, writer channel full/closed", edge_id);
-                    break;
+                    // Control channel full — skip this PING cycle.
+                    // The 45s liveness timeout gives margin for the channel to drain.
+                    log::warn!("PING send to edge {} failed, control channel full — skipping", edge_id);
                 }
                 log::trace!("Sent PING to edge {}", edge_id);
             }
```
```diff
@@ -32,6 +32,15 @@ pub fn encode_window_update(stream_id: u32, frame_type: u8, increment: u32) -> Vec<u8> {
     encode_frame(stream_id, frame_type, &increment.to_be_bytes())
 }
 
+/// Compute the target per-stream window size based on the number of active streams.
+/// Total memory budget is ~32MB shared across all streams. As more streams are active,
+/// each gets a smaller window. This adapts to current demand — few streams get high
+/// throughput, many streams save memory and reduce control frame pressure.
+pub fn compute_window_for_stream_count(active: u32) -> u32 {
+    let per_stream = (32 * 1024 * 1024u64) / (active.max(1) as u64);
+    per_stream.clamp(64 * 1024, INITIAL_STREAM_WINDOW as u64) as u32
+}
+
 /// Decode a WINDOW_UPDATE payload into a byte increment. Returns None if payload is malformed.
 pub fn decode_window_update(payload: &[u8]) -> Option<u32> {
     if payload.len() != 4 {
```
```diff
@@ -3,6 +3,6 @@
  */
 export const commitinfo = {
   name: '@serve.zone/remoteingress',
-  version: '4.5.5',
+  version: '4.6.0',
   description: 'Edge ingress tunnel for DcRouter - accepts incoming TCP connections at network edge and tunnels them to DcRouter SmartProxy preserving client IP via PROXY protocol v1.'
 }
```