fix(remoteingress-core): prioritize control frames over data in edge and hub tunnel writers

This commit is contained in:
2026-03-15 19:26:39 +00:00
parent 43e320a36d
commit a8ee0b33d7
4 changed files with 67 additions and 23 deletions

View File

@@ -1,5 +1,12 @@
# Changelog
## 2026-03-15 - 4.5.3 - fix(remoteingress-core)
prioritize control frames over data in edge and hub tunnel writers
- Split tunnel/frame writers into separate control and data channels in edge and hub
- Use biased select loops so PING, PONG, WINDOW_UPDATE, OPEN, and CLOSE frames are sent before data frames
- Route stream data through dedicated data channels while keeping OPEN, CLOSE, and flow-control updates on control channels to prevent keepalive starvation under load
## 2026-03-15 - 4.5.2 - fix(remoteingress-core)
improve stream flow control retries and increase channel buffer capacity

View File

@@ -366,13 +366,28 @@ async fn connect_to_hub_and_run(
let client_writers: Arc<Mutex<HashMap<u32, EdgeStreamState>>> =
Arc::new(Mutex::new(HashMap::new()));
// A5: Channel-based tunnel writer replaces Arc<Mutex<WriteHalf>>
let (tunnel_writer_tx, mut tunnel_writer_rx) = mpsc::channel::<Vec<u8>>(4096);
// QoS dual-channel tunnel writer: control frames (PONG/WINDOW_UPDATE/CLOSE/OPEN)
// have priority over data frames (DATA). Prevents keepalive (PONG-reply) starvation under load.
let (tunnel_ctrl_tx, mut tunnel_ctrl_rx) = mpsc::channel::<Vec<u8>>(64);
let (tunnel_data_tx, mut tunnel_data_rx) = mpsc::channel::<Vec<u8>>(4096);
// Legacy alias — control channel for PONG, CLOSE, WINDOW_UPDATE, OPEN
let tunnel_writer_tx = tunnel_ctrl_tx.clone();
let tw_token = connection_token.clone();
let tunnel_writer_handle = tokio::spawn(async move {
loop {
tokio::select! {
data = tunnel_writer_rx.recv() => {
biased; // control frames always take priority over data
ctrl = tunnel_ctrl_rx.recv() => {
match ctrl {
Some(frame_data) => {
if write_half.write_all(&frame_data).await.is_err() {
break;
}
}
None => break,
}
}
data = tunnel_data_rx.recv() => {
match data {
Some(frame_data) => {
if write_half.write_all(&frame_data).await.is_err() {
@@ -393,6 +408,7 @@ async fn connect_to_hub_and_run(
&handshake.listen_ports,
&mut port_listeners,
&tunnel_writer_tx,
&tunnel_data_tx,
&client_writers,
active_streams,
next_stream_id,
@@ -458,6 +474,7 @@ async fn connect_to_hub_and_run(
&update.listen_ports,
&mut port_listeners,
&tunnel_writer_tx,
&tunnel_data_tx,
&client_writers,
active_streams,
next_stream_id,
@@ -519,7 +536,8 @@ async fn connect_to_hub_and_run(
fn apply_port_config(
new_ports: &[u16],
port_listeners: &mut HashMap<u16, JoinHandle<()>>,
tunnel_writer_tx: &mpsc::Sender<Vec<u8>>,
tunnel_ctrl_tx: &mpsc::Sender<Vec<u8>>,
tunnel_data_tx: &mpsc::Sender<Vec<u8>>,
client_writers: &Arc<Mutex<HashMap<u32, EdgeStreamState>>>,
active_streams: &Arc<AtomicU32>,
next_stream_id: &Arc<AtomicU32>,
@@ -539,7 +557,8 @@ fn apply_port_config(
// Add new ports
for &port in new_set.difference(&old_set) {
let tunnel_writer_tx = tunnel_writer_tx.clone();
let tunnel_ctrl_tx = tunnel_ctrl_tx.clone();
let tunnel_data_tx = tunnel_data_tx.clone();
let client_writers = client_writers.clone();
let active_streams = active_streams.clone();
let next_stream_id = next_stream_id.clone();
@@ -562,7 +581,8 @@ fn apply_port_config(
match accept_result {
Ok((client_stream, client_addr)) => {
let stream_id = next_stream_id.fetch_add(1, Ordering::Relaxed);
let tunnel_writer_tx = tunnel_writer_tx.clone();
let tunnel_ctrl_tx = tunnel_ctrl_tx.clone();
let tunnel_data_tx = tunnel_data_tx.clone();
let client_writers = client_writers.clone();
let active_streams = active_streams.clone();
let edge_id = edge_id.clone();
@@ -577,7 +597,8 @@ fn apply_port_config(
stream_id,
port,
&edge_id,
tunnel_writer_tx,
tunnel_ctrl_tx,
tunnel_data_tx,
client_writers,
client_token,
)
@@ -607,7 +628,8 @@ async fn handle_client_connection(
stream_id: u32,
dest_port: u16,
edge_id: &str,
tunnel_writer_tx: mpsc::Sender<Vec<u8>>,
tunnel_ctrl_tx: mpsc::Sender<Vec<u8>>,
tunnel_data_tx: mpsc::Sender<Vec<u8>>,
client_writers: Arc<Mutex<HashMap<u32, EdgeStreamState>>>,
client_token: CancellationToken,
) {
@@ -617,10 +639,10 @@ async fn handle_client_connection(
// Determine edge IP (use 0.0.0.0 as placeholder — hub doesn't use it for routing)
let edge_ip = "0.0.0.0";
// Send OPEN frame with PROXY v1 header via writer channel
// Send OPEN frame with PROXY v1 header via control channel
let proxy_header = build_proxy_v1_header(&client_ip, edge_ip, client_port, dest_port);
let open_frame = encode_frame(stream_id, FRAME_OPEN, proxy_header.as_bytes());
if tunnel_writer_tx.send(open_frame).await.is_err() {
if tunnel_ctrl_tx.send(open_frame).await.is_err() {
return;
}
@@ -642,7 +664,7 @@ async fn handle_client_connection(
// Task: hub -> client (download direction)
// After writing to client TCP, send WINDOW_UPDATE to hub so it can send more
let hub_to_client_token = client_token.clone();
let wu_tx = tunnel_writer_tx.clone();
let wu_tx = tunnel_ctrl_tx.clone();
let hub_to_client = tokio::spawn(async move {
let mut consumed_since_update: u32 = 0;
loop {
@@ -703,8 +725,8 @@ async fn handle_client_connection(
Ok(n) => {
send_window.fetch_sub(n as u32, Ordering::Release);
let data_frame = encode_frame(stream_id, FRAME_DATA, &buf[..n]);
if tunnel_writer_tx.send(data_frame).await.is_err() {
log::warn!("Stream {} tunnel writer closed, closing", stream_id);
if tunnel_data_tx.send(data_frame).await.is_err() {
log::warn!("Stream {} data channel closed, closing", stream_id);
break;
}
}
@@ -715,10 +737,10 @@ async fn handle_client_connection(
}
}
// Send CLOSE frame (only if not cancelled)
// Send CLOSE frame via control channel (only if not cancelled)
if !client_token.is_cancelled() {
let close_frame = encode_frame(stream_id, FRAME_CLOSE, &[]);
let _ = tunnel_writer_tx.try_send(close_frame);
let _ = tunnel_ctrl_tx.try_send(close_frame);
}
// Cleanup

View File

@@ -371,14 +371,28 @@ async fn handle_edge_connection(
);
}
// A5: Channel-based writer replaces Arc<Mutex<WriteHalf>>
// All frame writes go through this channel → dedicated writer task serializes them
let (frame_writer_tx, mut frame_writer_rx) = mpsc::channel::<Vec<u8>>(4096);
// QoS dual-channel tunnel writer: control frames (PING/PONG/WINDOW_UPDATE/CLOSE)
// have priority over data frames (DATA_BACK). This prevents PING starvation under load.
let (ctrl_tx, mut ctrl_rx) = mpsc::channel::<Vec<u8>>(64);
let (data_tx, mut data_rx) = mpsc::channel::<Vec<u8>>(4096);
// Legacy alias for code that sends both control and data (will be migrated)
let frame_writer_tx = ctrl_tx.clone();
let writer_token = edge_token.clone();
let writer_handle = tokio::spawn(async move {
loop {
tokio::select! {
data = frame_writer_rx.recv() => {
biased; // control frames always take priority over data
ctrl = ctrl_rx.recv() => {
match ctrl {
Some(frame_data) => {
if write_half.write_all(&frame_data).await.is_err() {
break;
}
}
None => break,
}
}
data = data_rx.recv() => {
match data {
Some(frame_data) => {
if write_half.write_all(&frame_data).await.is_err() {
@@ -467,7 +481,8 @@ async fn handle_edge_connection(
let edge_id_clone = edge_id.clone();
let event_tx_clone = event_tx.clone();
let streams_clone = streams.clone();
let writer_tx = frame_writer_tx.clone();
let writer_tx = ctrl_tx.clone(); // control: CLOSE_BACK, WINDOW_UPDATE_BACK
let data_writer_tx = data_tx.clone(); // data: DATA_BACK
let target = target_host.clone();
let stream_token = edge_token.child_token();
@@ -576,8 +591,8 @@ async fn handle_edge_connection(
send_window.fetch_sub(n as u32, Ordering::Release);
let frame =
encode_frame(stream_id, FRAME_DATA_BACK, &buf[..n]);
if writer_tx.send(frame).await.is_err() {
log::warn!("Stream {} writer channel closed, closing", stream_id);
if data_writer_tx.send(frame).await.is_err() {
log::warn!("Stream {} data channel closed, closing", stream_id);
break;
}
}

View File

@@ -3,6 +3,6 @@
*/
export const commitinfo = {
name: '@serve.zone/remoteingress',
version: '4.5.2',
version: '4.5.3',
description: 'Edge ingress tunnel for DcRouter - accepts incoming TCP connections at network edge and tunnels them to DcRouter SmartProxy preserving client IP via PROXY protocol v1.'
}