fix(protocol,edge): optimize tunnel frame handling and zero-copy uploads in edge I/O
@@ -13,6 +13,15 @@ use serde::{Deserialize, Serialize};
 
 use remoteingress_protocol::*;
 
+type EdgeTlsStream = tokio_rustls::client::TlsStream<TcpStream>;
+
+/// Result of processing a frame (shared with hub.rs pattern).
+#[allow(dead_code)]
+enum EdgeFrameAction {
+    Continue,
+    Disconnect(String),
+}
+
 /// Per-stream state tracked in the edge's client_writers map.
 struct EdgeStreamState {
     /// Channel to deliver FRAME_DATA_BACK payloads to the hub_to_client task.
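
Note: the hunk above cuts off inside EdgeStreamState. As a hedged sketch, the
struct can be inferred from the fields this diff touches (back_tx, send_window,
window_notify); the field set and types below are assumptions, and the real
definition may carry more fields:

    use std::sync::atomic::AtomicU32;
    use tokio::sync::{mpsc, Notify};

    // Inferred sketch only, reconstructed from usage elsewhere in this diff.
    struct EdgeStreamState {
        back_tx: mpsc::Sender<Vec<u8>>, // FRAME_DATA_BACK payloads to the hub_to_client task
        send_window: AtomicU32,         // remaining upload credit, in bytes (assumed unit)
        window_notify: Notify,          // wakes the upload task when credit is granted
    }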
@@ -272,6 +281,83 @@ enum EdgeLoopResult {
     Reconnect(String), // reason for disconnection
 }
 
+/// Process a single frame received from the hub side of the tunnel.
+/// Handles FRAME_DATA_BACK, FRAME_WINDOW_UPDATE_BACK, FRAME_CLOSE_BACK, FRAME_CONFIG, FRAME_PING.
+async fn handle_edge_frame(
+    frame: Frame,
+    tunnel_io: &mut remoteingress_protocol::TunnelIo<EdgeTlsStream>,
+    client_writers: &Arc<Mutex<HashMap<u32, EdgeStreamState>>>,
+    listen_ports: &Arc<RwLock<Vec<u16>>>,
+    event_tx: &mpsc::Sender<EdgeEvent>,
+    tunnel_writer_tx: &mpsc::Sender<Vec<u8>>,
+    tunnel_data_tx: &mpsc::Sender<Vec<u8>>,
+    port_listeners: &mut HashMap<u16, JoinHandle<()>>,
+    active_streams: &Arc<AtomicU32>,
+    next_stream_id: &Arc<AtomicU32>,
+    edge_id: &str,
+    connection_token: &CancellationToken,
+    bind_address: &str,
+) -> EdgeFrameAction {
+    match frame.frame_type {
+        FRAME_DATA_BACK => {
+            let mut writers = client_writers.lock().await;
+            if let Some(state) = writers.get(&frame.stream_id) {
+                if state.back_tx.try_send(frame.payload).is_err() {
+                    log::warn!("Stream {} back-channel full, closing", frame.stream_id);
+                    writers.remove(&frame.stream_id);
+                }
+            }
+        }
+        FRAME_WINDOW_UPDATE_BACK => {
+            if let Some(increment) = decode_window_update(&frame.payload) {
+                if increment > 0 {
+                    let writers = client_writers.lock().await;
+                    if let Some(state) = writers.get(&frame.stream_id) {
+                        let prev = state.send_window.fetch_add(increment, Ordering::Release);
+                        if prev + increment > MAX_WINDOW_SIZE {
+                            state.send_window.store(MAX_WINDOW_SIZE, Ordering::Release);
+                        }
+                        state.window_notify.notify_one();
+                    }
+                }
+            }
+        }
+        FRAME_CLOSE_BACK => {
+            let mut writers = client_writers.lock().await;
+            writers.remove(&frame.stream_id);
+        }
+        FRAME_CONFIG => {
+            if let Ok(update) = serde_json::from_slice::<ConfigUpdate>(&frame.payload) {
+                log::info!("Config update from hub: ports {:?}", update.listen_ports);
+                *listen_ports.write().await = update.listen_ports.clone();
+                let _ = event_tx.try_send(EdgeEvent::PortsUpdated {
+                    listen_ports: update.listen_ports.clone(),
+                });
+                apply_port_config(
+                    &update.listen_ports,
+                    port_listeners,
+                    tunnel_writer_tx,
+                    tunnel_data_tx,
+                    client_writers,
+                    active_streams,
+                    next_stream_id,
+                    edge_id,
+                    connection_token,
+                    bind_address,
+                );
+            }
+        }
+        FRAME_PING => {
+            // Queue PONG directly — no channel round-trip, guaranteed delivery
+            tunnel_io.queue_ctrl(encode_frame(0, FRAME_PONG, &[]));
+        }
+        _ => {
+            log::warn!("Unexpected frame type {} from hub", frame.frame_type);
+        }
+    }
+    EdgeFrameAction::Continue
+}
+
 async fn connect_to_hub_and_run(
     config: &EdgeConfig,
     connected: &Arc<RwLock<bool>>,
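
Note: handle_edge_frame leans on decode_window_update from the protocol crate.
A minimal sketch of the contract the handler relies on, assuming (without
confirmation from the crate) that the payload is a 4-byte big-endian u32
increment:

    // Hypothetical sketch, not the crate's actual decoder.
    fn decode_window_update(payload: &[u8]) -> Option<u32> {
        let bytes: [u8; 4] = payload.get(..4)?.try_into().ok()?;
        Some(u32::from_be_bytes(bytes))
    }

Returning None on a short payload makes the handler ignore a malformed window
update instead of tearing down the tunnel, which matches the tolerant
`if let Some(...)` handling above.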
@@ -436,73 +522,22 @@ async fn connect_to_hub_and_run(
     let result = 'io_loop: loop {
         // Drain any buffered frames
         loop {
-            match tunnel_io.try_parse_frame() {
-                Some(Ok(frame)) => {
-                    last_activity = Instant::now();
-                    liveness_deadline.as_mut().reset(last_activity + liveness_timeout_dur);
-                    match frame.frame_type {
-                        FRAME_DATA_BACK => {
-                            let mut writers = client_writers.lock().await;
-                            if let Some(state) = writers.get(&frame.stream_id) {
-                                if state.back_tx.try_send(frame.payload).is_err() {
-                                    log::warn!("Stream {} back-channel full, closing", frame.stream_id);
-                                    writers.remove(&frame.stream_id);
-                                }
-                            }
-                        }
-                        FRAME_WINDOW_UPDATE_BACK => {
-                            if let Some(increment) = decode_window_update(&frame.payload) {
-                                if increment > 0 {
-                                    let writers = client_writers.lock().await;
-                                    if let Some(state) = writers.get(&frame.stream_id) {
-                                        let prev = state.send_window.fetch_add(increment, Ordering::Release);
-                                        if prev + increment > MAX_WINDOW_SIZE {
-                                            state.send_window.store(MAX_WINDOW_SIZE, Ordering::Release);
-                                        }
-                                        state.window_notify.notify_one();
-                                    }
-                                }
-                            }
-                        }
-                        FRAME_CLOSE_BACK => {
-                            let mut writers = client_writers.lock().await;
-                            writers.remove(&frame.stream_id);
-                        }
-                        FRAME_CONFIG => {
-                            if let Ok(update) = serde_json::from_slice::<ConfigUpdate>(&frame.payload) {
-                                log::info!("Config update from hub: ports {:?}", update.listen_ports);
-                                *listen_ports.write().await = update.listen_ports.clone();
-                                let _ = event_tx.try_send(EdgeEvent::PortsUpdated {
-                                    listen_ports: update.listen_ports.clone(),
-                                });
-                                apply_port_config(
-                                    &update.listen_ports,
-                                    &mut port_listeners,
-                                    &tunnel_writer_tx,
-                                    &tunnel_data_tx,
-                                    &client_writers,
-                                    active_streams,
-                                    next_stream_id,
-                                    &config.edge_id,
-                                    connection_token,
-                                    bind_address,
-                                );
-                            }
-                        }
-                        FRAME_PING => {
-                            // Queue PONG directly — no channel round-trip, guaranteed delivery
-                            tunnel_io.queue_ctrl(encode_frame(0, FRAME_PONG, &[]));
-                        }
-                        _ => {
-                            log::warn!("Unexpected frame type {} from hub", frame.frame_type);
-                        }
-                    }
-                }
+            let frame = match tunnel_io.try_parse_frame() {
+                Some(Ok(f)) => f,
                 Some(Err(e)) => {
                     log::error!("Hub frame error: {}", e);
                     break 'io_loop EdgeLoopResult::Reconnect(format!("hub_frame_error: {}", e));
                 }
                 None => break,
-            }
+            };
+            last_activity = Instant::now();
+            liveness_deadline.as_mut().reset(last_activity + liveness_timeout_dur);
+            if let EdgeFrameAction::Disconnect(reason) = handle_edge_frame(
+                frame, &mut tunnel_io, &client_writers, listen_ports, event_tx,
+                &tunnel_writer_tx, &tunnel_data_tx, &mut port_listeners,
+                active_streams, next_stream_id, &config.edge_id, connection_token, bind_address,
+            ).await {
+                break 'io_loop EdgeLoopResult::Reconnect(reason);
+            }
         }
 
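
Note: in the drain loop, the bare `break` on None exits only the inner loop
(more bytes are needed from the socket), while `break 'io_loop` abandons the
connection. The refactor relies on try_parse_frame popping complete frames out
of a read buffer without awaiting. A self-contained sketch of that contract,
using local stand-in types and an assumed 9-byte header of
[u32 stream_id][u8 frame_type][u32 payload_len] in big-endian (the crate's
real layout is not confirmed here):

    struct Frame { stream_id: u32, frame_type: u8, payload: Vec<u8> }

    struct ReadBuf { buf: Vec<u8> }

    impl ReadBuf {
        /// None = need more bytes from the socket; Some(Err) would signal
        /// a protocol error in the real implementation.
        fn try_parse_frame(&mut self) -> Option<Result<Frame, String>> {
            if self.buf.len() < 9 { return None; }
            let len = u32::from_be_bytes(self.buf[5..9].try_into().unwrap()) as usize;
            if self.buf.len() < 9 + len { return None; } // partial payload
            let stream_id = u32::from_be_bytes(self.buf[0..4].try_into().unwrap());
            let frame_type = self.buf[4];
            let payload = self.buf[9..9 + len].to_vec();
            self.buf.drain(..9 + len); // consume header + payload
            Some(Ok(Frame { stream_id, frame_type, payload }))
        }
    }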
@@ -515,61 +550,12 @@ async fn connect_to_hub_and_run(
             remoteingress_protocol::TunnelEvent::Frame(frame) => {
                 last_activity = Instant::now();
                 liveness_deadline.as_mut().reset(last_activity + liveness_timeout_dur);
-                match frame.frame_type {
-                    FRAME_DATA_BACK => {
-                        let mut writers = client_writers.lock().await;
-                        if let Some(state) = writers.get(&frame.stream_id) {
-                            if state.back_tx.try_send(frame.payload).is_err() {
-                                log::warn!("Stream {} back-channel full, closing", frame.stream_id);
-                                writers.remove(&frame.stream_id);
-                            }
-                        }
-                    }
-                    FRAME_WINDOW_UPDATE_BACK => {
-                        if let Some(increment) = decode_window_update(&frame.payload) {
-                            if increment > 0 {
-                                let writers = client_writers.lock().await;
-                                if let Some(state) = writers.get(&frame.stream_id) {
-                                    let prev = state.send_window.fetch_add(increment, Ordering::Release);
-                                    if prev + increment > MAX_WINDOW_SIZE {
-                                        state.send_window.store(MAX_WINDOW_SIZE, Ordering::Release);
-                                    }
-                                    state.window_notify.notify_one();
-                                }
-                            }
-                        }
-                    }
-                    FRAME_CLOSE_BACK => {
-                        let mut writers = client_writers.lock().await;
-                        writers.remove(&frame.stream_id);
-                    }
-                    FRAME_CONFIG => {
-                        if let Ok(update) = serde_json::from_slice::<ConfigUpdate>(&frame.payload) {
-                            log::info!("Config update from hub: ports {:?}", update.listen_ports);
-                            *listen_ports.write().await = update.listen_ports.clone();
-                            let _ = event_tx.try_send(EdgeEvent::PortsUpdated {
-                                listen_ports: update.listen_ports.clone(),
-                            });
-                            apply_port_config(
-                                &update.listen_ports,
-                                &mut port_listeners,
-                                &tunnel_writer_tx,
-                                &tunnel_data_tx,
-                                &client_writers,
-                                active_streams,
-                                next_stream_id,
-                                &config.edge_id,
-                                connection_token,
-                                bind_address,
-                            );
-                        }
-                    }
-                    FRAME_PING => {
-                        tunnel_io.queue_ctrl(encode_frame(0, FRAME_PONG, &[]));
-                    }
-                    _ => {
-                        log::warn!("Unexpected frame type {} from hub", frame.frame_type);
-                    }
-                }
+                if let EdgeFrameAction::Disconnect(reason) = handle_edge_frame(
+                    frame, &mut tunnel_io, &client_writers, listen_ports, event_tx,
+                    &tunnel_writer_tx, &tunnel_data_tx, &mut port_listeners,
+                    active_streams, next_stream_id, &config.edge_id, connection_token, bind_address,
+                ).await {
+                    break EdgeLoopResult::Reconnect(reason);
+                }
             }
             remoteingress_protocol::TunnelEvent::Eof => {
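
Note: both I/O loops reset a pinned liveness deadline whenever a frame
arrives, so the timeout only fires after a genuinely idle period. The pattern
in isolation (the 60s timeout and the sleep-based stand-in for frame arrival
are illustrative, not values from this codebase):

    use std::time::Duration;
    use tokio::time::{sleep_until, Instant};

    async fn liveness_demo() {
        let timeout = Duration::from_secs(60); // illustrative value
        let mut last_activity = Instant::now();
        let liveness_deadline = sleep_until(last_activity + timeout);
        tokio::pin!(liveness_deadline);
        let mut frames = 0u32;
        loop {
            tokio::select! {
                // Fires only if nothing pushed the deadline forward in time.
                _ = liveness_deadline.as_mut() => break,
                // Stand-in for "a frame arrived": push the deadline forward.
                _ = tokio::time::sleep(Duration::from_secs(10)), if frames < 3 => {
                    frames += 1;
                    last_activity = Instant::now();
                    liveness_deadline.as_mut().reset(last_activity + timeout);
                }
            }
        }
    }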
@@ -813,15 +799,21 @@ async fn handle_client_connection(
         let _ = client_write.shutdown().await;
     });
 
-    // Task: client -> hub (upload direction) with per-stream flow control
-    let mut buf = vec![0u8; 32768];
+    // Task: client -> hub (upload direction) with per-stream flow control.
+    // Zero-copy: read payload directly after the header, then prepend header.
+    let mut buf = vec![0u8; FRAME_HEADER_SIZE + 32768];
     loop {
-        // Wait for send window to have capacity (with stall timeout)
+        // Wait for send window to have capacity (with stall timeout).
+        // Safe pattern: register notified BEFORE checking the condition
+        // to avoid missing a notify_one that fires between load and select.
         loop {
+            let notified = window_notify.notified();
+            tokio::pin!(notified);
+            notified.as_mut().enable();
             let w = send_window.load(Ordering::Acquire);
             if w > 0 { break; }
             tokio::select! {
-                _ = window_notify.notified() => continue,
+                _ = notified => continue,
                 _ = client_token.cancelled() => break,
                 _ = tokio::time::sleep(Duration::from_secs(120)) => {
                     log::warn!("Stream {} upload stalled (window empty for 120s)", stream_id);
@@ -844,15 +836,16 @@ async fn handle_client_connection(
         let adaptive_cap = remoteingress_protocol::compute_window_for_stream_count(
             active_streams.load(Ordering::Relaxed),
         ) as usize;
-        let max_read = w.min(buf.len()).min(adaptive_cap);
+        let max_read = w.min(32768).min(adaptive_cap);
 
         tokio::select! {
-            read_result = client_read.read(&mut buf[..max_read]) => {
+            read_result = client_read.read(&mut buf[FRAME_HEADER_SIZE..FRAME_HEADER_SIZE + max_read]) => {
                 match read_result {
                     Ok(0) => break,
                     Ok(n) => {
                         send_window.fetch_sub(n as u32, Ordering::Release);
-                        let data_frame = encode_frame(stream_id, FRAME_DATA, &buf[..n]);
+                        encode_frame_header(&mut buf, stream_id, FRAME_DATA, n);
+                        let data_frame = buf[..FRAME_HEADER_SIZE + n].to_vec();
                         if tunnel_data_tx.send(data_frame).await.is_err() {
                             log::warn!("Stream {} data channel closed, closing", stream_id);
                             break;
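
Note: "zero-copy" here drops one copy rather than all of them. The old path
copied each payload a second time inside encode_frame; the new path reads the
socket directly into buf after a reserved header region, writes the header in
place, and pays only the final to_vec() that hands an owned Vec to the
channel. The adaptive cap from compute_window_for_stream_count additionally
shrinks max_read as the active stream count grows, so a single stream cannot
monopolize tunnel credit. A sketch of the in-place header write; the 9-byte
layout and the FRAME_HEADER_SIZE value are assumptions, as the real
definitions live in remoteingress_protocol:

    // Assumed layout: [u32 stream_id BE][u8 frame_type][u32 payload_len BE].
    const FRAME_HEADER_SIZE: usize = 9; // assumption, not the crate's value

    fn encode_frame_header(buf: &mut [u8], stream_id: u32, frame_type: u8, payload_len: usize) {
        buf[0..4].copy_from_slice(&stream_id.to_be_bytes());
        buf[4] = frame_type;
        buf[5..9].copy_from_slice(&(payload_len as u32).to_be_bytes());
    }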