Compare commits

..

8 Commits

8 changed files with 472 additions and 44 deletions

View File

@@ -1,5 +1,35 @@
# Changelog
## 2026-03-15 - 4.5.0 - feat(remoteingress-core)
add per-stream flow control for edge and hub tunnel data transfer
- introduce WINDOW_UPDATE frame types and protocol helpers for per-stream flow control
- track per-stream send windows on both edge and hub to limit reads based on available capacity
- send window updates after downstream writes to reduce channel pressure during large transfers
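The scheme this entry describes is classic credit-based flow control (as in HTTP/2): each sender holds a per-stream window of bytes it may transmit, shrinks it before sending, and parks until the receiver returns credit via a WINDOW_UPDATE frame. A minimal sketch of the sender-side bookkeeping, assuming the atomics-plus-Notify layout used in the diffs below (helper names are illustrative):

```rust
use std::sync::atomic::{AtomicU32, Ordering};
use tokio::sync::Notify;

/// Sender side: wait until the peer has granted credit, then claim up to `want` bytes.
async fn acquire_credit(window: &AtomicU32, notify: &Notify, want: u32) -> u32 {
    loop {
        let available = window.load(Ordering::Acquire);
        if available > 0 {
            let grant = available.min(want);
            window.fetch_sub(grant, Ordering::Release);
            return grant; // the caller reads and sends at most `grant` bytes
        }
        notify.notified().await; // woken when a WINDOW_UPDATE arrives
    }
}

/// Called when a WINDOW_UPDATE frame arrives: return credit and wake the sender.
fn on_window_update(window: &AtomicU32, notify: &Notify, increment: u32) {
    window.fetch_add(increment, Ordering::Release);
    notify.notify_one();
}
```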
## 2026-03-15 - 4.4.1 - fix(remoteingress-core)
prevent stream data loss by applying backpressure and closing saturated channels
- replace non-blocking frame writes with awaited sends in per-stream tasks so large transfers respect backpressure instead of dropping data
- close and remove streams when back-channel or data channels fill up to avoid TCP stream corruption from silently dropped frames
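The fix turns on the difference between `try_send` and an awaited `send` on a bounded tokio channel; a sketch (the function name is illustrative):

```rust
use tokio::sync::mpsc;

async fn forward_frame(tx: &mpsc::Sender<Vec<u8>>, frame: Vec<u8>) -> bool {
    // Before: tx.try_send(frame) returned Err whenever the channel was
    // momentarily full, and the frame was dropped, corrupting the TCP
    // byte stream from the peer's point of view.
    // After: awaiting send() suspends this task until capacity frees up,
    // so backpressure propagates to the socket read that produced the frame.
    tx.send(frame).await.is_ok() // false only if the receiving task is gone
}
```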
## 2026-03-03 - 4.4.0 - feat(remoteingress)
add heartbeat PING/PONG and liveness timeouts; implement fast-reconnect/backoff reset and JS crash-recovery auto-restart
- protocol: add FRAME_PING and FRAME_PONG and unit tests for ping/pong frames
- edge (Rust): reset backoff after successful connection, respond to PING with PONG, track liveness via deadline and reconnect on timeout, use Duration/Instant helpers
- hub (Rust): send periodic PING to edges, handle PONGs, enforce liveness timeout and disconnect inactive edges, use tokio interval and time utilities
- ts: RemoteIngressEdge and RemoteIngressHub: add crash-recovery auto-restart with exponential backoff and max attempts, save/restore config and allowed edges, register/remove exit handlers, ensure stop() marks stopping and cleans up listeners
- minor API/typing: introduce TAllowedEdge alias and persist allowed edges for restart recovery
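The heartbeat timing on both sides follows the standard tokio pattern: a `tokio::time::interval` drives outgoing PINGs while a resettable `sleep_until` serves as the liveness deadline. A sketch of the loop shape (frame I/O elided; the 15 s and 45 s values match the constants in the diffs below):

```rust
use std::time::Duration;
use tokio::time::{interval, sleep_until, Instant};

async fn heartbeat_loop() {
    let mut ping = interval(Duration::from_secs(15));
    ping.tick().await; // the first tick fires immediately; consume it
    let timeout = Duration::from_secs(45);
    let mut deadline = Box::pin(sleep_until(Instant::now() + timeout));
    loop {
        tokio::select! {
            _ = ping.tick() => {
                // send FRAME_PING (0x07); the peer answers with FRAME_PONG (0x08)
            }
            _ = deadline.as_mut() => {
                break; // no frames of any kind for 45 s: treat the peer as dead
            }
        }
        // The real loops also poll the frame reader and, on every received
        // frame, run: deadline.as_mut().reset(Instant::now() + timeout);
    }
}
```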
## 2026-02-26 - 4.3.0 - feat(hub)
add optional TLS certificate/key support to hub start config and bridge
- TypeScript: add tls.certPem and tls.keyPem to IHubConfig and include tlsCertPem/tlsKeyPem in startHub bridge command when both are provided
- TypeScript: extend startHub params with tlsCertPem and tlsKeyPem and conditionally send them
- Rust: change HubConfig serde attributes for tls_cert_pem and tls_key_pem from skip to default so absent PEM fields deserialize as None
- Enables optional provisioning of TLS certificate and key to the hub when provided from the JS side
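The serde detail matters because `#[serde(skip)]` skips the field during deserialization as well, so a PEM sent from the JS side was silently discarded and the hub always saw `None`; `#[serde(default)]` accepts the field when present and falls back to `None` when absent. A self-contained illustration (assumes `serde_json`; struct trimmed to the relevant fields):

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct HubConfig {
    tunnel_port: u16,
    #[serde(default)] // with #[serde(skip)] this would always deserialize as None
    tls_cert_pem: Option<String>,
}

fn main() {
    let with_tls: HubConfig = serde_json::from_str(
        r#"{"tunnelPort": 8443, "tlsCertPem": "-----BEGIN CERTIFICATE-----"}"#,
    )
    .unwrap();
    assert!(with_tls.tls_cert_pem.is_some());

    let without_tls: HubConfig = serde_json::from_str(r#"{"tunnelPort": 8443}"#).unwrap();
    assert!(without_tls.tls_cert_pem.is_none());
}
```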
## 2026-02-26 - 4.2.0 - feat(core)
expose edge peer address in hub events and migrate writers to channel-based, non-blocking framing with stream limits and timeouts

View File

@@ -1,6 +1,6 @@
{
  "name": "@serve.zone/remoteingress",
-  "version": "4.2.0",
+  "version": "4.5.0",
  "private": false,
  "description": "Edge ingress tunnel for DcRouter - accepts incoming TCP connections at network edge and tunnels them to DcRouter SmartProxy preserving client IP via PROXY protocol v1.",
  "main": "dist_ts/index.js",

View File

@@ -1,16 +1,29 @@
use std::collections::HashMap;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;
use std::time::Duration;
use tokio::io::{AsyncBufReadExt, AsyncReadExt, AsyncWriteExt, BufReader};
use tokio::net::{TcpListener, TcpStream};
-use tokio::sync::{mpsc, Mutex, RwLock};
+use tokio::sync::{mpsc, Mutex, Notify, RwLock};
use tokio::task::JoinHandle;
use tokio::time::{Instant, sleep_until};
use tokio_rustls::TlsConnector;
use tokio_util::sync::CancellationToken;
use serde::{Deserialize, Serialize};
use remoteingress_protocol::*;
/// Per-stream state tracked in the edge's client_writers map.
struct EdgeStreamState {
/// Channel to deliver FRAME_DATA_BACK payloads to the hub_to_client task.
back_tx: mpsc::Sender<Vec<u8>>,
/// Send window for FRAME_DATA (upload direction).
/// Decremented by the client reader, incremented by FRAME_WINDOW_UPDATE_BACK from hub.
send_window: Arc<AtomicU32>,
/// Notifier to wake the client reader when the window opens.
window_notify: Arc<Notify>,
}
/// Edge configuration (hub-host + credentials only; ports come from hub).
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
@@ -202,6 +215,13 @@ async fn edge_main_loop(
// Cancel connection token to kill all orphaned tasks from this cycle
connection_token.cancel();
// Reset backoff after a successful connection for fast reconnect
let was_connected = *connected.read().await;
if was_connected {
backoff_ms = 1000;
log::info!("Was connected; resetting backoff to {}ms for fast reconnect", backoff_ms);
}
*connected.write().await = false;
let _ = event_tx.try_send(EdgeEvent::TunnelDisconnected);
active_streams.store(0, Ordering::Relaxed);
@@ -214,7 +234,7 @@ async fn edge_main_loop(
EdgeLoopResult::Reconnect => {
log::info!("Reconnecting in {}ms...", backoff_ms);
tokio::select! {
-_ = tokio::time::sleep(std::time::Duration::from_millis(backoff_ms)) => {}
+_ = tokio::time::sleep(Duration::from_millis(backoff_ms)) => {}
_ = cancel_token.cancelled() => break,
_ = shutdown_rx.recv() => break,
}
@@ -336,14 +356,14 @@ async fn connect_to_hub_and_run(
_ = stun_token.cancelled() => break,
}
tokio::select! {
-_ = tokio::time::sleep(std::time::Duration::from_secs(stun_interval)) => {}
+_ = tokio::time::sleep(Duration::from_secs(stun_interval)) => {}
_ = stun_token.cancelled() => break,
}
}
});
-// Client socket map: stream_id -> sender for writing data back to client
-let client_writers: Arc<Mutex<HashMap<u32, mpsc::Sender<Vec<u8>>>>> =
+// Client socket map: stream_id -> per-stream state (back channel + flow control)
+let client_writers: Arc<Mutex<HashMap<u32, EdgeStreamState>>> =
Arc::new(Mutex::new(HashMap::new()));
// A5: Channel-based tunnel writer replaces Arc<Mutex<WriteHalf>>
@@ -380,6 +400,11 @@ async fn connect_to_hub_and_run(
connection_token,
);
// Heartbeat: liveness timeout detects silent hub failures
let liveness_timeout_dur = Duration::from_secs(45);
let mut last_activity = Instant::now();
let mut liveness_deadline = Box::pin(sleep_until(last_activity + liveness_timeout_dur));
// Read frames from hub
let mut frame_reader = FrameReader::new(buf_reader);
let result = loop {
@@ -387,13 +412,34 @@ async fn connect_to_hub_and_run(
frame_result = frame_reader.next_frame() => {
match frame_result {
Ok(Some(frame)) => {
// Reset liveness on any received frame
last_activity = Instant::now();
liveness_deadline.as_mut().reset(last_activity + liveness_timeout_dur);
match frame.frame_type {
FRAME_DATA_BACK => {
-// A1: Non-blocking send to prevent head-of-line blocking
-let writers = client_writers.lock().await;
-if let Some(tx) = writers.get(&frame.stream_id) {
-if tx.try_send(frame.payload).is_err() {
-log::warn!("Stream {} back-channel full, dropping frame", frame.stream_id);
+// Non-blocking dispatch to per-stream channel.
+// With flow control, the sender should rarely exceed the channel capacity.
+let mut writers = client_writers.lock().await;
+if let Some(state) = writers.get(&frame.stream_id) {
+if state.back_tx.try_send(frame.payload).is_err() {
+log::warn!("Stream {} back-channel full, closing stream", frame.stream_id);
+writers.remove(&frame.stream_id);
+}
+}
+}
FRAME_WINDOW_UPDATE_BACK => {
// Hub consumed data — increase our send window for this stream (upload direction)
if let Some(increment) = decode_window_update(&frame.payload) {
if increment > 0 {
let writers = client_writers.lock().await;
if let Some(state) = writers.get(&frame.stream_id) {
let prev = state.send_window.fetch_add(increment, Ordering::Release);
if prev + increment > MAX_WINDOW_SIZE {
state.send_window.store(MAX_WINDOW_SIZE, Ordering::Release);
}
state.window_notify.notify_one();
}
}
}
}
@@ -420,6 +466,14 @@ async fn connect_to_hub_and_run(
);
}
}
FRAME_PING => {
let pong_frame = encode_frame(0, FRAME_PONG, &[]);
if tunnel_writer_tx.try_send(pong_frame).is_err() {
log::warn!("Failed to send PONG, writer channel full/closed");
break EdgeLoopResult::Reconnect;
}
log::trace!("Received PING from hub, sent PONG");
}
_ => {
log::warn!("Unexpected frame type {} from hub", frame.frame_type);
}
@@ -435,6 +489,11 @@ async fn connect_to_hub_and_run(
}
}
}
_ = &mut liveness_deadline => {
log::warn!("Hub liveness timeout (no frames for {}s), reconnecting",
liveness_timeout_dur.as_secs());
break EdgeLoopResult::Reconnect;
}
_ = connection_token.cancelled() => {
log::info!("Connection cancelled");
break EdgeLoopResult::Shutdown;
@@ -461,7 +520,7 @@ fn apply_port_config(
new_ports: &[u16],
port_listeners: &mut HashMap<u16, JoinHandle<()>>,
tunnel_writer_tx: &mpsc::Sender<Vec<u8>>,
-client_writers: &Arc<Mutex<HashMap<u32, mpsc::Sender<Vec<u8>>>>>,
+client_writers: &Arc<Mutex<HashMap<u32, EdgeStreamState>>>,
active_streams: &Arc<AtomicU32>,
next_stream_id: &Arc<AtomicU32>,
edge_id: &str,
@@ -549,7 +608,7 @@ async fn handle_client_connection(
dest_port: u16,
edge_id: &str,
tunnel_writer_tx: mpsc::Sender<Vec<u8>>,
-client_writers: Arc<Mutex<HashMap<u32, mpsc::Sender<Vec<u8>>>>>,
+client_writers: Arc<Mutex<HashMap<u32, EdgeStreamState>>>,
client_token: CancellationToken,
) {
let client_ip = client_addr.ip().to_string();
@@ -565,26 +624,44 @@ async fn handle_client_connection(
return;
}
-// Set up channel for data coming back from hub
-let (back_tx, mut back_rx) = mpsc::channel::<Vec<u8>>(256);
+// Set up channel for data coming back from hub (capacity 16 is sufficient with flow control)
+let (back_tx, mut back_rx) = mpsc::channel::<Vec<u8>>(16);
let send_window = Arc::new(AtomicU32::new(INITIAL_STREAM_WINDOW));
let window_notify = Arc::new(Notify::new());
{
let mut writers = client_writers.lock().await;
-writers.insert(stream_id, back_tx);
+writers.insert(stream_id, EdgeStreamState {
back_tx,
send_window: Arc::clone(&send_window),
window_notify: Arc::clone(&window_notify),
});
}
let (mut client_read, mut client_write) = client_stream.into_split();
-// Task: hub -> client
+// Task: hub -> client (download direction)
// After writing to client TCP, send WINDOW_UPDATE to hub so it can send more
let hub_to_client_token = client_token.clone();
let wu_tx = tunnel_writer_tx.clone();
let hub_to_client = tokio::spawn(async move {
let mut consumed_since_update: u32 = 0;
loop {
tokio::select! {
data = back_rx.recv() => {
match data {
Some(data) => {
let len = data.len() as u32;
if client_write.write_all(&data).await.is_err() {
break;
}
// Track consumption for flow control
consumed_since_update += len;
if consumed_since_update >= WINDOW_UPDATE_THRESHOLD {
let increment = consumed_since_update;
consumed_since_update = 0;
let frame = encode_window_update(stream_id, FRAME_WINDOW_UPDATE, increment);
let _ = wu_tx.try_send(frame);
}
}
None => break,
}
@@ -592,21 +669,41 @@ async fn handle_client_connection(
_ = hub_to_client_token.cancelled() => break,
}
}
// Send final window update for any remaining consumed bytes
if consumed_since_update > 0 {
let frame = encode_window_update(stream_id, FRAME_WINDOW_UPDATE, consumed_since_update);
let _ = wu_tx.try_send(frame);
}
let _ = client_write.shutdown().await;
});
-// Task: client -> hub (via writer channel)
+// Task: client -> hub (upload direction) with per-stream flow control
let mut buf = vec![0u8; 32768];
loop {
// Wait for send window to have capacity
loop {
let w = send_window.load(Ordering::Acquire);
if w > 0 { break; }
tokio::select! {
_ = window_notify.notified() => continue,
_ = client_token.cancelled() => break,
}
}
if client_token.is_cancelled() { break; }
// Limit read size to available window
let w = send_window.load(Ordering::Acquire) as usize;
let max_read = w.min(buf.len());
tokio::select! {
-read_result = client_read.read(&mut buf) => {
+read_result = client_read.read(&mut buf[..max_read]) => {
match read_result {
Ok(0) => break,
Ok(n) => {
send_window.fetch_sub(n as u32, Ordering::Release);
let data_frame = encode_frame(stream_id, FRAME_DATA, &buf[..n]);
-// A5: Use try_send to avoid blocking if writer channel is full
-if tunnel_writer_tx.try_send(data_frame).is_err() {
-log::warn!("Stream {} tunnel writer full, closing", stream_id);
+if tunnel_writer_tx.send(data_frame).await.is_err() {
+log::warn!("Stream {} tunnel writer closed, closing", stream_id);
break;
}
}

View File

@@ -1,23 +1,39 @@
use std::collections::HashMap;
use std::sync::Arc;
use std::sync::atomic::{AtomicU32, Ordering};
use std::time::Duration;
use tokio::io::{AsyncBufReadExt, AsyncReadExt, AsyncWriteExt, BufReader};
use tokio::net::{TcpListener, TcpStream};
-use tokio::sync::{mpsc, Mutex, RwLock, Semaphore};
+use tokio::sync::{mpsc, Mutex, Notify, RwLock, Semaphore};
use tokio::time::{interval, sleep_until, Instant};
use tokio_rustls::TlsAcceptor;
use tokio_util::sync::CancellationToken;
use serde::{Deserialize, Serialize};
use remoteingress_protocol::*;
/// Per-stream state tracked in the hub's stream map.
struct HubStreamState {
/// Channel to deliver FRAME_DATA payloads to the upstream writer task.
data_tx: mpsc::Sender<Vec<u8>>,
/// Cancellation token for this stream.
cancel_token: CancellationToken,
/// Send window for FRAME_DATA_BACK (download direction).
/// Decremented by the upstream reader, incremented by FRAME_WINDOW_UPDATE from edge.
send_window: Arc<AtomicU32>,
/// Notifier to wake the upstream reader when the window opens.
window_notify: Arc<Notify>,
}
/// Hub configuration.
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct HubConfig {
pub tunnel_port: u16,
pub target_host: Option<String>,
-#[serde(skip)]
+#[serde(default)]
pub tls_cert_pem: Option<String>,
-#[serde(skip)]
+#[serde(default)]
pub tls_key_pem: Option<String>,
}
@@ -107,7 +123,7 @@ pub struct TunnelHub {
struct ConnectedEdgeInfo {
connected_at: u64,
peer_addr: String,
-active_streams: Arc<Mutex<HashMap<u32, (mpsc::Sender<Vec<u8>>, CancellationToken)>>>,
+active_streams: Arc<Mutex<HashMap<u32, HubStreamState>>>,
config_tx: mpsc::Sender<EdgeConfigUpdate>,
#[allow(dead_code)] // kept alive for Drop — cancels child tokens when edge is removed
cancel_token: CancellationToken,
@@ -331,7 +347,7 @@ async fn handle_edge_connection(
write_half.write_all(handshake_json.as_bytes()).await?;
// Track this edge
-let streams: Arc<Mutex<HashMap<u32, (mpsc::Sender<Vec<u8>>, CancellationToken)>>> =
+let streams: Arc<Mutex<HashMap<u32, HubStreamState>>> =
Arc::new(Mutex::new(HashMap::new()));
let now = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
@@ -407,6 +423,14 @@ async fn handle_edge_connection(
// A4: Semaphore to limit concurrent streams per edge
let stream_semaphore = Arc::new(Semaphore::new(MAX_STREAMS_PER_EDGE));
// Heartbeat: periodic PING and liveness timeout
let ping_interval_dur = Duration::from_secs(15);
let liveness_timeout_dur = Duration::from_secs(45);
let mut ping_ticker = interval(ping_interval_dur);
ping_ticker.tick().await; // consume the immediate first tick
let mut last_activity = Instant::now();
let mut liveness_deadline = Box::pin(sleep_until(last_activity + liveness_timeout_dur));
// Frame reading loop
let mut frame_reader = FrameReader::new(buf_reader);
@@ -415,6 +439,10 @@ async fn handle_edge_connection(
frame_result = frame_reader.next_frame() => {
match frame_result {
Ok(Some(frame)) => {
// Reset liveness on any received frame
last_activity = Instant::now();
liveness_deadline.as_mut().reset(last_activity + liveness_timeout_dur);
match frame.frame_type {
FRAME_OPEN => {
// A4: Check stream limit before processing
@@ -448,11 +476,18 @@ async fn handle_edge_connection(
stream_id,
});
-// Create channel for data from edge to this stream
-let (data_tx, mut data_rx) = mpsc::channel::<Vec<u8>>(256);
+// Create channel for data from edge to this stream (capacity 16 is sufficient with flow control)
+let (data_tx, mut data_rx) = mpsc::channel::<Vec<u8>>(16);
let send_window = Arc::new(AtomicU32::new(INITIAL_STREAM_WINDOW));
let window_notify = Arc::new(Notify::new());
{
let mut s = streams.lock().await;
-s.insert(stream_id, (data_tx, stream_token.clone()));
+s.insert(stream_id, HubStreamState {
data_tx,
cancel_token: stream_token.clone(),
send_window: Arc::clone(&send_window),
window_notify: Arc::clone(&window_notify),
});
}
// Spawn task: connect to SmartProxy, send PROXY header, pipe data
@@ -462,7 +497,7 @@ async fn handle_edge_connection(
let result = async {
// A2: Connect to SmartProxy with timeout
let mut upstream = tokio::time::timeout(
-std::time::Duration::from_secs(10),
+Duration::from_secs(10),
TcpStream::connect((target.as_str(), dest_port)),
)
.await
@@ -476,16 +511,28 @@ async fn handle_edge_connection(
upstream.into_split();
// Forward data from edge (via channel) to SmartProxy
// After writing to upstream, send WINDOW_UPDATE_BACK to edge
let writer_token = stream_token.clone();
let wub_tx = writer_tx.clone();
let writer_for_edge_data = tokio::spawn(async move {
let mut consumed_since_update: u32 = 0;
loop {
tokio::select! {
data = data_rx.recv() => {
match data {
Some(data) => {
let len = data.len() as u32;
if up_write.write_all(&data).await.is_err() {
break;
}
// Track consumption for flow control
consumed_since_update += len;
if consumed_since_update >= WINDOW_UPDATE_THRESHOLD {
let increment = consumed_since_update;
consumed_since_update = 0;
let frame = encode_window_update(stream_id, FRAME_WINDOW_UPDATE_BACK, increment);
let _ = wub_tx.try_send(frame);
}
}
None => break,
}
@@ -493,22 +540,43 @@ async fn handle_edge_connection(
_ = writer_token.cancelled() => break,
}
}
// Send final window update for remaining consumed bytes
if consumed_since_update > 0 {
let frame = encode_window_update(stream_id, FRAME_WINDOW_UPDATE_BACK, consumed_since_update);
let _ = wub_tx.try_send(frame);
}
let _ = up_write.shutdown().await;
});
// Forward data from SmartProxy back to edge via writer channel
// with per-stream flow control (check send_window before reading)
let mut buf = vec![0u8; 32768];
loop {
// Wait for send window to have capacity
loop {
let w = send_window.load(Ordering::Acquire);
if w > 0 { break; }
tokio::select! {
_ = window_notify.notified() => continue,
_ = stream_token.cancelled() => break,
}
}
if stream_token.is_cancelled() { break; }
// Limit read size to available window
let w = send_window.load(Ordering::Acquire) as usize;
let max_read = w.min(buf.len());
tokio::select! {
-read_result = up_read.read(&mut buf) => {
+read_result = up_read.read(&mut buf[..max_read]) => {
match read_result {
Ok(0) => break,
Ok(n) => {
send_window.fetch_sub(n as u32, Ordering::Release);
let frame =
encode_frame(stream_id, FRAME_DATA_BACK, &buf[..n]);
-// A5: Use try_send to avoid blocking if writer channel is full
-if writer_tx.try_send(frame).is_err() {
-log::warn!("Stream {} writer channel full, closing", stream_id);
+if writer_tx.send(frame).await.is_err() {
+log::warn!("Stream {} writer channel closed, closing", stream_id);
break;
}
}
@@ -553,24 +621,46 @@ async fn handle_edge_connection(
});
}
FRAME_DATA => {
-// A1: Non-blocking send to prevent head-of-line blocking
-let s = streams.lock().await;
-if let Some((tx, _)) = s.get(&frame.stream_id) {
-if tx.try_send(frame.payload).is_err() {
-log::warn!("Stream {} data channel full, dropping frame", frame.stream_id);
+// Non-blocking dispatch to per-stream channel.
+// With flow control, the sender should rarely exceed the channel capacity.
+let mut s = streams.lock().await;
+if let Some(state) = s.get(&frame.stream_id) {
+if state.data_tx.try_send(frame.payload).is_err() {
+log::warn!("Stream {} data channel full, closing stream", frame.stream_id);
+if let Some(state) = s.remove(&frame.stream_id) {
+state.cancel_token.cancel();
+}
+}
+}
+}
FRAME_WINDOW_UPDATE => {
// Edge consumed data — increase our send window for this stream
if let Some(increment) = decode_window_update(&frame.payload) {
if increment > 0 {
let s = streams.lock().await;
if let Some(state) = s.get(&frame.stream_id) {
let prev = state.send_window.fetch_add(increment, Ordering::Release);
if prev + increment > MAX_WINDOW_SIZE {
state.send_window.store(MAX_WINDOW_SIZE, Ordering::Release);
}
state.window_notify.notify_one();
}
}
}
}
FRAME_CLOSE => {
let mut s = streams.lock().await;
-if let Some((_, token)) = s.remove(&frame.stream_id) {
-token.cancel();
+if let Some(state) = s.remove(&frame.stream_id) {
+state.cancel_token.cancel();
let _ = event_tx.try_send(HubEvent::StreamClosed {
edge_id: edge_id.clone(),
stream_id: frame.stream_id,
});
}
}
FRAME_PONG => {
log::debug!("Received PONG from edge {}", edge_id);
}
_ => {
log::warn!("Unexpected frame type {} from edge", frame.frame_type);
}
@@ -586,6 +676,19 @@ async fn handle_edge_connection(
}
}
}
_ = ping_ticker.tick() => {
let ping_frame = encode_frame(0, FRAME_PING, &[]);
if frame_writer_tx.try_send(ping_frame).is_err() {
log::warn!("Failed to send PING to edge {}, writer channel full/closed", edge_id);
break;
}
log::trace!("Sent PING to edge {}", edge_id);
}
_ = &mut liveness_deadline => {
log::warn!("Edge {} liveness timeout (no frames for {}s), disconnecting",
edge_id, liveness_timeout_dur.as_secs());
break;
}
_ = edge_token.cancelled() => {
log::info!("Edge {} cancelled by hub", edge_id);
break;

View File

@@ -7,6 +7,10 @@ pub const FRAME_CLOSE: u8 = 0x03;
pub const FRAME_DATA_BACK: u8 = 0x04;
pub const FRAME_CLOSE_BACK: u8 = 0x05;
pub const FRAME_CONFIG: u8 = 0x06; // Hub -> Edge: configuration update
pub const FRAME_PING: u8 = 0x07; // Hub -> Edge: heartbeat probe
pub const FRAME_PONG: u8 = 0x08; // Edge -> Hub: heartbeat response
pub const FRAME_WINDOW_UPDATE: u8 = 0x09; // Edge -> Hub: per-stream flow control
pub const FRAME_WINDOW_UPDATE_BACK: u8 = 0x0A; // Hub -> Edge: per-stream flow control
// Frame header size: 4 (stream_id) + 1 (type) + 4 (length) = 9 bytes
pub const FRAME_HEADER_SIZE: usize = 9;
@@ -14,6 +18,27 @@ pub const FRAME_HEADER_SIZE: usize = 9;
// Maximum payload size (16 MB)
pub const MAX_PAYLOAD_SIZE: u32 = 16 * 1024 * 1024;
// Per-stream flow control constants
/// Initial per-stream window size (256 KB). With 32KB frames, this allows ~8 frames in flight.
pub const INITIAL_STREAM_WINDOW: u32 = 256 * 1024;
/// Send WINDOW_UPDATE after consuming this many bytes (half the initial window).
pub const WINDOW_UPDATE_THRESHOLD: u32 = INITIAL_STREAM_WINDOW / 2;
/// Maximum window size to prevent overflow.
pub const MAX_WINDOW_SIZE: u32 = 16 * 1024 * 1024;
/// Encode a WINDOW_UPDATE frame for a specific stream.
pub fn encode_window_update(stream_id: u32, frame_type: u8, increment: u32) -> Vec<u8> {
encode_frame(stream_id, frame_type, &increment.to_be_bytes())
}
/// Decode a WINDOW_UPDATE payload into a byte increment. Returns None if payload is malformed.
pub fn decode_window_update(payload: &[u8]) -> Option<u32> {
if payload.len() != 4 {
return None;
}
Some(u32::from_be_bytes([payload[0], payload[1], payload[2], payload[3]]))
}
/// A single multiplexed frame.
#[derive(Debug, Clone)]
pub struct Frame {
@@ -261,6 +286,8 @@ mod tests {
FRAME_DATA_BACK,
FRAME_CLOSE_BACK,
FRAME_CONFIG,
FRAME_PING,
FRAME_PONG,
];
let mut data = Vec::new();
@@ -293,4 +320,19 @@ mod tests {
assert_eq!(frame.frame_type, FRAME_CLOSE);
assert!(frame.payload.is_empty());
}
#[test]
fn test_encode_frame_ping_pong() {
// PING: stream_id=0, empty payload (control frame)
let ping = encode_frame(0, FRAME_PING, &[]);
assert_eq!(ping[4], FRAME_PING);
assert_eq!(&ping[0..4], &0u32.to_be_bytes());
assert_eq!(ping.len(), FRAME_HEADER_SIZE);
// PONG: stream_id=0, empty payload (control frame)
let pong = encode_frame(0, FRAME_PONG, &[]);
assert_eq!(pong[4], FRAME_PONG);
assert_eq!(&pong[0..4], &0u32.to_be_bytes());
assert_eq!(pong.len(), FRAME_HEADER_SIZE);
}
}
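For reference, a round-trip check of the new flow-control helpers in the same style as the tests above; it relies only on the 9-byte header layout documented earlier (4-byte stream_id, 1-byte type, 4-byte big-endian length):

```rust
#[test]
fn test_window_update_round_trip() {
    let bytes = encode_window_update(7, FRAME_WINDOW_UPDATE, 128 * 1024);
    assert_eq!(bytes.len(), FRAME_HEADER_SIZE + 4); // header + u32 increment
    assert_eq!(bytes[4], FRAME_WINDOW_UPDATE);
    assert_eq!(
        decode_window_update(&bytes[FRAME_HEADER_SIZE..]),
        Some(128 * 1024)
    );
    // Malformed payloads (wrong length) are rejected rather than trusted.
    assert_eq!(decode_window_update(&[0, 1]), None);
}
```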

View File

@@ -3,6 +3,6 @@
*/
export const commitinfo = {
name: '@serve.zone/remoteingress',
-version: '4.2.0',
+version: '4.5.0',
description: 'Edge ingress tunnel for DcRouter - accepts incoming TCP connections at network edge and tunnels them to DcRouter SmartProxy preserving client IP via PROXY protocol v1.'
}

View File

@@ -40,9 +40,16 @@ export interface IEdgeConfig {
secret: string;
}
const MAX_RESTART_ATTEMPTS = 10;
const MAX_RESTART_BACKOFF_MS = 30_000;
export class RemoteIngressEdge extends EventEmitter {
private bridge: InstanceType<typeof plugins.smartrust.RustBridge<TEdgeCommands>>;
private started = false;
private stopping = false;
private savedConfig: IEdgeConfig | null = null;
private restartBackoffMs = 1000;
private restartAttempts = 0;
private statusInterval: ReturnType<typeof setInterval> | undefined;
constructor() {
@@ -109,11 +116,17 @@ export class RemoteIngressEdge extends EventEmitter {
edgeConfig = config;
}
this.savedConfig = edgeConfig;
this.stopping = false;
const spawned = await this.bridge.spawn();
if (!spawned) {
throw new Error('Failed to spawn remoteingress-bin');
}
// Register crash recovery handler
this.bridge.on('exit', this.handleCrashRecovery);
await this.bridge.sendCommand('startEdge', {
hubHost: edgeConfig.hubHost,
hubPort: edgeConfig.hubPort ?? 8443,
@@ -122,6 +135,8 @@ export class RemoteIngressEdge extends EventEmitter {
});
this.started = true;
this.restartAttempts = 0;
this.restartBackoffMs = 1000;
// Start periodic status logging
this.statusInterval = setInterval(async () => {
@@ -142,6 +157,7 @@ export class RemoteIngressEdge extends EventEmitter {
* Stop the edge and kill the Rust process.
*/
public async stop(): Promise<void> {
this.stopping = true;
if (this.statusInterval) {
clearInterval(this.statusInterval);
this.statusInterval = undefined;
@@ -152,6 +168,7 @@ export class RemoteIngressEdge extends EventEmitter {
} catch {
// Process may already be dead
}
this.bridge.removeListener('exit', this.handleCrashRecovery);
this.bridge.kill();
this.started = false;
}
@@ -170,4 +187,55 @@ export class RemoteIngressEdge extends EventEmitter {
public get running(): boolean {
return this.bridge.running;
}
/**
* Handle unexpected Rust binary crash — auto-restart with backoff.
*/
private handleCrashRecovery = async (code: number | null, signal: string | null) => {
if (this.stopping || !this.started || !this.savedConfig) {
return;
}
console.error(
`[RemoteIngressEdge] Rust binary crashed (code=${code}, signal=${signal}), ` +
`attempt ${this.restartAttempts + 1}/${MAX_RESTART_ATTEMPTS}`
);
this.started = false;
if (this.restartAttempts >= MAX_RESTART_ATTEMPTS) {
console.error('[RemoteIngressEdge] Max restart attempts reached, giving up');
this.emit('crashRecoveryFailed');
return;
}
await new Promise(resolve => setTimeout(resolve, this.restartBackoffMs));
this.restartBackoffMs = Math.min(this.restartBackoffMs * 2, MAX_RESTART_BACKOFF_MS);
this.restartAttempts++;
try {
const spawned = await this.bridge.spawn();
if (!spawned) {
console.error('[RemoteIngressEdge] Failed to respawn binary');
return;
}
this.bridge.on('exit', this.handleCrashRecovery);
await this.bridge.sendCommand('startEdge', {
hubHost: this.savedConfig.hubHost,
hubPort: this.savedConfig.hubPort ?? 8443,
edgeId: this.savedConfig.edgeId,
secret: this.savedConfig.secret,
});
this.started = true;
this.restartAttempts = 0;
this.restartBackoffMs = 1000;
console.log('[RemoteIngressEdge] Successfully recovered from crash');
this.emit('crashRecovered');
} catch (err) {
console.error(`[RemoteIngressEdge] Crash recovery failed: ${err}`);
}
};
}
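For orientation: with MAX_RESTART_ATTEMPTS = 10 and MAX_RESTART_BACKOFF_MS = 30_000, the delays between respawn attempts run 1 s, 2 s, 4 s, 8 s, 16 s, then hold at the 30 s cap, for a worst case of roughly three minutes of retrying before `crashRecoveryFailed` fires. Any successful restart resets both the attempt counter and the backoff. RemoteIngressHub below uses the same constants and sequence.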

View File

@@ -11,6 +11,8 @@ type THubCommands = {
params: {
tunnelPort: number;
targetHost?: string;
tlsCertPem?: string;
tlsKeyPem?: string;
};
result: { started: boolean };
};
@@ -42,11 +44,25 @@ type THubCommands = {
export interface IHubConfig {
tunnelPort?: number;
targetHost?: string;
tls?: {
certPem?: string;
keyPem?: string;
};
}
type TAllowedEdge = { id: string; secret: string; listenPorts?: number[]; stunIntervalSecs?: number };
const MAX_RESTART_ATTEMPTS = 10;
const MAX_RESTART_BACKOFF_MS = 30_000;
export class RemoteIngressHub extends EventEmitter {
private bridge: InstanceType<typeof plugins.smartrust.RustBridge<THubCommands>>;
private started = false;
private stopping = false;
private savedConfig: IHubConfig | null = null;
private savedEdges: TAllowedEdge[] = [];
private restartBackoffMs = 1000;
private restartAttempts = 0;
constructor() {
super();
@@ -92,29 +108,42 @@ export class RemoteIngressHub extends EventEmitter {
* Start the hub — spawns the Rust binary and starts the tunnel server.
*/
public async start(config: IHubConfig = {}): Promise<void> {
this.savedConfig = config;
this.stopping = false;
const spawned = await this.bridge.spawn();
if (!spawned) {
throw new Error('Failed to spawn remoteingress-bin');
}
// Register crash recovery handler
this.bridge.on('exit', this.handleCrashRecovery);
await this.bridge.sendCommand('startHub', {
tunnelPort: config.tunnelPort ?? 8443,
targetHost: config.targetHost ?? '127.0.0.1',
...(config.tls?.certPem && config.tls?.keyPem
? { tlsCertPem: config.tls.certPem, tlsKeyPem: config.tls.keyPem }
: {}),
});
this.started = true;
this.restartAttempts = 0;
this.restartBackoffMs = 1000;
}
/**
* Stop the hub and kill the Rust process.
*/
public async stop(): Promise<void> {
this.stopping = true;
if (this.started) {
try {
await this.bridge.sendCommand('stopHub', {} as Record<string, never>);
} catch {
// Process may already be dead
}
this.bridge.removeListener('exit', this.handleCrashRecovery);
this.bridge.kill();
this.started = false;
}
@@ -123,7 +152,8 @@ export class RemoteIngressHub extends EventEmitter {
/**
* Update the list of allowed edges that can connect to this hub.
*/
-public async updateAllowedEdges(edges: Array<{ id: string; secret: string; listenPorts?: number[]; stunIntervalSecs?: number }>): Promise<void> {
+public async updateAllowedEdges(edges: TAllowedEdge[]): Promise<void> {
this.savedEdges = edges;
await this.bridge.sendCommand('updateAllowedEdges', { edges });
}
@@ -140,4 +170,62 @@ export class RemoteIngressHub extends EventEmitter {
public get running(): boolean {
return this.bridge.running;
}
/**
* Handle unexpected Rust binary crash — auto-restart with backoff.
*/
private handleCrashRecovery = async (code: number | null, signal: string | null) => {
if (this.stopping || !this.started || !this.savedConfig) {
return;
}
console.error(
`[RemoteIngressHub] Rust binary crashed (code=${code}, signal=${signal}), ` +
`attempt ${this.restartAttempts + 1}/${MAX_RESTART_ATTEMPTS}`
);
this.started = false;
if (this.restartAttempts >= MAX_RESTART_ATTEMPTS) {
console.error('[RemoteIngressHub] Max restart attempts reached, giving up');
this.emit('crashRecoveryFailed');
return;
}
await new Promise(resolve => setTimeout(resolve, this.restartBackoffMs));
this.restartBackoffMs = Math.min(this.restartBackoffMs * 2, MAX_RESTART_BACKOFF_MS);
this.restartAttempts++;
try {
const spawned = await this.bridge.spawn();
if (!spawned) {
console.error('[RemoteIngressHub] Failed to respawn binary');
return;
}
this.bridge.on('exit', this.handleCrashRecovery);
const config = this.savedConfig;
await this.bridge.sendCommand('startHub', {
tunnelPort: config.tunnelPort ?? 8443,
targetHost: config.targetHost ?? '127.0.0.1',
...(config.tls?.certPem && config.tls?.keyPem
? { tlsCertPem: config.tls.certPem, tlsKeyPem: config.tls.keyPem }
: {}),
});
// Restore allowed edges
if (this.savedEdges.length > 0) {
await this.bridge.sendCommand('updateAllowedEdges', { edges: this.savedEdges });
}
this.started = true;
this.restartAttempts = 0;
this.restartBackoffMs = 1000;
console.log('[RemoteIngressHub] Successfully recovered from crash');
this.emit('crashRecovered');
} catch (err) {
console.error(`[RemoteIngressHub] Crash recovery failed: ${err}`);
}
};
}