BREAKING CHANGE(remoteingress): migrate core to Rust, add RemoteIngressHub/RemoteIngressEdge JS bridge, and bump package to v2.0.0
This commit is contained in:
478
rust/crates/remoteingress-core/src/edge.rs
Normal file
478
rust/crates/remoteingress-core/src/edge.rs
Normal file
@@ -0,0 +1,478 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::atomic::{AtomicU32, Ordering};
|
||||
use std::sync::Arc;
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||
use tokio::net::{TcpListener, TcpStream};
|
||||
use tokio::sync::{mpsc, Mutex, RwLock};
|
||||
use tokio_rustls::TlsConnector;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use remoteingress_protocol::*;
|
||||
|
||||
/// Edge configuration.
///
/// Deserialized from the JS bridge with camelCase keys (e.g. `hubHost`,
/// `listenPorts`).
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct EdgeConfig {
    /// Hostname or IP address of the hub to tunnel to.
    pub hub_host: String,
    /// TLS tunnel port on the hub.
    pub hub_port: u16,
    /// Identifier sent in the auth line; must be known to the hub.
    pub edge_id: String,
    /// Shared secret sent in the auth line; verified by the hub.
    pub secret: String,
    /// Local TCP ports on which to accept client connections.
    pub listen_ports: Vec<u16>,
    /// Seconds between STUN public-IP probes; defaults to 300 when `None`.
    pub stun_interval_secs: Option<u64>,
}
|
||||
|
||||
/// Events emitted by the edge.
///
/// Serialized with an internal `type` tag and camelCase fields, so the JS
/// bridge receives e.g. `{"type":"publicIpDiscovered","ip":"..."}`.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
#[serde(tag = "type")]
pub enum EdgeEvent {
    /// Tunnel to the hub was established (TLS up, auth line sent).
    TunnelConnected,
    /// Tunnel to the hub was lost; also emitted after a failed connect attempt.
    TunnelDisconnected,
    /// STUN discovery found a public IP that differs from the previous one.
    #[serde(rename_all = "camelCase")]
    PublicIpDiscovered { ip: String },
}
|
||||
|
||||
/// Edge status response.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct EdgeStatus {
    /// Whether `start()` has been called without a matching `stop()`.
    pub running: bool,
    /// Whether a tunnel session to the hub is currently up.
    pub connected: bool,
    /// Most recent STUN-discovered public IP, if any.
    pub public_ip: Option<String>,
    /// Number of client connections currently multiplexed over the tunnel.
    pub active_streams: usize,
    /// Ports the edge is configured to listen on.
    pub listen_ports: Vec<u16>,
}
|
||||
|
||||
/// The tunnel edge that listens for client connections and multiplexes them to the hub.
pub struct TunnelEdge {
    // Config is behind a lock so status queries can read it while running.
    config: RwLock<EdgeConfig>,
    // Sender half kept here; cloned into spawned tasks.
    event_tx: mpsc::UnboundedSender<EdgeEvent>,
    // Receiver is handed out exactly once via `take_event_rx`.
    event_rx: Mutex<Option<mpsc::UnboundedReceiver<EdgeEvent>>>,
    // Present only while a supervisor task is running; `stop()` takes it.
    shutdown_tx: Mutex<Option<mpsc::Sender<()>>>,
    running: RwLock<bool>,
    // Shared with the supervisor task, which updates it on (dis)connect.
    connected: Arc<RwLock<bool>>,
    public_ip: Arc<RwLock<Option<String>>>,
    // Count of live client streams across all listeners.
    active_streams: Arc<AtomicU32>,
    // Monotonic stream-id allocator; starts at 1.
    next_stream_id: Arc<AtomicU32>,
}
|
||||
|
||||
impl TunnelEdge {
    /// Create a new, stopped edge from `config`.
    pub fn new(config: EdgeConfig) -> Self {
        let (event_tx, event_rx) = mpsc::unbounded_channel();
        Self {
            config: RwLock::new(config),
            event_tx,
            // Stored so `take_event_rx` can hand it out exactly once.
            event_rx: Mutex::new(Some(event_rx)),
            shutdown_tx: Mutex::new(None),
            running: RwLock::new(false),
            connected: Arc::new(RwLock::new(false)),
            public_ip: Arc::new(RwLock::new(None)),
            active_streams: Arc::new(AtomicU32::new(0)),
            // Stream ids start at 1; 0 is never allocated.
            next_stream_id: Arc::new(AtomicU32::new(1)),
        }
    }

    /// Take the event receiver (can only be called once).
    ///
    /// Returns `None` on every call after the first.
    pub async fn take_event_rx(&self) -> Option<mpsc::UnboundedReceiver<EdgeEvent>> {
        self.event_rx.lock().await.take()
    }

    /// Get the current edge status.
    ///
    /// Snapshot only — each field is read under its own lock, so the values
    /// are individually (not jointly) consistent.
    pub async fn get_status(&self) -> EdgeStatus {
        EdgeStatus {
            running: *self.running.read().await,
            connected: *self.connected.read().await,
            public_ip: self.public_ip.read().await.clone(),
            active_streams: self.active_streams.load(Ordering::Relaxed) as usize,
            listen_ports: self.config.read().await.listen_ports.clone(),
        }
    }

    /// Start the edge: connect to hub, start listeners.
    ///
    /// Spawns the supervisor task and returns immediately; connection
    /// failures surface as `EdgeEvent`s rather than as an `Err` here.
    pub async fn start(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        let config = self.config.read().await.clone();
        let (shutdown_tx, shutdown_rx) = mpsc::channel::<()>(1);
        *self.shutdown_tx.lock().await = Some(shutdown_tx);
        *self.running.write().await = true;

        // Clone the shared state handles into the supervisor task.
        let connected = self.connected.clone();
        let public_ip = self.public_ip.clone();
        let active_streams = self.active_streams.clone();
        let next_stream_id = self.next_stream_id.clone();
        let event_tx = self.event_tx.clone();

        tokio::spawn(async move {
            edge_main_loop(
                config,
                connected,
                public_ip,
                active_streams,
                next_stream_id,
                event_tx,
                shutdown_rx,
            )
            .await;
        });

        Ok(())
    }

    /// Stop the edge.
    ///
    /// Signals the supervisor task (best-effort — send errors are ignored if
    /// the task already exited) and marks the edge stopped/disconnected.
    pub async fn stop(&self) {
        if let Some(tx) = self.shutdown_tx.lock().await.take() {
            let _ = tx.send(()).await;
        }
        *self.running.write().await = false;
        *self.connected.write().await = false;
    }
}
|
||||
|
||||
async fn edge_main_loop(
|
||||
config: EdgeConfig,
|
||||
connected: Arc<RwLock<bool>>,
|
||||
public_ip: Arc<RwLock<Option<String>>>,
|
||||
active_streams: Arc<AtomicU32>,
|
||||
next_stream_id: Arc<AtomicU32>,
|
||||
event_tx: mpsc::UnboundedSender<EdgeEvent>,
|
||||
mut shutdown_rx: mpsc::Receiver<()>,
|
||||
) {
|
||||
let mut backoff_ms: u64 = 1000;
|
||||
let max_backoff_ms: u64 = 30000;
|
||||
|
||||
loop {
|
||||
// Try to connect to hub
|
||||
let result = connect_to_hub_and_run(
|
||||
&config,
|
||||
&connected,
|
||||
&public_ip,
|
||||
&active_streams,
|
||||
&next_stream_id,
|
||||
&event_tx,
|
||||
&mut shutdown_rx,
|
||||
)
|
||||
.await;
|
||||
|
||||
*connected.write().await = false;
|
||||
let _ = event_tx.send(EdgeEvent::TunnelDisconnected);
|
||||
active_streams.store(0, Ordering::Relaxed);
|
||||
|
||||
match result {
|
||||
EdgeLoopResult::Shutdown => break,
|
||||
EdgeLoopResult::Reconnect => {
|
||||
log::info!("Reconnecting in {}ms...", backoff_ms);
|
||||
tokio::select! {
|
||||
_ = tokio::time::sleep(std::time::Duration::from_millis(backoff_ms)) => {}
|
||||
_ = shutdown_rx.recv() => break,
|
||||
}
|
||||
backoff_ms = (backoff_ms * 2).min(max_backoff_ms);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Outcome of one hub session; tells `edge_main_loop` what to do next.
enum EdgeLoopResult {
    /// A shutdown signal was received — exit the supervisor loop.
    Shutdown,
    /// The session ended (failed connect, EOF, or frame error) — retry with backoff.
    Reconnect,
}
|
||||
|
||||
/// Run one full tunnel session: TLS-connect to the hub, authenticate, spawn
/// STUN discovery and per-port listeners, then pump hub frames until EOF,
/// error, or shutdown.
///
/// All tasks spawned here (STUN probe, listeners) are aborted before return;
/// per-client tasks end on their own when their sockets/channels close.
async fn connect_to_hub_and_run(
    config: &EdgeConfig,
    connected: &Arc<RwLock<bool>>,
    public_ip: &Arc<RwLock<Option<String>>>,
    active_streams: &Arc<AtomicU32>,
    next_stream_id: &Arc<AtomicU32>,
    event_tx: &mpsc::UnboundedSender<EdgeEvent>,
    shutdown_rx: &mut mpsc::Receiver<()>,
) -> EdgeLoopResult {
    // Build TLS connector that skips cert verification (auth is via secret)
    let tls_config = rustls::ClientConfig::builder()
        .dangerous()
        .with_custom_certificate_verifier(Arc::new(NoCertVerifier))
        .with_no_client_auth();

    let connector = TlsConnector::from(Arc::new(tls_config));

    let addr = format!("{}:{}", config.hub_host, config.hub_port);
    let tcp = match TcpStream::connect(&addr).await {
        Ok(s) => s,
        Err(e) => {
            log::error!("Failed to connect to hub at {}: {}", addr, e);
            return EdgeLoopResult::Reconnect;
        }
    };

    // SNI name: fall back to a fixed name when hub_host is an IP literal
    // (IPs are not valid DNS ServerNames). The cert is not verified anyway.
    let server_name = rustls::pki_types::ServerName::try_from(config.hub_host.clone())
        .unwrap_or_else(|_| rustls::pki_types::ServerName::try_from("remoteingress-hub".to_string()).unwrap());

    let tls_stream = match connector.connect(server_name, tcp).await {
        Ok(s) => s,
        Err(e) => {
            log::error!("TLS handshake failed: {}", e);
            return EdgeLoopResult::Reconnect;
        }
    };

    let (read_half, mut write_half) = tokio::io::split(tls_stream);

    // Send auth line
    let auth_line = format!("EDGE {} {}\n", config.edge_id, config.secret);
    if write_half.write_all(auth_line.as_bytes()).await.is_err() {
        return EdgeLoopResult::Reconnect;
    }

    // Considered connected once the auth line is written; the hub does not
    // send an explicit auth acknowledgement.
    *connected.write().await = true;
    let _ = event_tx.send(EdgeEvent::TunnelConnected);
    log::info!("Connected to hub at {}", addr);

    // Start STUN discovery
    let stun_interval = config.stun_interval_secs.unwrap_or(300);
    let public_ip_clone = public_ip.clone();
    let event_tx_clone = event_tx.clone();
    let stun_handle = tokio::spawn(async move {
        loop {
            if let Some(ip) = crate::stun::discover_public_ip().await {
                let mut pip = public_ip_clone.write().await;
                let changed = pip.as_ref() != Some(&ip);
                *pip = Some(ip.clone());
                // Only notify on change, not on every probe.
                if changed {
                    let _ = event_tx_clone.send(EdgeEvent::PublicIpDiscovered { ip });
                }
            }
            tokio::time::sleep(std::time::Duration::from_secs(stun_interval)).await;
        }
    });

    // Client socket map: stream_id -> sender for writing data back to client
    let client_writers: Arc<Mutex<HashMap<u32, mpsc::Sender<Vec<u8>>>>> =
        Arc::new(Mutex::new(HashMap::new()));

    // Shared tunnel writer
    let tunnel_writer = Arc::new(Mutex::new(write_half));

    // Start TCP listeners for each port
    let mut listener_handles = Vec::new();
    for &port in &config.listen_ports {
        let tunnel_writer = tunnel_writer.clone();
        let client_writers = client_writers.clone();
        let active_streams = active_streams.clone();
        let next_stream_id = next_stream_id.clone();
        let edge_id = config.edge_id.clone();

        let handle = tokio::spawn(async move {
            let listener = match TcpListener::bind(("0.0.0.0", port)).await {
                Ok(l) => l,
                Err(e) => {
                    // A failed bind disables only this port; other listeners
                    // and the tunnel itself keep running.
                    log::error!("Failed to bind port {}: {}", port, e);
                    return;
                }
            };
            log::info!("Listening on port {}", port);

            loop {
                match listener.accept().await {
                    Ok((client_stream, client_addr)) => {
                        let stream_id = next_stream_id.fetch_add(1, Ordering::Relaxed);
                        let tunnel_writer = tunnel_writer.clone();
                        let client_writers = client_writers.clone();
                        let active_streams = active_streams.clone();
                        let edge_id = edge_id.clone();

                        active_streams.fetch_add(1, Ordering::Relaxed);

                        tokio::spawn(async move {
                            handle_client_connection(
                                client_stream,
                                client_addr,
                                stream_id,
                                port,
                                &edge_id,
                                tunnel_writer,
                                client_writers,
                            )
                            .await;
                            // Balance the increment above once the stream ends.
                            active_streams.fetch_sub(1, Ordering::Relaxed);
                        });
                    }
                    Err(e) => {
                        log::error!("Accept error on port {}: {}", port, e);
                    }
                }
            }
        });
        listener_handles.push(handle);
    }

    // Read frames from hub
    let mut frame_reader = FrameReader::new(read_half);
    let result = loop {
        tokio::select! {
            frame_result = frame_reader.next_frame() => {
                match frame_result {
                    Ok(Some(frame)) => {
                        match frame.frame_type {
                            FRAME_DATA_BACK => {
                                // Route returned data to the owning client task;
                                // silently dropped if the stream already closed.
                                let writers = client_writers.lock().await;
                                if let Some(tx) = writers.get(&frame.stream_id) {
                                    let _ = tx.send(frame.payload).await;
                                }
                            }
                            FRAME_CLOSE_BACK => {
                                // Dropping the sender ends the client's hub->client task.
                                let mut writers = client_writers.lock().await;
                                writers.remove(&frame.stream_id);
                            }
                            _ => {
                                log::warn!("Unexpected frame type {} from hub", frame.frame_type);
                            }
                        }
                    }
                    Ok(None) => {
                        log::info!("Hub disconnected (EOF)");
                        break EdgeLoopResult::Reconnect;
                    }
                    Err(e) => {
                        log::error!("Hub frame error: {}", e);
                        break EdgeLoopResult::Reconnect;
                    }
                }
            }
            _ = shutdown_rx.recv() => {
                break EdgeLoopResult::Shutdown;
            }
        }
    };

    // Cleanup
    stun_handle.abort();
    for h in listener_handles {
        h.abort();
    }

    result
}
|
||||
|
||||
/// Pump one accepted client connection over the tunnel.
///
/// Opens the stream on the hub (OPEN frame carrying a PROXY v1 header), then
/// runs two pumps: a spawned task copies hub->client data from the per-stream
/// channel, while this function copies client->hub data as DATA frames.
/// When the client side ends, a CLOSE frame is sent and the stream is
/// deregistered.
async fn handle_client_connection(
    client_stream: TcpStream,
    client_addr: std::net::SocketAddr,
    stream_id: u32,
    dest_port: u16,
    edge_id: &str,
    tunnel_writer: Arc<Mutex<tokio::io::WriteHalf<tokio_rustls::client::TlsStream<TcpStream>>>>,
    client_writers: Arc<Mutex<HashMap<u32, mpsc::Sender<Vec<u8>>>>>,
) {
    let client_ip = client_addr.ip().to_string();
    let client_port = client_addr.port();

    // Determine edge IP (use 0.0.0.0 as placeholder — hub doesn't use it for routing)
    let edge_ip = "0.0.0.0";

    // Send OPEN frame with PROXY v1 header
    let proxy_header = build_proxy_v1_header(&client_ip, edge_ip, client_port, dest_port);
    let open_frame = encode_frame(stream_id, FRAME_OPEN, proxy_header.as_bytes());
    {
        let mut w = tunnel_writer.lock().await;
        if w.write_all(&open_frame).await.is_err() {
            // Tunnel already dead — nothing registered yet, so just bail.
            return;
        }
    }

    // Set up channel for data coming back from hub
    let (back_tx, mut back_rx) = mpsc::channel::<Vec<u8>>(256);
    {
        let mut writers = client_writers.lock().await;
        writers.insert(stream_id, back_tx);
    }

    let (mut client_read, mut client_write) = client_stream.into_split();

    // Task: hub -> client. Ends when the channel sender is dropped
    // (CLOSE_BACK or cleanup) or the client write side fails.
    let hub_to_client = tokio::spawn(async move {
        while let Some(data) = back_rx.recv().await {
            if client_write.write_all(&data).await.is_err() {
                break;
            }
        }
        let _ = client_write.shutdown().await;
    });

    // Task: client -> hub (runs inline on this task)
    let mut buf = vec![0u8; 32768];
    loop {
        match client_read.read(&mut buf).await {
            Ok(0) => break,
            Ok(n) => {
                let data_frame = encode_frame(stream_id, FRAME_DATA, &buf[..n]);
                let mut w = tunnel_writer.lock().await;
                if w.write_all(&data_frame).await.is_err() {
                    break;
                }
            }
            Err(_) => break,
        }
    }

    // Send CLOSE frame
    let close_frame = encode_frame(stream_id, FRAME_CLOSE, &[]);
    {
        let mut w = tunnel_writer.lock().await;
        let _ = w.write_all(&close_frame).await;
    }

    // Cleanup
    {
        let mut writers = client_writers.lock().await;
        writers.remove(&stream_id);
    }
    hub_to_client.abort();
    let _ = edge_id; // used for logging context
}
|
||||
|
||||
/// TLS certificate verifier that accepts any certificate (auth is via shared secret).
///
/// NOTE(review): with verification disabled, an active man-in-the-middle can
/// terminate the TLS session and observe the auth line (edge id + secret)
/// the edge sends over it — confirm this trade-off is intended.
#[derive(Debug)]
struct NoCertVerifier;
|
||||
|
||||
impl rustls::client::danger::ServerCertVerifier for NoCertVerifier {
    // Accept every server certificate unconditionally.
    fn verify_server_cert(
        &self,
        _end_entity: &rustls::pki_types::CertificateDer<'_>,
        _intermediates: &[rustls::pki_types::CertificateDer<'_>],
        _server_name: &rustls::pki_types::ServerName<'_>,
        _ocsp_response: &[u8],
        _now: rustls::pki_types::UnixTime,
    ) -> Result<rustls::client::danger::ServerCertVerified, rustls::Error> {
        Ok(rustls::client::danger::ServerCertVerified::assertion())
    }

    // Accept any TLS 1.2 handshake signature without checking it.
    fn verify_tls12_signature(
        &self,
        _message: &[u8],
        _cert: &rustls::pki_types::CertificateDer<'_>,
        _dss: &rustls::DigitallySignedStruct,
    ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
        Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
    }

    // Accept any TLS 1.3 handshake signature without checking it.
    fn verify_tls13_signature(
        &self,
        _message: &[u8],
        _cert: &rustls::pki_types::CertificateDer<'_>,
        _dss: &rustls::DigitallySignedStruct,
    ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
        Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
    }

    // Advertise a broad scheme list so handshakes with common certs succeed.
    fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
        vec![
            rustls::SignatureScheme::RSA_PKCS1_SHA256,
            rustls::SignatureScheme::RSA_PKCS1_SHA384,
            rustls::SignatureScheme::RSA_PKCS1_SHA512,
            rustls::SignatureScheme::ECDSA_NISTP256_SHA256,
            rustls::SignatureScheme::ECDSA_NISTP384_SHA384,
            rustls::SignatureScheme::ECDSA_NISTP521_SHA512,
            rustls::SignatureScheme::RSA_PSS_SHA256,
            rustls::SignatureScheme::RSA_PSS_SHA384,
            rustls::SignatureScheme::RSA_PSS_SHA512,
            rustls::SignatureScheme::ED25519,
            rustls::SignatureScheme::ED448,
        ]
    }
}
|
||||
477
rust/crates/remoteingress-core/src/hub.rs
Normal file
477
rust/crates/remoteingress-core/src/hub.rs
Normal file
@@ -0,0 +1,477 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use tokio::io::{AsyncBufReadExt, AsyncReadExt, AsyncWriteExt, BufReader};
|
||||
use tokio::net::{TcpListener, TcpStream};
|
||||
use tokio::sync::{mpsc, Mutex, RwLock};
|
||||
use tokio_rustls::TlsAcceptor;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use remoteingress_protocol::*;
|
||||
|
||||
/// Hub configuration.
///
/// Deserialized from the JS bridge with camelCase keys. The TLS PEM fields
/// are `#[serde(skip)]` so certificate material never round-trips through
/// serialization; they must be set programmatically.
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct HubConfig {
    /// Port to accept TLS tunnel connections from edges on.
    pub tunnel_port: u16,
    /// Upstream host streams are proxied to; `None` falls back to 127.0.0.1.
    pub target_host: Option<String>,
    /// PEM-encoded server certificate chain; when absent (with the key), a
    /// self-signed certificate is generated at startup.
    #[serde(skip)]
    pub tls_cert_pem: Option<String>,
    /// PEM-encoded private key matching `tls_cert_pem`.
    #[serde(skip)]
    pub tls_key_pem: Option<String>,
}
|
||||
|
||||
impl Default for HubConfig {
    /// Defaults: tunnel on 8443, proxy to localhost, auto-generated TLS cert.
    fn default() -> Self {
        Self {
            tunnel_port: 8443,
            target_host: Some("127.0.0.1".to_string()),
            // None + None => build_tls_config generates a self-signed cert.
            tls_cert_pem: None,
            tls_key_pem: None,
        }
    }
}
|
||||
|
||||
/// An allowed edge identity.
///
/// Pushed into the hub via `update_allowed_edges`; matched against the
/// `EDGE <id> <secret>` auth line each edge sends.
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct AllowedEdge {
    /// Edge identifier (first auth-line token after "EDGE").
    pub id: String,
    /// Shared secret, compared in constant time.
    pub secret: String,
}
|
||||
|
||||
/// Runtime status of a connected edge.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ConnectedEdgeStatus {
    /// Authenticated edge identifier.
    pub edge_id: String,
    /// Unix timestamp (seconds) at which the edge authenticated.
    pub connected_at: u64,
    /// Number of streams currently open for this edge.
    pub active_streams: usize,
}
|
||||
|
||||
/// Events emitted by the hub.
///
/// Serialized with an internal `type` tag and camelCase fields for the JS
/// bridge, e.g. `{"type":"edgeConnected","edgeId":"..."}`.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
#[serde(tag = "type")]
pub enum HubEvent {
    /// An edge passed authentication.
    #[serde(rename_all = "camelCase")]
    EdgeConnected { edge_id: String },
    /// An edge's tunnel ended (EOF or frame error).
    #[serde(rename_all = "camelCase")]
    EdgeDisconnected { edge_id: String },
    /// An edge opened a new multiplexed stream.
    #[serde(rename_all = "camelCase")]
    StreamOpened { edge_id: String, stream_id: u32 },
    /// A previously opened stream finished.
    #[serde(rename_all = "camelCase")]
    StreamClosed { edge_id: String, stream_id: u32 },
}
|
||||
|
||||
/// Hub status response.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct HubStatus {
    /// Whether `start()` has been called without a matching `stop()`.
    pub running: bool,
    /// Port the tunnel listener is configured on.
    pub tunnel_port: u16,
    /// One entry per currently authenticated edge.
    pub connected_edges: Vec<ConnectedEdgeStatus>,
}
|
||||
|
||||
/// The tunnel hub that accepts edge connections and demuxes streams to SmartProxy.
pub struct TunnelHub {
    config: RwLock<HubConfig>,
    allowed_edges: Arc<RwLock<HashMap<String, String>>>, // id -> secret
    // Live edges, keyed by edge id; entries removed when a connection ends.
    connected_edges: Arc<Mutex<HashMap<String, ConnectedEdgeInfo>>>,
    event_tx: mpsc::UnboundedSender<HubEvent>,
    // Receiver is handed out exactly once via `take_event_rx`.
    event_rx: Mutex<Option<mpsc::UnboundedReceiver<HubEvent>>>,
    // Present only while the accept loop is running; `stop()` takes it.
    shutdown_tx: Mutex<Option<mpsc::Sender<()>>>,
    running: RwLock<bool>,
}
|
||||
|
||||
/// Bookkeeping for one authenticated edge connection.
struct ConnectedEdgeInfo {
    /// Unix timestamp (seconds) when the edge authenticated.
    connected_at: u64,
    /// Open streams for this edge: stream_id -> sender feeding the upstream pipe.
    active_streams: Arc<Mutex<HashMap<u32, mpsc::Sender<Vec<u8>>>>>,
}
|
||||
|
||||
impl TunnelHub {
    /// Create a new, stopped hub from `config` with an empty allow-list.
    pub fn new(config: HubConfig) -> Self {
        let (event_tx, event_rx) = mpsc::unbounded_channel();
        Self {
            config: RwLock::new(config),
            allowed_edges: Arc::new(RwLock::new(HashMap::new())),
            connected_edges: Arc::new(Mutex::new(HashMap::new())),
            event_tx,
            // Stored so `take_event_rx` can hand it out exactly once.
            event_rx: Mutex::new(Some(event_rx)),
            shutdown_tx: Mutex::new(None),
            running: RwLock::new(false),
        }
    }

    /// Take the event receiver (can only be called once).
    ///
    /// Returns `None` on every call after the first.
    pub async fn take_event_rx(&self) -> Option<mpsc::UnboundedReceiver<HubEvent>> {
        self.event_rx.lock().await.take()
    }

    /// Update the list of allowed edges.
    ///
    /// Replaces the allow-list wholesale; edges that are already connected
    /// are not re-checked or disconnected by this call.
    pub async fn update_allowed_edges(&self, edges: Vec<AllowedEdge>) {
        let mut map = self.allowed_edges.write().await;
        map.clear();
        for edge in edges {
            map.insert(edge.id, edge.secret);
        }
    }

    /// Get the current hub status.
    pub async fn get_status(&self) -> HubStatus {
        let running = *self.running.read().await;
        let config = self.config.read().await;
        let edges = self.connected_edges.lock().await;

        let mut connected = Vec::new();
        for (id, info) in edges.iter() {
            let streams = info.active_streams.lock().await;
            connected.push(ConnectedEdgeStatus {
                edge_id: id.clone(),
                connected_at: info.connected_at,
                active_streams: streams.len(),
            });
        }

        HubStatus {
            running,
            tunnel_port: config.tunnel_port,
            connected_edges: connected,
        }
    }

    /// Start the hub — listen for TLS connections from edges.
    ///
    /// Errors only on TLS-config construction or a failed bind; after that
    /// the accept loop runs in a spawned task and failures surface as logs.
    pub async fn start(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        let config = self.config.read().await.clone();
        let tls_config = build_tls_config(&config)?;
        let acceptor = TlsAcceptor::from(Arc::new(tls_config));

        let listener = TcpListener::bind(("0.0.0.0", config.tunnel_port)).await?;
        log::info!("Hub listening on port {}", config.tunnel_port);

        let (shutdown_tx, mut shutdown_rx) = mpsc::channel::<()>(1);
        *self.shutdown_tx.lock().await = Some(shutdown_tx);
        *self.running.write().await = true;

        // Clone the shared handles into the accept-loop task.
        let allowed = self.allowed_edges.clone();
        let connected = self.connected_edges.clone();
        let event_tx = self.event_tx.clone();
        let target_host = config.target_host.unwrap_or_else(|| "127.0.0.1".to_string());

        tokio::spawn(async move {
            loop {
                tokio::select! {
                    result = listener.accept() => {
                        match result {
                            Ok((stream, addr)) => {
                                log::info!("Edge connection from {}", addr);
                                let acceptor = acceptor.clone();
                                let allowed = allowed.clone();
                                let connected = connected.clone();
                                let event_tx = event_tx.clone();
                                let target = target_host.clone();
                                // Each edge gets its own task so a slow
                                // handshake can't stall the accept loop.
                                tokio::spawn(async move {
                                    if let Err(e) = handle_edge_connection(
                                        stream, acceptor, allowed, connected, event_tx, target,
                                    ).await {
                                        log::error!("Edge connection error: {}", e);
                                    }
                                });
                            }
                            Err(e) => {
                                log::error!("Accept error: {}", e);
                            }
                        }
                    }
                    _ = shutdown_rx.recv() => {
                        log::info!("Hub shutting down");
                        break;
                    }
                }
            }
        });

        Ok(())
    }

    /// Stop the hub.
    ///
    /// Signals the accept loop (best-effort) and drops edge bookkeeping;
    /// already-spawned per-edge tasks wind down on their own.
    pub async fn stop(&self) {
        if let Some(tx) = self.shutdown_tx.lock().await.take() {
            let _ = tx.send(()).await;
        }
        *self.running.write().await = false;
        // Clear connected edges
        self.connected_edges.lock().await.clear();
    }
}
|
||||
|
||||
/// Handle a single edge connection: authenticate, then enter frame loop.
///
/// Protocol: after the TLS handshake the edge sends one auth line
/// (`EDGE <edgeId> <secret>\n`), then length-framed messages. Each OPEN frame
/// spawns a task that dials the upstream target and pipes data both ways;
/// DATA/CLOSE frames are routed to that task via a per-stream channel.
async fn handle_edge_connection(
    stream: TcpStream,
    acceptor: TlsAcceptor,
    allowed: Arc<RwLock<HashMap<String, String>>>,
    connected: Arc<Mutex<HashMap<String, ConnectedEdgeInfo>>>,
    event_tx: mpsc::UnboundedSender<HubEvent>,
    target_host: String,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let tls_stream = acceptor.accept(stream).await?;
    let (read_half, write_half) = tokio::io::split(tls_stream);
    let mut buf_reader = BufReader::new(read_half);

    // Read auth line: "EDGE <edgeId> <secret>\n"
    // NOTE(review): read_line has no length cap here, so a misbehaving client
    // could grow this buffer without bound before auth — consider limiting.
    let mut auth_line = String::new();
    buf_reader.read_line(&mut auth_line).await?;
    let auth_line = auth_line.trim();

    // splitn(3, ...) keeps any spaces inside the secret intact.
    let parts: Vec<&str> = auth_line.splitn(3, ' ').collect();
    if parts.len() != 3 || parts[0] != "EDGE" {
        return Err("invalid auth line".into());
    }

    let edge_id = parts[1].to_string();
    let secret = parts[2];

    // Verify credentials
    {
        let edges = allowed.read().await;
        match edges.get(&edge_id) {
            Some(expected) => {
                // Constant-time compare so secret contents don't leak via timing.
                if !constant_time_eq(secret.as_bytes(), expected.as_bytes()) {
                    return Err(format!("invalid secret for edge {}", edge_id).into());
                }
            }
            None => {
                return Err(format!("unknown edge {}", edge_id).into());
            }
        }
    }

    log::info!("Edge {} authenticated", edge_id);
    let _ = event_tx.send(HubEvent::EdgeConnected {
        edge_id: edge_id.clone(),
    });

    // Track this edge
    let streams: Arc<Mutex<HashMap<u32, mpsc::Sender<Vec<u8>>>>> =
        Arc::new(Mutex::new(HashMap::new()));
    let now = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs();

    {
        // NOTE(review): a reconnect with the same edge_id replaces the old
        // entry while the old connection's task may still be running.
        let mut edges = connected.lock().await;
        edges.insert(
            edge_id.clone(),
            ConnectedEdgeInfo {
                connected_at: now,
                active_streams: streams.clone(),
            },
        );
    }

    // Shared writer for sending frames back to edge
    let write_half = Arc::new(Mutex::new(write_half));

    // Frame reading loop
    let mut frame_reader = FrameReader::new(buf_reader);

    loop {
        match frame_reader.next_frame().await {
            Ok(Some(frame)) => {
                match frame.frame_type {
                    FRAME_OPEN => {
                        // Payload is PROXY v1 header line
                        let proxy_header = String::from_utf8_lossy(&frame.payload).to_string();

                        // Parse destination port from PROXY header
                        let dest_port = parse_dest_port_from_proxy(&proxy_header).unwrap_or(443);

                        let stream_id = frame.stream_id;
                        let edge_id_clone = edge_id.clone();
                        let event_tx_clone = event_tx.clone();
                        let streams_clone = streams.clone();
                        let writer_clone = write_half.clone();
                        let target = target_host.clone();

                        let _ = event_tx.send(HubEvent::StreamOpened {
                            edge_id: edge_id.clone(),
                            stream_id,
                        });

                        // Create channel for data from edge to this stream
                        let (data_tx, mut data_rx) = mpsc::channel::<Vec<u8>>(256);
                        {
                            let mut s = streams.lock().await;
                            s.insert(stream_id, data_tx);
                        }

                        // Spawn task: connect to SmartProxy, send PROXY header, pipe data
                        tokio::spawn(async move {
                            let result = async {
                                let mut upstream =
                                    TcpStream::connect((target.as_str(), dest_port)).await?;
                                // Replay the PROXY header so upstream sees the
                                // original client address.
                                upstream.write_all(proxy_header.as_bytes()).await?;

                                let (mut up_read, mut up_write) =
                                    upstream.into_split();

                                // Forward data from edge (via channel) to SmartProxy
                                let writer_for_edge_data = tokio::spawn(async move {
                                    while let Some(data) = data_rx.recv().await {
                                        if up_write.write_all(&data).await.is_err() {
                                            break;
                                        }
                                    }
                                    let _ = up_write.shutdown().await;
                                });

                                // Forward data from SmartProxy back to edge
                                let mut buf = vec![0u8; 32768];
                                loop {
                                    match up_read.read(&mut buf).await {
                                        Ok(0) => break,
                                        Ok(n) => {
                                            let frame =
                                                encode_frame(stream_id, FRAME_DATA_BACK, &buf[..n]);
                                            let mut w = writer_clone.lock().await;
                                            if w.write_all(&frame).await.is_err() {
                                                break;
                                            }
                                        }
                                        Err(_) => break,
                                    }
                                }

                                // Send CLOSE_BACK to edge
                                let close_frame = encode_frame(stream_id, FRAME_CLOSE_BACK, &[]);
                                let mut w = writer_clone.lock().await;
                                let _ = w.write_all(&close_frame).await;

                                writer_for_edge_data.abort();
                                Ok::<(), Box<dyn std::error::Error + Send + Sync>>(())
                            }
                            .await;

                            if let Err(e) = result {
                                log::error!("Stream {} error: {}", stream_id, e);
                                // Send CLOSE_BACK on error
                                let close_frame = encode_frame(stream_id, FRAME_CLOSE_BACK, &[]);
                                let mut w = writer_clone.lock().await;
                                let _ = w.write_all(&close_frame).await;
                            }

                            // Clean up stream
                            {
                                let mut s = streams_clone.lock().await;
                                s.remove(&stream_id);
                            }
                            let _ = event_tx_clone.send(HubEvent::StreamClosed {
                                edge_id: edge_id_clone,
                                stream_id,
                            });
                        });
                    }
                    FRAME_DATA => {
                        // Route to the owning stream; silently dropped if the
                        // stream already closed.
                        let s = streams.lock().await;
                        if let Some(tx) = s.get(&frame.stream_id) {
                            let _ = tx.send(frame.payload).await;
                        }
                    }
                    FRAME_CLOSE => {
                        // Dropping the sender ends the edge->upstream pump.
                        let mut s = streams.lock().await;
                        s.remove(&frame.stream_id);
                    }
                    _ => {
                        log::warn!("Unexpected frame type {} from edge", frame.frame_type);
                    }
                }
            }
            Ok(None) => {
                log::info!("Edge {} disconnected (EOF)", edge_id);
                break;
            }
            Err(e) => {
                log::error!("Edge {} frame error: {}", edge_id, e);
                break;
            }
        }
    }

    // Cleanup
    {
        let mut edges = connected.lock().await;
        edges.remove(&edge_id);
    }
    let _ = event_tx.send(HubEvent::EdgeDisconnected {
        edge_id: edge_id.clone(),
    });

    Ok(())
}
|
||||
|
||||
/// Parse destination port from a PROXY protocol v1 header line, e.g.
/// `PROXY TCP4 <srcIp> <dstIp> <srcPort> <dstPort>` — the port is the sixth
/// whitespace-separated token.
///
/// Returns `None` when the header has fewer than six tokens or the sixth
/// token is not a valid `u16`.
fn parse_dest_port_from_proxy(header: &str) -> Option<u16> {
    header.trim().split_whitespace().nth(5)?.parse().ok()
}
|
||||
|
||||
/// Build TLS server config from PEM strings, or auto-generate self-signed.
///
/// When either PEM field is missing, a fresh self-signed certificate for
/// "remoteingress-hub" is generated with rcgen (edges skip verification, so
/// the cert only provides encryption, not identity).
fn build_tls_config(
    config: &HubConfig,
) -> Result<rustls::ServerConfig, Box<dyn std::error::Error + Send + Sync>> {
    let (cert_pem, key_pem) = match (&config.tls_cert_pem, &config.tls_key_pem) {
        (Some(cert), Some(key)) => (cert.clone(), key.clone()),
        _ => {
            // Generate self-signed certificate
            let cert = rcgen::generate_simple_self_signed(vec!["remoteingress-hub".to_string()])?;
            let cert_pem = cert.cert.pem();
            let key_pem = cert.key_pair.serialize_pem();
            (cert_pem, key_pem)
        }
    };

    let certs = rustls_pemfile_parse_certs(&cert_pem)?;
    let key = rustls_pemfile_parse_key(&key_pem)?;

    // Shadows the HubConfig parameter; from here on `config` is the rustls config.
    let mut config = rustls::ServerConfig::builder()
        .with_no_client_auth()
        .with_single_cert(certs, key)?;

    // Advertise a custom ALPN protocol; must match what edges offer (if any).
    config.alpn_protocols = vec![b"remoteingress".to_vec()];

    Ok(config)
}
|
||||
|
||||
/// Parse all certificates from a PEM string into DER form.
///
/// Fails if any certificate entry in the PEM is malformed; an empty PEM
/// yields an empty vector.
fn rustls_pemfile_parse_certs(
    pem: &str,
) -> Result<Vec<rustls::pki_types::CertificateDer<'static>>, Box<dyn std::error::Error + Send + Sync>>
{
    let mut reader = std::io::Cursor::new(pem.as_bytes());
    let certs = rustls_pemfile::certs(&mut reader).collect::<Result<Vec<_>, _>>()?;
    Ok(certs)
}
|
||||
|
||||
/// Parse the first private key from a PEM string into DER form.
///
/// Errors if the PEM is malformed or contains no private-key entry.
fn rustls_pemfile_parse_key(
    pem: &str,
) -> Result<rustls::pki_types::PrivateKeyDer<'static>, Box<dyn std::error::Error + Send + Sync>> {
    let mut reader = std::io::Cursor::new(pem.as_bytes());
    let key = rustls_pemfile::private_key(&mut reader)?
        .ok_or("no private key found in PEM")?;
    Ok(key)
}
|
||||
|
||||
/// Compare two byte slices without short-circuiting on the first differing
/// byte, so the comparison time does not reveal where secrets diverge.
///
/// The length check itself returns early; lengths are treated as public
/// information, only the byte contents are protected.
fn constant_time_eq(a: &[u8], b: &[u8]) -> bool {
    a.len() == b.len()
        && a.iter().zip(b.iter()).fold(0u8, |acc, (x, y)| acc | (x ^ y)) == 0
}
|
||||
5
rust/crates/remoteingress-core/src/lib.rs
Normal file
5
rust/crates/remoteingress-core/src/lib.rs
Normal file
@@ -0,0 +1,5 @@
|
||||
// Core tunnel components: hub (server side), edge (client side), and
// STUN-based public IP discovery.
pub mod hub;
pub mod edge;
pub mod stun;

// Re-export the wire-protocol crate under a stable path for consumers.
pub use remoteingress_protocol as protocol;
|
||||
137
rust/crates/remoteingress-core/src/stun.rs
Normal file
137
rust/crates/remoteingress-core/src/stun.rs
Normal file
@@ -0,0 +1,137 @@
|
||||
use std::net::Ipv4Addr;
|
||||
use tokio::net::UdpSocket;
|
||||
use tokio::time::{timeout, Duration};
|
||||
|
||||
/// Default public STUN server queried when the caller does not supply one.
const STUN_SERVER: &str = "stun.cloudflare.com:3478";
/// Upper bound on the whole bind/send/receive STUN exchange.
const STUN_TIMEOUT: Duration = Duration::from_secs(3);

// STUN protocol constants (RFC 5389)
/// Message type: Binding Request (class = request, method = Binding).
const STUN_BINDING_REQUEST: u16 = 0x0001;
/// Fixed magic cookie present in every RFC 5389 message header.
const STUN_MAGIC_COOKIE: u32 = 0x2112A442;
/// XOR-MAPPED-ADDRESS attribute type (address XORed with the magic cookie).
const ATTR_XOR_MAPPED_ADDRESS: u16 = 0x0020;
/// MAPPED-ADDRESS attribute type (plain-address fallback from RFC 3489).
const ATTR_MAPPED_ADDRESS: u16 = 0x0001;
|
||||
|
||||
/// Discover our public IP via STUN Binding Request (RFC 5389).
///
/// Convenience wrapper that queries the default server (`STUN_SERVER`);
/// use [`discover_public_ip_from`] to target a specific server.
/// Returns `None` on timeout or parse failure.
pub async fn discover_public_ip() -> Option<String> {
    discover_public_ip_from(STUN_SERVER).await
}
|
||||
|
||||
pub async fn discover_public_ip_from(server: &str) -> Option<String> {
|
||||
let result = timeout(STUN_TIMEOUT, async {
|
||||
let socket = UdpSocket::bind("0.0.0.0:0").await.ok()?;
|
||||
socket.connect(server).await.ok()?;
|
||||
|
||||
// Build STUN Binding Request (20 bytes)
|
||||
let mut request = [0u8; 20];
|
||||
// Message Type: Binding Request (0x0001)
|
||||
request[0..2].copy_from_slice(&STUN_BINDING_REQUEST.to_be_bytes());
|
||||
// Message Length: 0 (no attributes)
|
||||
request[2..4].copy_from_slice(&0u16.to_be_bytes());
|
||||
// Magic Cookie
|
||||
request[4..8].copy_from_slice(&STUN_MAGIC_COOKIE.to_be_bytes());
|
||||
// Transaction ID: 12 random bytes
|
||||
let txn_id: [u8; 12] = rand_bytes();
|
||||
request[8..20].copy_from_slice(&txn_id);
|
||||
|
||||
socket.send(&request).await.ok()?;
|
||||
|
||||
let mut buf = [0u8; 512];
|
||||
let n = socket.recv(&mut buf).await.ok()?;
|
||||
if n < 20 {
|
||||
return None;
|
||||
}
|
||||
|
||||
parse_stun_response(&buf[..n], &txn_id)
|
||||
})
|
||||
.await;
|
||||
|
||||
match result {
|
||||
Ok(ip) => ip,
|
||||
Err(_) => None, // timeout
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_stun_response(data: &[u8], _txn_id: &[u8; 12]) -> Option<String> {
|
||||
if data.len() < 20 {
|
||||
return None;
|
||||
}
|
||||
|
||||
// Verify it's a Binding Response (0x0101)
|
||||
let msg_type = u16::from_be_bytes([data[0], data[1]]);
|
||||
if msg_type != 0x0101 {
|
||||
return None;
|
||||
}
|
||||
|
||||
let msg_len = u16::from_be_bytes([data[2], data[3]]) as usize;
|
||||
let magic = u32::from_be_bytes([data[4], data[5], data[6], data[7]]);
|
||||
|
||||
// Parse attributes
|
||||
let attrs = &data[20..std::cmp::min(20 + msg_len, data.len())];
|
||||
let mut offset = 0;
|
||||
|
||||
while offset + 4 <= attrs.len() {
|
||||
let attr_type = u16::from_be_bytes([attrs[offset], attrs[offset + 1]]);
|
||||
let attr_len = u16::from_be_bytes([attrs[offset + 2], attrs[offset + 3]]) as usize;
|
||||
offset += 4;
|
||||
|
||||
if offset + attr_len > attrs.len() {
|
||||
break;
|
||||
}
|
||||
|
||||
let attr_data = &attrs[offset..offset + attr_len];
|
||||
|
||||
match attr_type {
|
||||
ATTR_XOR_MAPPED_ADDRESS if attr_data.len() >= 8 => {
|
||||
let family = attr_data[1];
|
||||
if family == 0x01 {
|
||||
// IPv4
|
||||
let port_xored = u16::from_be_bytes([attr_data[2], attr_data[3]]);
|
||||
let _port = port_xored ^ (STUN_MAGIC_COOKIE >> 16) as u16;
|
||||
let ip_xored = u32::from_be_bytes([
|
||||
attr_data[4],
|
||||
attr_data[5],
|
||||
attr_data[6],
|
||||
attr_data[7],
|
||||
]);
|
||||
let ip = ip_xored ^ magic;
|
||||
return Some(Ipv4Addr::from(ip).to_string());
|
||||
}
|
||||
}
|
||||
ATTR_MAPPED_ADDRESS if attr_data.len() >= 8 => {
|
||||
let family = attr_data[1];
|
||||
if family == 0x01 {
|
||||
// IPv4 (non-XOR fallback)
|
||||
let ip = u32::from_be_bytes([
|
||||
attr_data[4],
|
||||
attr_data[5],
|
||||
attr_data[6],
|
||||
attr_data[7],
|
||||
]);
|
||||
return Some(Ipv4Addr::from(ip).to_string());
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
|
||||
// Pad to 4-byte boundary
|
||||
offset += (attr_len + 3) & !3;
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// Generate 12 unpredictable bytes for a STUN transaction ID.
///
/// The previous implementation used only timestamp + PID, which is
/// predictable and would let an off-path attacker pre-compute transaction
/// IDs and spoof Binding Responses. This version mixes the current time
/// through two independently keyed `RandomState` hashers; each
/// `RandomState::new()` draws fresh random keys from the OS, so
/// consecutive calls differ even within the same clock tick.
///
/// NOTE(review): this is still not a CSPRNG — adequate for matching
/// responses to requests, but switch to a cryptographic RNG (e.g. the
/// `rand` crate) if RFC 5389's "cryptographically random" recommendation
/// must be met strictly.
fn rand_bytes() -> [u8; 12] {
    use std::collections::hash_map::RandomState;
    use std::hash::{BuildHasher, Hasher};

    let nanos = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_nanos();

    // First randomly-keyed hasher: 8 bytes.
    let mut h1 = RandomState::new().build_hasher();
    h1.write_u128(nanos);
    h1.write_u32(std::process::id());
    let w1 = h1.finish();

    // Second hasher (independent random keys): 4 more bytes.
    let mut h2 = RandomState::new().build_hasher();
    h2.write_u64(w1);
    h2.write_u128(nanos);
    let w2 = h2.finish();

    let mut bytes = [0u8; 12];
    bytes[0..8].copy_from_slice(&w1.to_le_bytes());
    bytes[8..12].copy_from_slice(&w2.to_le_bytes()[..4]);
    bytes
}
|
||||
Reference in New Issue
Block a user