BREAKING CHANGE(remoteingress): migrate core to Rust, add RemoteIngressHub/RemoteIngressEdge JS bridge, and bump package to v2.0.0

This commit is contained in:
2026-02-16 11:22:23 +00:00
parent a3970edf23
commit a144f5a798
25 changed files with 11564 additions and 3408 deletions

View File

@@ -0,0 +1,18 @@
# Manifest for the remoteingress binary: the management process that the
# JS bridge spawns and drives over JSON-lines IPC (stdin/stdout).
[package]
name = "remoteingress-bin"
version = "2.0.0"
edition = "2021"
[[bin]]
name = "remoteingress-bin"
path = "src/main.rs"
[dependencies]
# Sibling workspace crates: tunnel implementation and wire protocol.
remoteingress-core = { path = "../remoteingress-core" }
remoteingress-protocol = { path = "../remoteingress-protocol" }
tokio = { version = "1", features = ["full"] }
clap = { version = "4", features = ["derive"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
log = "0.4"
# Logging goes to stderr; stdout is reserved for IPC lines.
env_logger = "0.11"

View File

@@ -0,0 +1,354 @@
use clap::Parser;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use tokio::io::{AsyncBufReadExt, BufReader};
use tokio::sync::Mutex;
use remoteingress_core::hub::{AllowedEdge, HubConfig, HubEvent, TunnelHub};
use remoteingress_core::edge::{EdgeConfig, EdgeEvent, TunnelEdge};
/// Command-line arguments for `remoteingress-bin`.
///
/// The binary is only usable in `--management` mode; `main` exits with an
/// error when the flag is absent.
#[derive(Parser)]
#[command(name = "remoteingress-bin", version = "2.0.0")]
struct Cli {
    /// Run in IPC management mode (JSON over stdin/stdout)
    #[arg(long)]
    management: bool,
}
// IPC message types
/// An incoming IPC request: one JSON object per stdin line.
#[derive(Deserialize)]
struct IpcRequest {
    // Correlation id echoed back in the matching IpcResponse.
    id: String,
    // Method name dispatched in handle_request (e.g. "startHub").
    method: String,
    // Method-specific parameters; deserialized per-method.
    params: serde_json::Value,
}
/// An outgoing IPC response, correlated to a request by `id`.
/// Exactly one of `result`/`error` is present depending on `success`.
#[derive(Serialize)]
struct IpcResponse {
    id: String,
    success: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    result: Option<serde_json::Value>,
    #[serde(skip_serializing_if = "Option::is_none")]
    error: Option<String>,
}
/// An unsolicited IPC event pushed to the parent (not tied to a request).
#[derive(Serialize)]
struct IpcEvent {
    // Event name, e.g. "ready", "edgeConnected", "tunnelDisconnected".
    event: String,
    data: serde_json::Value,
}
/// Emit one IPC line on stdout, newline-terminated and flushed immediately.
///
/// Write errors are deliberately discarded: if stdout is broken the parent
/// process is gone and there is nobody left to report to.
fn send_ipc_line(line: &str) {
    use std::io::Write;
    let mut locked = std::io::stdout().lock();
    let _ = locked.write_all(line.as_bytes());
    let _ = locked.write_all(b"\n");
    let _ = locked.flush();
}
/// Serialize an [`IpcEvent`] and push it to the parent as one stdout line.
/// Serialization failures are silently dropped (the event is best-effort).
fn send_event(event: &str, data: serde_json::Value) {
    let payload = IpcEvent {
        event: event.to_string(),
        data,
    };
    if let Ok(encoded) = serde_json::to_string(&payload) {
        send_ipc_line(&encoded);
    }
}
/// Send a successful IPC response for request `id` carrying `result`.
fn send_response(id: &str, result: serde_json::Value) {
    let reply = IpcResponse {
        id: id.to_string(),
        success: true,
        result: Some(result),
        error: None,
    };
    if let Ok(encoded) = serde_json::to_string(&reply) {
        send_ipc_line(&encoded);
    }
}
/// Send a failed IPC response for request `id` carrying an error message.
fn send_error(id: &str, error: &str) {
    let reply = IpcResponse {
        id: id.to_string(),
        success: false,
        result: None,
        error: Some(error.to_string()),
    };
    if let Ok(encoded) = serde_json::to_string(&reply) {
        send_ipc_line(&encoded);
    }
}
/// Entry point: runs the JSON-lines IPC loop over stdin/stdout.
///
/// Protocol: one JSON request per stdin line; responses and events are
/// emitted one per stdout line. Logging goes to stderr only. The process
/// exits when stdin reaches EOF (parent closed the pipe) or a read fails.
#[tokio::main]
async fn main() {
    let cli = Cli::parse();
    // Only IPC management mode is supported; refuse plain invocation.
    if !cli.management {
        eprintln!("remoteingress-bin: use --management for IPC mode");
        std::process::exit(1);
    }
    // Initialize logging to stderr (stdout is for IPC)
    env_logger::Builder::from_default_env()
        .target(env_logger::Target::Stderr)
        .filter_level(log::LevelFilter::Info)
        .init();
    // Tell the parent we are ready to accept requests.
    send_event("ready", serde_json::json!({ "version": "2.0.0" }));
    // Shared state: at most one hub and one edge instance at a time.
    let hub: Arc<Mutex<Option<Arc<TunnelHub>>>> = Arc::new(Mutex::new(None));
    let edge: Arc<Mutex<Option<Arc<TunnelEdge>>>> = Arc::new(Mutex::new(None));
    // Read commands from stdin
    let stdin = tokio::io::stdin();
    let reader = BufReader::new(stdin);
    let mut lines = reader.lines();
    while let Ok(Some(line)) = lines.next_line().await {
        let line = line.trim().to_string();
        if line.is_empty() {
            continue;
        }
        let request: IpcRequest = match serde_json::from_str(&line) {
            Ok(r) => r,
            Err(e) => {
                // Malformed line: log and skip. No error response is sent
                // because we could not recover a request id to reply to.
                log::error!("Invalid IPC request: {}", e);
                continue;
            }
        };
        // Handle each request on its own task so a slow method does not
        // block subsequent commands; responses may therefore interleave
        // out of request order (the parent matches on `id`).
        let hub = hub.clone();
        let edge = edge.clone();
        tokio::spawn(async move {
            handle_request(request, hub, edge).await;
        });
    }
}
/// Dispatch one IPC request to the matching hub/edge operation.
///
/// Every code path sends exactly one response (success or error) keyed by
/// `req.id`. Hub/edge lifecycle events are additionally forwarded to the
/// parent as separate event lines by tasks spawned here.
async fn handle_request(
    req: IpcRequest,
    hub: Arc<Mutex<Option<Arc<TunnelHub>>>>,
    edge: Arc<Mutex<Option<Arc<TunnelEdge>>>>,
) {
    match req.method.as_str() {
        // Liveness probe.
        "ping" => {
            send_response(&req.id, serde_json::json!({ "pong": true }));
        }
        "startHub" => {
            // Params are the HubConfig itself (camelCase JSON).
            let config: HubConfig = match serde_json::from_value(req.params.clone()) {
                Ok(c) => c,
                Err(e) => {
                    send_error(&req.id, &format!("invalid hub config: {}", e));
                    return;
                }
            };
            let tunnel_hub = Arc::new(TunnelHub::new(config));
            // Forward hub events to IPC as camelCase event lines. The task
            // ends when the hub drops its event sender.
            if let Some(mut event_rx) = tunnel_hub.take_event_rx().await {
                tokio::spawn(async move {
                    while let Some(event) = event_rx.recv().await {
                        match &event {
                            HubEvent::EdgeConnected { edge_id } => {
                                send_event(
                                    "edgeConnected",
                                    serde_json::json!({ "edgeId": edge_id }),
                                );
                            }
                            HubEvent::EdgeDisconnected { edge_id } => {
                                send_event(
                                    "edgeDisconnected",
                                    serde_json::json!({ "edgeId": edge_id }),
                                );
                            }
                            HubEvent::StreamOpened {
                                edge_id,
                                stream_id,
                            } => {
                                send_event(
                                    "streamOpened",
                                    serde_json::json!({
                                        "edgeId": edge_id,
                                        "streamId": stream_id,
                                    }),
                                );
                            }
                            HubEvent::StreamClosed {
                                edge_id,
                                stream_id,
                            } => {
                                send_event(
                                    "streamClosed",
                                    serde_json::json!({
                                        "edgeId": edge_id,
                                        "streamId": stream_id,
                                    }),
                                );
                            }
                        }
                    }
                });
            }
            match tunnel_hub.start().await {
                Ok(()) => {
                    // NOTE(review): if a hub is already running, this
                    // replaces it without calling stop() on the old one —
                    // confirm whether startHub should reject or stop first.
                    *hub.lock().await = Some(tunnel_hub);
                    send_response(&req.id, serde_json::json!({ "started": true }));
                }
                Err(e) => {
                    send_error(&req.id, &format!("failed to start hub: {}", e));
                }
            }
        }
        "stopHub" => {
            // take() clears the slot so a second stopHub reports wasRunning=false.
            let mut h = hub.lock().await;
            if let Some(hub_instance) = h.take() {
                hub_instance.stop().await;
                send_response(&req.id, serde_json::json!({ "stopped": true }));
            } else {
                send_response(
                    &req.id,
                    serde_json::json!({ "stopped": true, "wasRunning": false }),
                );
            }
        }
        "updateAllowedEdges" => {
            // Params: { "edges": [{ "id": ..., "secret": ... }, ...] }
            #[derive(Deserialize)]
            #[serde(rename_all = "camelCase")]
            struct UpdateEdgesParams {
                edges: Vec<AllowedEdge>,
            }
            let params: UpdateEdgesParams = match serde_json::from_value(req.params.clone()) {
                Ok(p) => p,
                Err(e) => {
                    send_error(&req.id, &format!("invalid params: {}", e));
                    return;
                }
            };
            let h = hub.lock().await;
            if let Some(hub_instance) = h.as_ref() {
                hub_instance.update_allowed_edges(params.edges).await;
                send_response(&req.id, serde_json::json!({ "updated": true }));
            } else {
                send_error(&req.id, "hub not running");
            }
        }
        "getHubStatus" => {
            let h = hub.lock().await;
            if let Some(hub_instance) = h.as_ref() {
                let status = hub_instance.get_status().await;
                send_response(
                    &req.id,
                    serde_json::to_value(&status).unwrap_or_default(),
                );
            } else {
                // No hub: synthesize an empty status instead of an error so
                // callers can poll unconditionally.
                send_response(
                    &req.id,
                    serde_json::json!({
                        "running": false,
                        "tunnelPort": 0,
                        "connectedEdges": []
                    }),
                );
            }
        }
        "startEdge" => {
            // Params are the EdgeConfig itself (camelCase JSON).
            let config: EdgeConfig = match serde_json::from_value(req.params.clone()) {
                Ok(c) => c,
                Err(e) => {
                    send_error(&req.id, &format!("invalid edge config: {}", e));
                    return;
                }
            };
            let tunnel_edge = Arc::new(TunnelEdge::new(config));
            // Forward edge events to IPC, mirroring the hub event task above.
            if let Some(mut event_rx) = tunnel_edge.take_event_rx().await {
                tokio::spawn(async move {
                    while let Some(event) = event_rx.recv().await {
                        match &event {
                            EdgeEvent::TunnelConnected => {
                                send_event("tunnelConnected", serde_json::json!({}));
                            }
                            EdgeEvent::TunnelDisconnected => {
                                send_event("tunnelDisconnected", serde_json::json!({}));
                            }
                            EdgeEvent::PublicIpDiscovered { ip } => {
                                send_event(
                                    "publicIpDiscovered",
                                    serde_json::json!({ "ip": ip }),
                                );
                            }
                        }
                    }
                });
            }
            match tunnel_edge.start().await {
                Ok(()) => {
                    // NOTE(review): same replace-without-stop concern as
                    // startHub — an already-running edge is dropped here.
                    *edge.lock().await = Some(tunnel_edge);
                    send_response(&req.id, serde_json::json!({ "started": true }));
                }
                Err(e) => {
                    send_error(&req.id, &format!("failed to start edge: {}", e));
                }
            }
        }
        "stopEdge" => {
            let mut e = edge.lock().await;
            if let Some(edge_instance) = e.take() {
                edge_instance.stop().await;
                send_response(&req.id, serde_json::json!({ "stopped": true }));
            } else {
                send_response(
                    &req.id,
                    serde_json::json!({ "stopped": true, "wasRunning": false }),
                );
            }
        }
        "getEdgeStatus" => {
            let e = edge.lock().await;
            if let Some(edge_instance) = e.as_ref() {
                let status = edge_instance.get_status().await;
                send_response(
                    &req.id,
                    serde_json::to_value(&status).unwrap_or_default(),
                );
            } else {
                // No edge: synthesize an empty status (see getHubStatus).
                send_response(
                    &req.id,
                    serde_json::json!({
                        "running": false,
                        "connected": false,
                        "publicIp": null,
                        "activeStreams": 0,
                        "listenPorts": []
                    }),
                );
            }
        }
        _ => {
            send_error(&req.id, &format!("unknown method: {}", req.method));
        }
    }
}

View File

@@ -0,0 +1,15 @@
# Manifest for remoteingress-core: the hub/edge tunnel implementation
# consumed by remoteingress-bin.
[package]
name = "remoteingress-core"
version = "2.0.0"
edition = "2021"
[dependencies]
remoteingress-protocol = { path = "../remoteingress-protocol" }
tokio = { version = "1", features = ["full"] }
# TLS stack: tokio-rustls for async TLS, rcgen for self-signed certs,
# rustls-pemfile for parsing operator-provided PEM material.
tokio-rustls = "0.26"
rustls = { version = "0.23", features = ["ring"] }
rcgen = "0.13"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
log = "0.4"
rustls-pemfile = "2"

View File

@@ -0,0 +1,478 @@
use std::collections::HashMap;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener, TcpStream};
use tokio::sync::{mpsc, Mutex, RwLock};
use tokio_rustls::TlsConnector;
use serde::{Deserialize, Serialize};
use remoteingress_protocol::*;
/// Edge configuration, deserialized from camelCase JSON over IPC.
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct EdgeConfig {
    /// Hostname or IP of the hub to dial.
    pub hub_host: String,
    /// Hub TLS tunnel port.
    pub hub_port: u16,
    /// Identity sent in the tunnel auth line.
    pub edge_id: String,
    /// Shared secret sent in the tunnel auth line.
    pub secret: String,
    /// Local TCP ports on which to accept client connections.
    pub listen_ports: Vec<u16>,
    /// Seconds between STUN public-IP probes; defaults to 300 when absent.
    pub stun_interval_secs: Option<u64>,
}
/// Events emitted by the edge, tagged with a `type` field when serialized.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
#[serde(tag = "type")]
pub enum EdgeEvent {
    /// Tunnel to the hub established and auth line sent.
    TunnelConnected,
    /// Tunnel dropped (EOF, frame error, or shutdown).
    TunnelDisconnected,
    /// STUN probe found (or changed) our public IP.
    #[serde(rename_all = "camelCase")]
    PublicIpDiscovered { ip: String },
}
/// Edge status response, serialized to camelCase JSON for IPC.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct EdgeStatus {
    /// start() has been called and stop() has not.
    pub running: bool,
    /// Tunnel to the hub is currently up.
    pub connected: bool,
    /// Last STUN-discovered public IP, if any.
    pub public_ip: Option<String>,
    /// Number of live client streams.
    pub active_streams: usize,
    /// Ports this edge listens on (from config).
    pub listen_ports: Vec<u16>,
}
/// The tunnel edge that listens for client connections and multiplexes them to the hub.
///
/// Fields shared with spawned tasks are wrapped in `Arc`; the rest are
/// interior-mutable so the public API can take `&self`.
pub struct TunnelEdge {
    config: RwLock<EdgeConfig>,
    // Event channel: tx is cloned into tasks, rx is handed out once.
    event_tx: mpsc::UnboundedSender<EdgeEvent>,
    event_rx: Mutex<Option<mpsc::UnboundedReceiver<EdgeEvent>>>,
    // Present only while started; dropping/sending signals the main loop.
    shutdown_tx: Mutex<Option<mpsc::Sender<()>>>,
    running: RwLock<bool>,
    connected: Arc<RwLock<bool>>,
    public_ip: Arc<RwLock<Option<String>>>,
    active_streams: Arc<AtomicU32>,
    // Monotonic stream-id allocator, starts at 1.
    next_stream_id: Arc<AtomicU32>,
}
impl TunnelEdge {
    /// Create a new, not-yet-started edge from `config`.
    pub fn new(config: EdgeConfig) -> Self {
        let (event_tx, event_rx) = mpsc::unbounded_channel();
        Self {
            config: RwLock::new(config),
            event_tx,
            event_rx: Mutex::new(Some(event_rx)),
            shutdown_tx: Mutex::new(None),
            running: RwLock::new(false),
            connected: Arc::new(RwLock::new(false)),
            public_ip: Arc::new(RwLock::new(None)),
            active_streams: Arc::new(AtomicU32::new(0)),
            next_stream_id: Arc::new(AtomicU32::new(1)),
        }
    }
    /// Take the event receiver (can only be called once).
    pub async fn take_event_rx(&self) -> Option<mpsc::UnboundedReceiver<EdgeEvent>> {
        self.event_rx.lock().await.take()
    }
    /// Get the current edge status.
    pub async fn get_status(&self) -> EdgeStatus {
        EdgeStatus {
            running: *self.running.read().await,
            connected: *self.connected.read().await,
            public_ip: self.public_ip.read().await.clone(),
            active_streams: self.active_streams.load(Ordering::Relaxed) as usize,
            listen_ports: self.config.read().await.listen_ports.clone(),
        }
    }
    /// Start the edge: connect to hub, start listeners.
    ///
    /// Spawns the reconnect loop in the background and returns immediately;
    /// connection failures surface as events, not as an `Err` here.
    pub async fn start(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        let config = self.config.read().await.clone();
        let (shutdown_tx, shutdown_rx) = mpsc::channel::<()>(1);
        *self.shutdown_tx.lock().await = Some(shutdown_tx);
        *self.running.write().await = true;
        // Clone the Arcs the background loop mutates.
        let connected = self.connected.clone();
        let public_ip = self.public_ip.clone();
        let active_streams = self.active_streams.clone();
        let next_stream_id = self.next_stream_id.clone();
        let event_tx = self.event_tx.clone();
        tokio::spawn(async move {
            edge_main_loop(
                config,
                connected,
                public_ip,
                active_streams,
                next_stream_id,
                event_tx,
                shutdown_rx,
            )
            .await;
        });
        Ok(())
    }
    /// Stop the edge.
    ///
    /// Signals the main loop via the shutdown channel (taking the sender so
    /// a second stop() is a no-op) and resets the status flags.
    pub async fn stop(&self) {
        if let Some(tx) = self.shutdown_tx.lock().await.take() {
            let _ = tx.send(()).await;
        }
        *self.running.write().await = false;
        *self.connected.write().await = false;
    }
}
/// Background reconnect loop for the edge.
///
/// Repeatedly connects to the hub and runs the tunnel; on disconnect it
/// resets connection state and retries with exponential backoff (1s,
/// doubling, capped at 30s). Exits when a shutdown signal arrives.
async fn edge_main_loop(
    config: EdgeConfig,
    connected: Arc<RwLock<bool>>,
    public_ip: Arc<RwLock<Option<String>>>,
    active_streams: Arc<AtomicU32>,
    next_stream_id: Arc<AtomicU32>,
    event_tx: mpsc::UnboundedSender<EdgeEvent>,
    mut shutdown_rx: mpsc::Receiver<()>,
) {
    let mut backoff_ms: u64 = 1000;
    let max_backoff_ms: u64 = 30000;
    loop {
        // Try to connect to hub
        let result = connect_to_hub_and_run(
            &config,
            &connected,
            &public_ip,
            &active_streams,
            &next_stream_id,
            &event_tx,
            &mut shutdown_rx,
        )
        .await;
        // The tunnel is down regardless of why we returned: reset state and
        // notify listeners before deciding whether to retry.
        *connected.write().await = false;
        let _ = event_tx.send(EdgeEvent::TunnelDisconnected);
        active_streams.store(0, Ordering::Relaxed);
        match result {
            EdgeLoopResult::Shutdown => break,
            EdgeLoopResult::Reconnect => {
                log::info!("Reconnecting in {}ms...", backoff_ms);
                // Sleep for the backoff, but abort the wait immediately on
                // shutdown.
                tokio::select! {
                    _ = tokio::time::sleep(std::time::Duration::from_millis(backoff_ms)) => {}
                    _ = shutdown_rx.recv() => break,
                }
                backoff_ms = (backoff_ms * 2).min(max_backoff_ms);
            }
        }
    }
}
/// Outcome of one hub connection attempt in the edge main loop.
enum EdgeLoopResult {
    /// Shutdown was requested; exit the loop.
    Shutdown,
    /// Connection failed or dropped; retry after backoff.
    Reconnect,
}
/// Connect to the hub over TLS, authenticate, start port listeners and STUN
/// probing, then pump frames until disconnect or shutdown.
///
/// Returns `Reconnect` for any transport failure and `Shutdown` when the
/// shutdown channel fires. All tasks spawned here are aborted on return.
async fn connect_to_hub_and_run(
    config: &EdgeConfig,
    connected: &Arc<RwLock<bool>>,
    public_ip: &Arc<RwLock<Option<String>>>,
    active_streams: &Arc<AtomicU32>,
    next_stream_id: &Arc<AtomicU32>,
    event_tx: &mpsc::UnboundedSender<EdgeEvent>,
    shutdown_rx: &mut mpsc::Receiver<()>,
) -> EdgeLoopResult {
    // Build TLS connector that skips cert verification (auth is via secret)
    let tls_config = rustls::ClientConfig::builder()
        .dangerous()
        .with_custom_certificate_verifier(Arc::new(NoCertVerifier))
        .with_no_client_auth();
    let connector = TlsConnector::from(Arc::new(tls_config));
    let addr = format!("{}:{}", config.hub_host, config.hub_port);
    let tcp = match TcpStream::connect(&addr).await {
        Ok(s) => s,
        Err(e) => {
            log::error!("Failed to connect to hub at {}: {}", addr, e);
            return EdgeLoopResult::Reconnect;
        }
    };
    // If hub_host is not usable as a rustls ServerName, fall back to a fixed
    // placeholder — harmless since the certificate is not verified anyway.
    let server_name = rustls::pki_types::ServerName::try_from(config.hub_host.clone())
        .unwrap_or_else(|_| rustls::pki_types::ServerName::try_from("remoteingress-hub".to_string()).unwrap());
    let tls_stream = match connector.connect(server_name, tcp).await {
        Ok(s) => s,
        Err(e) => {
            log::error!("TLS handshake failed: {}", e);
            return EdgeLoopResult::Reconnect;
        }
    };
    let (read_half, mut write_half) = tokio::io::split(tls_stream);
    // Send auth line: "EDGE <edgeId> <secret>\n" (matches the hub's parser).
    let auth_line = format!("EDGE {} {}\n", config.edge_id, config.secret);
    if write_half.write_all(auth_line.as_bytes()).await.is_err() {
        return EdgeLoopResult::Reconnect;
    }
    // NOTE(review): connected is reported true after the auth line is
    // written, not after the hub accepts it — a rejected secret still
    // produces a TunnelConnected event; confirm this is intended.
    *connected.write().await = true;
    let _ = event_tx.send(EdgeEvent::TunnelConnected);
    log::info!("Connected to hub at {}", addr);
    // Start periodic STUN discovery; emits PublicIpDiscovered on change.
    let stun_interval = config.stun_interval_secs.unwrap_or(300);
    let public_ip_clone = public_ip.clone();
    let event_tx_clone = event_tx.clone();
    let stun_handle = tokio::spawn(async move {
        loop {
            if let Some(ip) = crate::stun::discover_public_ip().await {
                let mut pip = public_ip_clone.write().await;
                let changed = pip.as_ref() != Some(&ip);
                *pip = Some(ip.clone());
                if changed {
                    let _ = event_tx_clone.send(EdgeEvent::PublicIpDiscovered { ip });
                }
            }
            tokio::time::sleep(std::time::Duration::from_secs(stun_interval)).await;
        }
    });
    // Client socket map: stream_id -> sender for writing data back to client
    let client_writers: Arc<Mutex<HashMap<u32, mpsc::Sender<Vec<u8>>>>> =
        Arc::new(Mutex::new(HashMap::new()));
    // Shared tunnel writer (all streams interleave frames on one TLS link).
    let tunnel_writer = Arc::new(Mutex::new(write_half));
    // Start one accept-loop task per configured listen port.
    let mut listener_handles = Vec::new();
    for &port in &config.listen_ports {
        let tunnel_writer = tunnel_writer.clone();
        let client_writers = client_writers.clone();
        let active_streams = active_streams.clone();
        let next_stream_id = next_stream_id.clone();
        let edge_id = config.edge_id.clone();
        let handle = tokio::spawn(async move {
            let listener = match TcpListener::bind(("0.0.0.0", port)).await {
                Ok(l) => l,
                Err(e) => {
                    // Bind failure kills only this port's task; the other
                    // ports and the tunnel keep running.
                    log::error!("Failed to bind port {}: {}", port, e);
                    return;
                }
            };
            log::info!("Listening on port {}", port);
            loop {
                match listener.accept().await {
                    Ok((client_stream, client_addr)) => {
                        // Allocate a tunnel-wide unique stream id and track
                        // the stream for status reporting.
                        let stream_id = next_stream_id.fetch_add(1, Ordering::Relaxed);
                        let tunnel_writer = tunnel_writer.clone();
                        let client_writers = client_writers.clone();
                        let active_streams = active_streams.clone();
                        let edge_id = edge_id.clone();
                        active_streams.fetch_add(1, Ordering::Relaxed);
                        tokio::spawn(async move {
                            handle_client_connection(
                                client_stream,
                                client_addr,
                                stream_id,
                                port,
                                &edge_id,
                                tunnel_writer,
                                client_writers,
                            )
                            .await;
                            active_streams.fetch_sub(1, Ordering::Relaxed);
                        });
                    }
                    Err(e) => {
                        log::error!("Accept error on port {}: {}", port, e);
                    }
                }
            }
        });
        listener_handles.push(handle);
    }
    // Main frame loop: demux hub->edge frames to the per-stream channels.
    let mut frame_reader = FrameReader::new(read_half);
    let result = loop {
        tokio::select! {
            frame_result = frame_reader.next_frame() => {
                match frame_result {
                    Ok(Some(frame)) => {
                        match frame.frame_type {
                            FRAME_DATA_BACK => {
                                // Route payload to the owning client stream;
                                // unknown ids (already-closed streams) drop.
                                let writers = client_writers.lock().await;
                                if let Some(tx) = writers.get(&frame.stream_id) {
                                    let _ = tx.send(frame.payload).await;
                                }
                            }
                            FRAME_CLOSE_BACK => {
                                // Dropping the sender ends the stream's
                                // hub->client pump.
                                let mut writers = client_writers.lock().await;
                                writers.remove(&frame.stream_id);
                            }
                            _ => {
                                log::warn!("Unexpected frame type {} from hub", frame.frame_type);
                            }
                        }
                    }
                    Ok(None) => {
                        log::info!("Hub disconnected (EOF)");
                        break EdgeLoopResult::Reconnect;
                    }
                    Err(e) => {
                        log::error!("Hub frame error: {}", e);
                        break EdgeLoopResult::Reconnect;
                    }
                }
            }
            _ = shutdown_rx.recv() => {
                break EdgeLoopResult::Shutdown;
            }
        }
    };
    // Cleanup: tear down STUN probing and all port listeners before
    // returning to the reconnect loop.
    stun_handle.abort();
    for h in listener_handles {
        h.abort();
    }
    result
}
/// Pump one accepted client connection through the tunnel as a stream.
///
/// Sends an OPEN frame carrying a PROXY v1 header, registers a channel for
/// hub->client data, copies client bytes into DATA frames, and sends a
/// CLOSE frame when the client side ends.
async fn handle_client_connection(
    client_stream: TcpStream,
    client_addr: std::net::SocketAddr,
    stream_id: u32,
    dest_port: u16,
    edge_id: &str,
    tunnel_writer: Arc<Mutex<tokio::io::WriteHalf<tokio_rustls::client::TlsStream<TcpStream>>>>,
    client_writers: Arc<Mutex<HashMap<u32, mpsc::Sender<Vec<u8>>>>>,
) {
    let client_ip = client_addr.ip().to_string();
    let client_port = client_addr.port();
    // Determine edge IP (use 0.0.0.0 as placeholder — hub doesn't use it for routing)
    let edge_ip = "0.0.0.0";
    // Send OPEN frame with PROXY v1 header
    let proxy_header = build_proxy_v1_header(&client_ip, edge_ip, client_port, dest_port);
    let open_frame = encode_frame(stream_id, FRAME_OPEN, proxy_header.as_bytes());
    {
        let mut w = tunnel_writer.lock().await;
        if w.write_all(&open_frame).await.is_err() {
            return;
        }
    }
    // Set up channel for data coming back from hub (removed by the tunnel
    // frame loop on FRAME_CLOSE_BACK, or by the cleanup below).
    let (back_tx, mut back_rx) = mpsc::channel::<Vec<u8>>(256);
    {
        let mut writers = client_writers.lock().await;
        writers.insert(stream_id, back_tx);
    }
    let (mut client_read, mut client_write) = client_stream.into_split();
    // Task: hub -> client. Ends when the channel sender is dropped.
    let hub_to_client = tokio::spawn(async move {
        while let Some(data) = back_rx.recv().await {
            if client_write.write_all(&data).await.is_err() {
                break;
            }
        }
        let _ = client_write.shutdown().await;
    });
    // Task: client -> hub (runs inline on this task).
    let mut buf = vec![0u8; 32768];
    loop {
        match client_read.read(&mut buf).await {
            Ok(0) => break,
            Ok(n) => {
                let data_frame = encode_frame(stream_id, FRAME_DATA, &buf[..n]);
                let mut w = tunnel_writer.lock().await;
                if w.write_all(&data_frame).await.is_err() {
                    break;
                }
            }
            Err(_) => break,
        }
    }
    // Send CLOSE frame
    let close_frame = encode_frame(stream_id, FRAME_CLOSE, &[]);
    {
        let mut w = tunnel_writer.lock().await;
        let _ = w.write_all(&close_frame).await;
    }
    // Cleanup
    {
        let mut writers = client_writers.lock().await;
        writers.remove(&stream_id);
    }
    // NOTE(review): aborting here may cut off hub->client data still queued
    // in back_rx after the client half-closes its read side — confirm that
    // half-open streams are not required by any client protocol.
    hub_to_client.abort();
    let _ = edge_id; // used for logging context
}
/// TLS certificate verifier that accepts any certificate (auth is via shared secret).
///
/// The hub presents a self-signed certificate (see the hub's TLS config
/// builder), so normal chain verification cannot succeed; TLS here provides
/// only confidentiality, while authentication is the EDGE auth line.
#[derive(Debug)]
struct NoCertVerifier;
impl rustls::client::danger::ServerCertVerifier for NoCertVerifier {
    // Unconditionally accept the server certificate.
    fn verify_server_cert(
        &self,
        _end_entity: &rustls::pki_types::CertificateDer<'_>,
        _intermediates: &[rustls::pki_types::CertificateDer<'_>],
        _server_name: &rustls::pki_types::ServerName<'_>,
        _ocsp_response: &[u8],
        _now: rustls::pki_types::UnixTime,
    ) -> Result<rustls::client::danger::ServerCertVerified, rustls::Error> {
        Ok(rustls::client::danger::ServerCertVerified::assertion())
    }
    // Accept all handshake signatures (TLS 1.2 and 1.3 variants).
    fn verify_tls12_signature(
        &self,
        _message: &[u8],
        _cert: &rustls::pki_types::CertificateDer<'_>,
        _dss: &rustls::DigitallySignedStruct,
    ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
        Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
    }
    fn verify_tls13_signature(
        &self,
        _message: &[u8],
        _cert: &rustls::pki_types::CertificateDer<'_>,
        _dss: &rustls::DigitallySignedStruct,
    ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
        Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
    }
    // Advertise every scheme so no handshake is rejected on signature type.
    fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
        vec![
            rustls::SignatureScheme::RSA_PKCS1_SHA256,
            rustls::SignatureScheme::RSA_PKCS1_SHA384,
            rustls::SignatureScheme::RSA_PKCS1_SHA512,
            rustls::SignatureScheme::ECDSA_NISTP256_SHA256,
            rustls::SignatureScheme::ECDSA_NISTP384_SHA384,
            rustls::SignatureScheme::ECDSA_NISTP521_SHA512,
            rustls::SignatureScheme::RSA_PSS_SHA256,
            rustls::SignatureScheme::RSA_PSS_SHA384,
            rustls::SignatureScheme::RSA_PSS_SHA512,
            rustls::SignatureScheme::ED25519,
            rustls::SignatureScheme::ED448,
        ]
    }
}

View File

@@ -0,0 +1,477 @@
use std::collections::HashMap;
use std::sync::Arc;
use tokio::io::{AsyncBufReadExt, AsyncReadExt, AsyncWriteExt, BufReader};
use tokio::net::{TcpListener, TcpStream};
use tokio::sync::{mpsc, Mutex, RwLock};
use tokio_rustls::TlsAcceptor;
use serde::{Deserialize, Serialize};
use remoteingress_protocol::*;
/// Hub configuration, deserialized from camelCase JSON over IPC.
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct HubConfig {
    /// TLS port on which edges connect.
    pub tunnel_port: u16,
    /// Host that demuxed streams are forwarded to (default 127.0.0.1).
    pub target_host: Option<String>,
    // PEM material is never accepted over IPC (serde skip); when absent a
    // self-signed certificate is generated at start.
    #[serde(skip)]
    pub tls_cert_pem: Option<String>,
    #[serde(skip)]
    pub tls_key_pem: Option<String>,
}
impl Default for HubConfig {
    /// Defaults: tunnel on 8443, forward to localhost, no PEM material
    /// (a self-signed certificate is generated at start).
    fn default() -> Self {
        Self {
            tunnel_port: 8443,
            target_host: Some("127.0.0.1".to_string()),
            tls_cert_pem: None,
            tls_key_pem: None,
        }
    }
}
/// An allowed edge identity: the credentials an edge must present in its
/// auth line to be accepted by the hub.
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct AllowedEdge {
    pub id: String,
    pub secret: String,
}
/// Runtime status of a connected edge, as reported by `get_status`.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ConnectedEdgeStatus {
    pub edge_id: String,
    /// Unix timestamp (seconds) of when the edge authenticated.
    pub connected_at: u64,
    /// Number of currently open streams for this edge.
    pub active_streams: usize,
}
/// Events emitted by the hub, tagged with a `type` field when serialized.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
#[serde(tag = "type")]
pub enum HubEvent {
    /// An edge authenticated successfully.
    #[serde(rename_all = "camelCase")]
    EdgeConnected { edge_id: String },
    /// An edge's tunnel ended (EOF or frame error).
    #[serde(rename_all = "camelCase")]
    EdgeDisconnected { edge_id: String },
    /// An edge opened a new multiplexed stream.
    #[serde(rename_all = "camelCase")]
    StreamOpened { edge_id: String, stream_id: u32 },
    /// A multiplexed stream finished (cleanly or on error).
    #[serde(rename_all = "camelCase")]
    StreamClosed { edge_id: String, stream_id: u32 },
}
/// Hub status response, serialized to camelCase JSON for IPC.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct HubStatus {
    pub running: bool,
    pub tunnel_port: u16,
    pub connected_edges: Vec<ConnectedEdgeStatus>,
}
/// The tunnel hub that accepts edge connections and demuxes streams to SmartProxy.
///
/// Interior mutability throughout so the public API takes `&self`; maps
/// shared with per-connection tasks are wrapped in `Arc`.
pub struct TunnelHub {
    config: RwLock<HubConfig>,
    allowed_edges: Arc<RwLock<HashMap<String, String>>>, // id -> secret
    connected_edges: Arc<Mutex<HashMap<String, ConnectedEdgeInfo>>>,
    // Event channel: tx is cloned into tasks, rx is handed out once.
    event_tx: mpsc::UnboundedSender<HubEvent>,
    event_rx: Mutex<Option<mpsc::UnboundedReceiver<HubEvent>>>,
    // Present only while started; sending signals the accept loop to stop.
    shutdown_tx: Mutex<Option<mpsc::Sender<()>>>,
    running: RwLock<bool>,
}
/// Per-edge bookkeeping kept while the edge's tunnel is up.
struct ConnectedEdgeInfo {
    // Unix timestamp (seconds) of authentication.
    connected_at: u64,
    // stream_id -> sender feeding edge data into the upstream connection.
    active_streams: Arc<Mutex<HashMap<u32, mpsc::Sender<Vec<u8>>>>>,
}
impl TunnelHub {
    /// Create a new, not-yet-started hub from `config`.
    pub fn new(config: HubConfig) -> Self {
        let (event_tx, event_rx) = mpsc::unbounded_channel();
        Self {
            config: RwLock::new(config),
            allowed_edges: Arc::new(RwLock::new(HashMap::new())),
            connected_edges: Arc::new(Mutex::new(HashMap::new())),
            event_tx,
            event_rx: Mutex::new(Some(event_rx)),
            shutdown_tx: Mutex::new(None),
            running: RwLock::new(false),
        }
    }
    /// Take the event receiver (can only be called once).
    pub async fn take_event_rx(&self) -> Option<mpsc::UnboundedReceiver<HubEvent>> {
        self.event_rx.lock().await.take()
    }
    /// Update the list of allowed edges.
    ///
    /// Replaces the whole credential map; edges already connected are not
    /// re-checked here.
    pub async fn update_allowed_edges(&self, edges: Vec<AllowedEdge>) {
        let mut map = self.allowed_edges.write().await;
        map.clear();
        for edge in edges {
            map.insert(edge.id, edge.secret);
        }
    }
    /// Get the current hub status.
    pub async fn get_status(&self) -> HubStatus {
        let running = *self.running.read().await;
        let config = self.config.read().await;
        let edges = self.connected_edges.lock().await;
        let mut connected = Vec::new();
        for (id, info) in edges.iter() {
            let streams = info.active_streams.lock().await;
            connected.push(ConnectedEdgeStatus {
                edge_id: id.clone(),
                connected_at: info.connected_at,
                active_streams: streams.len(),
            });
        }
        HubStatus {
            running,
            tunnel_port: config.tunnel_port,
            connected_edges: connected,
        }
    }
    /// Start the hub — listen for TLS connections from edges.
    ///
    /// Binds the tunnel port and spawns the accept loop; returns `Err` only
    /// for setup failures (TLS config or bind). Each accepted edge gets its
    /// own connection task.
    pub async fn start(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        let config = self.config.read().await.clone();
        let tls_config = build_tls_config(&config)?;
        let acceptor = TlsAcceptor::from(Arc::new(tls_config));
        let listener = TcpListener::bind(("0.0.0.0", config.tunnel_port)).await?;
        log::info!("Hub listening on port {}", config.tunnel_port);
        let (shutdown_tx, mut shutdown_rx) = mpsc::channel::<()>(1);
        *self.shutdown_tx.lock().await = Some(shutdown_tx);
        *self.running.write().await = true;
        // Clone the shared state the accept loop hands to connection tasks.
        let allowed = self.allowed_edges.clone();
        let connected = self.connected_edges.clone();
        let event_tx = self.event_tx.clone();
        let target_host = config.target_host.unwrap_or_else(|| "127.0.0.1".to_string());
        tokio::spawn(async move {
            loop {
                tokio::select! {
                    result = listener.accept() => {
                        match result {
                            Ok((stream, addr)) => {
                                log::info!("Edge connection from {}", addr);
                                let acceptor = acceptor.clone();
                                let allowed = allowed.clone();
                                let connected = connected.clone();
                                let event_tx = event_tx.clone();
                                let target = target_host.clone();
                                tokio::spawn(async move {
                                    if let Err(e) = handle_edge_connection(
                                        stream, acceptor, allowed, connected, event_tx, target,
                                    ).await {
                                        log::error!("Edge connection error: {}", e);
                                    }
                                });
                            }
                            Err(e) => {
                                log::error!("Accept error: {}", e);
                            }
                        }
                    }
                    _ = shutdown_rx.recv() => {
                        log::info!("Hub shutting down");
                        break;
                    }
                }
            }
        });
        Ok(())
    }
    /// Stop the hub.
    ///
    /// Signals the accept loop (taking the sender so a second stop() is a
    /// no-op) and clears the connected-edge registry.
    pub async fn stop(&self) {
        if let Some(tx) = self.shutdown_tx.lock().await.take() {
            let _ = tx.send(()).await;
        }
        *self.running.write().await = false;
        // Clear connected edges
        self.connected_edges.lock().await.clear();
    }
}
/// Handle a single edge connection: authenticate, then enter frame loop.
///
/// Protocol: TLS accept, one "EDGE <id> <secret>" auth line, then
/// length-framed multiplexed streams. OPEN frames spawn an upstream TCP
/// connection per stream; DATA/CLOSE frames are routed by stream id.
/// Returns `Err` for handshake/auth failures; EOF and frame errors end the
/// loop and return `Ok` after cleanup.
async fn handle_edge_connection(
    stream: TcpStream,
    acceptor: TlsAcceptor,
    allowed: Arc<RwLock<HashMap<String, String>>>,
    connected: Arc<Mutex<HashMap<String, ConnectedEdgeInfo>>>,
    event_tx: mpsc::UnboundedSender<HubEvent>,
    target_host: String,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let tls_stream = acceptor.accept(stream).await?;
    let (read_half, write_half) = tokio::io::split(tls_stream);
    let mut buf_reader = BufReader::new(read_half);
    // Read auth line: "EDGE <edgeId> <secret>\n"
    let mut auth_line = String::new();
    buf_reader.read_line(&mut auth_line).await?;
    let auth_line = auth_line.trim();
    // splitn(3, ..) keeps any spaces inside the secret in parts[2].
    let parts: Vec<&str> = auth_line.splitn(3, ' ').collect();
    if parts.len() != 3 || parts[0] != "EDGE" {
        return Err("invalid auth line".into());
    }
    let edge_id = parts[1].to_string();
    let secret = parts[2];
    // Verify credentials (constant-time compare to avoid timing leaks).
    {
        let edges = allowed.read().await;
        match edges.get(&edge_id) {
            Some(expected) => {
                if !constant_time_eq(secret.as_bytes(), expected.as_bytes()) {
                    return Err(format!("invalid secret for edge {}", edge_id).into());
                }
            }
            None => {
                return Err(format!("unknown edge {}", edge_id).into());
            }
        }
    }
    log::info!("Edge {} authenticated", edge_id);
    let _ = event_tx.send(HubEvent::EdgeConnected {
        edge_id: edge_id.clone(),
    });
    // Track this edge: register its stream map so get_status can report it.
    let streams: Arc<Mutex<HashMap<u32, mpsc::Sender<Vec<u8>>>>> =
        Arc::new(Mutex::new(HashMap::new()));
    let now = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs();
    {
        let mut edges = connected.lock().await;
        edges.insert(
            edge_id.clone(),
            ConnectedEdgeInfo {
                connected_at: now,
                active_streams: streams.clone(),
            },
        );
    }
    // Shared writer for sending frames back to edge
    let write_half = Arc::new(Mutex::new(write_half));
    // Frame reading loop: demux edge frames by type and stream id.
    let mut frame_reader = FrameReader::new(buf_reader);
    loop {
        match frame_reader.next_frame().await {
            Ok(Some(frame)) => {
                match frame.frame_type {
                    FRAME_OPEN => {
                        // Payload is PROXY v1 header line
                        let proxy_header = String::from_utf8_lossy(&frame.payload).to_string();
                        // Parse destination port from PROXY header; fall
                        // back to 443 when it cannot be parsed.
                        let dest_port = parse_dest_port_from_proxy(&proxy_header).unwrap_or(443);
                        let stream_id = frame.stream_id;
                        let edge_id_clone = edge_id.clone();
                        let event_tx_clone = event_tx.clone();
                        let streams_clone = streams.clone();
                        let writer_clone = write_half.clone();
                        let target = target_host.clone();
                        let _ = event_tx.send(HubEvent::StreamOpened {
                            edge_id: edge_id.clone(),
                            stream_id,
                        });
                        // Create channel for data from edge to this stream
                        let (data_tx, mut data_rx) = mpsc::channel::<Vec<u8>>(256);
                        {
                            let mut s = streams.lock().await;
                            s.insert(stream_id, data_tx);
                        }
                        // Spawn task: connect to SmartProxy, send PROXY header, pipe data
                        tokio::spawn(async move {
                            let result = async {
                                let mut upstream =
                                    TcpStream::connect((target.as_str(), dest_port)).await?;
                                // Forward the PROXY header first so the
                                // upstream sees the real client address.
                                upstream.write_all(proxy_header.as_bytes()).await?;
                                let (mut up_read, mut up_write) =
                                    upstream.into_split();
                                // Forward data from edge (via channel) to SmartProxy
                                let writer_for_edge_data = tokio::spawn(async move {
                                    while let Some(data) = data_rx.recv().await {
                                        if up_write.write_all(&data).await.is_err() {
                                            break;
                                        }
                                    }
                                    let _ = up_write.shutdown().await;
                                });
                                // Forward data from SmartProxy back to edge
                                let mut buf = vec![0u8; 32768];
                                loop {
                                    match up_read.read(&mut buf).await {
                                        Ok(0) => break,
                                        Ok(n) => {
                                            let frame =
                                                encode_frame(stream_id, FRAME_DATA_BACK, &buf[..n]);
                                            let mut w = writer_clone.lock().await;
                                            if w.write_all(&frame).await.is_err() {
                                                break;
                                            }
                                        }
                                        Err(_) => break,
                                    }
                                }
                                // Send CLOSE_BACK to edge
                                let close_frame = encode_frame(stream_id, FRAME_CLOSE_BACK, &[]);
                                let mut w = writer_clone.lock().await;
                                let _ = w.write_all(&close_frame).await;
                                writer_for_edge_data.abort();
                                Ok::<(), Box<dyn std::error::Error + Send + Sync>>(())
                            }
                            .await;
                            if let Err(e) = result {
                                log::error!("Stream {} error: {}", stream_id, e);
                                // Send CLOSE_BACK on error
                                let close_frame = encode_frame(stream_id, FRAME_CLOSE_BACK, &[]);
                                let mut w = writer_clone.lock().await;
                                let _ = w.write_all(&close_frame).await;
                            }
                            // Clean up stream
                            {
                                let mut s = streams_clone.lock().await;
                                s.remove(&stream_id);
                            }
                            let _ = event_tx_clone.send(HubEvent::StreamClosed {
                                edge_id: edge_id_clone,
                                stream_id,
                            });
                        });
                    }
                    FRAME_DATA => {
                        // Route payload to the stream's upstream pump;
                        // unknown ids (already-closed streams) drop.
                        let s = streams.lock().await;
                        if let Some(tx) = s.get(&frame.stream_id) {
                            let _ = tx.send(frame.payload).await;
                        }
                    }
                    FRAME_CLOSE => {
                        // Dropping the sender ends the upstream write pump.
                        let mut s = streams.lock().await;
                        s.remove(&frame.stream_id);
                    }
                    _ => {
                        log::warn!("Unexpected frame type {} from edge", frame.frame_type);
                    }
                }
            }
            Ok(None) => {
                log::info!("Edge {} disconnected (EOF)", edge_id);
                break;
            }
            Err(e) => {
                log::error!("Edge {} frame error: {}", edge_id, e);
                break;
            }
        }
    }
    // Cleanup: deregister the edge and notify listeners.
    {
        let mut edges = connected.lock().await;
        edges.remove(&edge_id);
    }
    let _ = event_tx.send(HubEvent::EdgeDisconnected {
        edge_id: edge_id.clone(),
    });
    Ok(())
}
/// Parse destination port from PROXY v1 header.
fn parse_dest_port_from_proxy(header: &str) -> Option<u16> {
let parts: Vec<&str> = header.trim().split_whitespace().collect();
if parts.len() >= 6 {
parts[5].parse().ok()
} else {
None
}
}
/// Build TLS server config from PEM strings, or auto-generate self-signed.
///
/// When both `tls_cert_pem` and `tls_key_pem` are present they are used
/// verbatim; otherwise a throwaway self-signed certificate for
/// "remoteingress-hub" is generated (edges skip verification anyway — see
/// the edge's NoCertVerifier). The ALPN protocol is pinned to
/// "remoteingress".
fn build_tls_config(
    config: &HubConfig,
) -> Result<rustls::ServerConfig, Box<dyn std::error::Error + Send + Sync>> {
    let (cert_pem, key_pem) = match (&config.tls_cert_pem, &config.tls_key_pem) {
        (Some(cert), Some(key)) => (cert.clone(), key.clone()),
        _ => {
            // Generate self-signed certificate
            let cert = rcgen::generate_simple_self_signed(vec!["remoteingress-hub".to_string()])?;
            let cert_pem = cert.cert.pem();
            let key_pem = cert.key_pair.serialize_pem();
            (cert_pem, key_pem)
        }
    };
    let certs = rustls_pemfile_parse_certs(&cert_pem)?;
    let key = rustls_pemfile_parse_key(&key_pem)?;
    let mut config = rustls::ServerConfig::builder()
        .with_no_client_auth()
        .with_single_cert(certs, key)?;
    config.alpn_protocols = vec![b"remoteingress".to_vec()];
    Ok(config)
}
/// Parse every certificate in a PEM bundle into DER form.
///
/// Fails on the first malformed certificate entry.
fn rustls_pemfile_parse_certs(
    pem: &str,
) -> Result<Vec<rustls::pki_types::CertificateDer<'static>>, Box<dyn std::error::Error + Send + Sync>>
{
    let mut cursor = std::io::Cursor::new(pem.as_bytes());
    let mut parsed = Vec::new();
    for entry in rustls_pemfile::certs(&mut cursor) {
        parsed.push(entry?);
    }
    Ok(parsed)
}
/// Parse the first private key found in a PEM string.
///
/// Errors when the PEM contains no private-key block at all.
fn rustls_pemfile_parse_key(
    pem: &str,
) -> Result<rustls::pki_types::PrivateKeyDer<'static>, Box<dyn std::error::Error + Send + Sync>> {
    let mut cursor = std::io::Cursor::new(pem.as_bytes());
    match rustls_pemfile::private_key(&mut cursor)? {
        Some(key) => Ok(key),
        None => Err("no private key found in PEM".into()),
    }
}
/// Constant-time comparison of two byte slices.
///
/// Accumulates the XOR of every byte pair instead of short-circuiting on the
/// first mismatch, so the comparison time does not leak *where* two
/// equal-length inputs differ. (Differing lengths are rejected up front.)
fn constant_time_eq(a: &[u8], b: &[u8]) -> bool {
    a.len() == b.len()
        && a.iter().zip(b.iter()).fold(0u8, |acc, (x, y)| acc | (x ^ y)) == 0
}

View File

@@ -0,0 +1,5 @@
pub mod hub;
pub mod edge;
pub mod stun;
pub use remoteingress_protocol as protocol;

View File

@@ -0,0 +1,137 @@
use std::net::Ipv4Addr;
use tokio::net::UdpSocket;
use tokio::time::{timeout, Duration};
/// Default public STUN server used for address discovery.
const STUN_SERVER: &str = "stun.cloudflare.com:3478";
/// Give up on the whole UDP round-trip after this long.
const STUN_TIMEOUT: Duration = Duration::from_secs(3);
// STUN constants
/// STUN message type: Binding Request.
const STUN_BINDING_REQUEST: u16 = 0x0001;
/// Fixed magic cookie present in every RFC 5389 STUN message.
const STUN_MAGIC_COOKIE: u32 = 0x2112A442;
/// Attribute type: XOR-MAPPED-ADDRESS (RFC 5389, preferred).
const ATTR_XOR_MAPPED_ADDRESS: u16 = 0x0020;
/// Attribute type: MAPPED-ADDRESS (classic RFC 3489 fallback).
const ATTR_MAPPED_ADDRESS: u16 = 0x0001;
/// Discover our public IP via STUN Binding Request (RFC 5389).
/// Returns `None` on timeout or parse failure.
///
/// Convenience wrapper around [`discover_public_ip_from`] that queries the
/// built-in default server (`STUN_SERVER`).
pub async fn discover_public_ip() -> Option<String> {
    discover_public_ip_from(STUN_SERVER).await
}
/// Query `server` with a STUN Binding Request and return the mapped IPv4
/// address as a string. Returns `None` on any socket error, a timeout, or
/// an unparseable response.
pub async fn discover_public_ip_from(server: &str) -> Option<String> {
    // Bound the whole exchange with STUN_TIMEOUT; a dropped UDP packet
    // would otherwise hang recv() forever.
    timeout(STUN_TIMEOUT, async {
        let socket = UdpSocket::bind("0.0.0.0:0").await.ok()?;
        socket.connect(server).await.ok()?;

        // 20-byte Binding Request: [type:2][length:2][magic:4][txn_id:12],
        // with a zero attribute length.
        let txn_id: [u8; 12] = rand_bytes();
        let mut request = Vec::with_capacity(20);
        request.extend_from_slice(&STUN_BINDING_REQUEST.to_be_bytes());
        request.extend_from_slice(&0u16.to_be_bytes());
        request.extend_from_slice(&STUN_MAGIC_COOKIE.to_be_bytes());
        request.extend_from_slice(&txn_id);
        socket.send(&request).await.ok()?;

        let mut buf = [0u8; 512];
        let received = socket.recv(&mut buf).await.ok()?;
        if received < 20 {
            // Shorter than a STUN header: cannot be a valid response.
            return None;
        }
        parse_stun_response(&buf[..received], &txn_id)
    })
    .await
    .ok() // Err(Elapsed) -> None on timeout
    .flatten()
}
/// Parse a STUN Binding Response and extract the mapped IPv4 address.
///
/// Returns `None` unless the message is a Binding Response (0x0101) whose
/// transaction ID echoes `txn_id`, carrying an IPv4 XOR-MAPPED-ADDRESS or
/// (RFC 3489 fallback) MAPPED-ADDRESS attribute.
///
/// Fix: the transaction ID was previously accepted but never checked, so any
/// spoofed UDP datagram arriving on the socket would be trusted. RFC 5389
/// §7.3.3 requires discarding responses whose transaction ID does not match
/// the request's.
fn parse_stun_response(data: &[u8], txn_id: &[u8; 12]) -> Option<String> {
    if data.len() < 20 {
        return None;
    }
    // Verify it's a Binding Response (0x0101)
    let msg_type = u16::from_be_bytes([data[0], data[1]]);
    if msg_type != 0x0101 {
        return None;
    }
    // Discard responses that don't echo our transaction ID: they belong to
    // another (possibly spoofed) exchange.
    if &data[8..20] != txn_id {
        return None;
    }
    let msg_len = u16::from_be_bytes([data[2], data[3]]) as usize;
    // XOR key for the address; a compliant server echoes the magic cookie here.
    let magic = u32::from_be_bytes([data[4], data[5], data[6], data[7]]);
    // Parse attributes: TLVs of [type:2][length:2][value], 4-byte aligned.
    let attrs = &data[20..std::cmp::min(20 + msg_len, data.len())];
    let mut offset = 0;
    while offset + 4 <= attrs.len() {
        let attr_type = u16::from_be_bytes([attrs[offset], attrs[offset + 1]]);
        let attr_len = u16::from_be_bytes([attrs[offset + 2], attrs[offset + 3]]) as usize;
        offset += 4;
        if offset + attr_len > attrs.len() {
            // Truncated attribute; stop rather than read out of bounds.
            break;
        }
        let attr_data = &attrs[offset..offset + attr_len];
        match attr_type {
            ATTR_XOR_MAPPED_ADDRESS if attr_data.len() >= 8 => {
                let family = attr_data[1];
                if family == 0x01 {
                    // IPv4: both port and address are XOR'd with the magic cookie.
                    let port_xored = u16::from_be_bytes([attr_data[2], attr_data[3]]);
                    let _port = port_xored ^ (STUN_MAGIC_COOKIE >> 16) as u16;
                    let ip_xored = u32::from_be_bytes([
                        attr_data[4],
                        attr_data[5],
                        attr_data[6],
                        attr_data[7],
                    ]);
                    let ip = ip_xored ^ magic;
                    return Some(Ipv4Addr::from(ip).to_string());
                }
            }
            ATTR_MAPPED_ADDRESS if attr_data.len() >= 8 => {
                let family = attr_data[1];
                if family == 0x01 {
                    // IPv4 (non-XOR fallback)
                    let ip = u32::from_be_bytes([
                        attr_data[4],
                        attr_data[5],
                        attr_data[6],
                        attr_data[7],
                    ]);
                    return Some(Ipv4Addr::from(ip).to_string());
                }
            }
            _ => {}
        }
        // Pad to 4-byte boundary
        offset += (attr_len + 3) & !3;
    }
    None
}
/// Generate 12 unpredictable bytes for a STUN transaction ID.
///
/// Fix: the previous implementation used only the wall clock and process ID,
/// making transaction IDs trivially guessable by an off-path attacker (who
/// could then spoof STUN responses). Without adding a dependency, we mix in
/// OS-seeded entropy via `std`'s `RandomState`, whose hash keys are randomly
/// seeded per instance, plus the timestamp and pid as extra salt.
fn rand_bytes() -> [u8; 12] {
    use std::collections::hash_map::RandomState;
    use std::hash::{BuildHasher, Hasher};

    let nanos = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_nanos();

    // Each RandomState carries fresh, OS-seeded SipHash keys, so the
    // resulting digests are unpredictable across calls and processes.
    let mut h1 = RandomState::new().build_hasher();
    h1.write_u128(nanos);
    h1.write_u32(std::process::id());
    let word1 = h1.finish();

    let mut h2 = RandomState::new().build_hasher();
    h2.write_u64(word1);
    h2.write_u128(nanos);
    let word2 = h2.finish();

    let mut bytes = [0u8; 12];
    bytes[0..8].copy_from_slice(&word1.to_le_bytes());
    bytes[8..12].copy_from_slice(&word2.to_le_bytes()[0..4]);
    bytes
}

View File

@@ -0,0 +1,7 @@
[package]
name = "remoteingress-protocol"
version = "2.0.0"
edition = "2021"
[dependencies]
tokio = { version = "1", features = ["io-util"] }

View File

@@ -0,0 +1,172 @@
use tokio::io::{AsyncRead, AsyncReadExt};
// Frame type constants
/// Open a new multiplexed stream (payload carries a PROXY v1 header line).
pub const FRAME_OPEN: u8 = 0x01;
/// Data on an open stream, flowing in the opening direction.
pub const FRAME_DATA: u8 = 0x02;
/// Close a stream, sent by the side that opened it.
pub const FRAME_CLOSE: u8 = 0x03;
/// Data flowing in the reverse ("back") direction of a stream.
pub const FRAME_DATA_BACK: u8 = 0x04;
/// Close notification in the reverse ("back") direction.
pub const FRAME_CLOSE_BACK: u8 = 0x05;
// Frame header size: 4 (stream_id) + 1 (type) + 4 (length) = 9 bytes
pub const FRAME_HEADER_SIZE: usize = 9;
// Maximum payload size (16 MB)
/// Frames advertising a larger payload are rejected as a protocol violation.
pub const MAX_PAYLOAD_SIZE: u32 = 16 * 1024 * 1024;
/// A single multiplexed frame.
#[derive(Debug, Clone)]
pub struct Frame {
    /// Identifier of the logical stream this frame belongs to.
    pub stream_id: u32,
    /// One of the `FRAME_*` type constants.
    pub frame_type: u8,
    /// Frame body; empty for pure control frames such as close.
    pub payload: Vec<u8>,
}
/// Encode a frame into bytes: [stream_id:4][type:1][length:4][payload]
pub fn encode_frame(stream_id: u32, frame_type: u8, payload: &[u8]) -> Vec<u8> {
let len = payload.len() as u32;
let mut buf = Vec::with_capacity(FRAME_HEADER_SIZE + payload.len());
buf.extend_from_slice(&stream_id.to_be_bytes());
buf.push(frame_type);
buf.extend_from_slice(&len.to_be_bytes());
buf.extend_from_slice(payload);
buf
}
/// Build a PROXY protocol v1 header line.
/// Format: `PROXY TCP4 <client_ip> <edge_ip> <client_port> <dest_port>\r\n`
pub fn build_proxy_v1_header(
    client_ip: &str,
    edge_ip: &str,
    client_port: u16,
    dest_port: u16,
) -> String {
    format!("PROXY TCP4 {client_ip} {edge_ip} {client_port} {dest_port}\r\n")
}
/// Stateful async frame reader that yields `Frame` values from an `AsyncRead`.
pub struct FrameReader<R> {
    // Underlying byte stream the frames are decoded from.
    reader: R,
    // Reusable scratch buffer for the fixed 9-byte frame header, so no
    // per-frame header allocation is needed.
    header_buf: [u8; FRAME_HEADER_SIZE],
}
impl<R: AsyncRead + Unpin> FrameReader<R> {
pub fn new(reader: R) -> Self {
Self {
reader,
header_buf: [0u8; FRAME_HEADER_SIZE],
}
}
/// Read the next frame. Returns `None` on EOF, `Err` on protocol violation.
pub async fn next_frame(&mut self) -> Result<Option<Frame>, std::io::Error> {
// Read header
match self.reader.read_exact(&mut self.header_buf).await {
Ok(_) => {}
Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => return Ok(None),
Err(e) => return Err(e),
}
let stream_id = u32::from_be_bytes([
self.header_buf[0],
self.header_buf[1],
self.header_buf[2],
self.header_buf[3],
]);
let frame_type = self.header_buf[4];
let length = u32::from_be_bytes([
self.header_buf[5],
self.header_buf[6],
self.header_buf[7],
self.header_buf[8],
]);
if length > MAX_PAYLOAD_SIZE {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
format!("frame payload too large: {} bytes", length),
));
}
let mut payload = vec![0u8; length as usize];
if length > 0 {
self.reader.read_exact(&mut payload).await?;
}
Ok(Some(Frame {
stream_id,
frame_type,
payload,
}))
}
/// Consume the reader and return the inner stream.
pub fn into_inner(self) -> R {
self.reader
}
}
#[cfg(test)]
mod tests {
    use super::*;
    // Wire layout must be [stream_id:4][type:1][length:4][payload],
    // integers big-endian.
    #[test]
    fn test_encode_frame() {
        let data = b"hello";
        let encoded = encode_frame(42, FRAME_DATA, data);
        assert_eq!(encoded.len(), FRAME_HEADER_SIZE + data.len());
        // stream_id = 42 in BE
        assert_eq!(&encoded[0..4], &42u32.to_be_bytes());
        // frame type
        assert_eq!(encoded[4], FRAME_DATA);
        // length
        assert_eq!(&encoded[5..9], &5u32.to_be_bytes());
        // payload
        assert_eq!(&encoded[9..], b"hello");
    }
    // A payload-less control frame is exactly one header with length 0.
    #[test]
    fn test_encode_empty_frame() {
        let encoded = encode_frame(1, FRAME_CLOSE, &[]);
        assert_eq!(encoded.len(), FRAME_HEADER_SIZE);
        assert_eq!(&encoded[5..9], &0u32.to_be_bytes());
    }
    // PROXY v1 header must be a single CRLF-terminated line.
    #[test]
    fn test_proxy_v1_header() {
        let header = build_proxy_v1_header("1.2.3.4", "5.6.7.8", 12345, 443);
        assert_eq!(header, "PROXY TCP4 1.2.3.4 5.6.7.8 12345 443\r\n");
    }
    // Round-trip: three concatenated frames decode in order, then EOF.
    #[tokio::test]
    async fn test_frame_reader() {
        let frame1 = encode_frame(1, FRAME_OPEN, b"PROXY TCP4 1.2.3.4 5.6.7.8 1234 443\r\n");
        let frame2 = encode_frame(1, FRAME_DATA, b"GET / HTTP/1.1\r\n");
        let frame3 = encode_frame(1, FRAME_CLOSE, &[]);
        let mut data = Vec::new();
        data.extend_from_slice(&frame1);
        data.extend_from_slice(&frame2);
        data.extend_from_slice(&frame3);
        let cursor = std::io::Cursor::new(data);
        let mut reader = FrameReader::new(cursor);
        let f1 = reader.next_frame().await.unwrap().unwrap();
        assert_eq!(f1.stream_id, 1);
        assert_eq!(f1.frame_type, FRAME_OPEN);
        assert!(f1.payload.starts_with(b"PROXY"));
        let f2 = reader.next_frame().await.unwrap().unwrap();
        assert_eq!(f2.frame_type, FRAME_DATA);
        let f3 = reader.next_frame().await.unwrap().unwrap();
        assert_eq!(f3.frame_type, FRAME_CLOSE);
        assert!(f3.payload.is_empty());
        // EOF
        assert!(reader.next_frame().await.unwrap().is_none());
    }
}