Compare commits
22 Commits
- 8cefe9d66a
- d5e08c83fc
- 1247f48856
- e3bae4c399
- 0930f7e10c
- aa9e6dfd94
- 211d5cf835
- 2ce1899337
- 2e2ffc4485
- da26816af5
- d598bffec3
- a9dbccfaff
- 386859a2bd
- 2b58615d24
- 95adf56e52
- c96a493fb6
- b92587cc16
- b3dc0a6db2
- de3b8d3f58
- 75089ec975
- b106db932b
- fb0c0dcc31
changelog.md
@@ -1,5 +1,77 @@
# Changelog

## 2026-03-16 - 25.11.6 - fix(rustproxy-http,rustproxy-passthrough)

improve upstream connection cleanup and graceful tunnel shutdown

- Evict pooled HTTP/2 connections when their driver exits and shorten the maximum pooled H2 age to reduce reuse of stale upstream connections.
- Strip hop-by-hop headers from backend responses before forwarding to HTTP/2 clients to avoid invalid H2 response handling.
- Replace immediate task aborts in WebSocket and TCP tunnel watchdogs with cancellation-driven graceful shutdown plus timed fallback aborts.
- Use non-blocking semaphore acquisition in the TCP listener so connection limits do not stall the accept loop for the entire port (see the sketch below).
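The last bullet is easiest to see in isolation. Below is a minimal, self-contained sketch of the non-blocking accept-loop pattern (assuming tokio; the bind address and the cap of 1024 are illustrative, not the proxy's actual configuration): `try_acquire_owned()` returns immediately, so hitting the limit rejects only the new connection instead of stalling `accept()` for every client on the port.

```rust
use std::sync::Arc;
use tokio::net::TcpListener;
use tokio::sync::{Semaphore, TryAcquireError};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let limit = Arc::new(Semaphore::new(1024)); // illustrative global connection cap
    let listener = TcpListener::bind("127.0.0.1:8080").await?;
    loop {
        let (stream, peer) = listener.accept().await?;
        // Non-blocking: never awaits, so the accept loop keeps draining the backlog.
        let permit = match Arc::clone(&limit).try_acquire_owned() {
            Ok(p) => p,
            Err(TryAcquireError::NoPermits) => {
                drop(stream); // over the limit: reject this connection only
                continue;
            }
            Err(TryAcquireError::Closed) => return Ok(()),
        };
        tokio::spawn(async move {
            let _permit = permit; // held for the connection's lifetime
            let _ = (stream, peer); // ... handle the connection here ...
        });
    }
}
```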
## 2026-03-16 - 25.11.5 - fix(repo)

no changes to commit

## 2026-03-15 - 25.11.4 - fix(rustproxy-http)

report streamed HTTP and WebSocket bytes per chunk for real-time throughput metrics

- Update CountingBody to record bytes immediately on each data frame instead of aggregating until completion or drop
- Record WebSocket tunnel traffic inside both copy loops and remove the final aggregate byte report to keep throughput metrics current (see the sketch below)
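To see why per-chunk reporting matters: the throughput tracker samples a counter at 1 Hz, so bytes reported only on completion would read as 0 B/s during a long transfer and one spike at the end. A minimal sketch, assuming tokio (a plain `AtomicU64` stands in for the real `MetricsCollector`):

```rust
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Duration;

#[tokio::main]
async fn main() {
    let counter = Arc::new(AtomicU64::new(0));

    // Sampler task: reads the counter once per second, like the throughput tracker.
    let c = Arc::clone(&counter);
    tokio::spawn(async move {
        let mut prev = 0u64;
        loop {
            tokio::time::sleep(Duration::from_secs(1)).await;
            let now = c.load(Ordering::Relaxed);
            println!("throughput: {} B/s", now - prev);
            prev = now;
        }
    });

    // Producer: report each chunk immediately, as CountingBody now does per data frame.
    // Deferring these adds until the end would make every sample read zero.
    for _ in 0..10 {
        tokio::time::sleep(Duration::from_millis(200)).await;
        counter.fetch_add(64 * 1024, Ordering::Relaxed); // one 64 KiB chunk
    }
}
```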
## 2026-03-15 - 25.11.3 - fix(repo)

no changes to commit

## 2026-03-15 - 25.11.2 - fix(rustproxy-http)

avoid reusing HTTP/1 senders during streaming responses and relax HTTP/2 keep-alive timeouts

- Stop returning HTTP/1 senders to the connection pool before upstream response bodies finish streaming to prevent unsafe reuse on active connections.
- Increase the HTTP/2 keep-alive timeout from 5 seconds to 30 seconds in proxy connection builders to better support longer-lived backend streams.
- Improves reliability for large streaming payloads and backend fallback request handling.

## 2026-03-15 - 25.11.1 - fix(rustproxy-http)

keep connection idle tracking alive during streaming and tune HTTP/2 connection lifetimes

- Propagate connection activity tracking through HTTP/1, HTTP/2, and WebSocket forwarding so active request and response body streams do not trigger the idle watchdog.
- Update CountingBody to refresh connection activity timestamps while data frames are polled during uploads and downloads.
- Increase the pooled HTTP/2 max age and set explicit HTTP/2 connection window sizes to improve long-lived streaming behavior.

## 2026-03-15 - 25.11.0 - feat(rustproxy-http)

add HTTP/2 Extended CONNECT WebSocket proxy support

- Enable HTTP/2 CONNECT protocol support on the Hyper auto connection builder
- Detect WebSocket requests for both HTTP/1 Upgrade and HTTP/2 Extended CONNECT flows (detection sketch below)
- Translate HTTP/2 WebSocket requests to an HTTP/1.1 backend handshake and return RFC-compliant client responses
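The detection logic for the second bullet, extracted from the diff below into a standalone helper (hyper 1.x): an H2 WebSocket arrives as an Extended CONNECT request (RFC 8441) carrying `:protocol = websocket`, while an H1 WebSocket uses the classic `Upgrade` header.

```rust
use hyper::{ext::Protocol, Method, Request};

/// Returns (is_h1_websocket, is_h2_websocket) for an incoming request.
fn detect_websocket<B>(req: &Request<B>) -> (bool, bool) {
    // H1: `Upgrade: websocket` header
    let h1 = req.headers()
        .get("upgrade")
        .and_then(|v| v.to_str().ok())
        .map(|v| v.eq_ignore_ascii_case("websocket"))
        .unwrap_or(false);
    // H2: Extended CONNECT with the `:protocol` pseudo-header (RFC 8441)
    let h2 = req.method() == Method::CONNECT
        && req.extensions()
            .get::<Protocol>()
            .map(|p| p.as_str().eq_ignore_ascii_case("websocket"))
            .unwrap_or(false);
    (h1, h2)
}
```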
## 2026-03-12 - 25.10.7 - fix(rustproxy-http)

remove Host header from HTTP/2 upstream requests while preserving it for HTTP/1 retries

- strips the Host header before sending HTTP/2 upstream requests so :authority from the URI is used instead
- avoids 400 responses from nginx caused by sending both Host and :authority headers
- keeps a cloned header set for bodyless request retries so HTTP/1 fallback still retains the Host header

## 2026-03-12 - 25.10.6 - fix(rustproxy-http)

use the requested domain as HTTP/2 authority instead of the backend host and port

- build HTTP/2 absolute URIs from the client-facing domain so the :authority pseudo-header matches the Host header (see the sketch below)
- remove the backend port from generated HTTP/2 request URIs and fall back to the upstream host only when no domain is available
- apply the authority handling consistently across pooled, inline, and generic upstream request paths
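The authority selection reduces to a small pure function, shown here as a sketch (the `"-"` sentinel for "no domain available" follows the diff below):

```rust
/// Build the absolute URI for an H2 upstream request so hyper derives
/// :scheme and :authority from it. The client-facing domain is preferred
/// over the backend host so :authority matches what the client sent as Host.
fn h2_upstream_uri(use_tls: bool, domain: &str, backend_host: &str, path: &str) -> String {
    let scheme = if use_tls { "https" } else { "http" };
    let authority = if domain != "-" { domain } else { backend_host };
    format!("{}://{}{}", scheme, authority, path)
}

// e.g. h2_upstream_uri(true, "app.example.com", "10.0.0.5", "/api")
//      => "https://app.example.com/api"
```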
## 2026-03-12 - 25.10.5 - fix(rustproxy-http)

configure HTTP/2 client builders with a Tokio timer for keep-alive handling

- Adds TokioTimer to all HTTP/2 client builder instances in proxy_service.
- Ensures the configured HTTP/2 keep-alive interval and timeout settings have the required timer runtime support.

## 2026-03-12 - 25.10.4 - fix(rustproxy-http)

stabilize upstream HTTP/2 forwarding and fallback behavior

- Remove hop-by-hop headers before forwarding requests to HTTP/2 backends to comply with RFC 9113.
- Use ALPN-enabled TLS configuration whenever HTTP/2 is possible, including explicit H2 connections and retries.
- Add HTTP/2 handshake timeouts, tuned connection settings, and fallback to HTTP/1 when H2 negotiation times out or fails (outcome handling sketched below).
- Register pooled HTTP/2 senders only after a successful first request to avoid reusing broken connections.
- Build absolute URIs for HTTP/2 upstream requests so pseudo-headers such as scheme and authority are derived correctly.
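Wrapping the handshake in a timeout yields three distinct outcomes, each handled differently. A self-contained sketch of that three-way match, assuming tokio (a stub future stands in for hyper's `http2::Builder::handshake`):

```rust
use std::time::Duration;

#[tokio::main]
async fn main() {
    let connect_timeout = Duration::from_secs(30);
    // Stub: stands in for the real H2 handshake future.
    let fake_handshake = async { Ok::<_, std::io::Error>("sender+conn") };

    match tokio::time::timeout(connect_timeout, fake_handshake).await {
        // Outer Ok: the future completed before the deadline.
        Ok(Ok(h)) => println!("H2 ready: {h}"),
        // Handshake itself failed: fall back to H1 with the original body.
        Ok(Err(e)) => println!("handshake failed, fall back to H1: {e}"),
        // Deadline elapsed: cache the backend as H1 and fall back.
        Err(_) => println!("handshake timed out, cache protocol=H1 and fall back"),
    }
}
```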
## 2026-03-12 - 25.10.3 - fix(rustproxy-http)

include request domain in backend proxy error and protocol detection logs

package.json
```diff
@@ -1,6 +1,6 @@
 {
   "name": "@push.rocks/smartproxy",
-  "version": "25.10.3",
+  "version": "25.11.6",
   "private": false,
   "description": "A powerful proxy package with unified route-based configuration for high traffic management. Features include SSL/TLS support, flexible routing patterns, WebSocket handling, advanced security options, and automatic ACME certificate management.",
   "main": "dist_ts/index.js",
```
```diff
@@ -20,6 +20,7 @@ const IDLE_TIMEOUT: Duration = Duration::from_secs(90);
 const EVICTION_INTERVAL: Duration = Duration::from_secs(30);
+/// Maximum age for pooled HTTP/2 connections before proactive eviction.
+/// Prevents staleness from backends that close idle connections (e.g. nginx GOAWAY).
+/// 120s is well within typical server GOAWAY windows (nginx: ~60s idle, envoy: ~60s).
+const MAX_H2_AGE: Duration = Duration::from_secs(120);

 /// Identifies a unique backend endpoint.
```
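A sketch of the age check an eviction sweep can apply with this constant; `PooledH2` and its field are illustrative, not the crate's actual pool types:

```rust
use std::time::{Duration, Instant};

const MAX_H2_AGE: Duration = Duration::from_secs(120);

/// Hypothetical pool entry: only the creation timestamp matters here.
struct PooledH2 {
    created_at: Instant,
}

impl PooledH2 {
    /// Evict connections older than MAX_H2_AGE so a sender is not reused
    /// after the backend has already sent GOAWAY and closed the connection.
    fn is_stale(&self) -> bool {
        self.created_at.elapsed() >= MAX_H2_AGE
    }
}
```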
```diff
@@ -11,20 +11,22 @@ use rustproxy_metrics::MetricsCollector;

 /// Wraps any `http_body::Body` and counts data bytes passing through.
 ///
-/// When the body is fully consumed or dropped, accumulated byte counts
-/// are reported to the `MetricsCollector`.
+/// Each chunk is reported to the `MetricsCollector` immediately so that
+/// the throughput tracker (sampled at 1 Hz) reflects real-time data flow.
 ///
 /// The inner body is pinned on the heap to support `!Unpin` types like `hyper::body::Incoming`.
 pub struct CountingBody<B> {
     inner: Pin<Box<B>>,
-    counted_bytes: AtomicU64,
     metrics: Arc<MetricsCollector>,
     route_id: Option<String>,
     source_ip: Option<String>,
     /// Whether we count bytes as "in" (request body) or "out" (response body).
     direction: Direction,
-    /// Whether we've already reported the bytes (to avoid double-reporting on drop).
-    reported: bool,
+    /// Optional connection-level activity tracker. When set, poll_frame updates this
+    /// to keep the idle watchdog alive during active body streaming (uploads/downloads).
+    connection_activity: Option<Arc<AtomicU64>>,
+    /// Start instant for computing elapsed ms for connection_activity.
+    activity_start: Option<std::time::Instant>,
 }

 /// Which direction the bytes flow.
@@ -47,42 +49,36 @@ impl<B> CountingBody<B> {
     ) -> Self {
         Self {
             inner: Box::pin(inner),
-            counted_bytes: AtomicU64::new(0),
             metrics,
             route_id,
             source_ip,
             direction,
-            reported: false,
+            connection_activity: None,
+            activity_start: None,
         }
     }

-    /// Report accumulated bytes to the metrics collector.
-    fn report(&mut self) {
-        if self.reported {
-            return;
-        }
-        self.reported = true;
-
-        let bytes = self.counted_bytes.load(Ordering::Relaxed);
-        if bytes == 0 {
-            return;
-        }
+    /// Set the connection-level activity tracker. When set, each data frame
+    /// updates this timestamp to prevent the idle watchdog from killing the
+    /// connection during active body streaming.
+    pub fn with_connection_activity(mut self, activity: Arc<AtomicU64>, start: std::time::Instant) -> Self {
+        self.connection_activity = Some(activity);
+        self.activity_start = Some(start);
+        self
+    }

+    /// Report a chunk of bytes immediately to the metrics collector.
+    #[inline]
+    fn report_chunk(&self, len: u64) {
         let route_id = self.route_id.as_deref();
         let source_ip = self.source_ip.as_deref();
         match self.direction {
-            Direction::In => self.metrics.record_bytes(bytes, 0, route_id, source_ip),
-            Direction::Out => self.metrics.record_bytes(0, bytes, route_id, source_ip),
+            Direction::In => self.metrics.record_bytes(len, 0, route_id, source_ip),
+            Direction::Out => self.metrics.record_bytes(0, len, route_id, source_ip),
         }
     }
 }

-impl<B> Drop for CountingBody<B> {
-    fn drop(&mut self) {
-        self.report();
-    }
-}
-
 // CountingBody is Unpin because inner is Pin<Box<B>> (always Unpin).
 impl<B> Unpin for CountingBody<B> {}

@@ -102,16 +98,18 @@ where
         match this.inner.as_mut().poll_frame(cx) {
             Poll::Ready(Some(Ok(frame))) => {
                 if let Some(data) = frame.data_ref() {
-                    this.counted_bytes.fetch_add(data.len() as u64, Ordering::Relaxed);
+                    let len = data.len() as u64;
+                    // Report bytes immediately so the 1 Hz throughput sampler sees them
+                    this.report_chunk(len);
+                    // Keep the connection-level idle watchdog alive during body streaming
+                    if let (Some(activity), Some(start)) = (&this.connection_activity, &this.activity_start) {
+                        activity.store(start.elapsed().as_millis() as u64, Ordering::Relaxed);
+                    }
                 }
                 Poll::Ready(Some(Ok(frame)))
             }
             Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))),
-            Poll::Ready(None) => {
-                // Body is fully consumed — report now
-                this.report();
-                Poll::Ready(None)
-            }
+            Poll::Ready(None) => Poll::Ready(None),
             Poll::Pending => Poll::Pending,
         }
     }
```
```diff
@@ -33,6 +33,14 @@ use crate::request_filter::RequestFilter;
 use crate::response_filter::ResponseFilter;
 use crate::upstream_selector::UpstreamSelector;

+/// Per-connection context for keeping the idle watchdog alive during body streaming.
+/// Passed through the forwarding chain so CountingBody can update the timestamp.
+#[derive(Clone)]
+struct ConnActivity {
+    last_activity: Arc<AtomicU64>,
+    start: std::time::Instant,
+}
+
 /// Default upstream connect timeout (30 seconds).
 const DEFAULT_CONNECT_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(30);

@@ -294,8 +302,9 @@ impl HttpProxyService {
             let cn = cancel_inner.clone();
             let la = Arc::clone(&la_inner);
             let st = start;
+            let ca = ConnActivity { last_activity: Arc::clone(&la_inner), start };
             async move {
-                let result = svc.handle_request(req, peer, port, cn).await;
+                let result = svc.handle_request(req, peer, port, cn, ca).await;
                 // Mark request end — update activity timestamp before guard drops
                 la.store(st.elapsed().as_millis() as u64, Ordering::Relaxed);
                 drop(req_guard); // Explicitly drop to decrement active_requests
@@ -304,8 +313,13 @@ impl HttpProxyService {
         });

         // Auto-detect h1 vs h2 based on ALPN / connection preface.
-        // serve_connection_with_upgrades supports h1 Upgrade (WebSocket) and h2 CONNECT.
-        let builder = hyper_util::server::conn::auto::Builder::new(hyper_util::rt::TokioExecutor::new());
+        // serve_connection_with_upgrades supports h1 Upgrade (WebSocket) and h2 Extended CONNECT (RFC 8441).
+        let mut builder = hyper_util::server::conn::auto::Builder::new(hyper_util::rt::TokioExecutor::new());
+        // Configure H2 server settings: Extended CONNECT for WebSocket + flow control tuning
+        builder.http2()
+            .enable_connect_protocol()
+            .initial_stream_window_size(2 * 1024 * 1024) // 2MB per stream (vs default 64KB)
+            .initial_connection_window_size(8 * 1024 * 1024); // 8MB per client connection
         let conn = builder.serve_connection_with_upgrades(io, service);
         // Pin on the heap — auto::UpgradeableConnection is !Unpin
         let mut conn = Box::pin(conn);
@@ -365,6 +379,7 @@ impl HttpProxyService {
         peer_addr: std::net::SocketAddr,
         port: u16,
         cancel: CancellationToken,
+        conn_activity: ConnActivity,
     ) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
         let host = req.headers()
             .get("host")
@@ -482,16 +497,23 @@ impl HttpProxyService {
         let domain_str = host.as_deref().unwrap_or("-");
         self.upstream_selector.connection_started(&upstream_key);

-        // Check for WebSocket upgrade
-        let is_websocket = req.headers()
+        // Check for WebSocket upgrade: H1 (Upgrade header) or H2 Extended CONNECT (RFC 8441)
+        let is_h1_websocket = req.headers()
             .get("upgrade")
             .and_then(|v| v.to_str().ok())
             .map(|v| v.eq_ignore_ascii_case("websocket"))
             .unwrap_or(false);

-        if is_websocket {
+        let is_h2_websocket = req.method() == hyper::Method::CONNECT
+            && req.extensions()
+                .get::<hyper::ext::Protocol>()
+                .map(|p| p.as_str().eq_ignore_ascii_case("websocket"))
+                .unwrap_or(false);
+
+        if is_h1_websocket || is_h2_websocket {
             let result = self.handle_websocket_upgrade(
-                req, peer_addr, &upstream, route_match.route, route_id, &upstream_key, cancel, &ip_str,
+                req, peer_addr, &upstream, route_match.route, route_id, &upstream_key, cancel, &ip_str, is_h2_websocket,
+                if is_h2_websocket { Some(conn_activity.clone()) } else { None },
             ).await;
             // Note: for WebSocket, connection_ended is called inside
             // the spawned tunnel task when the connection closes.
@@ -539,6 +561,15 @@ impl HttpProxyService {
             }
         }

+        // Remove hop-by-hop headers (RFC 9113 §8.2.2 forbids connection-specific headers in H2)
+        upstream_headers.remove("connection");
+        upstream_headers.remove("keep-alive");
+        upstream_headers.remove("proxy-connection");
+        upstream_headers.remove("transfer-encoding");
+        upstream_headers.remove("te");
+        upstream_headers.remove("trailer");
+        upstream_headers.remove("upgrade");
+
         // Add standard reverse-proxy headers (X-Forwarded-*)
         {
             let original_host = host.as_deref().unwrap_or("");
@@ -623,7 +654,7 @@ impl HttpProxyService {
             self.metrics.set_backend_protocol(&upstream_key, "h2");
             let result = self.forward_h2_pooled(
                 sender, parts, body, upstream_headers, &upstream_path,
-                route_match.route, route_id, &ip_str, &pool_key, domain_str,
+                route_match.route, route_id, &ip_str, &pool_key, domain_str, &conn_activity,
             ).await;
             self.upstream_selector.connection_ended(&upstream_key);
             return result;
@@ -634,8 +665,8 @@ impl HttpProxyService {
         // --- Fresh connection path ---
         self.metrics.backend_pool_miss(&upstream_key);

-        // Choose TLS config: use ALPN config for auto-detect probe, plain config otherwise
-        let tls_config = if needs_alpn_probe {
+        // Choose TLS config: use ALPN config when H2 is possible (auto-detect probe OR explicit H2)
+        let tls_config = if needs_alpn_probe || use_h2 {
             &self.backend_tls_config_alpn
         } else {
             &self.backend_tls_config
@@ -762,19 +793,19 @@ impl HttpProxyService {
             self.forward_h2_with_fallback(
                 io, parts, body, upstream_headers, &upstream_path,
                 &upstream, route_match.route, route_id, &ip_str, &final_pool_key,
-                host.clone(), domain_str,
+                host.clone(), domain_str, &conn_activity,
             ).await
         } else {
             // Explicit H2 mode: hard-fail on handshake error (preserved behavior)
             self.forward_h2(
                 io, parts, body, upstream_headers, &upstream_path,
-                &upstream, route_match.route, route_id, &ip_str, &final_pool_key, domain_str,
+                &upstream, route_match.route, route_id, &ip_str, &final_pool_key, domain_str, &conn_activity,
             ).await
         }
     } else {
         self.forward_h1(
             io, parts, body, upstream_headers, &upstream_path,
-            &upstream, route_match.route, route_id, &ip_str, &final_pool_key, domain_str,
+            &upstream, route_match.route, route_id, &ip_str, &final_pool_key, domain_str, &conn_activity,
         ).await
     };
     self.upstream_selector.connection_ended(&upstream_key);
@@ -797,6 +828,7 @@ impl HttpProxyService {
         source_ip: &str,
         pool_key: &crate::connection_pool::PoolKey,
         domain: &str,
+        conn_activity: &ConnActivity,
     ) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
         let backend_key = format!("{}:{}", pool_key.host, pool_key.port);

@@ -805,7 +837,7 @@ impl HttpProxyService {
             self.metrics.backend_pool_hit(&backend_key);
             return self.forward_h1_with_sender(
                 pooled_sender, parts, body, upstream_headers, upstream_path,
-                route, route_id, source_ip, pool_key, domain,
+                route, route_id, source_ip, pool_key, domain, conn_activity,
             ).await;
         }

@@ -828,7 +860,7 @@ impl HttpProxyService {
             }
         });

-        self.forward_h1_with_sender(sender, parts, body, upstream_headers, upstream_path, route, route_id, source_ip, pool_key, domain).await
+        self.forward_h1_with_sender(sender, parts, body, upstream_headers, upstream_path, route, route_id, source_ip, pool_key, domain, conn_activity).await
     }

     /// Common H1 forwarding logic used by both fresh and pooled paths.
@@ -844,6 +876,7 @@ impl HttpProxyService {
         source_ip: &str,
         pool_key: &crate::connection_pool::PoolKey,
         domain: &str,
+        conn_activity: &ConnActivity,
     ) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
         // Always use HTTP/1.1 for h1 backend connections (h2 incoming requests have version HTTP/2.0)
         let mut upstream_req = Request::builder()
@@ -862,7 +895,7 @@ impl HttpProxyService {
             route_id.map(|s| s.to_string()),
             Some(source_ip.to_string()),
             Direction::In,
-        );
+        ).with_connection_activity(Arc::clone(&conn_activity.last_activity), conn_activity.start);
         let boxed_body: BoxBody<Bytes, hyper::Error> = BoxBody::new(counting_req_body);

         let upstream_req = upstream_req.body(boxed_body).unwrap();
```
```diff
@@ -877,10 +910,17 @@ impl HttpProxyService {
             }
         };

-        // Return sender to pool (body streams lazily, sender is reusable once response head is received)
-        self.connection_pool.checkin_h1(pool_key.clone(), sender);
+        // Note: we do NOT return the sender to the pool here because the response body
+        // hasn't been fully streamed yet. Pooling a sender while its response body is still
+        // in-flight risks another request being dispatched on the same connection if is_ready()
+        // momentarily returns true between chunks. The sender is dropped after this scope,
+        // and the backend connection remains alive via the spawned conn driver task until
+        // the response body finishes streaming.
+        // For small/empty responses, the sender could theoretically be reused, but the safety
+        // of large streaming responses (e.g. 352MB Docker layers) takes priority.
+        drop(sender);

-        self.build_streaming_response(upstream_response, route, route_id, source_ip).await
+        self.build_streaming_response(upstream_response, route, route_id, source_ip, conn_activity).await
     }

     /// Forward request to backend via HTTP/2 with body streaming (fresh connection).
@@ -898,32 +938,54 @@ impl HttpProxyService {
         source_ip: &str,
         pool_key: &crate::connection_pool::PoolKey,
         domain: &str,
+        conn_activity: &ConnActivity,
     ) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
         let backend_key = format!("{}:{}", pool_key.host, pool_key.port);
         let exec = hyper_util::rt::TokioExecutor::new();
         // Explicitly type the handshake with BoxBody for uniform pool type
+        let mut h2_builder = hyper::client::conn::http2::Builder::new(exec);
+        h2_builder
+            .timer(hyper_util::rt::TokioTimer::new())
+            .keep_alive_interval(std::time::Duration::from_secs(10))
+            .keep_alive_timeout(std::time::Duration::from_secs(30))
+            .initial_stream_window_size(2 * 1024 * 1024)
+            .initial_connection_window_size(16 * 1024 * 1024);
         let (sender, conn): (
             hyper::client::conn::http2::SendRequest<BoxBody<Bytes, hyper::Error>>,
             hyper::client::conn::http2::Connection<TokioIo<BackendStream>, BoxBody<Bytes, hyper::Error>, hyper_util::rt::TokioExecutor>,
-        ) = match hyper::client::conn::http2::handshake(exec, io).await {
-            Ok(h) => h,
-            Err(e) => {
+        ) = match tokio::time::timeout(self.connect_timeout, h2_builder.handshake(io)).await {
+            Ok(Ok(h)) => h,
+            Ok(Err(e)) => {
                 error!(backend = %backend_key, domain = %domain, error = %e, "Backend H2 handshake failed");
                 self.metrics.backend_handshake_error(&backend_key);
                 return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend H2 handshake failed"));
             }
+            Err(_) => {
+                error!(backend = %backend_key, domain = %domain, "Backend H2 handshake timeout");
+                self.metrics.backend_handshake_error(&backend_key);
+                return Ok(error_response(StatusCode::GATEWAY_TIMEOUT, "Backend H2 handshake timeout"));
+            }
         };

-        tokio::spawn(async move {
-            if let Err(e) = conn.await {
-                debug!("HTTP/2 upstream connection error: {}", e);
-            }
-        });
+        // Spawn the H2 connection driver; proactively evict from pool on exit
+        // so the next request gets a fresh connection instead of a dead sender.
+        {
+            let pool = Arc::clone(&self.connection_pool);
+            let key = pool_key.clone();
+            tokio::spawn(async move {
+                if let Err(e) = conn.await {
+                    debug!("HTTP/2 upstream connection error: {}", e);
+                }
+                pool.remove_h2(&key);
+            });
+        }

-        // Register for multiplexed reuse
-        self.connection_pool.register_h2(pool_key.clone(), sender.clone());
-
-        self.forward_h2_with_sender(sender, parts, body, upstream_headers, upstream_path, route, route_id, source_ip, Some(pool_key), domain).await
+        // Clone sender for potential pool registration; register only after first request succeeds
+        let sender_for_pool = sender.clone();
+        let result = self.forward_h2_with_sender(sender, parts, body, upstream_headers, upstream_path, route, route_id, source_ip, Some(pool_key), domain, conn_activity).await;
+        if matches!(&result, Ok(ref resp) if resp.status() != StatusCode::BAD_GATEWAY) {
+            self.connection_pool.register_h2(pool_key.clone(), sender_for_pool);
+        }
+        result
     }

     /// Forward request using an existing (pooled) HTTP/2 sender.
@@ -941,6 +1003,7 @@ impl HttpProxyService {
         source_ip: &str,
         pool_key: &crate::connection_pool::PoolKey,
         domain: &str,
+        conn_activity: &ConnActivity,
     ) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
         // Save retry state for bodyless requests (cheap: Method is an enum, HeaderMap clones Arc-backed Bytes)
         let retry_state = if body.is_end_stream() {
@@ -951,7 +1014,7 @@ impl HttpProxyService {

         let result = self.forward_h2_with_sender(
             sender, parts, body, upstream_headers, upstream_path,
-            route, route_id, source_ip, Some(pool_key), domain,
+            route, route_id, source_ip, Some(pool_key), domain, conn_activity,
         ).await;

         // If the request failed (502) and we can retry with an empty body, do so
@@ -962,7 +1025,7 @@ impl HttpProxyService {
                 "Stale pooled H2 sender, retrying with fresh connection");
             return self.retry_h2_with_fresh_connection(
                 method, headers, upstream_path,
-                pool_key, route, route_id, source_ip, domain,
+                pool_key, route, route_id, source_ip, domain, conn_activity,
             ).await;
         }
     }
@@ -981,6 +1044,7 @@ impl HttpProxyService {
         route_id: Option<&str>,
         source_ip: &str,
         domain: &str,
+        conn_activity: &ConnActivity,
     ) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
         let backend_key = format!("{}:{}", pool_key.host, pool_key.port);

@@ -989,7 +1053,7 @@ impl HttpProxyService {
         let backend = if pool_key.use_tls {
             match tokio::time::timeout(
                 self.connect_timeout,
-                connect_tls_backend(&self.backend_tls_config, &pool_key.host, pool_key.port),
+                connect_tls_backend(&self.backend_tls_config_alpn, &pool_key.host, pool_key.port),
             ).await {
                 Ok(Ok(tls)) => BackendStream::Tls(tls),
                 Ok(Err(e)) => {
@@ -1028,31 +1092,55 @@ impl HttpProxyService {

         let io = TokioIo::new(backend);
         let exec = hyper_util::rt::TokioExecutor::new();
+        let mut h2_builder = hyper::client::conn::http2::Builder::new(exec);
+        h2_builder
+            .timer(hyper_util::rt::TokioTimer::new())
+            .keep_alive_interval(std::time::Duration::from_secs(10))
+            .keep_alive_timeout(std::time::Duration::from_secs(30))
+            .initial_stream_window_size(2 * 1024 * 1024)
+            .initial_connection_window_size(16 * 1024 * 1024);
         let (mut sender, conn): (
             hyper::client::conn::http2::SendRequest<BoxBody<Bytes, hyper::Error>>,
             hyper::client::conn::http2::Connection<TokioIo<BackendStream>, BoxBody<Bytes, hyper::Error>, hyper_util::rt::TokioExecutor>,
-        ) = match hyper::client::conn::http2::handshake(exec, io).await {
-            Ok(h) => h,
-            Err(e) => {
+        ) = match tokio::time::timeout(self.connect_timeout, h2_builder.handshake(io)).await {
+            Ok(Ok(h)) => h,
+            Ok(Err(e)) => {
                 error!(backend = %backend_key, domain = %domain, error = %e, "H2 retry: handshake failed");
                 self.metrics.backend_handshake_error(&backend_key);
                 self.metrics.backend_connection_closed(&backend_key);
                 return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend H2 retry handshake failed"));
             }
+            Err(_) => {
+                error!(backend = %backend_key, domain = %domain, "H2 retry: handshake timeout");
+                self.metrics.backend_handshake_error(&backend_key);
+                self.metrics.backend_connection_closed(&backend_key);
+                return Ok(error_response(StatusCode::GATEWAY_TIMEOUT, "Backend H2 retry handshake timeout"));
+            }
         };

-        tokio::spawn(async move {
-            if let Err(e) = conn.await {
-                debug!("H2 retry: upstream connection error: {}", e);
-            }
-        });
+        // Spawn the H2 connection driver; proactively evict from pool on exit.
+        {
+            let pool = Arc::clone(&self.connection_pool);
+            let key = pool_key.clone();
+            tokio::spawn(async move {
+                if let Err(e) = conn.await {
+                    debug!("H2 retry: upstream connection error: {}", e);
+                }
+                pool.remove_h2(&key);
+            });
+        }

-        // Register fresh sender in pool for future requests
-        self.connection_pool.register_h2(pool_key.clone(), sender.clone());
-
-        // Build request with empty body
+        // Build request with empty body using absolute URI for H2 pseudo-headers
+        let scheme = if pool_key.use_tls { "https" } else { "http" };
+        let authority = if domain != "-" { domain } else { pool_key.host.as_str() };
+        let h2_uri = format!("{}://{}{}", scheme, authority, upstream_path);
         let mut upstream_req = Request::builder()
             .method(method)
-            .uri(upstream_path);
+            .uri(&h2_uri);
+
+        // Remove Host header for H2 — :authority pseudo-header (from URI) is sufficient
+        let mut upstream_headers = upstream_headers;
+        upstream_headers.remove("host");

         if let Some(headers) = upstream_req.headers_mut() {
             *headers = upstream_headers;
@@ -1065,7 +1153,9 @@ impl HttpProxyService {

         match sender.send_request(upstream_req).await {
             Ok(resp) => {
-                let result = self.build_streaming_response(resp, route, route_id, source_ip).await;
+                // Register in pool only after request succeeds
+                self.connection_pool.register_h2(pool_key.clone(), sender);
+                let result = self.build_streaming_response(resp, route, route_id, source_ip, conn_activity).await;
                 // Close the fresh backend connection (opened above)
                 self.metrics.backend_connection_closed(&backend_key);
                 result
@@ -1073,7 +1163,6 @@ impl HttpProxyService {
             Err(e) => {
                 error!(backend = %backend_key, domain = %domain, error = %e, "H2 retry: request failed");
                 self.metrics.backend_request_error(&backend_key);
-                self.connection_pool.remove_h2(pool_key);
                 // Close the fresh backend connection (opened above)
                 self.metrics.backend_connection_closed(&backend_key);
                 Ok(error_response(StatusCode::BAD_GATEWAY, "Backend H2 request failed on retry"))
@@ -1093,7 +1182,7 @@ impl HttpProxyService {
         io: TokioIo<BackendStream>,
         parts: hyper::http::request::Parts,
         body: Incoming,
-        upstream_headers: hyper::HeaderMap,
+        mut upstream_headers: hyper::HeaderMap,
         upstream_path: &str,
         upstream: &crate::upstream_selector::UpstreamSelection,
         route: &rustproxy_config::RouteConfig,
```
```diff
@@ -1102,34 +1191,94 @@ impl HttpProxyService {
         pool_key: &crate::connection_pool::PoolKey,
         requested_host: Option<String>,
         domain: &str,
+        conn_activity: &ConnActivity,
     ) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
         let exec = hyper_util::rt::TokioExecutor::new();
-        let handshake_result: Result<(
-            hyper::client::conn::http2::SendRequest<BoxBody<Bytes, hyper::Error>>,
-            hyper::client::conn::http2::Connection<TokioIo<BackendStream>, BoxBody<Bytes, hyper::Error>, hyper_util::rt::TokioExecutor>,
-        ), hyper::Error> = hyper::client::conn::http2::handshake(exec, io).await;
+        let mut h2_builder = hyper::client::conn::http2::Builder::new(exec);
+        h2_builder
+            .timer(hyper_util::rt::TokioTimer::new())
+            .keep_alive_interval(std::time::Duration::from_secs(10))
+            .keep_alive_timeout(std::time::Duration::from_secs(30))
+            .initial_stream_window_size(2 * 1024 * 1024)
+            .initial_connection_window_size(16 * 1024 * 1024);
+        let handshake_result = tokio::time::timeout(
+            self.connect_timeout,
+            h2_builder.handshake(io),
+        ).await;

         match handshake_result {
-            Ok((mut sender, conn)) => {
-                tokio::spawn(async move {
-                    if let Err(e) = conn.await {
-                        debug!("HTTP/2 upstream connection error: {}", e);
-                    }
-                });
+            Err(_) => {
+                // H2 handshake timed out — fall back to H1
+                let bk = format!("{}:{}", upstream.host, upstream.port);
+                warn!(
+                    backend = %bk,
+                    domain = %domain,
+                    "H2 handshake timeout, falling back to H1"
+                );
+                self.metrics.backend_h2_failure(&bk);
+                self.metrics.backend_handshake_error(&bk);
+
+                let cache_key = crate::protocol_cache::ProtocolCacheKey {
+                    host: upstream.host.clone(),
+                    port: upstream.port,
+                    requested_host: requested_host.clone(),
+                };
+                self.protocol_cache.insert(cache_key, crate::protocol_cache::DetectedProtocol::H1);
+
+                match self.reconnect_backend(upstream, domain).await {
+                    Some(fallback_backend) => {
+                        let h1_pool_key = crate::connection_pool::PoolKey {
+                            host: upstream.host.clone(),
+                            port: upstream.port,
+                            use_tls: upstream.use_tls,
+                            h2: false,
+                        };
+                        let fallback_io = TokioIo::new(fallback_backend);
+                        let result = self.forward_h1(
+                            fallback_io, parts, body, upstream_headers, upstream_path,
+                            upstream, route, route_id, source_ip, &h1_pool_key, domain, conn_activity,
+                        ).await;
+                        self.metrics.backend_connection_closed(&bk);
+                        result
+                    }
+                    None => {
+                        Ok(error_response(StatusCode::BAD_GATEWAY, "Backend unavailable after H2 timeout fallback"))
+                    }
+                }
+            }
+            Ok(Ok((mut sender, conn))) => {
+                // Spawn the H2 connection driver; proactively evict from pool on exit.
+                {
+                    let pool = Arc::clone(&self.connection_pool);
+                    let key = pool_key.clone();
+                    tokio::spawn(async move {
+                        if let Err(e) = conn.await {
+                            debug!("HTTP/2 upstream connection error: {}", e);
+                        }
+                        pool.remove_h2(&key);
+                    });
+                }

                 // Save retry state before consuming parts/body (for bodyless requests like GET)
+                // Clone BEFORE removing Host — H1 fallback needs Host header
                 let retry_state = if body.is_end_stream() {
                     Some((parts.method.clone(), upstream_headers.clone()))
                 } else {
                     None
                 };

+                // Remove Host header for H2 — :authority pseudo-header (from URI) is sufficient
+                upstream_headers.remove("host");
+
                 // Build and send the h2 request inline (don't register in pool yet —
                 // we need to verify the request actually succeeds first, because some
                 // backends advertise h2 via ALPN but don't speak the h2 binary protocol).
+                let scheme = if upstream.use_tls { "https" } else { "http" };
+                let authority = if domain != "-" { domain } else { upstream.host.as_str() };
+                let h2_uri = format!("{}://{}{}", scheme, authority, upstream_path);
                 let mut upstream_req = Request::builder()
                     .method(parts.method)
-                    .uri(upstream_path);
+                    .uri(&h2_uri);

                 if let Some(headers) = upstream_req.headers_mut() {
                     *headers = upstream_headers;
@@ -1141,7 +1290,7 @@ impl HttpProxyService {
                     route_id.map(|s| s.to_string()),
                     Some(source_ip.to_string()),
                     Direction::In,
-                );
+                ).with_connection_activity(Arc::clone(&conn_activity.last_activity), conn_activity.start);
                 let boxed_body: BoxBody<Bytes, hyper::Error> = BoxBody::new(counting_req_body);
                 let upstream_req = upstream_req.body(boxed_body).unwrap();

@@ -1149,7 +1298,7 @@ impl HttpProxyService {
                     Ok(upstream_response) => {
                         // H2 works! Register sender in pool for multiplexed reuse
                         self.connection_pool.register_h2(pool_key.clone(), sender);
-                        self.build_streaming_response(upstream_response, route, route_id, source_ip).await
+                        self.build_streaming_response(upstream_response, route, route_id, source_ip, conn_activity).await
                     }
                     Err(e) => {
                         // H2 request failed — backend advertises h2 via ALPN but doesn't
@@ -1182,7 +1331,7 @@ impl HttpProxyService {
                         let fallback_io = TokioIo::new(fallback_backend);
                         let result = self.forward_h1_empty_body(
                             fallback_io, method, headers, upstream_path,
-                            route, route_id, source_ip, &h1_pool_key, domain,
+                            route, route_id, source_ip, &h1_pool_key, domain, conn_activity,
                         ).await;
                         // Close the reconnected backend connection (opened in reconnect_backend)
                         self.metrics.backend_connection_closed(&bk);
@@ -1198,7 +1347,7 @@ impl HttpProxyService {
                     }
                 }
             }
-            Err(e) => {
+            Ok(Err(e)) => {
                 // H2 handshake truly failed — fall back to H1
                 // Body is NOT consumed yet, so we can retry the full request.
                 let bk = format!("{}:{}", upstream.host, upstream.port);
@@ -1231,7 +1380,7 @@ impl HttpProxyService {
                 let fallback_io = TokioIo::new(fallback_backend);
                 let result = self.forward_h1(
                     fallback_io, parts, body, upstream_headers, upstream_path,
-                    upstream, route, route_id, source_ip, &h1_pool_key, domain,
+                    upstream, route, route_id, source_ip, &h1_pool_key, domain, conn_activity,
                 ).await;
                 // Close the reconnected backend connection (opened in reconnect_backend)
                 self.metrics.backend_connection_closed(&bk);
@@ -1258,6 +1407,7 @@ impl HttpProxyService {
         source_ip: &str,
         pool_key: &crate::connection_pool::PoolKey,
         domain: &str,
+        conn_activity: &ConnActivity,
     ) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
         let backend_key = format!("{}:{}", pool_key.host, pool_key.port);
         let (mut sender, conn): (
@@ -1301,10 +1451,10 @@ impl HttpProxyService {
             }
         };

-        // Return sender to pool for keep-alive reuse
-        self.connection_pool.checkin_h1(pool_key.clone(), sender);
+        // Don't pool the sender while response body is still streaming (same safety as forward_h1_with_sender)
+        drop(sender);

-        self.build_streaming_response(upstream_response, route, route_id, source_ip).await
+        self.build_streaming_response(upstream_response, route, route_id, source_ip, conn_activity).await
     }

     /// Reconnect to a backend (used for H2→H1 fallback).
@@ -1375,10 +1525,23 @@ impl HttpProxyService {
         source_ip: &str,
         pool_key: Option<&crate::connection_pool::PoolKey>,
         domain: &str,
+        conn_activity: &ConnActivity,
     ) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
+        // Build absolute URI for H2 pseudo-headers (:scheme, :authority)
+        // Use the requested domain as authority (not backend address) so :authority matches Host header
+        let scheme = if pool_key.map(|pk| pk.use_tls).unwrap_or(false) { "https" } else { "http" };
+        let authority = if domain != "-" { domain } else {
+            pool_key.map(|pk| pk.host.as_str()).unwrap_or("localhost")
+        };
+        let h2_uri = format!("{}://{}{}", scheme, authority, upstream_path);
         let mut upstream_req = Request::builder()
             .method(parts.method)
-            .uri(upstream_path);
+            .uri(&h2_uri);

+        // Remove Host header for H2 — :authority pseudo-header (from URI) is sufficient
+        // Having both Host and :authority causes nginx to return 400
+        let mut upstream_headers = upstream_headers;
+        upstream_headers.remove("host");
+
         if let Some(headers) = upstream_req.headers_mut() {
             *headers = upstream_headers;
@@ -1391,7 +1554,7 @@ impl HttpProxyService {
             route_id.map(|s| s.to_string()),
             Some(source_ip.to_string()),
             Direction::In,
-        );
+        ).with_connection_activity(Arc::clone(&conn_activity.last_activity), conn_activity.start);
         let boxed_body: BoxBody<Bytes, hyper::Error> = BoxBody::new(counting_req_body);

         let upstream_req = upstream_req.body(boxed_body).unwrap();
@@ -1412,7 +1575,7 @@ impl HttpProxyService {
             }
         };

-        self.build_streaming_response(upstream_response, route, route_id, source_ip).await
+        self.build_streaming_response(upstream_response, route, route_id, source_ip, conn_activity).await
     }

     /// Build the client-facing response from an upstream response, streaming the body.
```
```diff
@@ -1425,6 +1588,7 @@ impl HttpProxyService {
         route: &rustproxy_config::RouteConfig,
         route_id: Option<&str>,
         source_ip: &str,
+        conn_activity: &ConnActivity,
     ) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
         let (resp_parts, resp_body) = upstream_response.into_parts();

@@ -1433,6 +1597,19 @@ impl HttpProxyService {

         if let Some(headers) = response.headers_mut() {
             *headers = resp_parts.headers;
+
+            // Strip hop-by-hop headers from the backend response.
+            // RFC 9113 §8.2.2 forbids connection-specific headers in HTTP/2 responses;
+            // forwarding them from an H1 backend can cause H2 stream resets.
+            // Mirrors the request-path stripping at the forward methods above.
+            headers.remove("connection");
+            headers.remove("keep-alive");
+            headers.remove("proxy-connection");
+            headers.remove("transfer-encoding");
+            headers.remove("te");
+            headers.remove("trailer");
+            // Note: "upgrade" is intentionally kept — needed for WebSocket 101 responses.

             ResponseFilter::apply_headers(route, headers, None);
         }

@@ -1445,14 +1622,14 @@ impl HttpProxyService {
             route_id.map(|s| s.to_string()),
             Some(source_ip.to_string()),
             Direction::Out,
-        );
+        ).with_connection_activity(Arc::clone(&conn_activity.last_activity), conn_activity.start);

         let body: BoxBody<Bytes, hyper::Error> = BoxBody::new(counting_body);

         Ok(response.body(body).unwrap())
     }

-    /// Handle a WebSocket upgrade request.
+    /// Handle a WebSocket upgrade request (H1 Upgrade or H2 Extended CONNECT per RFC 8441).
     async fn handle_websocket_upgrade(
         &self,
         req: Request<Incoming>,
@@ -1463,6 +1640,8 @@ impl HttpProxyService {
         upstream_key: &str,
         cancel: CancellationToken,
         source_ip: &str,
+        is_h2: bool,
+        conn_activity: Option<ConnActivity>,
     ) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
         use tokio::io::{AsyncReadExt, AsyncWriteExt};

@@ -1548,9 +1727,11 @@ impl HttpProxyService {

         let (parts, _body) = req.into_parts();

+        // H2 Extended CONNECT uses method=CONNECT, but the H1.1 backend expects GET
+        let backend_method = if is_h2 { "GET" } else { parts.method.as_str() };
         let mut raw_request = format!(
             "{} {} HTTP/1.1\r\n",
-            parts.method, upstream_path
+            backend_method, upstream_path
         );

         // Copy all original headers (preserving the client's Host header).
@@ -1578,6 +1759,23 @@ impl HttpProxyService {
             }
         }

+        // H2 Extended CONNECT doesn't carry H1 WebSocket handshake headers;
+        // inject them so the H1.1 backend can complete the upgrade.
+        if is_h2 {
+            if !parts.headers.contains_key("upgrade") {
+                raw_request.push_str("upgrade: websocket\r\n");
+            }
+            if !parts.headers.contains_key("connection") {
+                raw_request.push_str("connection: Upgrade\r\n");
+            }
+            if !parts.headers.contains_key("sec-websocket-version") {
+                raw_request.push_str("sec-websocket-version: 13\r\n");
+            }
+            if !parts.headers.contains_key("sec-websocket-key") {
+                raw_request.push_str("sec-websocket-key: dGhlIHNhbXBsZSBub25jZQ==\r\n");
+            }
+        }
+
         // Add standard reverse-proxy headers (X-Forwarded-*)
         {
             let original_host = parts.headers.get("host")
@@ -1680,8 +1878,12 @@ impl HttpProxyService {
             ));
         }

-        let mut client_resp = Response::builder()
-            .status(StatusCode::SWITCHING_PROTOCOLS);
+        // H1: 101 Switching Protocols; H2: 200 OK (RFC 8441 — hyper requires 2xx for Extended CONNECT upgrade)
+        let mut client_resp = if is_h2 {
+            Response::builder().status(StatusCode::OK)
+        } else {
+            Response::builder().status(StatusCode::SWITCHING_PROTOCOLS)
+        };

         if let Some(resp_headers) = client_resp.headers_mut() {
             for line in response_str.lines().skip(1) {
@@ -1692,6 +1894,17 @@ impl HttpProxyService {
                 if let Some((name, value)) = line.split_once(':') {
                     let name = name.trim();
                     let value = value.trim();
+                    // Skip hop-by-hop headers for H2 (forbidden by RFC 9113 §8.2.2)
+                    if is_h2 {
+                        let name_lower = name.to_lowercase();
+                        if name_lower == "upgrade" || name_lower == "connection"
+                            || name_lower == "sec-websocket-accept"
+                            || name_lower == "transfer-encoding"
+                            || name_lower == "keep-alive"
+                        {
+                            continue;
+                        }
+                    }
                     if let Ok(header_name) = hyper::header::HeaderName::from_bytes(name.as_bytes()) {
                         if let Ok(header_value) = hyper::header::HeaderValue::from_str(value) {
                             resp_headers.insert(header_name, header_value);
@@ -1732,48 +1945,89 @@ impl HttpProxyService {
     let last_activity = Arc::new(AtomicU64::new(0));
     let start = std::time::Instant::now();

+    // Per-connection cancellation token: the watchdog cancels this instead of
+    // aborting tasks, so the copy loops can shut down gracefully (TLS close_notify).
+    let ws_cancel = CancellationToken::new();
+
+    // For H2 WebSocket: also update the connection-level activity tracker
+    // to prevent the idle watchdog from killing the H2 connection
+    let conn_act_c2u = conn_activity.as_ref().map(|ca| (Arc::clone(&ca.last_activity), ca.start));
+    let conn_act_u2c = conn_activity.as_ref().map(|ca| (Arc::clone(&ca.last_activity), ca.start));
+
     let la1 = Arc::clone(&last_activity);
+    let metrics_c2u = Arc::clone(&metrics);
+    let route_c2u = route_id_owned.clone();
+    let ip_c2u = source_ip_owned.clone();
+    let wsc1 = ws_cancel.clone();
     let c2u = tokio::spawn(async move {
         let mut buf = vec![0u8; 65536];
         let mut total = 0u64;
         loop {
-            let n = match cr.read(&mut buf).await {
-                Ok(0) | Err(_) => break,
-                Ok(n) => n,
+            let n = tokio::select! {
+                result = cr.read(&mut buf) => match result {
+                    Ok(0) | Err(_) => break,
+                    Ok(n) => n,
+                },
+                _ = wsc1.cancelled() => break,
             };
             if uw.write_all(&buf[..n]).await.is_err() {
                 break;
             }
             total += n as u64;
+            metrics_c2u.record_bytes(n as u64, 0, route_c2u.as_deref(), Some(&ip_c2u));
             la1.store(start.elapsed().as_millis() as u64, Ordering::Relaxed);
+            if let Some((ref ca, ca_start)) = conn_act_c2u {
+                ca.store(ca_start.elapsed().as_millis() as u64, Ordering::Relaxed);
+            }
         }
-        let _ = uw.shutdown().await;
+        // Graceful shutdown with timeout (sends TLS close_notify / TCP FIN)
+        let _ = tokio::time::timeout(
+            std::time::Duration::from_secs(2),
+            uw.shutdown(),
+        ).await;
         total
     });

     let la2 = Arc::clone(&last_activity);
+    let metrics_u2c = Arc::clone(&metrics);
+    let route_u2c = route_id_owned.clone();
+    let ip_u2c = source_ip_owned.clone();
+    let wsc2 = ws_cancel.clone();
     let u2c = tokio::spawn(async move {
         let mut buf = vec![0u8; 65536];
         let mut total = 0u64;
         loop {
-            let n = match ur.read(&mut buf).await {
-                Ok(0) | Err(_) => break,
-                Ok(n) => n,
+            let n = tokio::select! {
+                result = ur.read(&mut buf) => match result {
+                    Ok(0) | Err(_) => break,
+                    Ok(n) => n,
+                },
+                _ = wsc2.cancelled() => break,
             };
             if cw.write_all(&buf[..n]).await.is_err() {
                 break;
             }
             total += n as u64;
+            metrics_u2c.record_bytes(0, n as u64, route_u2c.as_deref(), Some(&ip_u2c));
             la2.store(start.elapsed().as_millis() as u64, Ordering::Relaxed);
+            if let Some((ref ca, ca_start)) = conn_act_u2c {
+                ca.store(ca_start.elapsed().as_millis() as u64, Ordering::Relaxed);
+            }
         }
-        let _ = cw.shutdown().await;
+        // Graceful shutdown with timeout (sends TLS close_notify / TCP FIN)
+        let _ = tokio::time::timeout(
+            std::time::Duration::from_secs(2),
+            cw.shutdown(),
+        ).await;
         total
     });

-    // Watchdog: monitors inactivity, max lifetime, and cancellation
+    // Watchdog: monitors inactivity, max lifetime, and cancellation.
+    // First cancels the per-connection token for graceful shutdown (close_notify/FIN),
+    // then falls back to abort if the tasks are stuck (e.g. on a blocked write_all).
     let la_watch = Arc::clone(&last_activity);
-    let c2u_handle = c2u.abort_handle();
-    let u2c_handle = u2c.abort_handle();
+    let c2u_abort = c2u.abort_handle();
+    let u2c_abort = u2c.abort_handle();
     let inactivity_timeout = ws_inactivity_timeout;
     let max_lifetime = ws_max_lifetime;

@@ -1785,8 +2039,6 @@ impl HttpProxyService {
             _ = tokio::time::sleep(check_interval) => {}
             _ = cancel.cancelled() => {
                 debug!("WebSocket tunnel cancelled by shutdown");
-                c2u_handle.abort();
-                u2c_handle.abort();
                 break;
             }
         }
@@ -1794,8 +2046,6 @@ impl HttpProxyService {
         // Check max lifetime
         if start.elapsed() >= max_lifetime {
             debug!("WebSocket tunnel exceeded max lifetime, closing");
-            c2u_handle.abort();
-            u2c_handle.abort();
             break;
         }

@@ -1805,13 +2055,18 @@ impl HttpProxyService {
            let elapsed_since_activity = start.elapsed().as_millis() as u64 - current;
            if elapsed_since_activity >= inactivity_timeout.as_millis() as u64 {
                debug!("WebSocket tunnel inactive for {}ms, closing", elapsed_since_activity);
-               c2u_handle.abort();
-               u2c_handle.abort();
                break;
            }
        }
        last_seen = current;
    }
+   // Phase 1: Signal copy loops to exit gracefully (allows close_notify/FIN)
+   ws_cancel.cancel();
+   // Phase 2: Wait for graceful shutdown (2s shutdown timeout + 2s margin)
+   tokio::time::sleep(std::time::Duration::from_secs(4)).await;
+   // Phase 3: Force-abort if still stuck (e.g. blocked on write_all)
+   c2u_abort.abort();
+   u2c_abort.abort();
    });

    let bytes_in = c2u.await.unwrap_or(0);
@@ -1821,9 +2076,7 @@ impl HttpProxyService {
    debug!("WebSocket tunnel closed: {} bytes in, {} bytes out", bytes_in, bytes_out);

    upstream_selector.connection_ended(&upstream_key_owned);
-   if let Some(ref rid) = route_id_owned {
-       metrics.record_bytes(bytes_in, bytes_out, Some(rid.as_str()), Some(&source_ip_owned));
-   }
+   // Bytes already reported per-chunk in the copy loops above
    });

    let body: BoxBody<Bytes, hyper::Error> = BoxBody::new(
```
```diff
@@ -97,16 +97,25 @@ pub async fn forward_bidirectional_with_timeouts(
     let last_activity = Arc::new(AtomicU64::new(0));
     let start = std::time::Instant::now();

+    // Per-connection cancellation token: the watchdog cancels this instead of
+    // aborting tasks, so the copy loops can shut down gracefully (TCP FIN instead
+    // of RST, TLS close_notify if the stream is TLS-wrapped).
+    let conn_cancel = CancellationToken::new();
+
     let la1 = Arc::clone(&last_activity);
     let initial_len = initial_data.map_or(0u64, |d| d.len() as u64);
     let metrics_c2b = metrics.clone();
+    let cc1 = conn_cancel.clone();
     let c2b = tokio::spawn(async move {
         let mut buf = vec![0u8; 65536];
         let mut total = initial_len;
         loop {
-            let n = match client_read.read(&mut buf).await {
-                Ok(0) | Err(_) => break,
-                Ok(n) => n,
+            let n = tokio::select! {
+                result = client_read.read(&mut buf) => match result {
+                    Ok(0) | Err(_) => break,
+                    Ok(n) => n,
+                },
+                _ = cc1.cancelled() => break,
             };
             if backend_write.write_all(&buf[..n]).await.is_err() {
                 break;
@@ -117,19 +126,27 @@ pub async fn forward_bidirectional_with_timeouts(
                 ctx.collector.record_bytes(n as u64, 0, ctx.route_id.as_deref(), ctx.source_ip.as_deref());
             }
         }
-        let _ = backend_write.shutdown().await;
+        // Graceful shutdown with timeout (sends TCP FIN / TLS close_notify)
+        let _ = tokio::time::timeout(
+            std::time::Duration::from_secs(2),
+            backend_write.shutdown(),
+        ).await;
         total
     });

     let la2 = Arc::clone(&last_activity);
     let metrics_b2c = metrics;
+    let cc2 = conn_cancel.clone();
     let b2c = tokio::spawn(async move {
         let mut buf = vec![0u8; 65536];
         let mut total = 0u64;
         loop {
-            let n = match backend_read.read(&mut buf).await {
-                Ok(0) | Err(_) => break,
-                Ok(n) => n,
+            let n = tokio::select! {
+                result = backend_read.read(&mut buf) => match result {
+                    Ok(0) | Err(_) => break,
+                    Ok(n) => n,
+                },
+                _ = cc2.cancelled() => break,
             };
             if client_write.write_all(&buf[..n]).await.is_err() {
                 break;
@@ -140,14 +157,20 @@ pub async fn forward_bidirectional_with_timeouts(
                 ctx.collector.record_bytes(0, n as u64, ctx.route_id.as_deref(), ctx.source_ip.as_deref());
             }
         }
-        let _ = client_write.shutdown().await;
+        // Graceful shutdown with timeout (sends TCP FIN / TLS close_notify)
+        let _ = tokio::time::timeout(
+            std::time::Duration::from_secs(2),
+            client_write.shutdown(),
+        ).await;
         total
     });

-    // Watchdog: inactivity, max lifetime, and cancellation
+    // Watchdog: inactivity, max lifetime, and cancellation.
+    // First cancels the per-connection token for graceful shutdown (FIN/close_notify),
+    // then falls back to abort if the tasks are stuck (e.g. on a blocked write_all).
     let la_watch = Arc::clone(&last_activity);
-    let c2b_handle = c2b.abort_handle();
-    let b2c_handle = b2c.abort_handle();
+    let c2b_abort = c2b.abort_handle();
+    let b2c_abort = b2c.abort_handle();
     let watchdog = tokio::spawn(async move {
         let check_interval = std::time::Duration::from_secs(5);
         let mut last_seen = 0u64;
@@ -155,16 +178,12 @@ pub async fn forward_bidirectional_with_timeouts(
             tokio::select! {
                 _ = cancel.cancelled() => {
                     debug!("Connection cancelled by shutdown");
-                    c2b_handle.abort();
-                    b2c_handle.abort();
                     break;
                 }
                 _ = tokio::time::sleep(check_interval) => {
                     // Check max lifetime
                     if start.elapsed() >= max_lifetime {
                         debug!("Connection exceeded max lifetime, closing");
-                        c2b_handle.abort();
-                        b2c_handle.abort();
                         break;
                     }

@@ -174,8 +193,6 @@ pub async fn forward_bidirectional_with_timeouts(
                     let elapsed_since_activity = start.elapsed().as_millis() as u64 - current;
                     if elapsed_since_activity >= inactivity_timeout.as_millis() as u64 {
                         debug!("Connection inactive for {}ms, closing", elapsed_since_activity);
-                        c2b_handle.abort();
-                        b2c_handle.abort();
                         break;
                     }
                 }
@@ -183,6 +200,13 @@ pub async fn forward_bidirectional_with_timeouts(
             }
         }
     }
+    // Phase 1: Signal copy loops to exit gracefully (allows FIN/close_notify)
+    conn_cancel.cancel();
+    // Phase 2: Wait for graceful shutdown (2s shutdown timeout + 2s margin)
+    tokio::time::sleep(std::time::Duration::from_secs(4)).await;
+    // Phase 3: Force-abort if still stuck (e.g. blocked on write_all)
+    c2b_abort.abort();
+    b2c_abort.abort();
     });

     let bytes_in = c2b.await.unwrap_or(0);
```
```diff
@@ -465,21 +465,19 @@ impl TcpListenerManager {
             Ok((stream, peer_addr)) => {
                 let ip = peer_addr.ip();

-                // Global connection limit — acquire semaphore permit with timeout
-                let permit = match tokio::time::timeout(
-                    std::time::Duration::from_secs(5),
-                    conn_semaphore.clone().acquire_owned(),
-                ).await {
-                    Ok(Ok(permit)) => permit,
-                    Ok(Err(_)) => {
-                        // Semaphore closed — shouldn't happen, but be safe
-                        debug!("Connection semaphore closed, dropping connection from {}", peer_addr);
+                // Global connection limit — non-blocking check.
+                // MUST NOT block the accept loop: a blocking acquire would stall
+                // ALL connections to this port (not just the one over limit), because
+                // listener.accept() is not polled while we await the semaphore.
+                let permit = match conn_semaphore.clone().try_acquire_owned() {
+                    Ok(permit) => permit,
+                    Err(tokio::sync::TryAcquireError::NoPermits) => {
+                        debug!("Global connection limit reached, dropping connection from {}", peer_addr);
                         drop(stream);
                         continue;
                     }
-                    Err(_) => {
-                        // Timeout — global limit reached
-                        debug!("Global connection limit reached, dropping connection from {}", peer_addr);
+                    Err(tokio::sync::TryAcquireError::Closed) => {
+                        debug!("Connection semaphore closed, dropping connection from {}", peer_addr);
                         drop(stream);
                         continue;
                     }
@@ -1396,15 +1394,24 @@ impl TcpListenerManager {
     let last_activity = Arc::new(AtomicU64::new(0));
     let start = std::time::Instant::now();

+    // Per-connection cancellation token: the watchdog cancels this instead of
+    // aborting tasks, so the copy loops can shut down gracefully (TLS close_notify
+    // for terminate/reencrypt mode, TCP FIN for passthrough mode).
+    let conn_cancel = CancellationToken::new();
+
     let la1 = Arc::clone(&last_activity);
     let metrics_c2b = metrics.clone();
+    let cc1 = conn_cancel.clone();
     let c2b = tokio::spawn(async move {
         let mut buf = vec![0u8; 65536];
         let mut total = 0u64;
         loop {
-            let n = match client_read.read(&mut buf).await {
-                Ok(0) | Err(_) => break,
-                Ok(n) => n,
+            let n = tokio::select! {
+                result = client_read.read(&mut buf) => match result {
+                    Ok(0) | Err(_) => break,
+                    Ok(n) => n,
+                },
+                _ = cc1.cancelled() => break,
             };
             if backend_write.write_all(&buf[..n]).await.is_err() {
                 break;
@@ -1418,19 +1425,27 @@ impl TcpListenerManager {
                 ctx.collector.record_bytes(n as u64, 0, ctx.route_id.as_deref(), ctx.source_ip.as_deref());
             }
         }
-        let _ = backend_write.shutdown().await;
+        // Graceful shutdown with timeout (sends TLS close_notify / TCP FIN)
+        let _ = tokio::time::timeout(
+            std::time::Duration::from_secs(2),
+            backend_write.shutdown(),
+        ).await;
         total
     });

     let la2 = Arc::clone(&last_activity);
     let metrics_b2c = metrics;
+    let cc2 = conn_cancel.clone();
     let b2c = tokio::spawn(async move {
         let mut buf = vec![0u8; 65536];
         let mut total = 0u64;
         loop {
-            let n = match backend_read.read(&mut buf).await {
-                Ok(0) | Err(_) => break,
-                Ok(n) => n,
+            let n = tokio::select! {
+                result = backend_read.read(&mut buf) => match result {
+                    Ok(0) | Err(_) => break,
+                    Ok(n) => n,
+                },
+                _ = cc2.cancelled() => break,
             };
             if client_write.write_all(&buf[..n]).await.is_err() {
                 break;
@@ -1444,14 +1459,20 @@ impl TcpListenerManager {
                 ctx.collector.record_bytes(0, n as u64, ctx.route_id.as_deref(), ctx.source_ip.as_deref());
             }
         }
-        let _ = client_write.shutdown().await;
+        // Graceful shutdown with timeout (sends TLS close_notify / TCP FIN)
+        let _ = tokio::time::timeout(
+            std::time::Duration::from_secs(2),
+            client_write.shutdown(),
+        ).await;
         total
     });

-    // Watchdog task: check for inactivity, max lifetime, and cancellation
+    // Watchdog task: check for inactivity, max lifetime, and cancellation.
+    // First cancels the per-connection token for graceful shutdown (close_notify/FIN),
+    // then falls back to abort if the tasks are stuck (e.g. on a blocked write_all).
     let la_watch = Arc::clone(&last_activity);
-    let c2b_handle = c2b.abort_handle();
-    let b2c_handle = b2c.abort_handle();
+    let c2b_abort = c2b.abort_handle();
+    let b2c_abort = b2c.abort_handle();
     let watchdog = tokio::spawn(async move {
         let check_interval = std::time::Duration::from_secs(5);
         let mut last_seen = 0u64;
@@ -1459,16 +1480,12 @@ impl TcpListenerManager {
             tokio::select! {
                 _ = cancel.cancelled() => {
                     debug!("Split-stream connection cancelled by shutdown");
-                    c2b_handle.abort();
-                    b2c_handle.abort();
                     break;
                 }
                 _ = tokio::time::sleep(check_interval) => {
                     // Check max lifetime
                     if start.elapsed() >= max_lifetime {
                         debug!("Connection exceeded max lifetime, closing");
-                        c2b_handle.abort();
-                        b2c_handle.abort();
                         break;
                     }

@@ -1479,8 +1496,6 @@ impl TcpListenerManager {
                     let elapsed_since_activity = start.elapsed().as_millis() as u64 - current;
                     if elapsed_since_activity >= inactivity_timeout.as_millis() as u64 {
                         debug!("Connection inactive for {}ms, closing", elapsed_since_activity);
-                        c2b_handle.abort();
-                        b2c_handle.abort();
                         break;
                     }
                 }
@@ -1488,6 +1503,13 @@ impl TcpListenerManager {
             }
         }
     }
+    // Phase 1: Signal copy loops to exit gracefully (allows close_notify/FIN)
+    conn_cancel.cancel();
+    // Phase 2: Wait for graceful shutdown (2s shutdown timeout + 2s margin)
+    tokio::time::sleep(std::time::Duration::from_secs(4)).await;
+    // Phase 3: Force-abort if still stuck (e.g. blocked on write_all)
+    c2b_abort.abort();
+    b2c_abort.abort();
     });

     let bytes_in = c2b.await.unwrap_or(0);
```
```diff
@@ -3,6 +3,6 @@
  */
 export const commitinfo = {
   name: '@push.rocks/smartproxy',
-  version: '25.10.3',
+  version: '25.11.6',
   description: 'A powerful proxy package with unified route-based configuration for high traffic management. Features include SSL/TLS support, flexible routing patterns, WebSocket handling, advanced security options, and automatic ACME certificate management.'
 }
```