Compare commits

16 Commits

Author SHA1 Message Date
91832c368d v25.11.24
Some checks failed
Default (tags) / security (push) Failing after 0s
Default (tags) / test (push) Failing after 0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-17 16:47:57 +00:00
c9d0fccb2d fix(rustproxy-http): improve async static file serving, websocket handshake buffering, and shared metric metadata handling 2026-03-17 16:47:57 +00:00
5dccbbc9d1 v25.11.23
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-17 12:22:51 +00:00
92d7113c6c fix(rustproxy-http,rustproxy-metrics): reduce per-frame metrics overhead by batching body byte accounting 2026-03-17 12:22:51 +00:00
8f6bb30367 v25.11.22
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-17 12:12:24 +00:00
ef9bac80ff fix(rustproxy-http): reuse healthy HTTP/2 upstream connections after requests with bodies 2026-03-17 12:12:24 +00:00
9c78701038 v25.11.21
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-17 11:33:34 +00:00
26fd9409a7 fix(rustproxy-http): reuse pooled HTTP/2 connections for requests with and without bodies 2026-03-17 11:33:34 +00:00
cfff128499 v25.11.20
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-17 01:32:35 +00:00
3baff354bd fix(rustproxy-http): avoid downgrading cached backend protocol on H2 stream errors 2026-03-17 01:32:35 +00:00
c2eacd1b30 v25.11.19
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 2s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-16 20:53:39 +00:00
1fdbfcf0aa fix(rustproxy-http): avoid reusing pooled HTTP/2 connections for requests with bodies to prevent upload flow-control stalls 2026-03-16 20:53:39 +00:00
9b184acc8c v25.11.18
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-16 17:42:14 +00:00
b475968f4e fix(repo): no changes to commit 2026-03-16 17:42:14 +00:00
878eab6e88 v25.11.17
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-16 14:30:43 +00:00
77abe0804d fix(rustproxy-http): prevent stale HTTP/2 connection drivers from evicting newer pooled connections 2026-03-16 14:30:43 +00:00
7 changed files with 351 additions and 202 deletions

View File

@@ -1,5 +1,55 @@
# Changelog
## 2026-03-17 - 25.11.24 - fix(rustproxy-http)
improve async static file serving, websocket handshake buffering, and shared metric metadata handling
- convert static file serving to async filesystem operations and await directory/file checks
- preserve and forward bytes read past the WebSocket handshake header terminator to avoid dropping buffered upstream data
- reuse Arc<str> values for route and source identifiers across counting bodies and metric reporting
- standardize backend key propagation across H1/H2 forwarding, retry, and fallback paths for consistent logging and metrics
## 2026-03-17 - 25.11.23 - fix(rustproxy-http,rustproxy-metrics)
reduce per-frame metrics overhead by batching body byte accounting
- Buffer HTTP body byte counts and flush them every 64 KB, at end of stream, and on drop to keep totals accurate while preserving throughput sampling.
- Skip zero-value counter updates in metrics collection to avoid unnecessary atomic and DashMap operations for the unused direction.
## 2026-03-17 - 25.11.22 - fix(rustproxy-http)
reuse healthy HTTP/2 upstream connections after requests with bodies
- Registers successful HTTP/2 connections in the pool regardless of whether the proxied request included a body
- Continues to avoid pooling upstream connections that returned 502 Bad Gateway responses
## 2026-03-17 - 25.11.21 - fix(rustproxy-http)
reuse pooled HTTP/2 connections for requests with and without bodies
- remove the bodyless-request restriction from HTTP/2 pool checkout
- always return successful HTTP/2 senders to the connection pool after requests
## 2026-03-17 - 25.11.20 - fix(rustproxy-http)
avoid downgrading cached backend protocol on H2 stream errors
- Treat HTTP/2 stream-level failures as retryable request errors instead of evidence that the backend only supports HTTP/1.1
- Keep protocol cache entries unchanged after successful H2 handshakes so future requests continue using HTTP/2
- Lower log severity for this fallback path from warning to debug while still recording backend H2 failure metrics
## 2026-03-16 - 25.11.19 - fix(rustproxy-http)
avoid reusing pooled HTTP/2 connections for requests with bodies to prevent upload flow-control stalls
- Limit HTTP/2 pool checkout to bodyless requests such as GET, HEAD, and DELETE
- Skip re-registering HTTP/2 connections in the pool after requests that send a body
- Prevent stalled uploads caused by depleted connection-level flow control windows on reused HTTP/2 connections
## 2026-03-16 - 25.11.18 - fix(repo)
no changes to commit
## 2026-03-16 - 25.11.17 - fix(rustproxy-http)
prevent stale HTTP/2 connection drivers from evicting newer pooled connections
- add generation IDs to pooled HTTP/2 senders so pool removal only affects the matching connection
- update HTTP/2 proxy and retry paths to register generation-tagged connections and skip eviction before registration completes
## 2026-03-16 - 25.11.16 - fix(repo)
no changes to commit
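
For context on the shared metric metadata change in the 25.11.24 entry: the win is that cloning an Arc<str> is a reference-count bump rather than a fresh String allocation per counting body. A minimal standalone sketch of that idea follows; the BodyMeta struct and its field values are hypothetical stand-ins, not the crate's CountingBody API.

use std::sync::Arc;

// Illustrative only: cloning Arc<str> bumps a refcount, while cloning
// Option<String> per body would reallocate and copy the bytes.
struct BodyMeta {
    route_id: Option<Arc<str>>,
    source_ip: Arc<str>,
}

fn main() {
    // Computed once per proxied request.
    let rid: Option<Arc<str>> = Some(Arc::from("route-42"));
    let sip: Arc<str> = Arc::from("203.0.113.7");

    // Request-body and response-body wrappers share the same backing strings.
    let req_meta = BodyMeta { route_id: rid.clone(), source_ip: Arc::clone(&sip) };
    let resp_meta = BodyMeta { route_id: rid, source_ip: sip };

    assert!(Arc::ptr_eq(&req_meta.source_ip, &resp_meta.source_ip));
    assert_eq!(req_meta.route_id.as_deref(), resp_meta.route_id.as_deref());
}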

View File

@@ -1,6 +1,6 @@
{
"name": "@push.rocks/smartproxy",
"version": "25.11.16",
"version": "25.11.24",
"private": false,
"description": "A powerful proxy package with unified route-based configuration for high traffic management. Features include SSL/TLS support, flexible routing patterns, WebSocket handling, advanced security options, and automatic ACME certificate management.",
"main": "dist_ts/index.js",

View File

@@ -4,13 +4,13 @@
//! HTTP/2 connections are multiplexed (clone the sender for each request).
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{Duration, Instant};
use bytes::Bytes;
use dashmap::DashMap;
use http_body_util::combinators::BoxBody;
use hyper::client::conn::{http1, http2};
// No per-request logging in the pool — only log on actual failures (in proxy_service.rs)
/// Maximum idle connections per backend key.
const MAX_IDLE_PER_KEY: usize = 16;
@@ -38,10 +38,13 @@ struct IdleH1 {
idle_since: Instant,
}
/// A pooled HTTP/2 sender (multiplexed, Clone-able).
/// A pooled HTTP/2 sender (multiplexed, Clone-able) with a generation tag.
struct PooledH2 {
sender: http2::SendRequest<BoxBody<Bytes, hyper::Error>>,
created_at: Instant,
/// Unique generation ID. Connection drivers use this to only remove their OWN
/// entry, preventing phantom eviction when multiple connections share the same key.
generation: u64,
}
/// Backend connection pool.
@@ -50,6 +53,8 @@ pub struct ConnectionPool {
h1_pool: Arc<DashMap<PoolKey, Vec<IdleH1>>>,
/// HTTP/2 multiplexed connections indexed by backend key.
h2_pool: Arc<DashMap<PoolKey, PooledH2>>,
/// Monotonic generation counter for H2 pool entries.
h2_generation: AtomicU64,
/// Handle for the background eviction task.
eviction_handle: Option<tokio::task::JoinHandle<()>>,
}
@@ -69,6 +74,7 @@ impl ConnectionPool {
Self {
h1_pool,
h2_pool,
h2_generation: AtomicU64::new(0),
eviction_handle: Some(eviction_handle),
}
}
@@ -132,22 +138,39 @@ impl ConnectionPool {
None
}
/// Remove a dead HTTP/2 sender from the pool.
/// Remove a dead HTTP/2 sender from the pool (unconditional).
/// Called when `send_request` fails to prevent subsequent requests from reusing the stale sender.
pub fn remove_h2(&self, key: &PoolKey) {
self.h2_pool.remove(key);
}
/// Register an HTTP/2 sender in the pool. Since h2 is multiplexed,
/// only one sender per key is stored (it's Clone-able).
pub fn register_h2(&self, key: PoolKey, sender: http2::SendRequest<BoxBody<Bytes, hyper::Error>>) {
/// Remove an HTTP/2 sender ONLY if the current entry has the expected generation.
/// This prevents phantom eviction: when multiple connections share the same key,
/// an old connection's driver won't accidentally remove a newer connection's entry.
pub fn remove_h2_if_generation(&self, key: &PoolKey, expected_gen: u64) {
if let Some(entry) = self.h2_pool.get(key) {
if entry.value().generation == expected_gen {
drop(entry); // release DashMap ref before remove
self.h2_pool.remove(key);
}
// else: a newer connection replaced ours — don't touch it
}
}
/// Register an HTTP/2 sender in the pool. Returns the generation ID for this entry.
/// The caller should pass this generation to the connection driver so it can use
/// `remove_h2_if_generation` instead of `remove_h2` to avoid phantom eviction.
pub fn register_h2(&self, key: PoolKey, sender: http2::SendRequest<BoxBody<Bytes, hyper::Error>>) -> u64 {
let gen = self.h2_generation.fetch_add(1, Ordering::Relaxed);
if sender.is_closed() {
return;
return gen;
}
self.h2_pool.insert(key, PooledH2 {
sender,
created_at: Instant::now(),
generation: gen,
});
gen
}
/// Background eviction loop — runs every EVICTION_INTERVAL to remove stale connections.
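
The generation-tag scheme above can be illustrated without hyper or DashMap. The following sketch uses a plain HashMap and string stand-ins for senders to show why a stale driver cannot evict a newer entry; all names here are illustrative, not the crate's ConnectionPool API.

use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, Ordering};

// Standalone sketch of generation-tagged eviction. Key/value types are string
// stand-ins; the real pool stores hyper H2 senders in a concurrent DashMap.
struct Pool {
    entries: HashMap<&'static str, (u64, &'static str)>, // key -> (generation, sender)
    next_generation: AtomicU64,
}

impl Pool {
    fn register(&mut self, key: &'static str, sender: &'static str) -> u64 {
        let generation = self.next_generation.fetch_add(1, Ordering::Relaxed);
        self.entries.insert(key, (generation, sender));
        generation
    }

    // Only remove the entry if it still carries the caller's generation.
    fn remove_if_generation(&mut self, key: &str, expected: u64) {
        if self.entries.get(key).map(|(g, _)| *g) == Some(expected) {
            self.entries.remove(key);
        }
    }
}

fn main() {
    let mut pool = Pool { entries: HashMap::new(), next_generation: AtomicU64::new(0) };

    let old_generation = pool.register("backend:443", "conn-A");
    pool.register("backend:443", "conn-B"); // replaces conn-A under the same key

    // conn-A's driver exits late and tries to evict, but the generation no
    // longer matches, so the newer conn-B entry survives (no phantom eviction).
    pool.remove_if_generation("backend:443", old_generation);
    assert_eq!(pool.entries.get("backend:443").map(|(_, s)| *s), Some("conn-B"));
}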

View File

@@ -9,19 +9,28 @@ use bytes::Bytes;
use http_body::Frame;
use rustproxy_metrics::MetricsCollector;
/// Flush accumulated bytes to the metrics collector every 64 KB.
/// This reduces per-frame DashMap shard-locked reads from ~15 to ~1 per 4 frames
/// (assuming typical 16 KB upload frames). The 1 Hz throughput sampler still sees
/// data within one sampling period even at low transfer rates.
const BYTE_FLUSH_THRESHOLD: u64 = 65_536;
/// Wraps any `http_body::Body` and counts data bytes passing through.
///
/// Each chunk is reported to the `MetricsCollector` immediately so that
/// the throughput tracker (sampled at 1 Hz) reflects real-time data flow.
/// Bytes are accumulated and flushed to the `MetricsCollector` every
/// [`BYTE_FLUSH_THRESHOLD`] bytes (and on Drop) so the throughput tracker
/// (sampled at 1 Hz) reflects real-time data flow without per-frame overhead.
///
/// The inner body is pinned on the heap to support `!Unpin` types like `hyper::body::Incoming`.
pub struct CountingBody<B> {
inner: Pin<Box<B>>,
metrics: Arc<MetricsCollector>,
route_id: Option<String>,
source_ip: Option<String>,
route_id: Option<Arc<str>>,
source_ip: Option<Arc<str>>,
/// Whether we count bytes as "in" (request body) or "out" (response body).
direction: Direction,
/// Accumulated bytes not yet flushed to the metrics collector.
pending_bytes: u64,
/// Optional connection-level activity tracker. When set, poll_frame updates this
/// to keep the idle watchdog alive during active body streaming (uploads/downloads).
connection_activity: Option<Arc<AtomicU64>>,
@@ -47,8 +56,8 @@ impl<B> CountingBody<B> {
pub fn new(
inner: B,
metrics: Arc<MetricsCollector>,
route_id: Option<String>,
source_ip: Option<String>,
route_id: Option<Arc<str>>,
source_ip: Option<Arc<str>>,
direction: Direction,
) -> Self {
Self {
@@ -57,6 +66,7 @@ impl<B> CountingBody<B> {
route_id,
source_ip,
direction,
pending_bytes: 0,
connection_activity: None,
activity_start: None,
active_requests: None,
@@ -81,14 +91,19 @@ impl<B> CountingBody<B> {
self
}
/// Report a chunk of bytes immediately to the metrics collector.
/// Flush accumulated bytes to the metrics collector.
#[inline]
fn report_chunk(&self, len: u64) {
fn flush_pending(&mut self) {
if self.pending_bytes == 0 {
return;
}
let bytes = self.pending_bytes;
self.pending_bytes = 0;
let route_id = self.route_id.as_deref();
let source_ip = self.source_ip.as_deref();
match self.direction {
Direction::In => self.metrics.record_bytes(len, 0, route_id, source_ip),
Direction::Out => self.metrics.record_bytes(0, len, route_id, source_ip),
Direction::In => self.metrics.record_bytes(bytes, 0, route_id, source_ip),
Direction::Out => self.metrics.record_bytes(0, bytes, route_id, source_ip),
}
}
}
@@ -113,9 +128,12 @@ where
Poll::Ready(Some(Ok(frame))) => {
if let Some(data) = frame.data_ref() {
let len = data.len() as u64;
// Report bytes immediately so the 1 Hz throughput sampler sees them
this.report_chunk(len);
// Keep the connection-level idle watchdog alive during body streaming
this.pending_bytes += len;
if this.pending_bytes >= BYTE_FLUSH_THRESHOLD {
this.flush_pending();
}
// Keep the connection-level idle watchdog alive on every frame
// (this is just one atomic store — cheap enough per-frame)
if let (Some(activity), Some(start)) = (&this.connection_activity, &this.activity_start) {
activity.store(start.elapsed().as_millis() as u64, Ordering::Relaxed);
}
@@ -123,7 +141,11 @@ where
Poll::Ready(Some(Ok(frame)))
}
Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))),
Poll::Ready(None) => Poll::Ready(None),
Poll::Ready(None) => {
// End of stream — flush any remaining bytes
this.flush_pending();
Poll::Ready(None)
}
Poll::Pending => Poll::Pending,
}
}
@@ -139,6 +161,8 @@ where
impl<B> Drop for CountingBody<B> {
fn drop(&mut self) {
// Flush any remaining accumulated bytes so totals stay accurate
self.flush_pending();
// Decrement the active-request counter so the HTTP idle watchdog
// knows this response body is no longer streaming.
if let Some(ref counter) = self.active_requests {
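
The flush discipline above (threshold, end of stream, Drop) is easy to show in isolation. Below is a minimal sketch that keeps the 64 KB threshold from the diff but replaces the Body plumbing with a plain struct and a closure standing in for MetricsCollector::record_bytes; everything apart from the threshold is illustrative.

const BYTE_FLUSH_THRESHOLD: u64 = 65_536;

// Bytes are buffered and reported only at the threshold, or when flushed on Drop.
struct ByteBatcher<F: FnMut(u64)> {
    pending: u64,
    report: F,
}

impl<F: FnMut(u64)> ByteBatcher<F> {
    fn on_frame(&mut self, len: u64) {
        self.pending += len;
        if self.pending >= BYTE_FLUSH_THRESHOLD {
            self.flush();
        }
    }

    fn flush(&mut self) {
        if self.pending > 0 {
            (self.report)(self.pending);
            self.pending = 0;
        }
    }
}

impl<F: FnMut(u64)> Drop for ByteBatcher<F> {
    fn drop(&mut self) {
        // Mirrors CountingBody::drop: totals stay accurate even if the body is
        // dropped mid-stream (client disconnect, error path).
        self.flush();
    }
}

fn main() {
    let mut reported = Vec::new();
    {
        let mut batcher = ByteBatcher { pending: 0, report: |n| reported.push(n) };
        for _ in 0..5 {
            batcher.on_frame(16 * 1024); // five typical 16 KB upload frames
        }
        // The fourth frame crossed 64 KB and flushed; the fifth frame's bytes
        // are still pending when the batcher is dropped here.
    }
    assert_eq!(reported, vec![65_536u64, 16_384]);
}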

View File

@@ -502,7 +502,7 @@ impl HttpProxyService {
// Check for static file serving
if let Some(ref advanced) = route_match.route.action.advanced {
if let Some(ref static_files) = advanced.static_files {
return Ok(Self::serve_static_file(&path, static_files));
return Ok(Self::serve_static_file(&path, static_files).await);
}
}
@@ -615,11 +615,10 @@ impl HttpProxyService {
};
// X-Forwarded-For: append client IP to existing chain
let client_ip = peer_addr.ip().to_string();
let xff_value = if let Some(existing) = upstream_headers.get("x-forwarded-for") {
format!("{}, {}", existing.to_str().unwrap_or(""), client_ip)
format!("{}, {}", existing.to_str().unwrap_or(""), ip_str)
} else {
client_ip
ip_str.clone()
};
if let Ok(val) = hyper::header::HeaderValue::from_str(&xff_value) {
upstream_headers.insert(
@@ -677,11 +676,9 @@ impl HttpProxyService {
h2: use_h2,
};
// H2 pool checkout with async readiness validation.
// checkout_h2 does synchronous is_closed()/is_ready() checks, but these
// reflect cached state — the H2 connection driver (a separate tokio task)
// may not have processed a pending GOAWAY/RST yet. The ready().await
// forces the runtime to yield, giving the driver a chance to detect failures.
// H2 pool checkout — reuse pooled connections for all requests.
// The h2 crate properly replenishes connection-level flow control
// windows via release_capacity() as data is consumed.
if use_h2 {
if let Some((mut sender, age)) = self.connection_pool.checkout_h2(&pool_key) {
match tokio::time::timeout(
@@ -693,7 +690,7 @@ impl HttpProxyService {
self.metrics.set_backend_protocol(&upstream_key, "h2");
let result = self.forward_h2_pooled(
sender, parts, body, upstream_headers, &upstream_path,
route_match.route, route_id, &ip_str, &pool_key, domain_str, &conn_activity,
route_match.route, route_id, &ip_str, &pool_key, domain_str, &conn_activity, &upstream_key,
).await;
self.upstream_selector.connection_ended(&upstream_key);
return result;
@@ -846,19 +843,19 @@ impl HttpProxyService {
self.forward_h2_with_fallback(
io, parts, body, upstream_headers, &upstream_path,
&upstream, route_match.route, route_id, &ip_str, &final_pool_key,
host.clone(), domain_str, &conn_activity,
host.clone(), domain_str, &conn_activity, &upstream_key,
).await
} else {
// Explicit H2 mode: hard-fail on handshake error (preserved behavior)
self.forward_h2(
io, parts, body, upstream_headers, &upstream_path,
&upstream, route_match.route, route_id, &ip_str, &final_pool_key, domain_str, &conn_activity,
&upstream, route_match.route, route_id, &ip_str, &final_pool_key, domain_str, &conn_activity, &upstream_key,
).await
}
} else {
self.forward_h1(
io, parts, body, upstream_headers, &upstream_path,
&upstream, route_match.route, route_id, &ip_str, &final_pool_key, domain_str, &conn_activity,
&upstream, route_match.route, route_id, &ip_str, &final_pool_key, domain_str, &conn_activity, &upstream_key,
).await
};
self.upstream_selector.connection_ended(&upstream_key);
@@ -882,15 +879,14 @@ impl HttpProxyService {
pool_key: &crate::connection_pool::PoolKey,
domain: &str,
conn_activity: &ConnActivity,
backend_key: &str,
) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
let backend_key = format!("{}:{}", pool_key.host, pool_key.port);
// Try pooled H1 connection first — avoids TCP+TLS handshake
if let Some(pooled_sender) = self.connection_pool.checkout_h1(pool_key) {
self.metrics.backend_pool_hit(&backend_key);
self.metrics.backend_pool_hit(backend_key);
return self.forward_h1_with_sender(
pooled_sender, parts, body, upstream_headers, upstream_path,
route, route_id, source_ip, pool_key, domain, conn_activity,
route, route_id, source_ip, domain, conn_activity, backend_key,
).await;
}
@@ -902,7 +898,7 @@ impl HttpProxyService {
Ok(h) => h,
Err(e) => {
error!(backend = %backend_key, domain = %domain, error = %e, "Backend H1 handshake failed");
self.metrics.backend_handshake_error(&backend_key);
self.metrics.backend_handshake_error(backend_key);
return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend handshake failed"));
}
};
@@ -913,7 +909,7 @@ impl HttpProxyService {
}
});
self.forward_h1_with_sender(sender, parts, body, upstream_headers, upstream_path, route, route_id, source_ip, pool_key, domain, conn_activity).await
self.forward_h1_with_sender(sender, parts, body, upstream_headers, upstream_path, route, route_id, source_ip, domain, conn_activity, backend_key).await
}
/// Common H1 forwarding logic used by both fresh and pooled paths.
@@ -927,9 +923,9 @@ impl HttpProxyService {
route: &rustproxy_config::RouteConfig,
route_id: Option<&str>,
source_ip: &str,
pool_key: &crate::connection_pool::PoolKey,
domain: &str,
conn_activity: &ConnActivity,
backend_key: &str,
) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
// Always use HTTP/1.1 for h1 backend connections (h2 incoming requests have version HTTP/2.0)
let mut upstream_req = Request::builder()
@@ -941,12 +937,16 @@ impl HttpProxyService {
*headers = upstream_headers;
}
// Compute Arc<str> once for both request and response CountingBody
let rid: Option<Arc<str>> = route_id.map(Arc::from);
let sip: Arc<str> = Arc::from(source_ip);
// Wrap the request body in CountingBody then box it for the uniform pool type
let counting_req_body = CountingBody::new(
body,
Arc::clone(&self.metrics),
route_id.map(|s| s.to_string()),
Some(source_ip.to_string()),
rid.clone(),
Some(Arc::clone(&sip)),
Direction::In,
).with_connection_activity(Arc::clone(&conn_activity.last_activity), conn_activity.start);
let boxed_body: BoxBody<Bytes, hyper::Error> = BoxBody::new(counting_req_body);
@@ -956,9 +956,8 @@ impl HttpProxyService {
let upstream_response = match sender.send_request(upstream_req).await {
Ok(resp) => resp,
Err(e) => {
let bk = format!("{}:{}", pool_key.host, pool_key.port);
error!(backend = %bk, domain = %domain, error = %e, "Backend H1 request failed");
self.metrics.backend_request_error(&bk);
error!(backend = %backend_key, domain = %domain, error = %e, "Backend H1 request failed");
self.metrics.backend_request_error(backend_key);
return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend request failed"));
}
};
@@ -973,7 +972,7 @@ impl HttpProxyService {
// of large streaming responses (e.g. 352MB Docker layers) takes priority.
drop(sender);
self.build_streaming_response(upstream_response, route, route_id, source_ip, conn_activity).await
self.build_streaming_response(upstream_response, route, rid, sip, conn_activity).await
}
/// Forward request to backend via HTTP/2 with body streaming (fresh connection).
@@ -992,8 +991,8 @@ impl HttpProxyService {
pool_key: &crate::connection_pool::PoolKey,
domain: &str,
conn_activity: &ConnActivity,
backend_key: &str,
) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
let backend_key = format!("{}:{}", pool_key.host, pool_key.port);
let exec = hyper_util::rt::TokioExecutor::new();
let mut h2_builder = hyper::client::conn::http2::Builder::new(exec);
h2_builder
@@ -1009,34 +1008,42 @@ impl HttpProxyService {
Ok(Ok(h)) => h,
Ok(Err(e)) => {
error!(backend = %backend_key, domain = %domain, error = %e, error_debug = ?e, "Backend H2 handshake failed");
self.metrics.backend_handshake_error(&backend_key);
self.metrics.backend_handshake_error(backend_key);
return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend H2 handshake failed"));
}
Err(_) => {
error!(backend = %backend_key, domain = %domain, "Backend H2 handshake timeout");
self.metrics.backend_handshake_error(&backend_key);
self.metrics.backend_handshake_error(backend_key);
return Ok(error_response(StatusCode::GATEWAY_TIMEOUT, "Backend H2 handshake timeout"));
}
};
// Spawn the H2 connection driver; proactively evict from pool on exit
// so the next request gets a fresh connection instead of a dead sender.
// Shared generation ID: driver reads it after registration sets it.
// Uses u64::MAX as sentinel for "not yet registered" (driver waits/skips eviction).
let gen_holder = Arc::new(std::sync::atomic::AtomicU64::new(u64::MAX));
// Spawn the H2 connection driver; evict from pool on exit using generation-tagged
// removal to prevent phantom eviction when multiple connections share the same key.
{
let pool = Arc::clone(&self.connection_pool);
let key = pool_key.clone();
let gen = Arc::clone(&gen_holder);
tokio::spawn(async move {
if let Err(e) = conn.await {
warn!("HTTP/2 upstream connection error: {} ({:?})", e, e);
}
pool.remove_h2(&key);
let g = gen.load(std::sync::atomic::Ordering::Relaxed);
if g != u64::MAX {
pool.remove_h2_if_generation(&key, g);
}
});
}
// Clone sender for potential pool registration; register only after first request succeeds
let sender_for_pool = sender.clone();
let result = self.forward_h2_with_sender(sender, parts, body, upstream_headers, upstream_path, route, route_id, source_ip, Some(pool_key), domain, conn_activity).await;
let result = self.forward_h2_with_sender(sender, parts, body, upstream_headers, upstream_path, route, route_id, source_ip, Some(pool_key), domain, conn_activity, backend_key).await;
if matches!(&result, Ok(ref resp) if resp.status() != StatusCode::BAD_GATEWAY) {
self.connection_pool.register_h2(pool_key.clone(), sender_for_pool);
let g = self.connection_pool.register_h2(pool_key.clone(), sender_for_pool);
gen_holder.store(g, std::sync::atomic::Ordering::Relaxed);
}
result
}
@@ -1057,6 +1064,7 @@ impl HttpProxyService {
pool_key: &crate::connection_pool::PoolKey,
domain: &str,
conn_activity: &ConnActivity,
backend_key: &str,
) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
// Save retry state for bodyless requests (cheap: Method is an enum, HeaderMap clones Arc-backed Bytes)
let retry_state = if body.is_end_stream() {
@@ -1067,18 +1075,18 @@ impl HttpProxyService {
let result = self.forward_h2_with_sender(
sender, parts, body, upstream_headers, upstream_path,
route, route_id, source_ip, Some(pool_key), domain, conn_activity,
route, route_id, source_ip, Some(pool_key), domain, conn_activity, backend_key,
).await;
// If the request failed (502) and we can retry with an empty body, do so
let is_502 = matches!(&result, Ok(resp) if resp.status() == StatusCode::BAD_GATEWAY);
if is_502 {
if let Some((method, headers)) = retry_state {
warn!(backend = %format!("{}:{}", pool_key.host, pool_key.port), domain = %domain,
warn!(backend = %backend_key, domain = %domain,
"Stale pooled H2 sender, retrying with fresh connection");
return self.retry_h2_with_fresh_connection(
method, headers, upstream_path,
pool_key, route, route_id, source_ip, domain, conn_activity,
pool_key, route, route_id, source_ip, domain, conn_activity, backend_key,
).await;
}
}
@@ -1098,8 +1106,8 @@ impl HttpProxyService {
source_ip: &str,
domain: &str,
conn_activity: &ConnActivity,
backend_key: &str,
) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
let backend_key = format!("{}:{}", pool_key.host, pool_key.port);
// Establish fresh backend connection
let retry_connect_start = std::time::Instant::now();
@@ -1111,12 +1119,12 @@ impl HttpProxyService {
Ok(Ok(tls)) => BackendStream::Tls(tls),
Ok(Err(e)) => {
error!(backend = %backend_key, domain = %domain, error = %e, "H2 retry: TLS connect failed");
self.metrics.backend_connect_error(&backend_key);
self.metrics.backend_connect_error(backend_key);
return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend unavailable on H2 retry"));
}
Err(_) => {
error!(backend = %backend_key, domain = %domain, "H2 retry: TLS connect timeout");
self.metrics.backend_connect_error(&backend_key);
self.metrics.backend_connect_error(backend_key);
return Ok(error_response(StatusCode::GATEWAY_TIMEOUT, "Backend timeout on H2 retry"));
}
}
@@ -1131,17 +1139,17 @@ impl HttpProxyService {
}
Ok(Err(e)) => {
error!(backend = %backend_key, domain = %domain, error = %e, "H2 retry: TCP connect failed");
self.metrics.backend_connect_error(&backend_key);
self.metrics.backend_connect_error(backend_key);
return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend unavailable on H2 retry"));
}
Err(_) => {
error!(backend = %backend_key, domain = %domain, "H2 retry: TCP connect timeout");
self.metrics.backend_connect_error(&backend_key);
self.metrics.backend_connect_error(backend_key);
return Ok(error_response(StatusCode::GATEWAY_TIMEOUT, "Backend timeout on H2 retry"));
}
}
};
self.metrics.backend_connection_opened(&backend_key, retry_connect_start.elapsed());
self.metrics.backend_connection_opened(backend_key, retry_connect_start.elapsed());
let io = TokioIo::new(backend);
let exec = hyper_util::rt::TokioExecutor::new();
@@ -1159,27 +1167,32 @@ impl HttpProxyService {
Ok(Ok(h)) => h,
Ok(Err(e)) => {
error!(backend = %backend_key, domain = %domain, error = %e, error_debug = ?e, "H2 retry: handshake failed");
self.metrics.backend_handshake_error(&backend_key);
self.metrics.backend_connection_closed(&backend_key);
self.metrics.backend_handshake_error(backend_key);
self.metrics.backend_connection_closed(backend_key);
return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend H2 retry handshake failed"));
}
Err(_) => {
error!(backend = %backend_key, domain = %domain, "H2 retry: handshake timeout");
self.metrics.backend_handshake_error(&backend_key);
self.metrics.backend_connection_closed(&backend_key);
self.metrics.backend_handshake_error(backend_key);
self.metrics.backend_connection_closed(backend_key);
return Ok(error_response(StatusCode::GATEWAY_TIMEOUT, "Backend H2 retry handshake timeout"));
}
};
// Spawn the H2 connection driver; proactively evict from pool on exit.
// Spawn the H2 connection driver with generation-tagged eviction.
let gen_holder = Arc::new(std::sync::atomic::AtomicU64::new(u64::MAX));
{
let pool = Arc::clone(&self.connection_pool);
let key = pool_key.clone();
let gen = Arc::clone(&gen_holder);
tokio::spawn(async move {
if let Err(e) = conn.await {
warn!("H2 retry: upstream connection error: {} ({:?})", e, e);
}
pool.remove_h2(&key);
let g = gen.load(std::sync::atomic::Ordering::Relaxed);
if g != u64::MAX {
pool.remove_h2_if_generation(&key, g);
}
});
}
@@ -1207,17 +1220,18 @@ impl HttpProxyService {
match sender.send_request(upstream_req).await {
Ok(resp) => {
// Register in pool only after request succeeds
self.connection_pool.register_h2(pool_key.clone(), sender);
let result = self.build_streaming_response(resp, route, route_id, source_ip, conn_activity).await;
let g = self.connection_pool.register_h2(pool_key.clone(), sender);
gen_holder.store(g, std::sync::atomic::Ordering::Relaxed);
let result = self.build_streaming_response(resp, route, route_id.map(Arc::from), Arc::from(source_ip), conn_activity).await;
// Close the fresh backend connection (opened above)
self.metrics.backend_connection_closed(&backend_key);
self.metrics.backend_connection_closed(backend_key);
result
}
Err(e) => {
error!(backend = %backend_key, domain = %domain, error = %e, "H2 retry: request failed");
self.metrics.backend_request_error(&backend_key);
self.metrics.backend_request_error(backend_key);
// Close the fresh backend connection (opened above)
self.metrics.backend_connection_closed(&backend_key);
self.metrics.backend_connection_closed(backend_key);
Ok(error_response(StatusCode::BAD_GATEWAY, "Backend H2 request failed on retry"))
}
}
@@ -1245,6 +1259,7 @@ impl HttpProxyService {
requested_host: Option<String>,
domain: &str,
conn_activity: &ConnActivity,
backend_key: &str,
) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
let exec = hyper_util::rt::TokioExecutor::new();
let mut h2_builder = hyper::client::conn::http2::Builder::new(exec);
@@ -1262,14 +1277,13 @@ impl HttpProxyService {
match handshake_result {
Err(_) => {
// H2 handshake timed out — fall back to H1
let bk = format!("{}:{}", upstream.host, upstream.port);
warn!(
backend = %bk,
backend = %backend_key,
domain = %domain,
"H2 handshake timeout, falling back to H1"
);
self.metrics.backend_h2_failure(&bk);
self.metrics.backend_handshake_error(&bk);
self.metrics.backend_h2_failure(backend_key);
self.metrics.backend_handshake_error(backend_key);
let cache_key = crate::protocol_cache::ProtocolCacheKey {
host: upstream.host.clone(),
@@ -1278,7 +1292,7 @@ impl HttpProxyService {
};
self.protocol_cache.insert(cache_key, crate::protocol_cache::DetectedProtocol::H1);
match self.reconnect_backend(upstream, domain).await {
match self.reconnect_backend(upstream, domain, backend_key).await {
Some(fallback_backend) => {
let h1_pool_key = crate::connection_pool::PoolKey {
host: upstream.host.clone(),
@@ -1289,9 +1303,9 @@ impl HttpProxyService {
let fallback_io = TokioIo::new(fallback_backend);
let result = self.forward_h1(
fallback_io, parts, body, upstream_headers, upstream_path,
upstream, route, route_id, source_ip, &h1_pool_key, domain, conn_activity,
upstream, route, route_id, source_ip, &h1_pool_key, domain, conn_activity, backend_key,
).await;
self.metrics.backend_connection_closed(&bk);
self.metrics.backend_connection_closed(backend_key);
result
}
None => {
@@ -1300,15 +1314,20 @@ impl HttpProxyService {
}
}
Ok(Ok((mut sender, conn))) => {
// Spawn the H2 connection driver; proactively evict from pool on exit.
// Spawn the H2 connection driver with generation-tagged eviction.
let gen_holder = Arc::new(std::sync::atomic::AtomicU64::new(u64::MAX));
{
let pool = Arc::clone(&self.connection_pool);
let key = pool_key.clone();
let gen = Arc::clone(&gen_holder);
tokio::spawn(async move {
if let Err(e) = conn.await {
warn!("HTTP/2 upstream connection error: {} ({:?})", e, e);
}
pool.remove_h2(&key);
let g = gen.load(std::sync::atomic::Ordering::Relaxed);
if g != u64::MAX {
pool.remove_h2_if_generation(&key, g);
}
});
}
@@ -1337,11 +1356,13 @@ impl HttpProxyService {
*headers = upstream_headers;
}
let rid: Option<Arc<str>> = route_id.map(Arc::from);
let sip: Arc<str> = Arc::from(source_ip);
let counting_req_body = CountingBody::new(
body,
Arc::clone(&self.metrics),
route_id.map(|s| s.to_string()),
Some(source_ip.to_string()),
rid.clone(),
Some(Arc::clone(&sip)),
Direction::In,
).with_connection_activity(Arc::clone(&conn_activity.last_activity), conn_activity.start);
let boxed_body: BoxBody<Bytes, hyper::Error> = BoxBody::new(counting_req_body);
@@ -1349,46 +1370,35 @@ impl HttpProxyService {
match sender.send_request(upstream_req).await {
Ok(upstream_response) => {
// H2 works! Register sender in pool for multiplexed reuse
self.connection_pool.register_h2(pool_key.clone(), sender);
self.build_streaming_response(upstream_response, route, route_id, source_ip, conn_activity).await
let g = self.connection_pool.register_h2(pool_key.clone(), sender);
gen_holder.store(g, std::sync::atomic::Ordering::Relaxed);
self.build_streaming_response(upstream_response, route, rid, sip, conn_activity).await
}
Err(e) => {
// H2 request failed — backend advertises h2 via ALPN but doesn't
// actually speak it. Update cache so future requests use H1.
let bk = format!("{}:{}", upstream.host, upstream.port);
warn!(
backend = %bk,
// H2 request failed on a stream level (e.g. RST_STREAM PROTOCOL_ERROR).
// The H2 handshake succeeded, so the backend genuinely speaks H2 — don't
// poison the protocol cache. Only handshake-level failures (below) should
// downgrade the cache to H1.
debug!(
backend = %backend_key,
domain = %domain,
error = %e,
error_debug = ?e,
"Auto-detect: H2 request failed, falling back to H1"
"H2 stream error, retrying this request as H1"
);
self.metrics.backend_h2_failure(&bk);
let cache_key = crate::protocol_cache::ProtocolCacheKey {
host: upstream.host.clone(),
port: upstream.port,
requested_host: requested_host.clone(),
};
self.protocol_cache.insert(cache_key, crate::protocol_cache::DetectedProtocol::H1);
self.metrics.backend_h2_failure(backend_key);
// Retry as H1 for bodyless requests; return 502 for requests with bodies
if let Some((method, headers)) = retry_state {
match self.reconnect_backend(upstream, domain).await {
match self.reconnect_backend(upstream, domain, backend_key).await {
Some(fallback_backend) => {
let h1_pool_key = crate::connection_pool::PoolKey {
host: upstream.host.clone(),
port: upstream.port,
use_tls: upstream.use_tls,
h2: false,
};
let fallback_io = TokioIo::new(fallback_backend);
let result = self.forward_h1_empty_body(
fallback_io, method, headers, upstream_path,
route, route_id, source_ip, &h1_pool_key, domain, conn_activity,
route, route_id, source_ip, domain, conn_activity, backend_key,
).await;
// Close the reconnected backend connection (opened in reconnect_backend)
self.metrics.backend_connection_closed(&bk);
self.metrics.backend_connection_closed(backend_key);
result
}
None => {
@@ -1404,15 +1414,14 @@ impl HttpProxyService {
Ok(Err(e)) => {
// H2 handshake truly failed — fall back to H1
// Body is NOT consumed yet, so we can retry the full request.
let bk = format!("{}:{}", upstream.host, upstream.port);
warn!(
backend = %bk,
backend = %backend_key,
domain = %domain,
error = %e,
"H2 handshake failed, falling back to H1"
);
self.metrics.backend_h2_failure(&bk);
self.metrics.backend_handshake_error(&bk);
self.metrics.backend_h2_failure(backend_key);
self.metrics.backend_handshake_error(backend_key);
// Update cache to H1 so subsequent requests skip H2
let cache_key = crate::protocol_cache::ProtocolCacheKey {
@@ -1423,7 +1432,7 @@ impl HttpProxyService {
self.protocol_cache.insert(cache_key, crate::protocol_cache::DetectedProtocol::H1);
// Reconnect for H1 (the original io was consumed by the failed h2 handshake)
match self.reconnect_backend(upstream, domain).await {
match self.reconnect_backend(upstream, domain, backend_key).await {
Some(fallback_backend) => {
let h1_pool_key = crate::connection_pool::PoolKey {
host: upstream.host.clone(),
@@ -1434,10 +1443,10 @@ impl HttpProxyService {
let fallback_io = TokioIo::new(fallback_backend);
let result = self.forward_h1(
fallback_io, parts, body, upstream_headers, upstream_path,
upstream, route, route_id, source_ip, &h1_pool_key, domain, conn_activity,
upstream, route, route_id, source_ip, &h1_pool_key, domain, conn_activity, backend_key,
).await;
// Close the reconnected backend connection (opened in reconnect_backend)
self.metrics.backend_connection_closed(&bk);
self.metrics.backend_connection_closed(backend_key);
result
}
None => {
@@ -1459,11 +1468,10 @@ impl HttpProxyService {
route: &rustproxy_config::RouteConfig,
route_id: Option<&str>,
source_ip: &str,
pool_key: &crate::connection_pool::PoolKey,
domain: &str,
conn_activity: &ConnActivity,
backend_key: &str,
) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
let backend_key = format!("{}:{}", pool_key.host, pool_key.port);
let (mut sender, conn): (
hyper::client::conn::http1::SendRequest<BoxBody<Bytes, hyper::Error>>,
hyper::client::conn::http1::Connection<TokioIo<BackendStream>, BoxBody<Bytes, hyper::Error>>,
@@ -1471,7 +1479,7 @@ impl HttpProxyService {
Ok(h) => h,
Err(e) => {
error!(backend = %backend_key, domain = %domain, error = %e, "H1 fallback: handshake failed");
self.metrics.backend_handshake_error(&backend_key);
self.metrics.backend_handshake_error(backend_key);
return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend H1 fallback handshake failed"));
}
};
@@ -1500,7 +1508,7 @@ impl HttpProxyService {
Ok(resp) => resp,
Err(e) => {
error!(backend = %backend_key, domain = %domain, error = %e, "H1 fallback: request failed");
self.metrics.backend_request_error(&backend_key);
self.metrics.backend_request_error(backend_key);
return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend H1 fallback request failed"));
}
};
@@ -1508,7 +1516,7 @@ impl HttpProxyService {
// Don't pool the sender while response body is still streaming (same safety as forward_h1_with_sender)
drop(sender);
self.build_streaming_response(upstream_response, route, route_id, source_ip, conn_activity).await
self.build_streaming_response(upstream_response, route, route_id.map(Arc::from), Arc::from(source_ip), conn_activity).await
}
/// Reconnect to a backend (used for H2→H1 fallback).
@@ -1516,8 +1524,8 @@ impl HttpProxyService {
&self,
upstream: &crate::upstream_selector::UpstreamSelection,
domain: &str,
backend_key: &str,
) -> Option<BackendStream> {
let backend_key = format!("{}:{}", upstream.host, upstream.port);
let reconnect_start = std::time::Instant::now();
if upstream.use_tls {
match tokio::time::timeout(
@@ -1525,17 +1533,17 @@ impl HttpProxyService {
connect_tls_backend(&self.backend_tls_config, &upstream.host, upstream.port),
).await {
Ok(Ok(tls)) => {
self.metrics.backend_connection_opened(&backend_key, reconnect_start.elapsed());
self.metrics.backend_connection_opened(backend_key, reconnect_start.elapsed());
Some(BackendStream::Tls(tls))
}
Ok(Err(e)) => {
error!(backend = %backend_key, domain = %domain, error = %e, "H1 fallback: TLS reconnect failed");
self.metrics.backend_connect_error(&backend_key);
self.metrics.backend_connect_error(backend_key);
None
}
Err(_) => {
error!(backend = %backend_key, domain = %domain, "H1 fallback: TLS reconnect timeout");
self.metrics.backend_connect_error(&backend_key);
self.metrics.backend_connect_error(backend_key);
None
}
}
@@ -1549,17 +1557,17 @@ impl HttpProxyService {
let _ = socket2::SockRef::from(&s).set_tcp_keepalive(
&socket2::TcpKeepalive::new().with_time(std::time::Duration::from_secs(60))
);
self.metrics.backend_connection_opened(&backend_key, reconnect_start.elapsed());
self.metrics.backend_connection_opened(backend_key, reconnect_start.elapsed());
Some(BackendStream::Plain(s))
}
Ok(Err(e)) => {
error!(backend = %backend_key, domain = %domain, error = %e, "H1 fallback: TCP reconnect failed");
self.metrics.backend_connect_error(&backend_key);
self.metrics.backend_connect_error(backend_key);
None
}
Err(_) => {
error!(backend = %backend_key, domain = %domain, "H1 fallback: TCP reconnect timeout");
self.metrics.backend_connect_error(&backend_key);
self.metrics.backend_connect_error(backend_key);
None
}
}
@@ -1580,6 +1588,7 @@ impl HttpProxyService {
pool_key: Option<&crate::connection_pool::PoolKey>,
domain: &str,
conn_activity: &ConnActivity,
backend_key: &str,
) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
// Build absolute URI for H2 pseudo-headers (:scheme, :authority)
// Use the requested domain as authority (not backend address) so :authority matches Host header
@@ -1601,12 +1610,16 @@ impl HttpProxyService {
*headers = upstream_headers;
}
// Compute Arc<str> once for both request and response CountingBody
let rid: Option<Arc<str>> = route_id.map(Arc::from);
let sip: Arc<str> = Arc::from(source_ip);
// Wrap the request body in CountingBody then box it for the uniform pool type
let counting_req_body = CountingBody::new(
body,
Arc::clone(&self.metrics),
route_id.map(|s| s.to_string()),
Some(source_ip.to_string()),
rid.clone(),
Some(Arc::clone(&sip)),
Direction::In,
).with_connection_activity(Arc::clone(&conn_activity.last_activity), conn_activity.start);
let boxed_body: BoxBody<Bytes, hyper::Error> = BoxBody::new(counting_req_body);
@@ -1618,9 +1631,8 @@ impl HttpProxyService {
Err(e) => {
// Evict the dead sender so subsequent requests get fresh connections
if let Some(key) = pool_key {
let bk = format!("{}:{}", key.host, key.port);
error!(backend = %bk, domain = %domain, error = %e, error_debug = ?e, "Backend H2 request failed");
self.metrics.backend_request_error(&bk);
error!(backend = %backend_key, domain = %domain, error = %e, error_debug = ?e, "Backend H2 request failed");
self.metrics.backend_request_error(backend_key);
self.connection_pool.remove_h2(key);
} else {
error!(domain = %domain, error = %e, error_debug = ?e, "Backend H2 request failed");
@@ -1629,7 +1641,7 @@ impl HttpProxyService {
}
};
self.build_streaming_response(upstream_response, route, route_id, source_ip, conn_activity).await
self.build_streaming_response(upstream_response, route, rid, sip, conn_activity).await
}
/// Build the client-facing response from an upstream response, streaming the body.
@@ -1640,8 +1652,8 @@ impl HttpProxyService {
&self,
upstream_response: Response<Incoming>,
route: &rustproxy_config::RouteConfig,
route_id: Option<&str>,
source_ip: &str,
route_id: Option<Arc<str>>,
source_ip: Arc<str>,
conn_activity: &ConnActivity,
) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
let (resp_parts, resp_body) = upstream_response.into_parts();
@@ -1673,8 +1685,8 @@ impl HttpProxyService {
let counting_body = CountingBody::new(
resp_body,
Arc::clone(&self.metrics),
route_id.map(|s| s.to_string()),
Some(source_ip.to_string()),
route_id,
Some(source_ip),
Direction::Out,
).with_connection_activity(Arc::clone(&conn_activity.last_activity), conn_activity.start);
@@ -1893,21 +1905,26 @@ impl HttpProxyService {
}
let mut response_buf = Vec::with_capacity(4096);
let mut temp = [0u8; 1];
let mut read_buf = [0u8; 4096];
let extra_bytes: Vec<u8>;
loop {
match upstream_stream.read(&mut temp).await {
match upstream_stream.read(&mut read_buf).await {
Ok(0) => {
error!("WebSocket: upstream closed before completing handshake");
self.upstream_selector.connection_ended(upstream_key);
return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend closed"));
}
Ok(_) => {
response_buf.push(temp[0]);
if response_buf.len() >= 4 {
let len = response_buf.len();
if response_buf[len-4..] == *b"\r\n\r\n" {
break;
}
Ok(n) => {
let prev_len = response_buf.len();
response_buf.extend_from_slice(&read_buf[..n]);
// Scan for \r\n\r\n, backing up 3 bytes to handle split across reads
let search_start = prev_len.saturating_sub(3);
if let Some(pos) = response_buf[search_start..].windows(4)
.position(|w| w == b"\r\n\r\n")
{
let header_end = search_start + pos + 4;
extra_bytes = response_buf.split_off(header_end);
break;
}
if response_buf.len() > 8192 {
error!("WebSocket: upstream response headers too large");
@@ -1982,8 +1999,8 @@ impl HttpProxyService {
);
let metrics = Arc::clone(&self.metrics);
let route_id_owned = route_id.map(|s| s.to_string());
let source_ip_owned = source_ip.to_string();
let route_id_owned: Option<Arc<str>> = route_id.map(Arc::from);
let source_ip_owned: Arc<str> = Arc::from(source_ip);
let upstream_selector = self.upstream_selector.clone();
let upstream_key_owned = upstream_key.to_string();
let ws_inactivity_timeout = self.ws_inactivity_timeout;
@@ -2037,7 +2054,7 @@ impl HttpProxyService {
break;
}
total += n as u64;
metrics_c2u.record_bytes(n as u64, 0, route_c2u.as_deref(), Some(&ip_c2u));
metrics_c2u.record_bytes(n as u64, 0, route_c2u.as_deref(), Some(&*ip_c2u));
la1.store(start.elapsed().as_millis() as u64, Ordering::Relaxed);
if let Some((ref ca, ca_start)) = conn_act_c2u {
ca.store(ca_start.elapsed().as_millis() as u64, Ordering::Relaxed);
@@ -2059,6 +2076,23 @@ impl HttpProxyService {
let u2c = tokio::spawn(async move {
let mut buf = vec![0u8; 65536];
let mut total = 0u64;
// Forward any bytes buffered past the HTTP header terminator during handshake
if !extra_bytes.is_empty() {
let n = extra_bytes.len();
if cw.write_all(&extra_bytes).await.is_err() {
let _ = tokio::time::timeout(
std::time::Duration::from_secs(2),
cw.shutdown(),
).await;
return 0u64;
}
total += n as u64;
metrics_u2c.record_bytes(0, n as u64, route_u2c.as_deref(), Some(&*ip_u2c));
la2.store(start.elapsed().as_millis() as u64, Ordering::Relaxed);
if let Some((ref ca, ca_start)) = conn_act_u2c {
ca.store(ca_start.elapsed().as_millis() as u64, Ordering::Relaxed);
}
}
loop {
let n = tokio::select! {
result = ur.read(&mut buf) => match result {
@@ -2071,7 +2105,7 @@ impl HttpProxyService {
break;
}
total += n as u64;
metrics_u2c.record_bytes(0, n as u64, route_u2c.as_deref(), Some(&ip_u2c));
metrics_u2c.record_bytes(0, n as u64, route_u2c.as_deref(), Some(&*ip_u2c));
la2.store(start.elapsed().as_millis() as u64, Ordering::Relaxed);
if let Some((ref ca, ca_start)) = conn_act_u2c {
ca.store(ca_start.elapsed().as_millis() as u64, Ordering::Relaxed);
@@ -2211,13 +2245,13 @@ impl HttpProxyService {
}
/// Serve a static file from the configured directory.
fn serve_static_file(
async fn serve_static_file(
path: &str,
config: &rustproxy_config::RouteStaticFiles,
) -> Response<BoxBody<Bytes, hyper::Error>> {
use std::path::Path;
use std::path::PathBuf;
let root = Path::new(&config.root);
let root = PathBuf::from(&config.root);
// Sanitize path to prevent directory traversal
let clean_path = path.trim_start_matches('/');
@@ -2226,7 +2260,12 @@ impl HttpProxyService {
let mut file_path = root.join(&clean_path);
// If path points to a directory, try index files
if file_path.is_dir() || clean_path.is_empty() {
let is_dir = if clean_path.is_empty() {
true
} else {
tokio::fs::metadata(&file_path).await.map(|m| m.is_dir()).unwrap_or(false)
};
if is_dir {
let index_files = config.index_files.as_deref()
.or(config.index.as_deref())
.unwrap_or(&[]);
@@ -2240,7 +2279,7 @@ impl HttpProxyService {
} else {
file_path.join(index)
};
if candidate.is_file() {
if tokio::fs::metadata(&candidate).await.map(|m| m.is_file()).unwrap_or(false) {
file_path = candidate;
found = true;
break;
@@ -2252,11 +2291,11 @@ impl HttpProxyService {
}
// Ensure the resolved path is within the root (prevent traversal)
let canonical_root = match root.canonicalize() {
let canonical_root = match tokio::fs::canonicalize(&root).await {
Ok(p) => p,
Err(_) => return error_response(StatusCode::NOT_FOUND, "Not found"),
};
let canonical_file = match file_path.canonicalize() {
let canonical_file = match tokio::fs::canonicalize(&file_path).await {
Ok(p) => p,
Err(_) => return error_response(StatusCode::NOT_FOUND, "Not found"),
};
@@ -2270,7 +2309,7 @@ impl HttpProxyService {
}
// Read the file
match std::fs::read(&file_path) {
match tokio::fs::read(&file_path).await {
Ok(content) => {
let content_type = guess_content_type(&file_path);
let mut response = Response::builder()
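
Because the new handshake loop reads in 4 KB chunks, the final read can pull in WebSocket frame bytes beyond the header terminator, and the search must also cope with a terminator split across reads. The standalone sketch below isolates that scan; the feed function and its sample inputs are invented for illustration and are not part of the proxy code.

// Backing the search window up by 3 bytes catches a "\r\n\r\n" split across two
// reads; bytes past the terminator are split off instead of being discarded.
fn feed(response_buf: &mut Vec<u8>, chunk: &[u8]) -> Option<Vec<u8>> {
    let prev_len = response_buf.len();
    response_buf.extend_from_slice(chunk);
    let search_start = prev_len.saturating_sub(3);
    if let Some(pos) = response_buf[search_start..]
        .windows(4)
        .position(|w| w == b"\r\n\r\n")
    {
        let header_end = search_start + pos + 4;
        // Everything after the header terminator is early WebSocket frame data
        // that must be forwarded to the client, not dropped.
        return Some(response_buf.split_off(header_end));
    }
    None
}

fn main() {
    let mut buf = Vec::new();
    // The terminator arrives split across two reads, with frame bytes trailing it.
    assert_eq!(feed(&mut buf, b"HTTP/1.1 101 Switching Protocols\r\n\r"), None);
    let extra = feed(&mut buf, b"\n\x81\x05hello").expect("terminator found");
    assert!(buf.ends_with(b"\r\n\r\n"));
    assert_eq!(extra, b"\x81\x05hello".to_vec());
}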

View File

@@ -259,40 +259,49 @@ impl MetricsCollector {
/// Called per-chunk in the TCP copy loop. Only touches AtomicU64 counters —
/// no Mutex is taken. The throughput trackers are fed during `sample_all()`.
pub fn record_bytes(&self, bytes_in: u64, bytes_out: u64, route_id: Option<&str>, source_ip: Option<&str>) {
self.total_bytes_in.fetch_add(bytes_in, Ordering::Relaxed);
self.total_bytes_out.fetch_add(bytes_out, Ordering::Relaxed);
// Accumulate into lock-free pending throughput counters
self.global_pending_tp_in.fetch_add(bytes_in, Ordering::Relaxed);
self.global_pending_tp_out.fetch_add(bytes_out, Ordering::Relaxed);
// Short-circuit: only touch counters for the direction that has data.
// CountingBody always calls with one direction zero — skipping the zero
// direction avoids ~50% of DashMap shard-locked reads per call.
if bytes_in > 0 {
self.total_bytes_in.fetch_add(bytes_in, Ordering::Relaxed);
self.global_pending_tp_in.fetch_add(bytes_in, Ordering::Relaxed);
}
if bytes_out > 0 {
self.total_bytes_out.fetch_add(bytes_out, Ordering::Relaxed);
self.global_pending_tp_out.fetch_add(bytes_out, Ordering::Relaxed);
}
// Per-route tracking: use get() first (zero-alloc fast path for existing entries),
// fall back to entry() with to_string() only on the rare first-chunk miss.
if let Some(route_id) = route_id {
if let Some(counter) = self.route_bytes_in.get(route_id) {
counter.fetch_add(bytes_in, Ordering::Relaxed);
} else {
self.route_bytes_in.entry(route_id.to_string())
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(bytes_in, Ordering::Relaxed);
if bytes_in > 0 {
if let Some(counter) = self.route_bytes_in.get(route_id) {
counter.fetch_add(bytes_in, Ordering::Relaxed);
} else {
self.route_bytes_in.entry(route_id.to_string())
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(bytes_in, Ordering::Relaxed);
}
}
if let Some(counter) = self.route_bytes_out.get(route_id) {
counter.fetch_add(bytes_out, Ordering::Relaxed);
} else {
self.route_bytes_out.entry(route_id.to_string())
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(bytes_out, Ordering::Relaxed);
if bytes_out > 0 {
if let Some(counter) = self.route_bytes_out.get(route_id) {
counter.fetch_add(bytes_out, Ordering::Relaxed);
} else {
self.route_bytes_out.entry(route_id.to_string())
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(bytes_out, Ordering::Relaxed);
}
}
// Accumulate into per-route pending throughput counters (lock-free)
if let Some(entry) = self.route_pending_tp.get(route_id) {
entry.0.fetch_add(bytes_in, Ordering::Relaxed);
entry.1.fetch_add(bytes_out, Ordering::Relaxed);
if bytes_in > 0 { entry.0.fetch_add(bytes_in, Ordering::Relaxed); }
if bytes_out > 0 { entry.1.fetch_add(bytes_out, Ordering::Relaxed); }
} else {
let entry = self.route_pending_tp.entry(route_id.to_string())
.or_insert_with(|| (AtomicU64::new(0), AtomicU64::new(0)));
entry.0.fetch_add(bytes_in, Ordering::Relaxed);
entry.1.fetch_add(bytes_out, Ordering::Relaxed);
if bytes_in > 0 { entry.0.fetch_add(bytes_in, Ordering::Relaxed); }
if bytes_out > 0 { entry.1.fetch_add(bytes_out, Ordering::Relaxed); }
}
}
@@ -302,30 +311,34 @@ impl MetricsCollector {
// This prevents orphaned entries when record_bytes races with
// connection_closed (which evicts all per-IP data on last close).
if self.ip_connections.contains_key(ip) {
if let Some(counter) = self.ip_bytes_in.get(ip) {
counter.fetch_add(bytes_in, Ordering::Relaxed);
} else {
self.ip_bytes_in.entry(ip.to_string())
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(bytes_in, Ordering::Relaxed);
if bytes_in > 0 {
if let Some(counter) = self.ip_bytes_in.get(ip) {
counter.fetch_add(bytes_in, Ordering::Relaxed);
} else {
self.ip_bytes_in.entry(ip.to_string())
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(bytes_in, Ordering::Relaxed);
}
}
if let Some(counter) = self.ip_bytes_out.get(ip) {
counter.fetch_add(bytes_out, Ordering::Relaxed);
} else {
self.ip_bytes_out.entry(ip.to_string())
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(bytes_out, Ordering::Relaxed);
if bytes_out > 0 {
if let Some(counter) = self.ip_bytes_out.get(ip) {
counter.fetch_add(bytes_out, Ordering::Relaxed);
} else {
self.ip_bytes_out.entry(ip.to_string())
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(bytes_out, Ordering::Relaxed);
}
}
// Accumulate into per-IP pending throughput counters (lock-free)
if let Some(entry) = self.ip_pending_tp.get(ip) {
entry.0.fetch_add(bytes_in, Ordering::Relaxed);
entry.1.fetch_add(bytes_out, Ordering::Relaxed);
if bytes_in > 0 { entry.0.fetch_add(bytes_in, Ordering::Relaxed); }
if bytes_out > 0 { entry.1.fetch_add(bytes_out, Ordering::Relaxed); }
} else {
let entry = self.ip_pending_tp.entry(ip.to_string())
.or_insert_with(|| (AtomicU64::new(0), AtomicU64::new(0)));
entry.0.fetch_add(bytes_in, Ordering::Relaxed);
entry.1.fetch_add(bytes_out, Ordering::Relaxed);
if bytes_in > 0 { entry.0.fetch_add(bytes_in, Ordering::Relaxed); }
if bytes_out > 0 { entry.1.fetch_add(bytes_out, Ordering::Relaxed); }
}
}
}
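
The record_bytes rework above combines two small optimizations: skip the direction whose byte count is zero, and try a read-only get() before falling back to entry(), which needs an owned String key. A minimal sketch follows, assuming the dashmap crate the project already uses; the bump function and map names are illustrative, not the crate's MetricsCollector API.

use std::sync::atomic::{AtomicU64, Ordering};
use dashmap::DashMap;

// Skip zero-value updates entirely, and prefer a read-only get() (no key
// allocation) over entry() for keys that already exist.
fn bump(map: &DashMap<String, AtomicU64>, key: &str, delta: u64) {
    if delta == 0 {
        return; // zero direction: no shard lock, no atomic op
    }
    if let Some(counter) = map.get(key) {
        // Fast path: existing entry, no String allocation.
        counter.fetch_add(delta, Ordering::Relaxed);
    } else {
        // Rare first-hit path: allocate the key and insert.
        map.entry(key.to_string())
            .or_insert_with(|| AtomicU64::new(0))
            .fetch_add(delta, Ordering::Relaxed);
    }
}

fn main() {
    let route_bytes_in: DashMap<String, AtomicU64> = DashMap::new();
    bump(&route_bytes_in, "route-42", 65_536);
    bump(&route_bytes_in, "route-42", 0); // ignored outright
    bump(&route_bytes_in, "route-42", 16_384);
    let total = route_bytes_in.get("route-42").unwrap().load(Ordering::Relaxed);
    assert_eq!(total, 81_920);
}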

View File

@@ -3,6 +3,6 @@
*/
export const commitinfo = {
name: '@push.rocks/smartproxy',
version: '25.11.16',
version: '25.11.24',
description: 'A powerful proxy package with unified route-based configuration for high traffic management. Features include SSL/TLS support, flexible routing patterns, WebSocket handling, advanced security options, and automatic ACME certificate management.'
}