Compare commits

64 Commits

| SHA1 |
|---|
| cfa958cf3d |
| db2e586da2 |
| 91832c368d |
| c9d0fccb2d |
| 5dccbbc9d1 |
| 92d7113c6c |
| 8f6bb30367 |
| ef9bac80ff |
| 9c78701038 |
| 26fd9409a7 |
| cfff128499 |
| 3baff354bd |
| c2eacd1b30 |
| 1fdbfcf0aa |
| 9b184acc8c |
| b475968f4e |
| 878eab6e88 |
| 77abe0804d |
| ae0342d018 |
| 365981d9cf |
| 2cc0ff0030 |
| 72935e7ee0 |
| 61db285e04 |
| d165829022 |
| 5e6cf391ab |
| 2b1a21c599 |
| b8e1c9f3cf |
| c65369540c |
| 59e108edbd |
| 1e2ca68fc7 |
| 4c76a9f9f3 |
| 8e76c42cea |
| b1f4181139 |
| a1b8d40011 |
| 246b44913e |
| b3d4949225 |
| 0475e6b442 |
| 8cdb95a853 |
| 8cefe9d66a |
| d5e08c83fc |
| 1247f48856 |
| e3bae4c399 |
| 0930f7e10c |
| aa9e6dfd94 |
| 211d5cf835 |
| 2ce1899337 |
| 2e2ffc4485 |
| da26816af5 |
| d598bffec3 |
| a9dbccfaff |
| 386859a2bd |
| 2b58615d24 |
| 95adf56e52 |
| c96a493fb6 |
| b92587cc16 |
| b3dc0a6db2 |
| de3b8d3f58 |
| 75089ec975 |
| b106db932b |
| fb0c0dcc31 |
| 61b67b91a0 |
| fc64f5a95e |
| 90b83a9dbe |
| 508621e231 |
changelog.md (+203)

@@ -1,5 +1,208 @@

# Changelog

## 2026-03-19 - 25.12.0 - feat(proxy-protocol)
add PROXY protocol v2 support to the Rust passthrough listener and streamline TypeScript proxy protocol exports

- detect and parse PROXY protocol v2 headers in the Rust TCP listener, including TCP and UDP address families
- add Rust v2 header generation, incomplete-header handling, and broader parser test coverage
- remove deprecated TypeScript proxy protocol parser exports and tests, leaving shared type definitions only

## 2026-03-17 - 25.11.24 - fix(rustproxy-http)
improve async static file serving, websocket handshake buffering, and shared metric metadata handling

- convert static file serving to async filesystem operations and await directory/file checks
- preserve and forward bytes read past the WebSocket handshake header terminator to avoid dropping buffered upstream data
- reuse Arc<str> values for route and source identifiers across counting bodies and metric reporting
- standardize backend key propagation across H1/H2 forwarding, retry, and fallback paths for consistent logging and metrics

## 2026-03-17 - 25.11.23 - fix(rustproxy-http,rustproxy-metrics)
reduce per-frame metrics overhead by batching body byte accounting

- Buffer HTTP body byte counts and flush them every 64 KB, at end of stream, and on drop to keep totals accurate while preserving throughput sampling.
- Skip zero-value counter updates in metrics collection to avoid unnecessary atomic and DashMap operations for the unused direction.

## 2026-03-17 - 25.11.22 - fix(rustproxy-http)
reuse healthy HTTP/2 upstream connections after requests with bodies

- Registers successful HTTP/2 connections in the pool regardless of whether the proxied request included a body
- Continues to avoid pooling upstream connections that returned 502 Bad Gateway responses

## 2026-03-17 - 25.11.21 - fix(rustproxy-http)
reuse pooled HTTP/2 connections for requests with and without bodies

- remove the bodyless-request restriction from HTTP/2 pool checkout
- always return successful HTTP/2 senders to the connection pool after requests

## 2026-03-17 - 25.11.20 - fix(rustproxy-http)
avoid downgrading cached backend protocol on H2 stream errors

- Treat HTTP/2 stream-level failures as retryable request errors instead of evidence that the backend only supports HTTP/1.1
- Keep protocol cache entries unchanged after successful H2 handshakes so future requests continue using HTTP/2
- Lower log severity for this fallback path from warning to debug while still recording backend H2 failure metrics

## 2026-03-16 - 25.11.19 - fix(rustproxy-http)
avoid reusing pooled HTTP/2 connections for requests with bodies to prevent upload flow-control stalls

- Limit HTTP/2 pool checkout to bodyless requests such as GET, HEAD, and DELETE
- Skip re-registering HTTP/2 connections in the pool after requests that send a body
- Prevent stalled uploads caused by depleted connection-level flow control windows on reused HTTP/2 connections

## 2026-03-16 - 25.11.18 - fix(repo)
no changes to commit

## 2026-03-16 - 25.11.17 - fix(rustproxy-http)
prevent stale HTTP/2 connection drivers from evicting newer pooled connections

- add generation IDs to pooled HTTP/2 senders so pool removal only affects the matching connection
- update HTTP/2 proxy and retry paths to register generation-tagged connections and skip eviction before registration completes

## 2026-03-16 - 25.11.16 - fix(repo)
no changes to commit

## 2026-03-16 - 25.11.15 - fix(rustproxy-http)
implement vectored write support for backend streams

- Add poll_write_vectored forwarding for both plain and TLS backend stream variants
- Expose is_write_vectored so the proxy can correctly report vectored write capability

## 2026-03-16 - 25.11.14 - fix(rustproxy-http)
forward vectored write support in ShutdownOnDrop AsyncWrite wrapper

- Implements poll_write_vectored by delegating to the wrapped writer
- Exposes is_write_vectored so the wrapper preserves underlying AsyncWrite capabilities

## 2026-03-16 - 25.11.13 - fix(rustproxy-http)
remove hot-path debug logging from HTTP/1 connection pool hits

- Stops emitting debug logs when reusing HTTP/1 idle connections in the connection pool.
- Keeps pool hit behavior unchanged while reducing overhead on a frequently executed path.

## 2026-03-16 - 25.11.12 - fix(rustproxy-http)
remove connection pool hit logging and keep logging limited to actual failures

- Removes debug and warning logs for HTTP/2 connection pool hits and age checks.
- Keeps pool behavior unchanged while reducing noisy per-request logging in the Rust HTTP proxy layer.

## 2026-03-16 - 25.11.11 - fix(rustproxy-http)
improve HTTP/2 proxy error logging with warning-level connection failures and debug error details

- Adds debug-formatted error fields to HTTP/2 handshake, retry, fallback, and request failure logs
- Promotes upstream HTTP/2 connection error logs from debug to warn to improve operational visibility

## 2026-03-16 - 25.11.10 - fix(rustproxy-http)
validate pooled HTTP/2 connections asynchronously before reuse and evict stale senders

- Add an async ready() check with a 500ms timeout before reusing pooled HTTP/2 senders to catch GOAWAY/RST states before forwarding requests
- Return connection age from the HTTP/2 pool checkout path and log warnings for older pooled connections
- Evict pooled HTTP/2 senders when they are closed, exceed max age, fail readiness validation, or time out during readiness checks

## 2026-03-16 - 25.11.9 - fix(rustproxy-routing)
reduce hot-path allocations in routing, metrics, and proxy protocol handling

- skip HTTP header map construction unless a route on the current port uses header matching
- reuse computed client IP strings during HTTP route matching to avoid redundant allocations
- optimize per-route and per-IP metric updates with get-first lookups to avoid unnecessary String creation on existing entries
- replace heap-allocated PROXY protocol peek and discard buffers with stack-allocated buffers in the TCP listener
- improve domain matcher case-insensitive wildcard checks while preserving glob fallback behavior

## 2026-03-16 - 25.11.8 - fix(rustproxy-http)
prevent premature idle timeouts during streamed HTTP responses and ensure TLS close_notify is sent on dropped connections

- track active streaming response bodies so the HTTP idle watchdog does not close connections mid-transfer
- add a ShutdownOnDrop wrapper for TLS-terminated HTTP connections to send shutdown on drop and avoid improperly terminated TLS sessions
- apply the shutdown wrapper in passthrough TLS terminate and terminate+reencrypt HTTP handling

## 2026-03-16 - 25.11.7 - fix(rustproxy)
prevent TLS route reload certificate mismatches and tighten passthrough connection handling

- Load updated TLS configs before swapping the route manager so newly visible routes always have their certificates available.
- Add timeouts when peeking initial decrypted data after TLS handshake to avoid leaked idle connections.
- Raise dropped, blocked, unmatched, and errored passthrough connection events from debug to warn for better operational visibility.

## 2026-03-16 - 25.11.6 - fix(rustproxy-http,rustproxy-passthrough)
improve upstream connection cleanup and graceful tunnel shutdown

- Evict pooled HTTP/2 connections when their driver exits and shorten the maximum pooled H2 age to reduce reuse of stale upstream connections.
- Strip hop-by-hop headers from backend responses before forwarding to HTTP/2 clients to avoid invalid H2 response handling.
- Replace immediate task aborts in WebSocket and TCP tunnel watchdogs with cancellation-driven graceful shutdown plus timed fallback aborts.
- Use non-blocking semaphore acquisition in the TCP listener so connection limits do not stall the accept loop for the entire port.

## 2026-03-16 - 25.11.5 - fix(repo)
no changes to commit

## 2026-03-15 - 25.11.4 - fix(rustproxy-http)
report streamed HTTP and WebSocket bytes per chunk for real-time throughput metrics

- Update CountingBody to record bytes immediately on each data frame instead of aggregating until completion or drop
- Record WebSocket tunnel traffic inside both copy loops and remove the final aggregate byte report to keep throughput metrics current

## 2026-03-15 - 25.11.3 - fix(repo)
no changes to commit

## 2026-03-15 - 25.11.2 - fix(rustproxy-http)
avoid reusing HTTP/1 senders during streaming responses and relax HTTP/2 keep-alive timeouts

- Stop returning HTTP/1 senders to the connection pool before upstream response bodies finish streaming to prevent unsafe reuse on active connections.
- Increase HTTP/2 keep-alive timeout from 5 seconds to 30 seconds in proxy connection builders to better support longer-lived backend streams.
- Improves reliability for large streaming payloads and backend fallback request handling.

## 2026-03-15 - 25.11.1 - fix(rustproxy-http)
keep connection idle tracking alive during streaming and tune HTTP/2 connection lifetimes

- Propagate connection activity tracking through HTTP/1, HTTP/2, and WebSocket forwarding so active request and response body streams do not trigger the idle watchdog.
- Update CountingBody to refresh connection activity timestamps while data frames are polled during uploads and downloads.
- Increase pooled HTTP/2 max age and set explicit HTTP/2 connection window sizes to improve long-lived streaming behavior.

## 2026-03-15 - 25.11.0 - feat(rustproxy-http)
add HTTP/2 Extended CONNECT WebSocket proxy support

- Enable HTTP/2 CONNECT protocol support on the Hyper auto connection builder
- Detect WebSocket requests for both HTTP/1 Upgrade and HTTP/2 Extended CONNECT flows
- Translate HTTP/2 WebSocket requests to an HTTP/1.1 backend handshake and return RFC-compliant client responses

## 2026-03-12 - 25.10.7 - fix(rustproxy-http)
remove Host header from HTTP/2 upstream requests while preserving it for HTTP/1 retries

- strips the Host header before sending HTTP/2 upstream requests so :authority from the URI is used instead
- avoids 400 responses from nginx caused by sending both Host and :authority headers
- keeps a cloned header set for bodyless request retries so HTTP/1 fallback still retains the Host header

## 2026-03-12 - 25.10.6 - fix(rustproxy-http)
use the requested domain as HTTP/2 authority instead of the backend host and port

- build HTTP/2 absolute URIs from the client-facing domain so the :authority pseudo-header matches the Host header
- remove backend port from generated HTTP/2 request URIs and fall back to the upstream host only when no domain is available
- apply the authority handling consistently across pooled, inline, and generic upstream request paths

## 2026-03-12 - 25.10.5 - fix(rustproxy-http)
configure HTTP/2 client builders with a Tokio timer for keep-alive handling

- Adds TokioTimer to all HTTP/2 client builder instances in proxy_service.
- Ensures configured HTTP/2 keep-alive interval and timeout settings have the required timer runtime support.

## 2026-03-12 - 25.10.4 - fix(rustproxy-http)
stabilize upstream HTTP/2 forwarding and fallback behavior

- Remove hop-by-hop headers before forwarding requests to HTTP/2 backends to comply with RFC 9113.
- Use ALPN-enabled TLS configuration whenever HTTP/2 is possible, including explicit H2 connections and retries.
- Add HTTP/2 handshake timeouts, tuned connection settings, and fallback to HTTP/1 when H2 negotiation times out or fails.
- Register pooled HTTP/2 senders only after a successful first request to avoid reusing broken connections.
- Build absolute URIs for HTTP/2 upstream requests so pseudo-headers such as scheme and authority are derived correctly.

## 2026-03-12 - 25.10.3 - fix(rustproxy-http)
include request domain in backend proxy error and protocol detection logs

- Adds domain context to backend TCP/TLS connect, handshake, request failure, retry, and fallback log entries in the Rust HTTP proxy service.
- Propagates the resolved host/domain through H1, H2, pooled, and fallback forwarding paths so backend-level diagnostics can be correlated with the original request domain.

## 2026-03-12 - 25.10.2 - fix(repo)
no code changes to release

## 2026-03-12 - 25.10.1 - fix(repo)
no changes to commit
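The 25.12.0 entry above is the one that adds the v2 parser shown near the end of this diff. As a quick orientation to the wire format it accepts, here is a hedged sketch (not part of the diff) that hand-builds a minimal v2 TCP-over-IPv4 header and runs it through the new `parse_v2` function; the byte layout follows the PROXY protocol v2 specification, and the test name and placement are assumptions.

```rust
// Sketch only: illustrates the PROXY v2 binary layout consumed by parse_v2().
#[test]
fn parse_v2_tcp4_header_sketch() {
    let mut header = Vec::new();
    // 12-byte v2 signature
    header.extend_from_slice(&[0x0D, 0x0A, 0x0D, 0x0A, 0x00, 0x0D, 0x0A, 0x51, 0x55, 0x49, 0x54, 0x0A]);
    header.push(0x21);                                  // version 2, command PROXY
    header.push(0x11);                                  // AF_INET + STREAM (TCP4)
    header.extend_from_slice(&12u16.to_be_bytes());     // address block length
    header.extend_from_slice(&[203, 0, 113, 7]);        // source IPv4
    header.extend_from_slice(&[10, 0, 0, 1]);           // destination IPv4
    header.extend_from_slice(&44821u16.to_be_bytes());  // source port
    header.extend_from_slice(&443u16.to_be_bytes());    // destination port

    let (parsed, consumed) = parse_v2(&header).expect("valid v2 header");
    assert_eq!(consumed, 28); // 16-byte fixed part + 12-byte address block
    assert_eq!(parsed.source_addr.port(), 44821);
}
```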
@@ -1,6 +1,6 @@
 {
   "name": "@push.rocks/smartproxy",
-  "version": "25.10.1",
+  "version": "25.12.0",
   "private": false,
   "description": "A powerful proxy package with unified route-based configuration for high traffic management. Features include SSL/TLS support, flexible routing patterns, WebSocket handling, advanced security options, and automatic ACME certificate management.",
   "main": "dist_ts/index.js",
@@ -4,13 +4,13 @@
 //! HTTP/2 connections are multiplexed (clone the sender for each request).
 
 use std::sync::Arc;
+use std::sync::atomic::{AtomicU64, Ordering};
 use std::time::{Duration, Instant};
 
 use bytes::Bytes;
 use dashmap::DashMap;
 use http_body_util::combinators::BoxBody;
 use hyper::client::conn::{http1, http2};
-use tracing::debug;
 
 /// Maximum idle connections per backend key.
 const MAX_IDLE_PER_KEY: usize = 16;
@@ -20,6 +20,7 @@ const IDLE_TIMEOUT: Duration = Duration::from_secs(90);
 const EVICTION_INTERVAL: Duration = Duration::from_secs(30);
 /// Maximum age for pooled HTTP/2 connections before proactive eviction.
 /// Prevents staleness from backends that close idle connections (e.g. nginx GOAWAY).
+/// 120s is well within typical server GOAWAY windows (nginx: ~60s idle, envoy: ~60s).
 const MAX_H2_AGE: Duration = Duration::from_secs(120);
 
 /// Identifies a unique backend endpoint.
@@ -37,10 +38,13 @@ struct IdleH1 {
     idle_since: Instant,
 }
 
-/// A pooled HTTP/2 sender (multiplexed, Clone-able).
+/// A pooled HTTP/2 sender (multiplexed, Clone-able) with a generation tag.
 struct PooledH2 {
     sender: http2::SendRequest<BoxBody<Bytes, hyper::Error>>,
     created_at: Instant,
+    /// Unique generation ID. Connection drivers use this to only remove their OWN
+    /// entry, preventing phantom eviction when multiple connections share the same key.
+    generation: u64,
 }
 
 /// Backend connection pool.
@@ -49,6 +53,8 @@ pub struct ConnectionPool {
     h1_pool: Arc<DashMap<PoolKey, Vec<IdleH1>>>,
     /// HTTP/2 multiplexed connections indexed by backend key.
     h2_pool: Arc<DashMap<PoolKey, PooledH2>>,
+    /// Monotonic generation counter for H2 pool entries.
+    h2_generation: AtomicU64,
     /// Handle for the background eviction task.
     eviction_handle: Option<tokio::task::JoinHandle<()>>,
 }
@@ -68,6 +74,7 @@ impl ConnectionPool {
         Self {
             h1_pool,
             h2_pool,
+            h2_generation: AtomicU64::new(0),
             eviction_handle: Some(eviction_handle),
         }
     }
@@ -81,7 +88,7 @@ impl ConnectionPool {
         while let Some(idle) = idles.pop() {
             // Check if the connection is still alive and ready
             if idle.idle_since.elapsed() < IDLE_TIMEOUT && idle.sender.is_ready() && !idle.sender.is_closed() {
-                debug!("Pool hit (h1): {}:{}", key.host, key.port);
+                // H1 pool hit — no logging on hot path
                 return Some(idle.sender);
             }
             // Stale or closed — drop it
@@ -114,40 +121,56 @@ impl ConnectionPool {
 
     /// Try to get a cloned HTTP/2 sender for the given key.
     /// HTTP/2 senders are Clone-able (multiplexed), so we clone rather than remove.
-    pub fn checkout_h2(&self, key: &PoolKey) -> Option<http2::SendRequest<BoxBody<Bytes, hyper::Error>>> {
+    pub fn checkout_h2(&self, key: &PoolKey) -> Option<(http2::SendRequest<BoxBody<Bytes, hyper::Error>>, Duration)> {
         let entry = self.h2_pool.get(key)?;
         let pooled = entry.value();
+        let age = pooled.created_at.elapsed();
 
-        // Check if the h2 connection is still alive and not too old
-        if pooled.sender.is_closed() || pooled.created_at.elapsed() >= MAX_H2_AGE {
+        if pooled.sender.is_closed() || age >= MAX_H2_AGE {
             drop(entry);
             self.h2_pool.remove(key);
             return None;
         }
 
         if pooled.sender.is_ready() {
-            debug!("Pool hit (h2): {}:{}", key.host, key.port);
-            return Some(pooled.sender.clone());
+            return Some((pooled.sender.clone(), age));
        }
         None
     }
 
-    /// Remove a dead HTTP/2 sender from the pool.
+    /// Remove a dead HTTP/2 sender from the pool (unconditional).
     /// Called when `send_request` fails to prevent subsequent requests from reusing the stale sender.
     pub fn remove_h2(&self, key: &PoolKey) {
         self.h2_pool.remove(key);
     }
 
-    /// Register an HTTP/2 sender in the pool. Since h2 is multiplexed,
-    /// only one sender per key is stored (it's Clone-able).
-    pub fn register_h2(&self, key: PoolKey, sender: http2::SendRequest<BoxBody<Bytes, hyper::Error>>) {
+    /// Remove an HTTP/2 sender ONLY if the current entry has the expected generation.
+    /// This prevents phantom eviction: when multiple connections share the same key,
+    /// an old connection's driver won't accidentally remove a newer connection's entry.
+    pub fn remove_h2_if_generation(&self, key: &PoolKey, expected_gen: u64) {
+        if let Some(entry) = self.h2_pool.get(key) {
+            if entry.value().generation == expected_gen {
+                drop(entry); // release DashMap ref before remove
+                self.h2_pool.remove(key);
+            }
+            // else: a newer connection replaced ours — don't touch it
+        }
+    }
+
+    /// Register an HTTP/2 sender in the pool. Returns the generation ID for this entry.
+    /// The caller should pass this generation to the connection driver so it can use
+    /// `remove_h2_if_generation` instead of `remove_h2` to avoid phantom eviction.
+    pub fn register_h2(&self, key: PoolKey, sender: http2::SendRequest<BoxBody<Bytes, hyper::Error>>) -> u64 {
+        let gen = self.h2_generation.fetch_add(1, Ordering::Relaxed);
         if sender.is_closed() {
-            return;
+            return gen;
        }
         self.h2_pool.insert(key, PooledH2 {
             sender,
             created_at: Instant::now(),
+            generation: gen,
         });
+        gen
     }
 
     /// Background eviction loop — runs every EVICTION_INTERVAL to remove stale connections.
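The hunk above only changes the pool side; the registration and driver call sites live in the HTTP proxy service, whose diff is suppressed further down as too large. The following is therefore only a sketch of how the generation-tagged API is meant to be used by a connection driver; `pool`, `key`, `sender`, and `connection` are placeholders, not identifiers taken from the suppressed diff.

```rust
// Sketch only: register the new HTTP/2 sender, then let the driver task evict
// exactly this entry when the connection ends.
let generation = pool.register_h2(key.clone(), sender.clone());

tokio::spawn(async move {
    // Drive the HTTP/2 connection until it closes or errors.
    if let Err(err) = connection.await {
        tracing::debug!("h2 connection driver ended: {err:?}");
    }
    // Only remove the entry we registered; a newer connection that reused the
    // same key keeps its pooled sender (this is the phantom-eviction fix).
    pool.remove_h2_if_generation(&key, generation);
});
```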
@@ -9,22 +9,37 @@ use bytes::Bytes;
 use http_body::Frame;
 use rustproxy_metrics::MetricsCollector;
 
+/// Flush accumulated bytes to the metrics collector every 64 KB.
+/// This reduces per-frame DashMap shard-locked reads from ~15 to ~1 per 4 frames
+/// (assuming typical 16 KB upload frames). The 1 Hz throughput sampler still sees
+/// data within one sampling period even at low transfer rates.
+const BYTE_FLUSH_THRESHOLD: u64 = 65_536;
+
 /// Wraps any `http_body::Body` and counts data bytes passing through.
 ///
-/// When the body is fully consumed or dropped, accumulated byte counts
-/// are reported to the `MetricsCollector`.
+/// Bytes are accumulated and flushed to the `MetricsCollector` every
+/// [`BYTE_FLUSH_THRESHOLD`] bytes (and on Drop) so the throughput tracker
+/// (sampled at 1 Hz) reflects real-time data flow without per-frame overhead.
 ///
 /// The inner body is pinned on the heap to support `!Unpin` types like `hyper::body::Incoming`.
 pub struct CountingBody<B> {
     inner: Pin<Box<B>>,
-    counted_bytes: AtomicU64,
     metrics: Arc<MetricsCollector>,
-    route_id: Option<String>,
-    source_ip: Option<String>,
+    route_id: Option<Arc<str>>,
+    source_ip: Option<Arc<str>>,
     /// Whether we count bytes as "in" (request body) or "out" (response body).
     direction: Direction,
-    /// Whether we've already reported the bytes (to avoid double-reporting on drop).
-    reported: bool,
+    /// Accumulated bytes not yet flushed to the metrics collector.
+    pending_bytes: u64,
+    /// Optional connection-level activity tracker. When set, poll_frame updates this
+    /// to keep the idle watchdog alive during active body streaming (uploads/downloads).
+    connection_activity: Option<Arc<AtomicU64>>,
+    /// Start instant for computing elapsed ms for connection_activity.
+    activity_start: Option<std::time::Instant>,
+    /// Optional active-request counter. When set, CountingBody increments on creation
+    /// and decrements on Drop, keeping the HTTP idle watchdog aware that a response
+    /// body is still streaming (even after the request handler has returned).
+    active_requests: Option<Arc<AtomicU64>>,
 }
 
 /// Which direction the bytes flow.
@@ -41,33 +56,49 @@ impl<B> CountingBody<B> {
     pub fn new(
         inner: B,
         metrics: Arc<MetricsCollector>,
-        route_id: Option<String>,
-        source_ip: Option<String>,
+        route_id: Option<Arc<str>>,
+        source_ip: Option<Arc<str>>,
         direction: Direction,
     ) -> Self {
         Self {
             inner: Box::pin(inner),
-            counted_bytes: AtomicU64::new(0),
             metrics,
             route_id,
             source_ip,
             direction,
-            reported: false,
+            pending_bytes: 0,
+            connection_activity: None,
+            activity_start: None,
+            active_requests: None,
         }
     }
 
-    /// Report accumulated bytes to the metrics collector.
-    fn report(&mut self) {
-        if self.reported {
+    /// Set the connection-level activity tracker. When set, each data frame
+    /// updates this timestamp to prevent the idle watchdog from killing the
+    /// connection during active body streaming.
+    pub fn with_connection_activity(mut self, activity: Arc<AtomicU64>, start: std::time::Instant) -> Self {
+        self.connection_activity = Some(activity);
+        self.activity_start = Some(start);
+        self
+    }
+
+    /// Set the active-request counter for the HTTP idle watchdog.
+    /// CountingBody increments on creation and decrements on Drop, ensuring the
+    /// idle watchdog sees an "active request" while the response body streams.
+    pub fn with_active_requests(mut self, counter: Arc<AtomicU64>) -> Self {
+        counter.fetch_add(1, Ordering::Relaxed);
+        self.active_requests = Some(counter);
+        self
+    }
+
+    /// Flush accumulated bytes to the metrics collector.
+    #[inline]
+    fn flush_pending(&mut self) {
+        if self.pending_bytes == 0 {
             return;
         }
-        self.reported = true;
-        let bytes = self.counted_bytes.load(Ordering::Relaxed);
-        if bytes == 0 {
-            return;
-        }
+        let bytes = self.pending_bytes;
+        self.pending_bytes = 0;
 
         let route_id = self.route_id.as_deref();
         let source_ip = self.source_ip.as_deref();
         match self.direction {
@@ -77,12 +108,6 @@
         }
     }
 
-impl<B> Drop for CountingBody<B> {
-    fn drop(&mut self) {
-        self.report();
-    }
-}
-
 // CountingBody is Unpin because inner is Pin<Box<B>> (always Unpin).
 impl<B> Unpin for CountingBody<B> {}
 
@@ -102,14 +127,23 @@ where
         match this.inner.as_mut().poll_frame(cx) {
             Poll::Ready(Some(Ok(frame))) => {
                 if let Some(data) = frame.data_ref() {
-                    this.counted_bytes.fetch_add(data.len() as u64, Ordering::Relaxed);
+                    let len = data.len() as u64;
+                    this.pending_bytes += len;
+                    if this.pending_bytes >= BYTE_FLUSH_THRESHOLD {
+                        this.flush_pending();
+                    }
+                    // Keep the connection-level idle watchdog alive on every frame
+                    // (this is just one atomic store — cheap enough per-frame)
+                    if let (Some(activity), Some(start)) = (&this.connection_activity, &this.activity_start) {
+                        activity.store(start.elapsed().as_millis() as u64, Ordering::Relaxed);
+                    }
                 }
                 Poll::Ready(Some(Ok(frame)))
             }
             Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))),
             Poll::Ready(None) => {
-                // Body is fully consumed — report now
-                this.report();
+                // End of stream — flush any remaining bytes
+                this.flush_pending();
                 Poll::Ready(None)
             }
             Poll::Pending => Poll::Pending,
@@ -124,3 +158,15 @@ where
         self.inner.size_hint()
     }
 }
+
+impl<B> Drop for CountingBody<B> {
+    fn drop(&mut self) {
+        // Flush any remaining accumulated bytes so totals stay accurate
+        self.flush_pending();
+        // Decrement the active-request counter so the HTTP idle watchdog
+        // knows this response body is no longer streaming.
+        if let Some(ref counter) = self.active_requests {
+            counter.fetch_sub(1, Ordering::Relaxed);
+        }
+    }
+}
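The builder methods above are consumed by the HTTP proxy service, whose diff is suppressed below. The following is a hedged sketch of the intended wiring for a proxied response body; `upstream_response`, `metrics`, `route_id`, `source_ip`, `last_activity`, `conn_start`, and `active_requests` are placeholders, and the `Direction` variant name is assumed since the enum body falls outside the hunk shown here.

```rust
// Sketch only: meter a streamed response body and keep the idle watchdog informed.
let (parts, body) = upstream_response.into_parts();
let counted = CountingBody::new(
    body,
    metrics.clone(),
    route_id.clone(),   // Option<Arc<str>>, shared instead of re-allocated per body
    source_ip.clone(),  // Option<Arc<str>>
    Direction::Out,     // response bytes count as "out" (variant name assumed)
)
.with_connection_activity(last_activity.clone(), conn_start)
.with_active_requests(active_requests.clone());
let response = hyper::Response::from_parts(parts, counted);
```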
@@ -9,6 +9,7 @@ pub mod protocol_cache;
 pub mod proxy_service;
 pub mod request_filter;
 pub mod response_filter;
+pub mod shutdown_on_drop;
 pub mod template;
 pub mod upstream_selector;
 
(One file diff is suppressed because it is too large.)

rust/crates/rustproxy-http/src/shutdown_on_drop.rs (new file, +102)
@@ -0,0 +1,102 @@
//! Wrapper that ensures TLS close_notify is sent when the stream is dropped.
//!
//! When hyper drops an HTTP connection (backend error, timeout, normal H2 close),
//! the underlying TLS stream is dropped WITHOUT `shutdown()`. tokio-rustls cannot
//! send `close_notify` in Drop (requires async). This wrapper tracks whether
//! `poll_shutdown` was called and, if not, spawns a background task to send it.

use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};

use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};

/// Wraps an AsyncRead+AsyncWrite stream and ensures `shutdown()` is called when
/// dropped, even if the caller (e.g. hyper) doesn't explicitly shut down.
///
/// This guarantees TLS `close_notify` is sent for TLS-wrapped streams, preventing
/// "GnuTLS recv error (-110): The TLS connection was non-properly terminated" errors.
pub struct ShutdownOnDrop<S: AsyncRead + AsyncWrite + Unpin + Send + 'static> {
    inner: Option<S>,
    shutdown_called: bool,
}

impl<S: AsyncRead + AsyncWrite + Unpin + Send + 'static> ShutdownOnDrop<S> {
    /// Create a new wrapper around the given stream.
    pub fn new(stream: S) -> Self {
        Self {
            inner: Some(stream),
            shutdown_called: false,
        }
    }
}

impl<S: AsyncRead + AsyncWrite + Unpin + Send + 'static> AsyncRead for ShutdownOnDrop<S> {
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        Pin::new(self.get_mut().inner.as_mut().unwrap()).poll_read(cx, buf)
    }
}

impl<S: AsyncRead + AsyncWrite + Unpin + Send + 'static> AsyncWrite for ShutdownOnDrop<S> {
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        Pin::new(self.get_mut().inner.as_mut().unwrap()).poll_write(cx, buf)
    }

    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[io::IoSlice<'_>],
    ) -> Poll<io::Result<usize>> {
        Pin::new(self.get_mut().inner.as_mut().unwrap()).poll_write_vectored(cx, bufs)
    }

    fn is_write_vectored(&self) -> bool {
        self.inner.as_ref().unwrap().is_write_vectored()
    }

    fn poll_flush(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<io::Result<()>> {
        Pin::new(self.get_mut().inner.as_mut().unwrap()).poll_flush(cx)
    }

    fn poll_shutdown(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<io::Result<()>> {
        let this = self.get_mut();
        let result = Pin::new(this.inner.as_mut().unwrap()).poll_shutdown(cx);
        if result.is_ready() {
            this.shutdown_called = true;
        }
        result
    }
}

impl<S: AsyncRead + AsyncWrite + Unpin + Send + 'static> Drop for ShutdownOnDrop<S> {
    fn drop(&mut self) {
        // If shutdown was already called (hyper closed properly), nothing to do.
        // If not (hyper dropped without shutdown — e.g. H2 close, error, timeout),
        // spawn a background task to send close_notify / TCP FIN.
        if !self.shutdown_called {
            if let Some(mut stream) = self.inner.take() {
                tokio::spawn(async move {
                    let _ = tokio::time::timeout(
                        std::time::Duration::from_secs(2),
                        tokio::io::AsyncWriteExt::shutdown(&mut stream),
                    ).await;
                    // stream is dropped here — all resources freed
                });
            }
        }
    }
}

@@ -259,51 +259,87 @@ impl MetricsCollector {
     /// Called per-chunk in the TCP copy loop. Only touches AtomicU64 counters —
     /// no Mutex is taken. The throughput trackers are fed during `sample_all()`.
     pub fn record_bytes(&self, bytes_in: u64, bytes_out: u64, route_id: Option<&str>, source_ip: Option<&str>) {
-        self.total_bytes_in.fetch_add(bytes_in, Ordering::Relaxed);
-        self.total_bytes_out.fetch_add(bytes_out, Ordering::Relaxed);
-
-        // Accumulate into lock-free pending throughput counters
-        self.global_pending_tp_in.fetch_add(bytes_in, Ordering::Relaxed);
-        self.global_pending_tp_out.fetch_add(bytes_out, Ordering::Relaxed);
-
-        if let Some(route_id) = route_id {
-            self.route_bytes_in
-                .entry(route_id.to_string())
-                .or_insert_with(|| AtomicU64::new(0))
-                .fetch_add(bytes_in, Ordering::Relaxed);
-            self.route_bytes_out
-                .entry(route_id.to_string())
-                .or_insert_with(|| AtomicU64::new(0))
-                .fetch_add(bytes_out, Ordering::Relaxed);
-
-            // Accumulate into per-route pending throughput counters (lock-free)
-            let entry = self.route_pending_tp
-                .entry(route_id.to_string())
-                .or_insert_with(|| (AtomicU64::new(0), AtomicU64::new(0)));
-            entry.0.fetch_add(bytes_in, Ordering::Relaxed);
-            entry.1.fetch_add(bytes_out, Ordering::Relaxed);
+        // Short-circuit: only touch counters for the direction that has data.
+        // CountingBody always calls with one direction zero — skipping the zero
+        // direction avoids ~50% of DashMap shard-locked reads per call.
+        if bytes_in > 0 {
+            self.total_bytes_in.fetch_add(bytes_in, Ordering::Relaxed);
+            self.global_pending_tp_in.fetch_add(bytes_in, Ordering::Relaxed);
+        }
+        if bytes_out > 0 {
+            self.total_bytes_out.fetch_add(bytes_out, Ordering::Relaxed);
+            self.global_pending_tp_out.fetch_add(bytes_out, Ordering::Relaxed);
         }
 
+        // Per-route tracking: use get() first (zero-alloc fast path for existing entries),
+        // fall back to entry() with to_string() only on the rare first-chunk miss.
+        if let Some(route_id) = route_id {
+            if bytes_in > 0 {
+                if let Some(counter) = self.route_bytes_in.get(route_id) {
+                    counter.fetch_add(bytes_in, Ordering::Relaxed);
+                } else {
+                    self.route_bytes_in.entry(route_id.to_string())
+                        .or_insert_with(|| AtomicU64::new(0))
+                        .fetch_add(bytes_in, Ordering::Relaxed);
+                }
+            }
+            if bytes_out > 0 {
+                if let Some(counter) = self.route_bytes_out.get(route_id) {
+                    counter.fetch_add(bytes_out, Ordering::Relaxed);
+                } else {
+                    self.route_bytes_out.entry(route_id.to_string())
+                        .or_insert_with(|| AtomicU64::new(0))
+                        .fetch_add(bytes_out, Ordering::Relaxed);
+                }
+            }
+
+            // Accumulate into per-route pending throughput counters (lock-free)
+            if let Some(entry) = self.route_pending_tp.get(route_id) {
+                if bytes_in > 0 { entry.0.fetch_add(bytes_in, Ordering::Relaxed); }
+                if bytes_out > 0 { entry.1.fetch_add(bytes_out, Ordering::Relaxed); }
+            } else {
+                let entry = self.route_pending_tp.entry(route_id.to_string())
+                    .or_insert_with(|| (AtomicU64::new(0), AtomicU64::new(0)));
+                if bytes_in > 0 { entry.0.fetch_add(bytes_in, Ordering::Relaxed); }
+                if bytes_out > 0 { entry.1.fetch_add(bytes_out, Ordering::Relaxed); }
+            }
+        }
+
+        // Per-IP tracking: same get()-first pattern to avoid String allocation on hot path.
         if let Some(ip) = source_ip {
             // Only record per-IP stats if the IP still has active connections.
             // This prevents orphaned entries when record_bytes races with
             // connection_closed (which evicts all per-IP data on last close).
             if self.ip_connections.contains_key(ip) {
-                self.ip_bytes_in
-                    .entry(ip.to_string())
-                    .or_insert_with(|| AtomicU64::new(0))
-                    .fetch_add(bytes_in, Ordering::Relaxed);
-                self.ip_bytes_out
-                    .entry(ip.to_string())
-                    .or_insert_with(|| AtomicU64::new(0))
-                    .fetch_add(bytes_out, Ordering::Relaxed);
+                if bytes_in > 0 {
+                    if let Some(counter) = self.ip_bytes_in.get(ip) {
+                        counter.fetch_add(bytes_in, Ordering::Relaxed);
+                    } else {
+                        self.ip_bytes_in.entry(ip.to_string())
+                            .or_insert_with(|| AtomicU64::new(0))
+                            .fetch_add(bytes_in, Ordering::Relaxed);
+                    }
+                }
+                if bytes_out > 0 {
+                    if let Some(counter) = self.ip_bytes_out.get(ip) {
+                        counter.fetch_add(bytes_out, Ordering::Relaxed);
+                    } else {
+                        self.ip_bytes_out.entry(ip.to_string())
+                            .or_insert_with(|| AtomicU64::new(0))
+                            .fetch_add(bytes_out, Ordering::Relaxed);
+                    }
+                }
 
                 // Accumulate into per-IP pending throughput counters (lock-free)
-                let entry = self.ip_pending_tp
-                    .entry(ip.to_string())
-                    .or_insert_with(|| (AtomicU64::new(0), AtomicU64::new(0)));
-                entry.0.fetch_add(bytes_in, Ordering::Relaxed);
-                entry.1.fetch_add(bytes_out, Ordering::Relaxed);
+                if let Some(entry) = self.ip_pending_tp.get(ip) {
+                    if bytes_in > 0 { entry.0.fetch_add(bytes_in, Ordering::Relaxed); }
+                    if bytes_out > 0 { entry.1.fetch_add(bytes_out, Ordering::Relaxed); }
+                } else {
+                    let entry = self.ip_pending_tp.entry(ip.to_string())
+                        .or_insert_with(|| (AtomicU64::new(0), AtomicU64::new(0)));
+                    if bytes_in > 0 { entry.0.fetch_add(bytes_in, Ordering::Relaxed); }
+                    if bytes_out > 0 { entry.1.fetch_add(bytes_out, Ordering::Relaxed); }
+                }
             }
         }
     }
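Per the 25.11.8 changelog entry, the ShutdownOnDrop wrapper introduced above is applied where passthrough TLS termination hands connections to hyper; those call sites sit in the suppressed proxy-service diff, so the following is only a sketch of the intended usage (assuming the hyper-util auto connection builder mentioned in the 25.11.0 entry, with `tls_acceptor`, `tcp_stream`, and `service` as placeholders).

```rust
// Sketch only: wrap the accepted TLS stream so close_notify is still sent if hyper
// drops the connection without calling shutdown().
let tls_stream = tls_acceptor.accept(tcp_stream).await?;
let io = hyper_util::rt::TokioIo::new(ShutdownOnDrop::new(tls_stream));

hyper_util::server::conn::auto::Builder::new(hyper_util::rt::TokioExecutor::new())
    .serve_connection_with_upgrades(io, service)
    .await?;
```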
@@ -97,16 +97,25 @@ pub async fn forward_bidirectional_with_timeouts(
     let last_activity = Arc::new(AtomicU64::new(0));
     let start = std::time::Instant::now();
 
+    // Per-connection cancellation token: the watchdog cancels this instead of
+    // aborting tasks, so the copy loops can shut down gracefully (TCP FIN instead
+    // of RST, TLS close_notify if the stream is TLS-wrapped).
+    let conn_cancel = CancellationToken::new();
+
     let la1 = Arc::clone(&last_activity);
     let initial_len = initial_data.map_or(0u64, |d| d.len() as u64);
     let metrics_c2b = metrics.clone();
+    let cc1 = conn_cancel.clone();
     let c2b = tokio::spawn(async move {
         let mut buf = vec![0u8; 65536];
         let mut total = initial_len;
         loop {
-            let n = match client_read.read(&mut buf).await {
-                Ok(0) | Err(_) => break,
-                Ok(n) => n,
+            let n = tokio::select! {
+                result = client_read.read(&mut buf) => match result {
+                    Ok(0) | Err(_) => break,
+                    Ok(n) => n,
+                },
+                _ = cc1.cancelled() => break,
             };
             if backend_write.write_all(&buf[..n]).await.is_err() {
                 break;
@@ -117,19 +126,27 @@ pub async fn forward_bidirectional_with_timeouts(
                 ctx.collector.record_bytes(n as u64, 0, ctx.route_id.as_deref(), ctx.source_ip.as_deref());
             }
         }
-        let _ = backend_write.shutdown().await;
+        // Graceful shutdown with timeout (sends TCP FIN / TLS close_notify)
+        let _ = tokio::time::timeout(
+            std::time::Duration::from_secs(2),
+            backend_write.shutdown(),
+        ).await;
         total
     });
 
     let la2 = Arc::clone(&last_activity);
     let metrics_b2c = metrics;
+    let cc2 = conn_cancel.clone();
     let b2c = tokio::spawn(async move {
         let mut buf = vec![0u8; 65536];
         let mut total = 0u64;
         loop {
-            let n = match backend_read.read(&mut buf).await {
-                Ok(0) | Err(_) => break,
-                Ok(n) => n,
+            let n = tokio::select! {
+                result = backend_read.read(&mut buf) => match result {
+                    Ok(0) | Err(_) => break,
+                    Ok(n) => n,
+                },
+                _ = cc2.cancelled() => break,
             };
             if client_write.write_all(&buf[..n]).await.is_err() {
                 break;
@@ -140,14 +157,20 @@ pub async fn forward_bidirectional_with_timeouts(
                 ctx.collector.record_bytes(0, n as u64, ctx.route_id.as_deref(), ctx.source_ip.as_deref());
             }
         }
-        let _ = client_write.shutdown().await;
+        // Graceful shutdown with timeout (sends TCP FIN / TLS close_notify)
+        let _ = tokio::time::timeout(
+            std::time::Duration::from_secs(2),
+            client_write.shutdown(),
+        ).await;
         total
     });
 
-    // Watchdog: inactivity, max lifetime, and cancellation
+    // Watchdog: inactivity, max lifetime, and cancellation.
+    // First cancels the per-connection token for graceful shutdown (FIN/close_notify),
+    // then falls back to abort if the tasks are stuck (e.g. on a blocked write_all).
     let la_watch = Arc::clone(&last_activity);
-    let c2b_handle = c2b.abort_handle();
-    let b2c_handle = b2c.abort_handle();
+    let c2b_abort = c2b.abort_handle();
+    let b2c_abort = b2c.abort_handle();
     let watchdog = tokio::spawn(async move {
         let check_interval = std::time::Duration::from_secs(5);
         let mut last_seen = 0u64;
@@ -155,16 +178,12 @@ pub async fn forward_bidirectional_with_timeouts(
             tokio::select! {
                 _ = cancel.cancelled() => {
                     debug!("Connection cancelled by shutdown");
-                    c2b_handle.abort();
-                    b2c_handle.abort();
                     break;
                 }
                 _ = tokio::time::sleep(check_interval) => {
                     // Check max lifetime
                     if start.elapsed() >= max_lifetime {
                         debug!("Connection exceeded max lifetime, closing");
-                        c2b_handle.abort();
-                        b2c_handle.abort();
                         break;
                     }
 
@@ -174,8 +193,6 @@ pub async fn forward_bidirectional_with_timeouts(
                     let elapsed_since_activity = start.elapsed().as_millis() as u64 - current;
                     if elapsed_since_activity >= inactivity_timeout.as_millis() as u64 {
                         debug!("Connection inactive for {}ms, closing", elapsed_since_activity);
-                        c2b_handle.abort();
-                        b2c_handle.abort();
                         break;
                     }
                 }
@@ -183,6 +200,13 @@ pub async fn forward_bidirectional_with_timeouts(
             }
         }
     }
+        // Phase 1: Signal copy loops to exit gracefully (allows FIN/close_notify)
+        conn_cancel.cancel();
+        // Phase 2: Wait for graceful shutdown (2s shutdown timeout + 2s margin)
+        tokio::time::sleep(std::time::Duration::from_secs(4)).await;
+        // Phase 3: Force-abort if still stuck (e.g. blocked on write_all)
+        c2b_abort.abort();
+        b2c_abort.abort();
     });
 
     let bytes_in = c2b.await.unwrap_or(0);
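Stripped of the proxy's own types, the watchdog above boils down to a three-phase stop: cancel the per-connection token, wait out the graceful-shutdown window, then abort whatever is still stuck. A self-contained sketch of that pattern using tokio and tokio-util:

```rust
use std::time::Duration;
use tokio_util::sync::CancellationToken;

/// Sketch of the cancel-then-abort shutdown used by the tunnel watchdog above.
async fn stop_copy_loops(
    conn_cancel: CancellationToken,
    c2b_abort: tokio::task::AbortHandle,
    b2c_abort: tokio::task::AbortHandle,
) {
    // Phase 1: ask both copy loops to exit so they can send TCP FIN / TLS close_notify.
    conn_cancel.cancel();
    // Phase 2: give them time to run their graceful shutdown paths (2s timeout plus margin).
    tokio::time::sleep(Duration::from_secs(4)).await;
    // Phase 3: force-abort anything still blocked (e.g. in a stalled write_all).
    c2b_abort.abort();
    b2c_abort.abort();
}
```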
@@ -1,4 +1,4 @@
-use std::net::SocketAddr;
+use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
 use thiserror::Error;
 
 #[derive(Debug, Error)]
@@ -9,9 +9,11 @@ pub enum ProxyProtocolError {
     UnsupportedVersion,
     #[error("Parse error: {0}")]
     Parse(String),
+    #[error("Incomplete header: need {0} bytes, got {1}")]
+    Incomplete(usize, usize),
 }
 
-/// Parsed PROXY protocol v1 header.
+/// Parsed PROXY protocol header (v1 or v2).
 #[derive(Debug, Clone)]
 pub struct ProxyProtocolHeader {
     pub source_addr: SocketAddr,
@@ -24,14 +26,29 @@ pub struct ProxyProtocolHeader {
 pub enum ProxyProtocol {
     Tcp4,
     Tcp6,
+    Udp4,
+    Udp6,
     Unknown,
 }
 
+/// Transport type for PROXY v2 header generation.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum ProxyV2Transport {
+    Stream,   // TCP
+    Datagram, // UDP
+}
+
+/// PROXY protocol v2 signature (12 bytes).
+const PROXY_V2_SIGNATURE: [u8; 12] = [
+    0x0D, 0x0A, 0x0D, 0x0A, 0x00, 0x0D, 0x0A, 0x51, 0x55, 0x49, 0x54, 0x0A,
+];
+
+// ===== v1 (text format) =====
+
 /// Parse a PROXY protocol v1 header from data.
 ///
 /// Format: `PROXY TCP4 <src_ip> <dst_ip> <src_port> <dst_port>\r\n`
 pub fn parse_v1(data: &[u8]) -> Result<(ProxyProtocolHeader, usize), ProxyProtocolError> {
-    // Find the end of the header line
     let line_end = data
         .windows(2)
         .position(|w| w == b"\r\n")
@@ -56,10 +73,10 @@ pub fn parse_v1(data: &[u8]) -> Result<(ProxyProtocolHeader, usize), ProxyProtoc
         _ => return Err(ProxyProtocolError::UnsupportedVersion),
     };
 
-    let src_ip: std::net::IpAddr = parts[2]
+    let src_ip: IpAddr = parts[2]
         .parse()
         .map_err(|_| ProxyProtocolError::Parse("Invalid source IP".to_string()))?;
-    let dst_ip: std::net::IpAddr = parts[3]
+    let dst_ip: IpAddr = parts[3]
         .parse()
         .map_err(|_| ProxyProtocolError::Parse("Invalid destination IP".to_string()))?;
     let src_port: u16 = parts[4]
@@ -75,7 +92,6 @@ pub fn parse_v1(data: &[u8]) -> Result<(ProxyProtocolHeader, usize), ProxyProtoc
         protocol,
     };
 
-    // Consumed bytes = line + \r\n
     Ok((header, line_end + 2))
 }
 
@@ -97,10 +113,219 @@ pub fn is_proxy_protocol_v1(data: &[u8]) -> bool {
     data.starts_with(b"PROXY ")
 }
 
+// ===== v2 (binary format) =====
+
+/// Check if data starts with a PROXY protocol v2 header.
+pub fn is_proxy_protocol_v2(data: &[u8]) -> bool {
+    data.len() >= 12 && data[..12] == PROXY_V2_SIGNATURE
+}
+
+/// Parse a PROXY protocol v2 binary header.
+///
+/// Binary format:
+/// - [0..12]  signature (12 bytes)
+/// - [12]     version (high nibble) + command (low nibble)
+/// - [13]     address family (high nibble) + transport (low nibble)
+/// - [14..16] address block length (big-endian u16)
+/// - [16..]   address block (variable, depends on family)
+pub fn parse_v2(data: &[u8]) -> Result<(ProxyProtocolHeader, usize), ProxyProtocolError> {
+    if data.len() < 16 {
+        return Err(ProxyProtocolError::Incomplete(16, data.len()));
+    }
+
+    // Validate signature
+    if data[..12] != PROXY_V2_SIGNATURE {
+        return Err(ProxyProtocolError::InvalidHeader);
+    }
+
+    // Version (high nibble of byte 12) must be 0x2
+    let version = (data[12] >> 4) & 0x0F;
+    if version != 2 {
+        return Err(ProxyProtocolError::UnsupportedVersion);
+    }
+
+    // Command (low nibble of byte 12)
+    let command = data[12] & 0x0F;
+    // 0x0 = LOCAL, 0x1 = PROXY
+    if command > 1 {
+        return Err(ProxyProtocolError::Parse(format!("Unknown command: {}", command)));
+    }
+
+    // Address family (high nibble) + transport (low nibble) of byte 13
+    let family = (data[13] >> 4) & 0x0F;
+    let transport = data[13] & 0x0F;
+
+    // Address block length
+    let addr_len = u16::from_be_bytes([data[14], data[15]]) as usize;
+    let total_len = 16 + addr_len;
+
+    if data.len() < total_len {
+        return Err(ProxyProtocolError::Incomplete(total_len, data.len()));
+    }
+
+    // LOCAL command: no real addresses, return unspecified
+    if command == 0 {
+        return Ok((
+            ProxyProtocolHeader {
+                source_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0),
+                dest_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0),
+                protocol: ProxyProtocol::Unknown,
+            },
+            total_len,
+        ));
+    }
+
+    // PROXY command: parse addresses based on family + transport
+    let addr_block = &data[16..16 + addr_len];
+
+    match (family, transport) {
+        // AF_INET (0x1) + STREAM (0x1) = TCP4
+        (0x1, 0x1) => {
+            if addr_len < 12 {
+                return Err(ProxyProtocolError::Parse("IPv4 address block too short".to_string()));
+            }
+            let src_ip = Ipv4Addr::new(addr_block[0], addr_block[1], addr_block[2], addr_block[3]);
+            let dst_ip = Ipv4Addr::new(addr_block[4], addr_block[5], addr_block[6], addr_block[7]);
+            let src_port = u16::from_be_bytes([addr_block[8], addr_block[9]]);
+            let dst_port = u16::from_be_bytes([addr_block[10], addr_block[11]]);
+            Ok((
+                ProxyProtocolHeader {
+                    source_addr: SocketAddr::new(IpAddr::V4(src_ip), src_port),
+                    dest_addr: SocketAddr::new(IpAddr::V4(dst_ip), dst_port),
+                    protocol: ProxyProtocol::Tcp4,
+                },
+                total_len,
+            ))
+        }
+        // AF_INET (0x1) + DGRAM (0x2) = UDP4
+        (0x1, 0x2) => {
+            if addr_len < 12 {
+                return Err(ProxyProtocolError::Parse("IPv4 address block too short".to_string()));
+            }
+            let src_ip = Ipv4Addr::new(addr_block[0], addr_block[1], addr_block[2], addr_block[3]);
+            let dst_ip = Ipv4Addr::new(addr_block[4], addr_block[5], addr_block[6], addr_block[7]);
+            let src_port = u16::from_be_bytes([addr_block[8], addr_block[9]]);
+            let dst_port = u16::from_be_bytes([addr_block[10], addr_block[11]]);
+            Ok((
+                ProxyProtocolHeader {
+                    source_addr: SocketAddr::new(IpAddr::V4(src_ip), src_port),
+                    dest_addr: SocketAddr::new(IpAddr::V4(dst_ip), dst_port),
+                    protocol: ProxyProtocol::Udp4,
+                },
+                total_len,
+            ))
+        }
+        // AF_INET6 (0x2) + STREAM (0x1) = TCP6
+        (0x2, 0x1) => {
+            if addr_len < 36 {
+                return Err(ProxyProtocolError::Parse("IPv6 address block too short".to_string()));
+            }
+            let src_ip = Ipv6Addr::from(<[u8; 16]>::try_from(&addr_block[0..16]).unwrap());
+            let dst_ip = Ipv6Addr::from(<[u8; 16]>::try_from(&addr_block[16..32]).unwrap());
+            let src_port = u16::from_be_bytes([addr_block[32], addr_block[33]]);
+            let dst_port = u16::from_be_bytes([addr_block[34], addr_block[35]]);
+            Ok((
+                ProxyProtocolHeader {
+                    source_addr: SocketAddr::new(IpAddr::V6(src_ip), src_port),
+                    dest_addr: SocketAddr::new(IpAddr::V6(dst_ip), dst_port),
+                    protocol: ProxyProtocol::Tcp6,
+                },
+                total_len,
+            ))
+        }
+        // AF_INET6 (0x2) + DGRAM (0x2) = UDP6
+        (0x2, 0x2) => {
+            if addr_len < 36 {
+                return Err(ProxyProtocolError::Parse("IPv6 address block too short".to_string()));
+            }
+            let src_ip = Ipv6Addr::from(<[u8; 16]>::try_from(&addr_block[0..16]).unwrap());
|
||||||
|
let dst_ip = Ipv6Addr::from(<[u8; 16]>::try_from(&addr_block[16..32]).unwrap());
|
||||||
|
let src_port = u16::from_be_bytes([addr_block[32], addr_block[33]]);
|
||||||
|
let dst_port = u16::from_be_bytes([addr_block[34], addr_block[35]]);
|
||||||
|
Ok((
|
||||||
|
ProxyProtocolHeader {
|
||||||
|
source_addr: SocketAddr::new(IpAddr::V6(src_ip), src_port),
|
||||||
|
dest_addr: SocketAddr::new(IpAddr::V6(dst_ip), dst_port),
|
||||||
|
protocol: ProxyProtocol::Udp6,
|
||||||
|
},
|
||||||
|
total_len,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
// AF_UNSPEC or unknown
|
||||||
|
(0x0, _) => Ok((
|
||||||
|
ProxyProtocolHeader {
|
||||||
|
source_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0),
|
||||||
|
dest_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0),
|
||||||
|
protocol: ProxyProtocol::Unknown,
|
||||||
|
},
|
||||||
|
total_len,
|
||||||
|
)),
|
||||||
|
_ => Err(ProxyProtocolError::Parse(format!(
|
||||||
|
"Unsupported family/transport: 0x{:X}{:X}",
|
||||||
|
family, transport
|
||||||
|
))),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Generate a PROXY protocol v2 binary header.
|
||||||
|
pub fn generate_v2(
|
||||||
|
source: &SocketAddr,
|
||||||
|
dest: &SocketAddr,
|
||||||
|
transport: ProxyV2Transport,
|
||||||
|
) -> Vec<u8> {
|
||||||
|
let transport_nibble: u8 = match transport {
|
||||||
|
ProxyV2Transport::Stream => 0x1,
|
||||||
|
ProxyV2Transport::Datagram => 0x2,
|
||||||
|
};
|
||||||
|
|
||||||
|
match (source.ip(), dest.ip()) {
|
||||||
|
(IpAddr::V4(src_ip), IpAddr::V4(dst_ip)) => {
|
||||||
|
let mut buf = Vec::with_capacity(28);
|
||||||
|
buf.extend_from_slice(&PROXY_V2_SIGNATURE);
|
||||||
|
buf.push(0x21); // version 2, PROXY command
|
||||||
|
buf.push(0x10 | transport_nibble); // AF_INET + transport
|
||||||
|
buf.extend_from_slice(&12u16.to_be_bytes()); // addr block length
|
||||||
|
buf.extend_from_slice(&src_ip.octets());
|
||||||
|
buf.extend_from_slice(&dst_ip.octets());
|
||||||
|
buf.extend_from_slice(&source.port().to_be_bytes());
|
||||||
|
buf.extend_from_slice(&dest.port().to_be_bytes());
|
||||||
|
buf
|
||||||
|
}
|
||||||
|
(IpAddr::V6(src_ip), IpAddr::V6(dst_ip)) => {
|
||||||
|
let mut buf = Vec::with_capacity(52);
|
||||||
|
buf.extend_from_slice(&PROXY_V2_SIGNATURE);
|
||||||
|
buf.push(0x21); // version 2, PROXY command
|
||||||
|
buf.push(0x20 | transport_nibble); // AF_INET6 + transport
|
||||||
|
buf.extend_from_slice(&36u16.to_be_bytes()); // addr block length
|
||||||
|
buf.extend_from_slice(&src_ip.octets());
|
||||||
|
buf.extend_from_slice(&dst_ip.octets());
|
||||||
|
buf.extend_from_slice(&source.port().to_be_bytes());
|
||||||
|
buf.extend_from_slice(&dest.port().to_be_bytes());
|
||||||
|
buf
|
||||||
|
}
|
||||||
|
// Mixed IPv4/IPv6: map IPv4 to IPv6-mapped address
|
||||||
|
_ => {
|
||||||
|
let src_v6 = match source.ip() {
|
||||||
|
IpAddr::V4(v4) => v4.to_ipv6_mapped(),
|
||||||
|
IpAddr::V6(v6) => v6,
|
||||||
|
};
|
||||||
|
let dst_v6 = match dest.ip() {
|
||||||
|
IpAddr::V4(v4) => v4.to_ipv6_mapped(),
|
||||||
|
IpAddr::V6(v6) => v6,
|
||||||
|
};
|
||||||
|
let src6 = SocketAddr::new(IpAddr::V6(src_v6), source.port());
|
||||||
|
let dst6 = SocketAddr::new(IpAddr::V6(dst_v6), dest.port());
|
||||||
|
generate_v2(&src6, &dst6, transport)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
|
|
||||||
|
// ===== v1 tests =====
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_parse_v1_tcp4() {
|
fn test_parse_v1_tcp4() {
|
||||||
let header = b"PROXY TCP4 192.168.1.100 10.0.0.1 12345 443\r\n";
|
let header = b"PROXY TCP4 192.168.1.100 10.0.0.1 12345 443\r\n";
|
||||||
@@ -126,4 +351,130 @@ mod tests {
|
|||||||
assert!(is_proxy_protocol_v1(b"PROXY TCP4 ..."));
|
assert!(is_proxy_protocol_v1(b"PROXY TCP4 ..."));
|
||||||
assert!(!is_proxy_protocol_v1(b"GET / HTTP/1.1"));
|
assert!(!is_proxy_protocol_v1(b"GET / HTTP/1.1"));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ===== v2 tests =====
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_is_proxy_protocol_v2() {
|
||||||
|
assert!(is_proxy_protocol_v2(&PROXY_V2_SIGNATURE));
|
||||||
|
assert!(!is_proxy_protocol_v2(b"PROXY TCP4 ..."));
|
||||||
|
assert!(!is_proxy_protocol_v2(b"short"));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_parse_v2_tcp4() {
|
||||||
|
let source: SocketAddr = "198.51.100.10:54321".parse().unwrap();
|
||||||
|
let dest: SocketAddr = "203.0.113.25:8443".parse().unwrap();
|
||||||
|
let header = generate_v2(&source, &dest, ProxyV2Transport::Stream);
|
||||||
|
|
||||||
|
assert_eq!(header.len(), 28);
|
||||||
|
let (parsed, consumed) = parse_v2(&header).unwrap();
|
||||||
|
assert_eq!(consumed, 28);
|
||||||
|
assert_eq!(parsed.protocol, ProxyProtocol::Tcp4);
|
||||||
|
assert_eq!(parsed.source_addr, source);
|
||||||
|
assert_eq!(parsed.dest_addr, dest);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_parse_v2_udp4() {
|
||||||
|
let source: SocketAddr = "10.0.0.1:12345".parse().unwrap();
|
||||||
|
let dest: SocketAddr = "10.0.0.2:53".parse().unwrap();
|
||||||
|
let header = generate_v2(&source, &dest, ProxyV2Transport::Datagram);
|
||||||
|
|
||||||
|
assert_eq!(header.len(), 28);
|
||||||
|
assert_eq!(header[13], 0x12); // AF_INET + DGRAM
|
||||||
|
|
||||||
|
let (parsed, consumed) = parse_v2(&header).unwrap();
|
||||||
|
assert_eq!(consumed, 28);
|
||||||
|
assert_eq!(parsed.protocol, ProxyProtocol::Udp4);
|
||||||
|
assert_eq!(parsed.source_addr, source);
|
||||||
|
assert_eq!(parsed.dest_addr, dest);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_parse_v2_tcp6() {
|
||||||
|
let source: SocketAddr = "[2001:db8::1]:54321".parse().unwrap();
|
||||||
|
let dest: SocketAddr = "[2001:db8::2]:443".parse().unwrap();
|
||||||
|
let header = generate_v2(&source, &dest, ProxyV2Transport::Stream);
|
||||||
|
|
||||||
|
assert_eq!(header.len(), 52);
|
||||||
|
assert_eq!(header[13], 0x21); // AF_INET6 + STREAM
|
||||||
|
|
||||||
|
let (parsed, consumed) = parse_v2(&header).unwrap();
|
||||||
|
assert_eq!(consumed, 52);
|
||||||
|
assert_eq!(parsed.protocol, ProxyProtocol::Tcp6);
|
||||||
|
assert_eq!(parsed.source_addr, source);
|
||||||
|
assert_eq!(parsed.dest_addr, dest);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_generate_v2_tcp4_byte_layout() {
|
||||||
|
let source: SocketAddr = "1.2.3.4:1000".parse().unwrap();
|
||||||
|
let dest: SocketAddr = "5.6.7.8:443".parse().unwrap();
|
||||||
|
let header = generate_v2(&source, &dest, ProxyV2Transport::Stream);
|
||||||
|
|
||||||
|
assert_eq!(&header[0..12], &PROXY_V2_SIGNATURE);
|
||||||
|
assert_eq!(header[12], 0x21); // v2, PROXY
|
||||||
|
assert_eq!(header[13], 0x11); // AF_INET, STREAM
|
||||||
|
assert_eq!(u16::from_be_bytes([header[14], header[15]]), 12); // addr len
|
||||||
|
assert_eq!(&header[16..20], &[1, 2, 3, 4]); // src ip
|
||||||
|
assert_eq!(&header[20..24], &[5, 6, 7, 8]); // dst ip
|
||||||
|
assert_eq!(u16::from_be_bytes([header[24], header[25]]), 1000); // src port
|
||||||
|
assert_eq!(u16::from_be_bytes([header[26], header[27]]), 443); // dst port
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_generate_v2_udp4_byte_layout() {
|
||||||
|
let source: SocketAddr = "10.0.0.1:5000".parse().unwrap();
|
||||||
|
let dest: SocketAddr = "10.0.0.2:53".parse().unwrap();
|
||||||
|
let header = generate_v2(&source, &dest, ProxyV2Transport::Datagram);
|
||||||
|
|
||||||
|
assert_eq!(header[12], 0x21); // v2, PROXY
|
||||||
|
assert_eq!(header[13], 0x12); // AF_INET, DGRAM (UDP)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_parse_v2_local_command() {
|
||||||
|
// Build a LOCAL command header (no addresses)
|
||||||
|
let mut header = Vec::new();
|
||||||
|
header.extend_from_slice(&PROXY_V2_SIGNATURE);
|
||||||
|
header.push(0x20); // v2, LOCAL
|
||||||
|
header.push(0x00); // AF_UNSPEC
|
||||||
|
header.extend_from_slice(&0u16.to_be_bytes()); // 0-length address block
|
||||||
|
|
||||||
|
let (parsed, consumed) = parse_v2(&header).unwrap();
|
||||||
|
assert_eq!(consumed, 16);
|
||||||
|
assert_eq!(parsed.protocol, ProxyProtocol::Unknown);
|
||||||
|
assert_eq!(parsed.source_addr.port(), 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_parse_v2_incomplete() {
|
||||||
|
let data = &PROXY_V2_SIGNATURE[..8]; // only 8 bytes
|
||||||
|
assert!(parse_v2(data).is_err());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_parse_v2_wrong_version() {
|
||||||
|
let mut header = Vec::new();
|
||||||
|
header.extend_from_slice(&PROXY_V2_SIGNATURE);
|
||||||
|
header.push(0x11); // version 1, not 2
|
||||||
|
header.push(0x11);
|
||||||
|
header.extend_from_slice(&12u16.to_be_bytes());
|
||||||
|
header.extend_from_slice(&[0u8; 12]);
|
||||||
|
assert!(matches!(parse_v2(&header), Err(ProxyProtocolError::UnsupportedVersion)));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_v2_roundtrip_with_trailing_data() {
|
||||||
|
let source: SocketAddr = "192.168.1.1:8080".parse().unwrap();
|
||||||
|
let dest: SocketAddr = "10.0.0.1:443".parse().unwrap();
|
||||||
|
let mut data = generate_v2(&source, &dest, ProxyV2Transport::Stream);
|
||||||
|
data.extend_from_slice(b"GET / HTTP/1.1\r\n"); // trailing app data
|
||||||
|
|
||||||
|
let (parsed, consumed) = parse_v2(&data).unwrap();
|
||||||
|
assert_eq!(consumed, 28);
|
||||||
|
assert_eq!(parsed.source_addr, source);
|
||||||
|
assert_eq!(&data[consumed..], b"GET / HTTP/1.1\r\n");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
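Both format families above hand back the same header type, so a consumer only has to probe the first bytes once. A minimal sketch (not part of this diff) of how a caller might dispatch between the v1 and v2 entry points; the helper name `strip_proxy_header` is illustrative:

```rust
// Sketch only: dispatch on the peeked bytes using the functions added above.
// The two signatures are disjoint (v2 is a 12-byte binary magic, v1 is the
// ASCII prefix "PROXY "), so the order of the checks does not matter.
fn strip_proxy_header(buf: &[u8]) -> Option<(ProxyProtocolHeader, usize)> {
    if is_proxy_protocol_v2(buf) {
        parse_v2(buf).ok()
    } else if is_proxy_protocol_v1(buf) {
        parse_v1(buf).ok()
    } else {
        None
    }
}
```

The returned `usize` is the number of header bytes to drain from the socket before handing the rest of the stream to the normal routing path, which is how the TcpListenerManager change further down uses it.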
@@ -465,21 +465,19 @@ impl TcpListenerManager {
                 Ok((stream, peer_addr)) => {
                     let ip = peer_addr.ip();

-                    // Global connection limit — acquire semaphore permit with timeout
-                    let permit = match tokio::time::timeout(
-                        std::time::Duration::from_secs(5),
-                        conn_semaphore.clone().acquire_owned(),
-                    ).await {
-                        Ok(Ok(permit)) => permit,
-                        Ok(Err(_)) => {
-                            // Semaphore closed — shouldn't happen, but be safe
-                            debug!("Connection semaphore closed, dropping connection from {}", peer_addr);
+                    // Global connection limit — non-blocking check.
+                    // MUST NOT block the accept loop: a blocking acquire would stall
+                    // ALL connections to this port (not just the one over limit), because
+                    // listener.accept() is not polled while we await the semaphore.
+                    let permit = match conn_semaphore.clone().try_acquire_owned() {
+                        Ok(permit) => permit,
+                        Err(tokio::sync::TryAcquireError::NoPermits) => {
+                            warn!("Global connection limit reached, dropping connection from {}", peer_addr);
                             drop(stream);
                             continue;
                         }
-                        Err(_) => {
-                            // Timeout — global limit reached
-                            debug!("Global connection limit reached, dropping connection from {}", peer_addr);
+                        Err(tokio::sync::TryAcquireError::Closed) => {
+                            warn!("Connection semaphore closed, dropping connection from {}", peer_addr);
                             drop(stream);
                             continue;
                         }
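The hunk above is the accept-loop half of the fix: a timed `acquire_owned` is replaced with `try_acquire_owned`, so a saturated connection limit rejects the new socket instead of stalling `accept()` for every other client on the port. A stand-alone sketch of the same pattern, with an illustrative cap and handler body:

```rust
use std::sync::Arc;
use tokio::sync::{Semaphore, TryAcquireError};

async fn accept_loop(listener: tokio::net::TcpListener) {
    let limit = Arc::new(Semaphore::new(1024)); // illustrative global cap
    loop {
        let Ok((stream, peer)) = listener.accept().await else { continue };
        match limit.clone().try_acquire_owned() {
            Ok(permit) => {
                tokio::spawn(async move {
                    let _permit = permit; // released when the connection task ends
                    let _ = (stream, peer); // handle the connection here
                });
            }
            // Over the limit (or semaphore closed): drop immediately, keep accepting.
            Err(TryAcquireError::NoPermits) | Err(TryAcquireError::Closed) => drop(stream),
        }
    }
}
```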
@@ -487,7 +485,7 @@ impl TcpListenerManager {

                     // Check per-IP limits and rate limiting
                     if !conn_tracker.try_accept(&ip) {
-                        debug!("Rejected connection from {} (per-IP limit or rate limit)", peer_addr);
+                        warn!("Rejected connection from {} (per-IP limit or rate limit)", peer_addr);
                         drop(stream);
                         drop(permit);
                         continue;
@@ -519,7 +517,7 @@ impl TcpListenerManager {
                             stream, port, peer_addr, rm, m, tc, sa, hp, cc, cn, sr, rc,
                         ).await;
                         if let Err(e) = result {
-                            debug!("Connection error from {}: {}", peer_addr, e);
+                            warn!("Connection error from {}: {}", peer_addr, e);
                         }
                     });
                 }
@@ -563,8 +561,9 @@ impl TcpListenerManager {
         // Non-proxy connections skip the peek entirely (no latency cost).
         let mut effective_peer_addr = peer_addr;
         if !conn_config.proxy_ips.is_empty() && conn_config.proxy_ips.contains(&peer_addr.ip()) {
-            // Trusted proxy IP — peek for PROXY protocol header
-            let mut proxy_peek = vec![0u8; 256];
+            // Trusted proxy IP — peek for PROXY protocol header.
+            // Use stack-allocated buffers (PROXY v1 headers are max ~108 bytes).
+            let mut proxy_peek = [0u8; 256];
             let pn = match tokio::time::timeout(
                 std::time::Duration::from_millis(conn_config.initial_data_timeout_ms),
                 stream.peek(&mut proxy_peek),
@@ -574,18 +573,30 @@ impl TcpListenerManager {
                 Err(_) => return Err("Initial data timeout (proxy protocol peek)".into()),
             };

-            if pn > 0 && crate::proxy_protocol::is_proxy_protocol_v1(&proxy_peek[..pn]) {
-                match crate::proxy_protocol::parse_v1(&proxy_peek[..pn]) {
-                    Ok((header, consumed)) => {
-                        debug!("PROXY protocol: real client {} -> {}", header.source_addr, header.dest_addr);
-                        effective_peer_addr = header.source_addr;
-                        // Consume the proxy protocol header bytes
-                        let mut discard = vec![0u8; consumed];
-                        stream.read_exact(&mut discard).await?;
+            if pn > 0 {
+                if crate::proxy_protocol::is_proxy_protocol_v1(&proxy_peek[..pn]) {
+                    match crate::proxy_protocol::parse_v1(&proxy_peek[..pn]) {
+                        Ok((header, consumed)) => {
+                            debug!("PROXY v1: real client {} -> {}", header.source_addr, header.dest_addr);
+                            effective_peer_addr = header.source_addr;
+                            let mut discard = [0u8; 128];
+                            stream.read_exact(&mut discard[..consumed]).await?;
+                        }
+                        Err(e) => {
+                            debug!("Failed to parse PROXY v1 header: {}", e);
+                        }
                     }
-                    Err(e) => {
-                        debug!("Failed to parse PROXY protocol header: {}", e);
-                        // Not a PROXY protocol header, continue normally
+                } else if crate::proxy_protocol::is_proxy_protocol_v2(&proxy_peek[..pn]) {
+                    match crate::proxy_protocol::parse_v2(&proxy_peek[..pn]) {
+                        Ok((header, consumed)) => {
+                            debug!("PROXY v2: real client {} -> {} ({:?})", header.source_addr, header.dest_addr, header.protocol);
+                            effective_peer_addr = header.source_addr;
+                            let mut discard = [0u8; 256];
+                            stream.read_exact(&mut discard[..consumed]).await?;
+                        }
+                        Err(e) => {
+                            debug!("Failed to parse PROXY v2 header: {}", e);
+                        }
+                    }
                     }
                 }
             }
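One detail worth calling out in the hunk above: `peek` does not consume bytes, so after a successful parse the code must drain exactly `consumed` bytes with `read_exact`, otherwise the PROXY header would be forwarded to the backend as application data. A tiny sketch of that idiom in isolation (buffer size is illustrative):

```rust
use tokio::io::AsyncReadExt;
use tokio::net::TcpStream;

// Drain `consumed` header bytes that were previously only peeked.
// Assumes `consumed <= 256`, which holds for v1 and for the v2 headers this proxy generates.
async fn drain_peeked_header(stream: &mut TcpStream, consumed: usize) -> std::io::Result<()> {
    let mut discard = [0u8; 256];
    stream.read_exact(&mut discard[..consumed]).await?;
    Ok(())
}
```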
@@ -664,7 +675,7 @@ impl TcpListenerManager {
             if !rustproxy_http::request_filter::RequestFilter::check_ip_security(
                 security, &peer_addr.ip(),
             ) {
-                debug!("Connection from {} blocked by route security", peer_addr);
+                warn!("Connection from {} blocked by route security", peer_addr);
                 return Ok(());
             }
         }
@@ -810,7 +821,7 @@ impl TcpListenerManager {
         let route_match = match route_match {
             Some(rm) => rm,
             None => {
-                debug!("No route matched for port {} domain {:?}", port, domain);
+                warn!("No route matched for port {} domain {:?} from {}", port, domain, peer_addr);
                 if is_http {
                     // Send a proper HTTP error instead of dropping the connection
                     use tokio::io::AsyncWriteExt;
@@ -844,7 +855,7 @@ impl TcpListenerManager {
                 security,
                 &peer_addr.ip(),
             ) {
-                debug!("Connection from {} blocked by route security", peer_addr);
+                warn!("Connection from {} blocked by route security", peer_addr);
                 return Ok(());
             }
         }
@@ -987,13 +998,18 @@ impl TcpListenerManager {
             Err(_) => return Err("TLS handshake timeout".into()),
         };

-        // Peek at decrypted data to determine if HTTP
+        // Peek at decrypted data to determine if HTTP.
+        // Timeout prevents connection leak if client completes TLS
+        // but never sends application data (scanners, health probes, slow-loris).
         let mut buf_stream = tokio::io::BufReader::new(tls_stream);
         let peeked = {
             use tokio::io::AsyncBufReadExt;
-            match buf_stream.fill_buf().await {
-                Ok(data) => sni_parser::is_http(data),
-                Err(_) => false,
+            match tokio::time::timeout(
+                std::time::Duration::from_millis(conn_config.initial_data_timeout_ms),
+                buf_stream.fill_buf(),
+            ).await {
+                Ok(Ok(data)) => sni_parser::is_http(data),
+                Ok(Err(_)) | Err(_) => false,
             }
         };

@@ -1011,7 +1027,11 @@ impl TcpListenerManager {
                 "TLS Terminate + HTTP: {} -> {}:{} (domain: {:?})",
                 peer_addr, target_host, target_port, domain
             );
-            http_proxy.handle_io(buf_stream, peer_addr, port, cancel.clone()).await;
+            // Wrap in ShutdownOnDrop to ensure TLS close_notify is sent
+            // even if hyper drops the connection without calling shutdown
+            // (e.g. H2 close, backend error, idle timeout drain).
+            let wrapped = rustproxy_http::shutdown_on_drop::ShutdownOnDrop::new(buf_stream);
+            http_proxy.handle_io(wrapped, peer_addr, port, cancel.clone()).await;
         } else {
             debug!(
                 "TLS Terminate + TCP: {} -> {}:{} (domain: {:?})",
@@ -1062,13 +1082,18 @@ impl TcpListenerManager {
             Err(_) => return Err("TLS handshake timeout".into()),
         };

-        // Peek at decrypted data to detect protocol
+        // Peek at decrypted data to detect protocol.
+        // Timeout prevents connection leak if client completes TLS
+        // but never sends application data (scanners, health probes, slow-loris).
         let mut buf_stream = tokio::io::BufReader::new(tls_stream);
         let is_http_data = {
             use tokio::io::AsyncBufReadExt;
-            match buf_stream.fill_buf().await {
-                Ok(data) => sni_parser::is_http(data),
-                Err(_) => false,
+            match tokio::time::timeout(
+                std::time::Duration::from_millis(conn_config.initial_data_timeout_ms),
+                buf_stream.fill_buf(),
+            ).await {
+                Ok(Ok(data)) => sni_parser::is_http(data),
+                Ok(Err(_)) | Err(_) => false,
             }
         };

@@ -1088,7 +1113,10 @@ impl TcpListenerManager {
                 "TLS Terminate+Reencrypt + HTTP: {} (domain: {:?})",
                 peer_addr, domain
             );
-            http_proxy.handle_io(buf_stream, peer_addr, port, cancel.clone()).await;
+            // Wrap in ShutdownOnDrop to ensure TLS close_notify is sent
+            // even if hyper drops the connection without calling shutdown.
+            let wrapped = rustproxy_http::shutdown_on_drop::ShutdownOnDrop::new(buf_stream);
+            http_proxy.handle_io(wrapped, peer_addr, port, cancel.clone()).await;
         } else {
             // Non-HTTP: TLS-to-TLS tunnel (existing behavior for raw TCP protocols)
             debug!(
@@ -1396,15 +1424,24 @@ impl TcpListenerManager {
         let last_activity = Arc::new(AtomicU64::new(0));
         let start = std::time::Instant::now();

+        // Per-connection cancellation token: the watchdog cancels this instead of
+        // aborting tasks, so the copy loops can shut down gracefully (TLS close_notify
+        // for terminate/reencrypt mode, TCP FIN for passthrough mode).
+        let conn_cancel = CancellationToken::new();
+
         let la1 = Arc::clone(&last_activity);
         let metrics_c2b = metrics.clone();
+        let cc1 = conn_cancel.clone();
         let c2b = tokio::spawn(async move {
             let mut buf = vec![0u8; 65536];
             let mut total = 0u64;
             loop {
-                let n = match client_read.read(&mut buf).await {
-                    Ok(0) | Err(_) => break,
-                    Ok(n) => n,
+                let n = tokio::select! {
+                    result = client_read.read(&mut buf) => match result {
+                        Ok(0) | Err(_) => break,
+                        Ok(n) => n,
+                    },
+                    _ = cc1.cancelled() => break,
                 };
                 if backend_write.write_all(&buf[..n]).await.is_err() {
                     break;
@@ -1418,19 +1455,27 @@ impl TcpListenerManager {
                     ctx.collector.record_bytes(n as u64, 0, ctx.route_id.as_deref(), ctx.source_ip.as_deref());
                 }
             }
-            let _ = backend_write.shutdown().await;
+            // Graceful shutdown with timeout (sends TLS close_notify / TCP FIN)
+            let _ = tokio::time::timeout(
+                std::time::Duration::from_secs(2),
+                backend_write.shutdown(),
+            ).await;
             total
         });

         let la2 = Arc::clone(&last_activity);
         let metrics_b2c = metrics;
+        let cc2 = conn_cancel.clone();
         let b2c = tokio::spawn(async move {
             let mut buf = vec![0u8; 65536];
             let mut total = 0u64;
             loop {
-                let n = match backend_read.read(&mut buf).await {
-                    Ok(0) | Err(_) => break,
-                    Ok(n) => n,
+                let n = tokio::select! {
+                    result = backend_read.read(&mut buf) => match result {
+                        Ok(0) | Err(_) => break,
+                        Ok(n) => n,
+                    },
+                    _ = cc2.cancelled() => break,
                 };
                 if client_write.write_all(&buf[..n]).await.is_err() {
                     break;
@@ -1444,14 +1489,20 @@ impl TcpListenerManager {
                     ctx.collector.record_bytes(0, n as u64, ctx.route_id.as_deref(), ctx.source_ip.as_deref());
                 }
             }
-            let _ = client_write.shutdown().await;
+            // Graceful shutdown with timeout (sends TLS close_notify / TCP FIN)
+            let _ = tokio::time::timeout(
+                std::time::Duration::from_secs(2),
+                client_write.shutdown(),
+            ).await;
             total
         });

-        // Watchdog task: check for inactivity, max lifetime, and cancellation
+        // Watchdog task: check for inactivity, max lifetime, and cancellation.
+        // First cancels the per-connection token for graceful shutdown (close_notify/FIN),
+        // then falls back to abort if the tasks are stuck (e.g. on a blocked write_all).
         let la_watch = Arc::clone(&last_activity);
-        let c2b_handle = c2b.abort_handle();
-        let b2c_handle = b2c.abort_handle();
+        let c2b_abort = c2b.abort_handle();
+        let b2c_abort = b2c.abort_handle();
         let watchdog = tokio::spawn(async move {
             let check_interval = std::time::Duration::from_secs(5);
             let mut last_seen = 0u64;
@@ -1459,16 +1510,12 @@ impl TcpListenerManager {
                 tokio::select! {
                     _ = cancel.cancelled() => {
                         debug!("Split-stream connection cancelled by shutdown");
-                        c2b_handle.abort();
-                        b2c_handle.abort();
                         break;
                     }
                     _ = tokio::time::sleep(check_interval) => {
                         // Check max lifetime
                         if start.elapsed() >= max_lifetime {
                             debug!("Connection exceeded max lifetime, closing");
-                            c2b_handle.abort();
-                            b2c_handle.abort();
                             break;
                         }

@@ -1479,8 +1526,6 @@ impl TcpListenerManager {
                         let elapsed_since_activity = start.elapsed().as_millis() as u64 - current;
                         if elapsed_since_activity >= inactivity_timeout.as_millis() as u64 {
                             debug!("Connection inactive for {}ms, closing", elapsed_since_activity);
-                            c2b_handle.abort();
-                            b2c_handle.abort();
                             break;
                         }
                     }
@@ -1488,6 +1533,13 @@ impl TcpListenerManager {
                     }
                 }
             }
+            // Phase 1: Signal copy loops to exit gracefully (allows close_notify/FIN)
+            conn_cancel.cancel();
+            // Phase 2: Wait for graceful shutdown (2s shutdown timeout + 2s margin)
+            tokio::time::sleep(std::time::Duration::from_secs(4)).await;
+            // Phase 3: Force-abort if still stuck (e.g. blocked on write_all)
+            c2b_abort.abort();
+            b2c_abort.abort();
         });

         let bytes_in = c2b.await.unwrap_or(0);
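The watchdog rework above replaces immediate task aborts with an escalation: cancel first so the copy loops can flush and close cleanly, abort only as a last resort. A condensed sketch of that sequence, assuming the same `tokio-util` CancellationToken the listener already uses:

```rust
use tokio_util::sync::CancellationToken;

// Sketch of the three-phase teardown performed by the watchdog above.
async fn teardown(conn_cancel: CancellationToken, aborts: Vec<tokio::task::AbortHandle>) {
    // Phase 1: ask the select!-based copy loops to exit, letting them send
    // TLS close_notify (terminate/reencrypt mode) or a TCP FIN (passthrough mode).
    conn_cancel.cancel();
    // Phase 2: allow their 2s shutdown timeouts to run, plus margin.
    tokio::time::sleep(std::time::Duration::from_secs(4)).await;
    // Phase 3: hard-abort anything still stuck (e.g. blocked in write_all).
    for h in aborts {
        h.abort();
    }
}
```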
@@ -6,25 +6,28 @@
 /// - `example.com` exact match
 /// - `**.example.com` matches any depth of subdomain
 pub fn domain_matches(pattern: &str, domain: &str) -> bool {
-    let pattern = pattern.trim().to_lowercase();
-    let domain = domain.trim().to_lowercase();
+    let pattern = pattern.trim();
+    let domain = domain.trim();

     if pattern == "*" {
         return true;
     }

-    if pattern == domain {
+    if pattern.eq_ignore_ascii_case(domain) {
         return true;
     }

     // Wildcard patterns
-    if pattern.starts_with("*.") {
+    if pattern.starts_with("*.") || pattern.starts_with("*.") {
         let suffix = &pattern[2..]; // e.g., "example.com"
         // Match exact parent or any single-level subdomain
-        if domain == suffix {
+        if domain.eq_ignore_ascii_case(suffix) {
             return true;
         }
-        if domain.ends_with(&format!(".{}", suffix)) {
+        if domain.len() > suffix.len() + 1
+            && domain.as_bytes()[domain.len() - suffix.len() - 1] == b'.'
+            && domain[domain.len() - suffix.len()..].eq_ignore_ascii_case(suffix)
+        {
             // Check it's a single level subdomain for `*.`
             let prefix = &domain[..domain.len() - suffix.len() - 1];
             return !prefix.contains('.');
@@ -35,11 +38,22 @@ pub fn domain_matches(pattern: &str, domain: &str) -> bool {
     if pattern.starts_with("**.") {
         let suffix = &pattern[3..];
         // Match exact parent or any depth of subdomain
-        return domain == suffix || domain.ends_with(&format!(".{}", suffix));
+        if domain.eq_ignore_ascii_case(suffix) {
+            return true;
+        }
+        if domain.len() > suffix.len() + 1
+            && domain.as_bytes()[domain.len() - suffix.len() - 1] == b'.'
+            && domain[domain.len() - suffix.len()..].eq_ignore_ascii_case(suffix)
+        {
+            return true;
+        }
+        return false;
     }

-    // Use glob-match for more complex patterns
-    glob_match::glob_match(&pattern, &domain)
+    // Use glob-match for more complex patterns (case-insensitive via lowercasing)
+    let pattern_lower = pattern.to_lowercase();
+    let domain_lower = domain.to_lowercase();
+    glob_match::glob_match(&pattern_lower, &domain_lower)
 }

 /// Check if a domain matches any of the given patterns.
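A few concrete cases the rewritten matcher should satisfy, based on the doc comment and the branches above (expectations, not an exhaustive spec; the final fallback depends on the `glob_match` crate's semantics):

```rust
#[test]
fn domain_matching_examples() {
    // `*.` matches the bare parent and exactly one subdomain label, case-insensitively.
    assert!(domain_matches("*.example.com", "example.com"));
    assert!(domain_matches("*.example.com", "API.example.com"));
    assert!(!domain_matches("*.example.com", "a.b.example.com"));

    // `**.` matches the parent and any depth of subdomain.
    assert!(domain_matches("**.example.com", "a.b.example.com"));

    // Anything else falls through to glob matching on lowercased copies.
    assert!(domain_matches("api-*.example.com", "api-v2.example.com"));
}
```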
@@ -60,6 +60,16 @@ impl RouteManager {
         manager
     }

+    /// Check if any route on the given port uses header matching.
+    /// Used to skip expensive header HashMap construction when no route needs it.
+    pub fn any_route_has_headers(&self, port: u16) -> bool {
+        if let Some(indices) = self.port_index.get(&port) {
+            indices.iter().any(|&idx| self.routes[idx].route_match.headers.is_some())
+        } else {
+            false
+        }
+    }
+
     /// Find the best matching route for the given context.
     pub fn find_route<'a>(&'a self, ctx: &MatchContext<'_>) -> Option<RouteMatchResult<'a>> {
         // Get routes for this port
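A sketch of how the new `any_route_has_headers` check can be used on the hot path; `extract_headers` is a hypothetical stand-in for whatever header parsing the caller performs, and the way `RouteManager` is imported is assumed rather than taken from the diff:

```rust
use std::collections::HashMap;

// Hypothetical header extractor, stubbed so the sketch stands alone.
fn extract_headers(_raw: &[u8]) -> HashMap<String, String> {
    HashMap::new()
}

fn headers_for_matching(
    manager: &RouteManager, // the route manager from this crate (import path assumed)
    port: u16,
    raw_request: &[u8],
) -> Option<HashMap<String, String>> {
    // Only pay for header parsing and the HashMap allocation when some route
    // on this port actually matches on headers.
    if manager.any_route_has_headers(port) {
        Some(extract_headers(raw_request))
    } else {
        None
    }
}
```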
@@ -632,15 +632,13 @@ impl RustProxy {
         let new_manager = Arc::new(new_manager);
         self.route_table.store(Arc::clone(&new_manager));

-        // Update listener manager
+        // Update listener manager.
+        // IMPORTANT: TLS configs must be swapped BEFORE the route manager so that
+        // new routes only become visible after their certs are loaded. The reverse
+        // order (routes first) creates a window where connections match new routes
+        // but get the old TLS acceptor, causing cert mismatches.
         if let Some(ref mut listener) = self.listener_manager {
-            listener.update_route_manager(Arc::clone(&new_manager));
-            // Cancel connections on routes that were removed or disabled
-            listener.invalidate_removed_routes(&active_route_ids);
-            // Prune HTTP proxy caches (rate limiters, regex cache, round-robin counters)
-            listener.prune_http_proxy_caches(&active_route_ids);
-
-            // Update TLS configs
+            // 1. Update TLS configs first (so new certs are available before new routes)
             let mut tls_configs = Self::extract_tls_configs(&routes);
             if let Some(ref cm_arc) = self.cert_manager {
                 let cm = cm_arc.lock().await;
@@ -661,6 +659,13 @@ impl RustProxy {
             }
             listener.set_tls_configs(tls_configs);

+            // 2. Now swap the route manager (new routes become visible with certs already loaded)
+            listener.update_route_manager(Arc::clone(&new_manager));
+            // Cancel connections on routes that were removed or disabled
+            listener.invalidate_removed_routes(&active_route_ids);
+            // Prune HTTP proxy caches (rate limiters, regex cache, round-robin counters)
+            listener.prune_http_proxy_caches(&active_route_ids);
+
             // Add new ports
             for port in &new_ports {
                 if !old_ports.contains(port) {
@@ -1,133 +0,0 @@
-import { expect, tap } from '@git.zone/tstest/tapbundle';
-import * as smartproxy from '../ts/index.js';
-import { ProxyProtocolParser } from '../ts/core/utils/proxy-protocol.js';
-
-tap.test('PROXY protocol v1 parser - valid headers', async () => {
-  // Test TCP4 format
-  const tcp4Header = Buffer.from('PROXY TCP4 192.168.1.1 10.0.0.1 56324 443\r\n', 'ascii');
-  const tcp4Result = ProxyProtocolParser.parse(tcp4Header);
-
-  expect(tcp4Result.proxyInfo).property('protocol').toEqual('TCP4');
-  expect(tcp4Result.proxyInfo).property('sourceIP').toEqual('192.168.1.1');
-  expect(tcp4Result.proxyInfo).property('sourcePort').toEqual(56324);
-  expect(tcp4Result.proxyInfo).property('destinationIP').toEqual('10.0.0.1');
-  expect(tcp4Result.proxyInfo).property('destinationPort').toEqual(443);
-  expect(tcp4Result.remainingData.length).toEqual(0);
-
-  // Test TCP6 format
-  const tcp6Header = Buffer.from('PROXY TCP6 2001:db8::1 2001:db8::2 56324 443\r\n', 'ascii');
-  const tcp6Result = ProxyProtocolParser.parse(tcp6Header);
-
-  expect(tcp6Result.proxyInfo).property('protocol').toEqual('TCP6');
-  expect(tcp6Result.proxyInfo).property('sourceIP').toEqual('2001:db8::1');
-  expect(tcp6Result.proxyInfo).property('sourcePort').toEqual(56324);
-  expect(tcp6Result.proxyInfo).property('destinationIP').toEqual('2001:db8::2');
-  expect(tcp6Result.proxyInfo).property('destinationPort').toEqual(443);
-
-  // Test UNKNOWN protocol
-  const unknownHeader = Buffer.from('PROXY UNKNOWN\r\n', 'ascii');
-  const unknownResult = ProxyProtocolParser.parse(unknownHeader);
-
-  expect(unknownResult.proxyInfo).property('protocol').toEqual('UNKNOWN');
-  expect(unknownResult.proxyInfo).property('sourceIP').toEqual('');
-  expect(unknownResult.proxyInfo).property('sourcePort').toEqual(0);
-});
-
-tap.test('PROXY protocol v1 parser - with remaining data', async () => {
-  const headerWithData = Buffer.concat([
-    Buffer.from('PROXY TCP4 192.168.1.1 10.0.0.1 56324 443\r\n', 'ascii'),
-    Buffer.from('GET / HTTP/1.1\r\n', 'ascii')
-  ]);
-
-  const result = ProxyProtocolParser.parse(headerWithData);
-
-  expect(result.proxyInfo).property('protocol').toEqual('TCP4');
-  expect(result.proxyInfo).property('sourceIP').toEqual('192.168.1.1');
-  expect(result.remainingData.toString()).toEqual('GET / HTTP/1.1\r\n');
-});
-
-tap.test('PROXY protocol v1 parser - invalid headers', async () => {
-  // Not a PROXY protocol header
-  const notProxy = Buffer.from('GET / HTTP/1.1\r\n', 'ascii');
-  const notProxyResult = ProxyProtocolParser.parse(notProxy);
-  expect(notProxyResult.proxyInfo).toBeNull();
-  expect(notProxyResult.remainingData).toEqual(notProxy);
-
-  // Invalid protocol
-  expect(() => {
-    ProxyProtocolParser.parse(Buffer.from('PROXY INVALID 1.1.1.1 2.2.2.2 80 443\r\n', 'ascii'));
-  }).toThrow();
-
-  // Wrong number of fields
-  expect(() => {
-    ProxyProtocolParser.parse(Buffer.from('PROXY TCP4 192.168.1.1 10.0.0.1 56324\r\n', 'ascii'));
-  }).toThrow();
-
-  // Invalid port
-  expect(() => {
-    ProxyProtocolParser.parse(Buffer.from('PROXY TCP4 192.168.1.1 10.0.0.1 99999 443\r\n', 'ascii'));
-  }).toThrow();
-
-  // Invalid IP for protocol
-  expect(() => {
-    ProxyProtocolParser.parse(Buffer.from('PROXY TCP4 2001:db8::1 10.0.0.1 56324 443\r\n', 'ascii'));
-  }).toThrow();
-});
-
-tap.test('PROXY protocol v1 parser - incomplete headers', async () => {
-  // Header without terminator
-  const incomplete = Buffer.from('PROXY TCP4 192.168.1.1 10.0.0.1 56324 443', 'ascii');
-  const result = ProxyProtocolParser.parse(incomplete);
-
-  expect(result.proxyInfo).toBeNull();
-  expect(result.remainingData).toEqual(incomplete);
-
-  // Header exceeding max length - create a buffer that actually starts with PROXY
-  const longHeader = Buffer.from('PROXY TCP4 ' + '1'.repeat(100), 'ascii');
-  expect(() => {
-    ProxyProtocolParser.parse(longHeader);
-  }).toThrow();
-});
-
-tap.test('PROXY protocol v1 generator', async () => {
-  // Generate TCP4 header
-  const tcp4Info = {
-    protocol: 'TCP4' as const,
-    sourceIP: '192.168.1.1',
-    sourcePort: 56324,
-    destinationIP: '10.0.0.1',
-    destinationPort: 443
-  };
-
-  const tcp4Header = ProxyProtocolParser.generate(tcp4Info);
-  expect(tcp4Header.toString('ascii')).toEqual('PROXY TCP4 192.168.1.1 10.0.0.1 56324 443\r\n');
-
-  // Generate TCP6 header
-  const tcp6Info = {
-    protocol: 'TCP6' as const,
-    sourceIP: '2001:db8::1',
-    sourcePort: 56324,
-    destinationIP: '2001:db8::2',
-    destinationPort: 443
-  };
-
-  const tcp6Header = ProxyProtocolParser.generate(tcp6Info);
-  expect(tcp6Header.toString('ascii')).toEqual('PROXY TCP6 2001:db8::1 2001:db8::2 56324 443\r\n');
-
-  // Generate UNKNOWN header
-  const unknownInfo = {
-    protocol: 'UNKNOWN' as const,
-    sourceIP: '',
-    sourcePort: 0,
-    destinationIP: '',
-    destinationPort: 0
-  };
-
-  const unknownHeader = ProxyProtocolParser.generate(unknownInfo);
-  expect(unknownHeader.toString('ascii')).toEqual('PROXY UNKNOWN\r\n');
-});
-
-// Skipping integration tests for now - focus on unit tests
-// Integration tests would require more complex setup and teardown
-
-export default tap.start();
@@ -3,6 +3,6 @@
  */
 export const commitinfo = {
   name: '@push.rocks/smartproxy',
-  version: '25.10.1',
+  version: '25.12.0',
   description: 'A powerful proxy package with unified route-based configuration for high traffic management. Features include SSL/TLS support, flexible routing patterns, WebSocket handling, advanced security options, and automatic ACME certificate management.'
 }

@@ -15,4 +15,3 @@ export * from './lifecycle-component.js';
 export * from './binary-heap.js';
 export * from './enhanced-connection-pool.js';
 export * from './socket-utils.js';
-export * from './proxy-protocol.js';
@@ -1,129 +0,0 @@
-import * as plugins from '../../plugins.js';
-import { logger } from './logger.js';
-import { ProxyProtocolParser as ProtocolParser, type IProxyInfo, type IProxyParseResult } from '../../protocols/proxy/index.js';
-
-// Re-export types from protocols for backward compatibility
-export type { IProxyInfo, IProxyParseResult } from '../../protocols/proxy/index.js';
-
-/**
- * Parser for PROXY protocol v1 (text format)
- * Spec: https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt
- *
- * This class now delegates to the protocol parser but adds
- * smartproxy-specific features like socket reading and logging
- */
-export class ProxyProtocolParser {
-  static readonly PROXY_V1_SIGNATURE = ProtocolParser.PROXY_V1_SIGNATURE;
-  static readonly MAX_HEADER_LENGTH = ProtocolParser.MAX_HEADER_LENGTH;
-  static readonly HEADER_TERMINATOR = ProtocolParser.HEADER_TERMINATOR;
-
-  /**
-   * Parse PROXY protocol v1 header from buffer
-   * Returns proxy info and remaining data after header
-   */
-  static parse(data: Buffer): IProxyParseResult {
-    // Delegate to protocol parser
-    return ProtocolParser.parse(data);
-  }
-
-  /**
-   * Generate PROXY protocol v1 header
-   */
-  static generate(info: IProxyInfo): Buffer {
-    // Delegate to protocol parser
-    return ProtocolParser.generate(info);
-  }
-
-  /**
-   * Validate IP address format
-   */
-  private static isValidIP(ip: string, protocol: 'TCP4' | 'TCP6' | 'UNKNOWN'): boolean {
-    return ProtocolParser.isValidIP(ip, protocol);
-  }
-
-  /**
-   * Attempt to read a complete PROXY protocol header from a socket
-   * Returns null if no PROXY protocol detected or incomplete
-   */
-  static async readFromSocket(socket: plugins.net.Socket, timeout: number = 5000): Promise<IProxyParseResult | null> {
-    return new Promise((resolve) => {
-      let buffer = Buffer.alloc(0);
-      let resolved = false;
-
-      const cleanup = () => {
-        socket.removeListener('data', onData);
-        socket.removeListener('error', onError);
-        clearTimeout(timer);
-      };
-
-      const timer = setTimeout(() => {
-        if (!resolved) {
-          resolved = true;
-          cleanup();
-          resolve({
-            proxyInfo: null,
-            remainingData: buffer
-          });
-        }
-      }, timeout);
-
-      const onData = (chunk: Buffer) => {
-        buffer = Buffer.concat([buffer, chunk]);
-
-        // Check if we have enough data
-        if (!buffer.toString('ascii', 0, Math.min(6, buffer.length)).startsWith(this.PROXY_V1_SIGNATURE)) {
-          // Not PROXY protocol
-          resolved = true;
-          cleanup();
-          resolve({
-            proxyInfo: null,
-            remainingData: buffer
-          });
-          return;
-        }
-
-        // Try to parse
-        try {
-          const result = this.parse(buffer);
-          if (result.proxyInfo) {
-            // Successfully parsed
-            resolved = true;
-            cleanup();
-            resolve(result);
-          } else if (buffer.length > this.MAX_HEADER_LENGTH) {
-            // Header too long
-            resolved = true;
-            cleanup();
-            resolve({
-              proxyInfo: null,
-              remainingData: buffer
-            });
-          }
-          // Otherwise continue reading
-        } catch (error) {
-          // Parse error
-          logger.log('error', `PROXY protocol parse error: ${error.message}`);
-          resolved = true;
-          cleanup();
-          resolve({
-            proxyInfo: null,
-            remainingData: buffer
-          });
-        }
-      };
-
-      const onError = (error: Error) => {
-        logger.log('error', `Socket error while reading PROXY protocol: ${error.message}`);
-        resolved = true;
-        cleanup();
-        resolve({
-          proxyInfo: null,
-          remainingData: buffer
-        });
-      };
-
-      socket.on('data', onData);
-      socket.on('error', onError);
-    });
-  }
-}
@@ -1,7 +1,6 @@
 /**
  * PROXY Protocol Module
- * HAProxy PROXY protocol implementation
+ * Type definitions for HAProxy PROXY protocol v1/v2
  */

 export * from './types.js';
-export * from './parser.js';
@@ -1,183 +0,0 @@
-/**
- * PROXY Protocol Parser
- * Implementation of HAProxy PROXY protocol v1 (text format)
- * Spec: https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt
- */
-
-import type { IProxyInfo, IProxyParseResult, TProxyProtocol } from './types.js';
-
-/**
- * PROXY protocol parser
- */
-export class ProxyProtocolParser {
-  static readonly PROXY_V1_SIGNATURE = 'PROXY ';
-  static readonly MAX_HEADER_LENGTH = 107; // Max length for v1 header
-  static readonly HEADER_TERMINATOR = '\r\n';
-
-  /**
-   * Parse PROXY protocol v1 header from buffer
-   * Returns proxy info and remaining data after header
-   */
-  static parse(data: Buffer): IProxyParseResult {
-    // Check if buffer starts with PROXY signature
-    if (!data.toString('ascii', 0, 6).startsWith(this.PROXY_V1_SIGNATURE)) {
-      return {
-        proxyInfo: null,
-        remainingData: data
-      };
-    }
-
-    // Find header terminator
-    const headerEndIndex = data.indexOf(this.HEADER_TERMINATOR);
-    if (headerEndIndex === -1) {
-      // Header incomplete, need more data
-      if (data.length > this.MAX_HEADER_LENGTH) {
-        // Header too long, invalid
-        throw new Error('PROXY protocol header exceeds maximum length');
-      }
-      return {
-        proxyInfo: null,
-        remainingData: data
-      };
-    }
-
-    // Extract header line
-    const headerLine = data.toString('ascii', 0, headerEndIndex);
-    const remainingData = data.slice(headerEndIndex + 2); // Skip \r\n
-
-    // Parse header
-    const parts = headerLine.split(' ');
-
-    if (parts.length < 2) {
-      throw new Error(`Invalid PROXY protocol header format: ${headerLine}`);
-    }
-
-    const [signature, protocol] = parts;
-
-    // Validate protocol
-    if (!['TCP4', 'TCP6', 'UNKNOWN'].includes(protocol)) {
-      throw new Error(`Invalid PROXY protocol: ${protocol}`);
-    }
-
-    // For UNKNOWN protocol, ignore addresses
-    if (protocol === 'UNKNOWN') {
-      return {
-        proxyInfo: {
-          protocol: 'UNKNOWN',
-          sourceIP: '',
-          sourcePort: 0,
-          destinationIP: '',
-          destinationPort: 0
-        },
-        remainingData
-      };
-    }
-
-    // For TCP4/TCP6, we need all 6 parts
-    if (parts.length !== 6) {
-      throw new Error(`Invalid PROXY protocol header format: ${headerLine}`);
-    }
-
-    const [, , srcIP, dstIP, srcPort, dstPort] = parts;
-
-    // Validate and parse ports
-    const sourcePort = parseInt(srcPort, 10);
-    const destinationPort = parseInt(dstPort, 10);
-
-    if (isNaN(sourcePort) || sourcePort < 0 || sourcePort > 65535) {
-      throw new Error(`Invalid source port: ${srcPort}`);
-    }
-
-    if (isNaN(destinationPort) || destinationPort < 0 || destinationPort > 65535) {
-      throw new Error(`Invalid destination port: ${dstPort}`);
-    }
-
-    // Validate IP addresses
-    const protocolType = protocol as TProxyProtocol;
-    if (!this.isValidIP(srcIP, protocolType)) {
-      throw new Error(`Invalid source IP for ${protocol}: ${srcIP}`);
-    }
-
-    if (!this.isValidIP(dstIP, protocolType)) {
-      throw new Error(`Invalid destination IP for ${protocol}: ${dstIP}`);
-    }
-
-    return {
-      proxyInfo: {
-        protocol: protocolType,
-        sourceIP: srcIP,
-        sourcePort,
-        destinationIP: dstIP,
-        destinationPort
-      },
-      remainingData
-    };
-  }
-
-  /**
-   * Generate PROXY protocol v1 header
-   */
-  static generate(info: IProxyInfo): Buffer {
-    if (info.protocol === 'UNKNOWN') {
-      return Buffer.from(`PROXY UNKNOWN\r\n`, 'ascii');
-    }
-
-    const header = `PROXY ${info.protocol} ${info.sourceIP} ${info.destinationIP} ${info.sourcePort} ${info.destinationPort}\r\n`;
-
-    if (header.length > this.MAX_HEADER_LENGTH) {
-      throw new Error('Generated PROXY protocol header exceeds maximum length');
-    }
-
-    return Buffer.from(header, 'ascii');
-  }
-
-  /**
-   * Validate IP address format
-   */
-  static isValidIP(ip: string, protocol: TProxyProtocol): boolean {
-    if (protocol === 'TCP4') {
-      return this.isIPv4(ip);
-    } else if (protocol === 'TCP6') {
-      return this.isIPv6(ip);
-    }
-    return false;
-  }
-
-  /**
-   * Check if string is valid IPv4
-   */
-  static isIPv4(ip: string): boolean {
-    const parts = ip.split('.');
-    if (parts.length !== 4) return false;
-
-    for (const part of parts) {
-      const num = parseInt(part, 10);
-      if (isNaN(num) || num < 0 || num > 255 || part !== num.toString()) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  /**
-   * Check if string is valid IPv6
-   */
-  static isIPv6(ip: string): boolean {
-    // Basic IPv6 validation
-    const ipv6Regex = /^(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))$/;
-    return ipv6Regex.test(ip);
-  }
-
-  /**
-   * Create a connection ID string for tracking
-   */
-  static createConnectionId(connectionInfo: {
-    sourceIp?: string;
-    sourcePort?: number;
-    destIp?: string;
-    destPort?: number;
-  }): string {
-    const { sourceIp, sourcePort, destIp, destPort } = connectionInfo;
-    return `${sourceIp}:${sourcePort}-${destIp}:${destPort}`;
-  }
-}
@@ -11,7 +11,7 @@ export type TProxyProtocolVersion = 'v1' | 'v2';
 /**
  * Connection protocol type
  */
-export type TProxyProtocol = 'TCP4' | 'TCP6' | 'UNKNOWN';
+export type TProxyProtocol = 'TCP4' | 'TCP6' | 'UDP4' | 'UDP6' | 'UNKNOWN';

 /**
  * Interface representing parsed PROXY protocol information