Compare commits

...

31 Commits

Author SHA1 Message Date
61b67b91a0 v25.10.3
Some checks failed
Default (tags) / security (push) Successful in 42s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-12 21:04:58 +00:00
fc64f5a95e fix(rustproxy-http): include request domain in backend proxy error and protocol detection logs 2026-03-12 21:04:58 +00:00
90b83a9dbe v25.10.2
Some checks failed
Default (tags) / security (push) Successful in 41s
Default (tags) / test (push) Failing after 4m1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-12 20:11:55 +00:00
508621e231 fix(repo): no code changes to release 2026-03-12 20:11:55 +00:00
9ef21dcb41 v25.10.1
Some checks failed
Default (tags) / security (push) Successful in 44s
Default (tags) / test (push) Failing after 4m2s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-12 20:00:56 +00:00
0acd907431 fix(repo): no changes to commit 2026-03-12 20:00:56 +00:00
80276a70e8 v25.10.0
Some checks failed
Default (tags) / security (push) Successful in 48s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-12 15:16:11 +00:00
0d4399d7f1 feat(metrics): add per-backend connection, error, protocol, and pool metrics with stale backend pruning 2026-03-12 15:16:11 +00:00
0380a957d0 v25.9.3
Some checks failed
Default (tags) / security (push) Successful in 41s
Default (tags) / test (push) Failing after 4m1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-11 11:28:57 +00:00
5271447264 fix(rustproxy-http): Evict stale HTTP/2 pooled senders and retry bodyless requests with fresh backend connections to avoid 502s 2026-03-11 11:28:57 +00:00
be9898805f v25.9.2
Some checks failed
Default (tags) / security (push) Successful in 41s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-08 15:24:18 +00:00
d4aa46aed7 fix(protocol-cache): Include requested_host in protocol detection cache key to avoid cache oscillation when multiple frontend domains share the same backend 2026-03-08 15:24:18 +00:00
4f1c5c919f v25.9.1
Some checks failed
Default (tags) / security (push) Successful in 48s
Default (tags) / test (push) Failing after 4m3s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-03 16:14:16 +00:00
d51b2c5890 fix(rustproxy): Cancel connections for routes removed/disabled by adding per-route cancellation tokens and make RouteManager swappable (ArcSwap) for runtime updates 2026-03-03 16:14:16 +00:00
bb471a8cc9 v25.9.0
Some checks failed
Default (tags) / security (push) Successful in 41s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-03 11:04:01 +00:00
c52128f12d feat(rustproxy-http): add HTTP/2 auto-detection via ALPN with TTL-backed protocol cache and h1-only/h2 ALPN client configs 2026-03-03 11:04:01 +00:00
e69de246e9 v25.8.5
Some checks failed
Default (tags) / security (push) Successful in 43s
Default (tags) / test (push) Failing after 4m1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-26 21:31:38 +00:00
5126049ae6 fix(release): bump patch version (no source changes) 2026-02-26 21:31:38 +00:00
8db621657f fix(proxy): close connection buildup vectors in HTTP idle, WebSocket, socket relay, and TLS forwarding paths
- Add HTTP keep-alive idle timeout (60s default) with periodic watchdog that
  skips active requests (panic-safe via RAII ActiveRequestGuard)
- Make WebSocket inactivity/max-lifetime timeouts configurable from ConnectionConfig
  instead of hardcoded 1h/24h
- Replace bare copy_bidirectional in socket handler relay with timeout+cancel-aware
  split forwarding (inactivity, max lifetime, graceful shutdown)
- Add CancellationToken to forward_bidirectional_split_with_timeouts so TLS-terminated
  TCP connections respond to graceful shutdown
- Fix graceful_stop to actually abort listener tasks that exceed the shutdown deadline
  (previously they detached and ran forever)
- Add 10s metadata parsing timeout on TS socket-handler-server to prevent stuck sockets
2026-02-26 21:29:19 +00:00
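A minimal sketch of the RAII guard pattern this commit message refers to ("panic-safe via RAII ActiveRequestGuard"); the names and counter layout here are assumptions, not the actual rustproxy code:

```rust
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};

/// Sketch: the guard bumps the active-request counter when a request starts and
/// decrements it in Drop, so even a panicking handler releases its slot and the
/// keep-alive idle watchdog never skips based on a stale "active" count.
struct ActiveRequestGuard {
    active_requests: Arc<AtomicU64>,
}

impl ActiveRequestGuard {
    fn new(active_requests: Arc<AtomicU64>) -> Self {
        active_requests.fetch_add(1, Ordering::Relaxed);
        Self { active_requests }
    }
}

impl Drop for ActiveRequestGuard {
    fn drop(&mut self) {
        self.active_requests.fetch_sub(1, Ordering::Relaxed);
    }
}
```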
ef060d5e79 v25.8.4
Some checks failed
Default (tags) / security (push) Successful in 40s
Default (tags) / test (push) Failing after 4m1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-26 17:32:35 +00:00
cd7f3f7f75 fix(proxy): adjust default proxy timeouts and keep-alive behavior to shorter, more consistent values 2026-02-26 17:32:35 +00:00
8df18728d4 v25.8.3
Some checks failed
Default (tags) / security (push) Successful in 29s
Default (tags) / test (push) Failing after 4m2s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-26 17:01:57 +00:00
bedecc6b6b fix(smartproxy): no code or dependency changes detected; no version bump required 2026-02-26 17:01:57 +00:00
b5f166bc92 v25.8.2
Some checks failed
Default (tags) / security (push) Successful in 31s
Default (tags) / test (push) Failing after 4m1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-26 16:58:30 +00:00
94266222fe fix(connection): improve connection handling and timeouts 2026-02-26 16:58:30 +00:00
697d51a9d4 v25.8.1
Some checks failed
Default (tags) / security (push) Successful in 42s
Default (tags) / test (push) Failing after 4m4s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-25 00:12:41 +00:00
7e5fe2bec3 fix(allocator): switch global allocator from tikv-jemallocator to mimalloc 2026-02-25 00:12:41 +00:00
f592bf627f v25.8.0
Some checks failed
Default (tags) / security (push) Successful in 42s
Default (tags) / test (push) Failing after 4m1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-24 23:22:49 +00:00
6114a00fb8 feat(rustproxy): use tikv-jemallocator as the global allocator to reduce glibc fragmentation and slow RSS growth; add allocator dependency and enable it in rustproxy, update lockfile, and run tsrust before tests 2026-02-24 23:22:49 +00:00
98089b0351 v25.7.10
Some checks failed
Default (tags) / security (push) Successful in 41s
Default (tags) / test (push) Failing after 4m2s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-24 20:56:37 +00:00
33cd5330c4 fix(rustproxy): Use cooperative cancellation for background tasks, prune stale caches and metric entries, and switch tests to dynamic port allocation to avoid port conflicts 2026-02-24 20:56:37 +00:00
45 changed files with 2280 additions and 706 deletions

View File

@@ -1,5 +1,123 @@
# Changelog
## 2026-03-12 - 25.10.3 - fix(rustproxy-http)
include request domain in backend proxy error and protocol detection logs
- Adds domain context to backend TCP/TLS connect, handshake, request failure, retry, and fallback log entries in the Rust HTTP proxy service.
- Propagates the resolved host/domain through H1, H2, pooled, and fallback forwarding paths so backend-level diagnostics can be correlated with the original request domain.
## 2026-03-12 - 25.10.2 - fix(repo)
no code changes to release
## 2026-03-12 - 25.10.1 - fix(repo)
no changes to commit
## 2026-03-12 - 25.10.0 - feat(metrics)
add per-backend connection, error, protocol, and pool metrics with stale backend pruning
- tracks backend connection lifecycle, connect timing, protocol detection, pool hit/miss rates, handshake/request errors, and h2 fallback failures in Rust metrics
- exposes backend metrics through the TypeScript metrics adapter with backend listings, protocol lookup, and top error summaries
- prunes backend metrics for backends no longer referenced by active routes, including preserved-port targets expanded across listening ports
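A usage sketch of the per-backend recording API this release adds (method and field names are taken from the Rust metrics diff further down this page; the surrounding setup and import path are assumed):

```rust
use std::time::Duration;

fn record_example(collector: &MetricsCollector) {
    // Backends are keyed by "host:port".
    collector.backend_connection_opened("10.1.0.5:8080", Duration::from_millis(12));
    collector.set_backend_protocol("10.1.0.5:8080", "h2");
    collector.backend_pool_miss("10.1.0.5:8080");

    // Failures are counted separately per category.
    collector.backend_connect_error("10.1.0.6:8080");

    // Snapshots expose the aggregated per-backend view (top 100 by total connections).
    let snapshot = collector.snapshot();
    if let Some(b) = snapshot.backends.get("10.1.0.5:8080") {
        println!("active={} protocol={}", b.active_connections, b.protocol);
    }
}
```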
## 2026-03-11 - 25.9.3 - fix(rustproxy-http)
Evict stale HTTP/2 pooled senders and retry bodyless requests with fresh backend connections to avoid 502s
- Introduce MAX_H2_AGE (120s) and evict HTTP/2 senders that are closed or older than this limit
- Check MAX_H2_AGE on checkout and during background eviction to prevent reuse of stale h2 connections
- Add connection_pool.remove_h2() to explicitly remove dead H2 senders from the pool
- When a pooled H2 request returns a 502 and the original request had an empty body, retry using a fresh H2 connection (retry_h2_with_fresh_connection)
- On H2 auto-detect failures, retry as HTTP/1.1 for bodyless requests via forward_h1_empty_body; return 502 for requests with bodies
- Evict dead H2 senders on backend request failures in reconnect_backend so subsequent attempts create fresh connections
## 2026-03-08 - 25.9.2 - fix(protocol-cache)
Include requested_host in protocol detection cache key to avoid cache oscillation when multiple frontend domains share the same backend
- Add ProtocolCacheKey.requested_host: Option<String> to distinguish cache entries by incoming request Host/:authority
- Update protocol cache lookups/inserts in proxy_service to populate requested_host
- Enhance debug logging to show requested_host on cache hits
- Fixes repeated ALPN probing / cache oscillation when different frontend domains share a backend with differing HTTP/2 support
## 2026-03-03 - 25.9.1 - fix(rustproxy)
Cancel connections for routes removed/disabled by adding per-route cancellation tokens and make RouteManager swappable (ArcSwap) for runtime updates
- Add per-route CancellationToken map (DashMap) to TcpListenerManager and call token.cancel() when routes are removed (invalidate_removed_routes)
- Propagate Arc<ArcSwap<RouteManager>> into HttpProxyService and passthrough listener so the route manager can be hot-swapped without restarting listeners
- Use per-route child cancellation tokens in accept/connection handling and forwarders to terminate existing connections when a route is removed
- Prune HTTP proxy caches and retain/cleanup per-route tokens when routes are active/removed
- Update test.test.sni-requirement.node.ts to allocate unique free ports via findFreePorts to avoid port conflicts during tests
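A minimal sketch of the per-route token map this entry describes; the struct and method names are illustrative, only DashMap and CancellationToken come from the entry itself:

```rust
use dashmap::DashMap;
use tokio_util::sync::CancellationToken;

/// Sketch: one cancellation token per route ID. Connections are spawned under
/// child tokens, so cancelling a route's token terminates its existing connections.
struct RouteTokens {
    tokens: DashMap<String, CancellationToken>,
}

impl RouteTokens {
    /// Hand out a child token for a new connection on this route.
    fn token_for(&self, route_id: &str) -> CancellationToken {
        self.tokens
            .entry(route_id.to_string())
            .or_insert_with(CancellationToken::new)
            .child_token()
    }

    /// Cancel and drop tokens for routes that were removed or disabled.
    fn invalidate_removed(&self, active_route_ids: &[&str]) {
        self.tokens.retain(|route_id, token| {
            let keep = active_route_ids.contains(&route_id.as_str());
            if !keep {
                token.cancel();
            }
            keep
        });
    }
}
```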
## 2026-03-03 - 25.9.0 - feat(rustproxy-http)
add HTTP/2 auto-detection via ALPN with TTL-backed protocol cache and h1-only/h2 ALPN client configs
- Add protocol_cache module: bounded, TTL-based cache (5min TTL), max entries (4096), background cleanup task and clear() to discard stale detections.
- Introduce BackendProtocol::Auto and expose 'auto' in TypeScript route types to allow ALPN-based protocol auto-detection.
- Add build_tls_acceptor_h1_only() to create a TLS acceptor that advertises only http/1.1 (used for backends/tests that speak plain HTTP/1.1).
- Add shared_backend_tls_config_alpn() and default_backend_tls_config_with_alpn() to provide client TLS configs advertising h2+http/1.1 for auto-detection.
- Wire backend_tls_config_alpn and protocol_cache into proxy_service, tcp_listener and passthrough paths; add set_backend_tls_config_alpn() and prune protocol_cache on route updates.
- Update passthrough tests to use h1-only acceptor to avoid false HTTP/2 detection when backends speak plain HTTP/1.1.
- Include reconnection/fallback handling and ensure ALPN-enabled client config is used for auto-detection mode.
## 2026-02-26 - 25.8.5 - fix(release)
bump patch version (no source changes)
- No changes detected in git diff
- Current version: 25.8.4
- Recommend patch bump to 25.8.5 to record release without code changes
## 2026-02-26 - 25.8.4 - fix(proxy)
adjust default proxy timeouts and keep-alive behavior to shorter, more consistent values
- Increase connection timeout default from 30,000ms to 60,000ms (30s -> 60s).
- Reduce socket timeout default from 3,600,000ms to 60,000ms (1h -> 60s).
- Reduce max connection lifetime default from 86,400,000ms to 3,600,000ms (24h -> 1h).
- Change inactivity timeout default from 14,400,000ms to 75,000ms (4h -> 75s).
- Update keep-alive defaults: keepAliveTreatment 'extended' -> 'standard', keepAliveInactivityMultiplier 6 -> 4, extendedKeepAliveLifetime 604800000 -> 3,600,000ms (7d -> 1h).
- Apply these consistent default values across Rust crates (rustproxy-config, rustproxy-passthrough) and the TypeScript smart-proxy implementation.
- Update unit test expectations to match the new defaults.
## 2026-02-26 - 25.8.3 - fix(smartproxy)
no code or dependency changes detected; no version bump required
- No files changed in the provided diff (No changes).
- package.json version remains 25.8.2.
- No dependency or source updates detected; skip release.
## 2026-02-26 - 25.8.2 - fix(connection)
improve connection handling and timeouts
- Flush logs on process beforeExit and avoid calling process.exit in SIGINT/SIGTERM handlers to preserve host graceful shutdown
- Store protocol entries with a createdAt timestamp in ProtocolDetector and remove stale entries older than 30s to prevent leaked state from abandoned handshakes or port scanners
- Add backend connect timeout (30s) and idle timeouts (5 minutes) for dynamic forwards; destroy sockets on timeout and emit logs for timeout events
## 2026-02-25 - 25.8.1 - fix(allocator)
switch global allocator from tikv-jemallocator to mimalloc
- Replaced tikv-jemallocator with mimalloc in rust/Cargo.toml workspace dependencies.
- Updated rust/crates/rustproxy/Cargo.toml to use mimalloc as a workspace dependency.
- Updated rust/Cargo.lock: added mimalloc and libmimalloc-sys entries and removed tikv-jemallocator and tikv-jemalloc-sys entries.
- Changed the global allocator in crates/rustproxy/src/main.rs from tikv_jemallocator::Jemalloc to mimalloc::MiMalloc.
- Impact: the runtime memory allocator changes, which may affect memory usage and performance; there are no public API changes, but memory/performance testing in deployments is recommended.
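The allocator swap itself amounts to a global-allocator declaration in main.rs; a sketch of what this entry describes (the static's name is an assumption):

```rust
use mimalloc::MiMalloc;

// Replaces the previous tikv_jemallocator::Jemalloc declaration.
#[global_allocator]
static GLOBAL: MiMalloc = MiMalloc;

fn main() {
    // proxy startup continues unchanged
}
```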
## 2026-02-24 - 25.8.0 - feat(rustproxy)
use tikv-jemallocator as the global allocator to reduce glibc fragmentation and slow RSS growth; add allocator dependency and enable it in rustproxy, update lockfile, and run tsrust before tests
- Added tikv-jemallocator dependency to rust/Cargo.toml and rust/crates/rustproxy/Cargo.toml
- Enabled tikv_jemallocator as the global allocator in rust/crates/rustproxy/src/main.rs
- Updated rust/Cargo.lock with tikv-jemallocator and tikv-jemalloc-sys entries
- Modified package.json test script to run tsrust before tstest
## 2026-02-24 - 25.7.10 - fix(rustproxy)
Use cooperative cancellation for background tasks, prune stale caches and metric entries, and switch tests to dynamic port allocation to avoid port conflicts
- Introduce tokio_util::sync::CancellationToken to coordinate graceful shutdown of sampling and renewal tasks; await handles on stop and reset the token so the proxy can be restarted.
- Add safety Drop impls (RustProxy, TcpListenerManager) as a last-resort abort path when stop() is not called.
- MetricsCollector: avoid creating per-IP metric entries when the IP has no active connections; prune orphaned per-IP metric maps during sampling; add tests covering late record_bytes races and pruning behavior.
- Passthrough/ConnectionTracker: remove per-connection record/zombie-scanner complexity, add cleanup_stale_timestamps to prune rate-limit timestamp entries, and add an RAII ConnectionTrackerGuard to guarantee connection_closed is invoked.
- HTTP proxy improvements: add prune_stale_routes and reset_round_robin to clear caches (rate limiters, regex cache, round-robin counters) on route updates.
- Tests: add test/helpers/port-allocator.ts and update many tests to use findFreePorts/assertPortsFree (dynamic ports + post-test port assertions) to avoid flakiness and port collisions in CI.
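A sketch of the cooperative-shutdown pattern described above, using tokio_util::sync::CancellationToken; the task body and names are illustrative:

```rust
use std::time::Duration;
use tokio_util::sync::CancellationToken;

async fn run_sampler(token: CancellationToken) {
    let mut interval = tokio::time::interval(Duration::from_secs(1));
    loop {
        tokio::select! {
            _ = token.cancelled() => break, // graceful shutdown requested
            _ = interval.tick() => {
                // take a metrics sample here
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let token = CancellationToken::new();
    let handle = tokio::spawn(run_sampler(token.child_token()));

    // On stop(): signal the task and await it instead of detaching or aborting.
    token.cancel();
    handle.await.unwrap();
}
```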
## 2026-02-21 - 25.7.9 - fix(tests)
use high non-privileged ports in tests to avoid conflicts and CI failures

View File

@@ -1,6 +1,6 @@
{
"name": "@push.rocks/smartproxy",
- "version": "25.7.9",
+ "version": "25.10.3",
"private": false,
"description": "A powerful proxy package with unified route-based configuration for high traffic management. Features include SSL/TLS support, flexible routing patterns, WebSocket handling, advanced security options, and automatic ACME certificate management.",
"main": "dist_ts/index.js",
@@ -9,7 +9,7 @@
"author": "Lossless GmbH", "author": "Lossless GmbH",
"license": "MIT", "license": "MIT",
"scripts": { "scripts": {
"test": "(tstest test/**/test*.ts --verbose --timeout 60 --logfile)", "test": "(tsrust) && (tstest test/**/test*.ts --verbose --timeout 60 --logfile)",
"build": "(tsbuild tsfolders --allowimplicitany) && (tsrust)", "build": "(tsbuild tsfolders --allowimplicitany) && (tsrust)",
"format": "(gitzone format)", "format": "(gitzone format)",
"buildDocs": "tsdoc" "buildDocs": "tsdoc"

rust/Cargo.lock generated
View File

@@ -612,6 +612,16 @@ version = "0.2.180"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc"
[[package]]
name = "libmimalloc-sys"
version = "0.1.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "667f4fec20f29dfc6bc7357c582d91796c169ad7e2fce709468aefeb2c099870"
dependencies = [
"cc",
"libc",
]
[[package]]
name = "lock_api"
version = "0.4.14"
@@ -642,6 +652,15 @@ version = "2.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79"
[[package]]
name = "mimalloc"
version = "0.1.48"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1ee66a4b64c74f4ef288bcbb9192ad9c3feaad75193129ac8509af543894fd8"
dependencies = [
"libmimalloc-sys",
]
[[package]]
name = "mio"
version = "1.1.1"
@@ -924,6 +943,7 @@ dependencies = [
"http-body-util", "http-body-util",
"hyper", "hyper",
"hyper-util", "hyper-util",
"mimalloc",
"rcgen", "rcgen",
"rustls", "rustls",
"rustproxy-config", "rustproxy-config",

View File

@@ -91,6 +91,9 @@ libc = "0.2"
# Socket-level options (keepalive, etc.)
socket2 = { version = "0.5", features = ["all"] }
# mimalloc allocator (prevents glibc fragmentation / slow RSS growth)
mimalloc = "0.1"
# Internal crates
rustproxy-config = { path = "crates/rustproxy-config" }
rustproxy-routing = { path = "crates/rustproxy-routing" }

View File

@@ -298,7 +298,7 @@ impl RustProxyOptions {
/// Get the effective connection timeout in milliseconds.
pub fn effective_connection_timeout(&self) -> u64 {
- self.connection_timeout.unwrap_or(30_000)
+ self.connection_timeout.unwrap_or(60_000)
}
/// Get the effective initial data timeout in milliseconds.
@@ -308,12 +308,12 @@ impl RustProxyOptions {
/// Get the effective socket timeout in milliseconds.
pub fn effective_socket_timeout(&self) -> u64 {
- self.socket_timeout.unwrap_or(3_600_000)
+ self.socket_timeout.unwrap_or(60_000)
}
/// Get the effective max connection lifetime in milliseconds.
pub fn effective_max_connection_lifetime(&self) -> u64 {
- self.max_connection_lifetime.unwrap_or(86_400_000)
+ self.max_connection_lifetime.unwrap_or(3_600_000)
}
/// Get all unique ports that routes listen on.
@@ -377,10 +377,10 @@ mod tests {
#[test]
fn test_default_timeouts() {
let options = RustProxyOptions::default();
- assert_eq!(options.effective_connection_timeout(), 30_000);
+ assert_eq!(options.effective_connection_timeout(), 60_000);
assert_eq!(options.effective_initial_data_timeout(), 60_000);
- assert_eq!(options.effective_socket_timeout(), 3_600_000);
+ assert_eq!(options.effective_socket_timeout(), 60_000);
- assert_eq!(options.effective_max_connection_lifetime(), 86_400_000);
+ assert_eq!(options.effective_max_connection_lifetime(), 3_600_000);
}
#[test]

View File

@@ -367,6 +367,7 @@ pub struct NfTablesOptions {
pub enum BackendProtocol {
Http1,
Http2,
Auto,
}
/// Action options.

View File

@@ -18,6 +18,9 @@ const MAX_IDLE_PER_KEY: usize = 16;
const IDLE_TIMEOUT: Duration = Duration::from_secs(90);
/// Background eviction interval.
const EVICTION_INTERVAL: Duration = Duration::from_secs(30);
/// Maximum age for pooled HTTP/2 connections before proactive eviction.
/// Prevents staleness from backends that close idle connections (e.g. nginx GOAWAY).
const MAX_H2_AGE: Duration = Duration::from_secs(120);
/// Identifies a unique backend endpoint.
#[derive(Clone, Debug, Hash, Eq, PartialEq)]
@@ -37,7 +40,6 @@ struct IdleH1 {
/// A pooled HTTP/2 sender (multiplexed, Clone-able).
struct PooledH2 {
sender: http2::SendRequest<BoxBody<Bytes, hyper::Error>>,
- #[allow(dead_code)] // Reserved for future age-based eviction
created_at: Instant,
}
@@ -116,8 +118,8 @@ impl ConnectionPool {
let entry = self.h2_pool.get(key)?;
let pooled = entry.value();
- // Check if the h2 connection is still alive
+ // Check if the h2 connection is still alive and not too old
- if pooled.sender.is_closed() {
+ if pooled.sender.is_closed() || pooled.created_at.elapsed() >= MAX_H2_AGE {
drop(entry);
self.h2_pool.remove(key);
return None;
@@ -130,6 +132,12 @@ impl ConnectionPool {
None
}
/// Remove a dead HTTP/2 sender from the pool.
/// Called when `send_request` fails to prevent subsequent requests from reusing the stale sender.
pub fn remove_h2(&self, key: &PoolKey) {
self.h2_pool.remove(key);
}
/// Register an HTTP/2 sender in the pool. Since h2 is multiplexed,
/// only one sender per key is stored (it's Clone-able).
pub fn register_h2(&self, key: PoolKey, sender: http2::SendRequest<BoxBody<Bytes, hyper::Error>>) {
@@ -165,10 +173,10 @@ impl ConnectionPool {
h1_pool.remove(&key);
}
- // Evict dead H2 connections
+ // Evict dead or aged-out H2 connections
let mut dead_h2 = Vec::new();
for entry in h2_pool.iter() {
- if entry.value().sender.is_closed() {
+ if entry.value().sender.is_closed() || entry.value().created_at.elapsed() >= MAX_H2_AGE {
dead_h2.push(entry.key().clone());
}
}

View File

@@ -5,6 +5,7 @@
pub mod connection_pool;
pub mod counting_body;
pub mod protocol_cache;
pub mod proxy_service;
pub mod request_filter;
pub mod response_filter;

View File

@@ -0,0 +1,140 @@
//! Bounded, TTL-based protocol detection cache for HTTP/2 auto-detection.
//!
//! Caches the ALPN-negotiated protocol (H1 or H2) per backend endpoint and requested
//! domain (host:port + requested_host). This prevents cache oscillation when multiple
//! frontend domains share the same backend but differ in HTTP/2 support.
use std::sync::Arc;
use std::time::{Duration, Instant};
use dashmap::DashMap;
use tracing::debug;
/// TTL for cached protocol detection results.
/// After this duration, the next request will re-probe the backend.
const PROTOCOL_CACHE_TTL: Duration = Duration::from_secs(300); // 5 minutes
/// Maximum number of entries in the protocol cache.
/// Prevents unbounded growth when backends come and go.
const PROTOCOL_CACHE_MAX_ENTRIES: usize = 4096;
/// Background cleanup interval for the protocol cache.
const PROTOCOL_CACHE_CLEANUP_INTERVAL: Duration = Duration::from_secs(60);
/// Detected backend protocol.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DetectedProtocol {
H1,
H2,
}
/// Key for the protocol cache: (host, port, requested_host).
#[derive(Clone, Debug, Hash, Eq, PartialEq)]
pub struct ProtocolCacheKey {
pub host: String,
pub port: u16,
/// The incoming request's domain (Host header / :authority).
/// Distinguishes protocol detection when multiple domains share the same backend.
pub requested_host: Option<String>,
}
/// A cached protocol detection result with a timestamp.
struct CachedEntry {
protocol: DetectedProtocol,
detected_at: Instant,
}
/// Bounded, TTL-based protocol detection cache.
///
/// Memory safety guarantees:
/// - Hard cap at `PROTOCOL_CACHE_MAX_ENTRIES` — cannot grow unboundedly.
/// - TTL expiry — stale entries naturally age out on lookup.
/// - Background cleanup task — proactively removes expired entries every 60s.
/// - `clear()` — called on route updates to discard stale detections.
/// - `Drop` — aborts the background task to prevent dangling tokio tasks.
pub struct ProtocolCache {
cache: Arc<DashMap<ProtocolCacheKey, CachedEntry>>,
cleanup_handle: Option<tokio::task::JoinHandle<()>>,
}
impl ProtocolCache {
/// Create a new protocol cache and start the background cleanup task.
pub fn new() -> Self {
let cache: Arc<DashMap<ProtocolCacheKey, CachedEntry>> = Arc::new(DashMap::new());
let cache_clone = Arc::clone(&cache);
let cleanup_handle = tokio::spawn(async move {
Self::cleanup_loop(cache_clone).await;
});
Self {
cache,
cleanup_handle: Some(cleanup_handle),
}
}
/// Look up the cached protocol for a backend endpoint.
/// Returns `None` if not cached or expired (caller should probe via ALPN).
pub fn get(&self, key: &ProtocolCacheKey) -> Option<DetectedProtocol> {
let entry = self.cache.get(key)?;
if entry.detected_at.elapsed() < PROTOCOL_CACHE_TTL {
debug!("Protocol cache hit: {:?} for {}:{} (requested: {:?})", entry.protocol, key.host, key.port, key.requested_host);
Some(entry.protocol)
} else {
// Expired — remove and return None to trigger re-probe
drop(entry); // release DashMap ref before remove
self.cache.remove(key);
None
}
}
/// Insert a detected protocol into the cache.
/// If the cache is at capacity, evict the oldest entry first.
pub fn insert(&self, key: ProtocolCacheKey, protocol: DetectedProtocol) {
if self.cache.len() >= PROTOCOL_CACHE_MAX_ENTRIES && !self.cache.contains_key(&key) {
// Evict the oldest entry to stay within bounds
let oldest = self.cache.iter()
.min_by_key(|entry| entry.value().detected_at)
.map(|entry| entry.key().clone());
if let Some(oldest_key) = oldest {
self.cache.remove(&oldest_key);
}
}
self.cache.insert(key, CachedEntry {
protocol,
detected_at: Instant::now(),
});
}
/// Clear all entries. Called on route updates to discard stale detections.
pub fn clear(&self) {
self.cache.clear();
}
/// Background cleanup loop — removes expired entries every `PROTOCOL_CACHE_CLEANUP_INTERVAL`.
async fn cleanup_loop(cache: Arc<DashMap<ProtocolCacheKey, CachedEntry>>) {
let mut interval = tokio::time::interval(PROTOCOL_CACHE_CLEANUP_INTERVAL);
loop {
interval.tick().await;
let expired: Vec<ProtocolCacheKey> = cache.iter()
.filter(|entry| entry.value().detected_at.elapsed() >= PROTOCOL_CACHE_TTL)
.map(|entry| entry.key().clone())
.collect();
if !expired.is_empty() {
debug!("Protocol cache cleanup: removing {} expired entries", expired.len());
for key in expired {
cache.remove(&key);
}
}
}
}
}
impl Drop for ProtocolCache {
fn drop(&mut self) {
if let Some(handle) = self.cleanup_handle.take() {
handle.abort();
}
}
}
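A usage sketch for the cache defined above: the get-or-probe flow the proxy service presumably follows. The ALPN probe itself is elided and the detected value here is only a placeholder; constructing the cache requires a Tokio runtime because `new()` spawns the cleanup task.

```rust
fn protocol_for_backend(cache: &ProtocolCache) -> DetectedProtocol {
    let key = ProtocolCacheKey {
        host: "backend.internal".to_string(),
        port: 443,
        requested_host: Some("app.example.com".to_string()),
    };

    if let Some(cached) = cache.get(&key) {
        return cached; // fresh entry within PROTOCOL_CACHE_TTL
    }

    // Not cached or expired: probe the backend via ALPN (elided here),
    // then remember the result until the TTL expires again.
    let detected = DetectedProtocol::H2; // placeholder for the probe result
    cache.insert(key, detected);
    detected
}
```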

File diff suppressed because it is too large.

View File

@@ -131,6 +131,14 @@ impl UpstreamSelector {
}
}
/// Clear stale round-robin counters on route update.
/// Resetting is harmless — counters just restart cycling from index 0.
pub fn reset_round_robin(&self) {
if let Ok(mut counters) = self.round_robin.lock() {
counters.clear();
}
}
fn ip_hash(addr: &SocketAddr) -> usize {
let ip_str = addr.ip().to_string();
let mut hash: usize = 5381;

View File

@@ -3,6 +3,7 @@ use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Mutex;
use std::time::Duration;
use crate::throughput::{ThroughputSample, ThroughputTracker};
@@ -20,6 +21,7 @@ pub struct Metrics {
pub throughput_recent_out_bytes_per_sec: u64,
pub routes: std::collections::HashMap<String, RouteMetrics>,
pub ips: std::collections::HashMap<String, IpMetrics>,
pub backends: std::collections::HashMap<String, BackendMetrics>,
pub throughput_history: Vec<ThroughputSample>,
pub total_http_requests: u64,
pub http_requests_per_sec: u64,
@@ -52,6 +54,23 @@ pub struct IpMetrics {
pub throughput_out_bytes_per_sec: u64,
}
/// Per-backend metrics (keyed by "host:port").
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct BackendMetrics {
pub active_connections: u64,
pub total_connections: u64,
pub protocol: String,
pub connect_errors: u64,
pub handshake_errors: u64,
pub request_errors: u64,
pub total_connect_time_us: u64,
pub connect_count: u64,
pub pool_hits: u64,
pub pool_misses: u64,
pub h2_failures: u64,
}
/// Statistics snapshot.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
@@ -69,6 +88,9 @@ const DEFAULT_RETENTION_SECONDS: usize = 3600;
/// Maximum number of IPs to include in a snapshot (top by active connections).
const MAX_IPS_IN_SNAPSHOT: usize = 100;
/// Maximum number of backends to include in a snapshot (top by total connections).
const MAX_BACKENDS_IN_SNAPSHOT: usize = 100;
/// Metrics collector tracking connections and throughput.
///
/// Design: The hot path (`record_bytes`) is entirely lock-free — it only touches
@@ -96,6 +118,19 @@ pub struct MetricsCollector {
ip_pending_tp: DashMap<String, (AtomicU64, AtomicU64)>,
ip_throughput: DashMap<String, Mutex<ThroughputTracker>>,
// ── Per-backend tracking (keyed by "host:port") ──
backend_active: DashMap<String, AtomicU64>,
backend_total: DashMap<String, AtomicU64>,
backend_protocol: DashMap<String, String>,
backend_connect_errors: DashMap<String, AtomicU64>,
backend_handshake_errors: DashMap<String, AtomicU64>,
backend_request_errors: DashMap<String, AtomicU64>,
backend_connect_time_us: DashMap<String, AtomicU64>,
backend_connect_count: DashMap<String, AtomicU64>,
backend_pool_hits: DashMap<String, AtomicU64>,
backend_pool_misses: DashMap<String, AtomicU64>,
backend_h2_failures: DashMap<String, AtomicU64>,
// ── HTTP request tracking ──
total_http_requests: AtomicU64,
pending_http_requests: AtomicU64,
@@ -134,6 +169,17 @@ impl MetricsCollector {
ip_bytes_out: DashMap::new(), ip_bytes_out: DashMap::new(),
ip_pending_tp: DashMap::new(),
ip_throughput: DashMap::new(),
backend_active: DashMap::new(),
backend_total: DashMap::new(),
backend_protocol: DashMap::new(),
backend_connect_errors: DashMap::new(),
backend_handshake_errors: DashMap::new(),
backend_request_errors: DashMap::new(),
backend_connect_time_us: DashMap::new(),
backend_connect_count: DashMap::new(),
backend_pool_hits: DashMap::new(),
backend_pool_misses: DashMap::new(),
backend_h2_failures: DashMap::new(),
total_http_requests: AtomicU64::new(0),
pending_http_requests: AtomicU64::new(0),
http_request_throughput: Mutex::new(ThroughputTracker::new(retention_seconds)),
@@ -239,21 +285,26 @@ impl MetricsCollector {
}
if let Some(ip) = source_ip {
+ // Only record per-IP stats if the IP still has active connections.
+ // This prevents orphaned entries when record_bytes races with
+ // connection_closed (which evicts all per-IP data on last close).
+ if self.ip_connections.contains_key(ip) {
self.ip_bytes_in
.entry(ip.to_string())
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(bytes_in, Ordering::Relaxed);
self.ip_bytes_out
.entry(ip.to_string())
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(bytes_out, Ordering::Relaxed);
// Accumulate into per-IP pending throughput counters (lock-free)
let entry = self.ip_pending_tp
.entry(ip.to_string())
.or_insert_with(|| (AtomicU64::new(0), AtomicU64::new(0)));
entry.0.fetch_add(bytes_in, Ordering::Relaxed);
entry.1.fetch_add(bytes_out, Ordering::Relaxed);
+ }
}
}
@@ -263,6 +314,113 @@ impl MetricsCollector {
self.pending_http_requests.fetch_add(1, Ordering::Relaxed);
}
// ── Per-backend recording methods ──
/// Record a successful backend connection with its connect duration.
pub fn backend_connection_opened(&self, key: &str, connect_time: Duration) {
self.backend_active
.entry(key.to_string())
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(1, Ordering::Relaxed);
self.backend_total
.entry(key.to_string())
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(1, Ordering::Relaxed);
self.backend_connect_time_us
.entry(key.to_string())
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(connect_time.as_micros() as u64, Ordering::Relaxed);
self.backend_connect_count
.entry(key.to_string())
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(1, Ordering::Relaxed);
}
/// Record a backend connection closing.
pub fn backend_connection_closed(&self, key: &str) {
if let Some(counter) = self.backend_active.get(key) {
let val = counter.load(Ordering::Relaxed);
if val > 0 {
counter.fetch_sub(1, Ordering::Relaxed);
}
}
}
/// Record a backend connect error (TCP or TLS connect failure/timeout).
pub fn backend_connect_error(&self, key: &str) {
self.backend_connect_errors
.entry(key.to_string())
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(1, Ordering::Relaxed);
}
/// Record a backend handshake error (H1 or H2 handshake failure).
pub fn backend_handshake_error(&self, key: &str) {
self.backend_handshake_errors
.entry(key.to_string())
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(1, Ordering::Relaxed);
}
/// Record a backend request error (send_request failure).
pub fn backend_request_error(&self, key: &str) {
self.backend_request_errors
.entry(key.to_string())
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(1, Ordering::Relaxed);
}
/// Record a connection pool hit for a backend.
pub fn backend_pool_hit(&self, key: &str) {
self.backend_pool_hits
.entry(key.to_string())
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(1, Ordering::Relaxed);
}
/// Record a connection pool miss for a backend.
pub fn backend_pool_miss(&self, key: &str) {
self.backend_pool_misses
.entry(key.to_string())
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(1, Ordering::Relaxed);
}
/// Record an H2 failure (h2 attempted but fell back to h1).
pub fn backend_h2_failure(&self, key: &str) {
self.backend_h2_failures
.entry(key.to_string())
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(1, Ordering::Relaxed);
}
/// Set the protocol in use for a backend ("h1" or "h2").
pub fn set_backend_protocol(&self, key: &str, protocol: &str) {
self.backend_protocol
.entry(key.to_string())
.and_modify(|v| {
if v != protocol {
*v = protocol.to_string();
}
})
.or_insert_with(|| protocol.to_string());
}
/// Remove per-backend metrics for backends no longer in any route target.
pub fn retain_backends(&self, active_backends: &HashSet<String>) {
self.backend_active.retain(|k, _| active_backends.contains(k));
self.backend_total.retain(|k, _| active_backends.contains(k));
self.backend_protocol.retain(|k, _| active_backends.contains(k));
self.backend_connect_errors.retain(|k, _| active_backends.contains(k));
self.backend_handshake_errors.retain(|k, _| active_backends.contains(k));
self.backend_request_errors.retain(|k, _| active_backends.contains(k));
self.backend_connect_time_us.retain(|k, _| active_backends.contains(k));
self.backend_connect_count.retain(|k, _| active_backends.contains(k));
self.backend_pool_hits.retain(|k, _| active_backends.contains(k));
self.backend_pool_misses.retain(|k, _| active_backends.contains(k));
self.backend_h2_failures.retain(|k, _| active_backends.contains(k));
}
/// Take a throughput sample on all trackers (cold path, call at 1Hz or configured interval).
///
/// Drains the lock-free pending counters and feeds the accumulated bytes
@@ -347,6 +505,15 @@ impl MetricsCollector {
tracker.record_bytes(pending_reqs, 0);
tracker.sample();
}
// Safety-net: prune orphaned per-IP entries that have no corresponding
// ip_connections entry. This catches any entries created by a race between
// record_bytes and connection_closed.
self.ip_bytes_in.retain(|k, _| self.ip_connections.contains_key(k));
self.ip_bytes_out.retain(|k, _| self.ip_connections.contains_key(k));
self.ip_pending_tp.retain(|k, _| self.ip_connections.contains_key(k));
self.ip_throughput.retain(|k, _| self.ip_connections.contains_key(k));
self.ip_total_connections.retain(|k, _| self.ip_connections.contains_key(k));
}
/// Remove per-route metrics for route IDs that are no longer active.
@@ -474,6 +641,72 @@ impl MetricsCollector {
});
}
// Collect per-backend metrics, capped at top MAX_BACKENDS_IN_SNAPSHOT by total connections
let mut backend_entries: Vec<(String, BackendMetrics)> = Vec::new();
for entry in self.backend_total.iter() {
let key = entry.key().clone();
let total = entry.value().load(Ordering::Relaxed);
let active = self.backend_active
.get(&key)
.map(|c| c.load(Ordering::Relaxed))
.unwrap_or(0);
let protocol = self.backend_protocol
.get(&key)
.map(|v| v.value().clone())
.unwrap_or_else(|| "unknown".to_string());
let connect_errors = self.backend_connect_errors
.get(&key)
.map(|c| c.load(Ordering::Relaxed))
.unwrap_or(0);
let handshake_errors = self.backend_handshake_errors
.get(&key)
.map(|c| c.load(Ordering::Relaxed))
.unwrap_or(0);
let request_errors = self.backend_request_errors
.get(&key)
.map(|c| c.load(Ordering::Relaxed))
.unwrap_or(0);
let total_connect_time_us = self.backend_connect_time_us
.get(&key)
.map(|c| c.load(Ordering::Relaxed))
.unwrap_or(0);
let connect_count = self.backend_connect_count
.get(&key)
.map(|c| c.load(Ordering::Relaxed))
.unwrap_or(0);
let pool_hits = self.backend_pool_hits
.get(&key)
.map(|c| c.load(Ordering::Relaxed))
.unwrap_or(0);
let pool_misses = self.backend_pool_misses
.get(&key)
.map(|c| c.load(Ordering::Relaxed))
.unwrap_or(0);
let h2_failures = self.backend_h2_failures
.get(&key)
.map(|c| c.load(Ordering::Relaxed))
.unwrap_or(0);
backend_entries.push((key, BackendMetrics {
active_connections: active,
total_connections: total,
protocol,
connect_errors,
handshake_errors,
request_errors,
total_connect_time_us,
connect_count,
pool_hits,
pool_misses,
h2_failures,
}));
}
// Sort by total connections descending, then cap
backend_entries.sort_by(|a, b| b.1.total_connections.cmp(&a.1.total_connections));
backend_entries.truncate(MAX_BACKENDS_IN_SNAPSHOT);
let backends: std::collections::HashMap<String, BackendMetrics> = backend_entries.into_iter().collect();
// HTTP request rates
let (http_rps, http_rps_recent) = self.http_request_throughput
.lock()
@@ -495,6 +728,7 @@ impl MetricsCollector {
throughput_recent_out_bytes_per_sec: global_recent_out,
routes,
ips,
backends,
throughput_history,
total_http_requests: self.total_http_requests.load(Ordering::Relaxed),
http_requests_per_sec: http_rps,
@@ -733,6 +967,49 @@ mod tests {
assert!(collector.route_total_connections.get("route-c").is_some());
}
#[test]
fn test_record_bytes_after_close_no_orphan() {
let collector = MetricsCollector::with_retention(60);
// Open a connection, record bytes, then close
collector.connection_opened(Some("route-a"), Some("10.0.0.1"));
collector.record_bytes(100, 200, Some("route-a"), Some("10.0.0.1"));
collector.connection_closed(Some("route-a"), Some("10.0.0.1"));
// IP should be fully evicted
assert!(collector.ip_connections.get("10.0.0.1").is_none());
// Now record_bytes arrives late (simulates race) — should NOT re-create entries
collector.record_bytes(50, 75, Some("route-a"), Some("10.0.0.1"));
assert!(collector.ip_bytes_in.get("10.0.0.1").is_none());
assert!(collector.ip_bytes_out.get("10.0.0.1").is_none());
assert!(collector.ip_pending_tp.get("10.0.0.1").is_none());
// Global bytes should still be counted
assert_eq!(collector.total_bytes_in.load(Ordering::Relaxed), 150);
assert_eq!(collector.total_bytes_out.load(Ordering::Relaxed), 275);
}
#[test]
fn test_sample_all_prunes_orphaned_ip_entries() {
let collector = MetricsCollector::with_retention(60);
// Manually insert orphaned entries (simulates the race before the guard)
collector.ip_bytes_in.insert("orphan-ip".to_string(), AtomicU64::new(100));
collector.ip_bytes_out.insert("orphan-ip".to_string(), AtomicU64::new(200));
collector.ip_pending_tp.insert("orphan-ip".to_string(), (AtomicU64::new(0), AtomicU64::new(0)));
// No ip_connections entry for "orphan-ip"
assert!(collector.ip_connections.get("orphan-ip").is_none());
// sample_all should prune the orphans
collector.sample_all();
assert!(collector.ip_bytes_in.get("orphan-ip").is_none());
assert!(collector.ip_bytes_out.get("orphan-ip").is_none());
assert!(collector.ip_pending_tp.get("orphan-ip").is_none());
}
#[test]
fn test_throughput_history_in_snapshot() {
let collector = MetricsCollector::with_retention(60);
@@ -748,4 +1025,120 @@ mod tests {
assert_eq!(snapshot.throughput_history[0].bytes_in, 100);
assert_eq!(snapshot.throughput_history[4].bytes_in, 500);
}
#[test]
fn test_backend_metrics_basic() {
let collector = MetricsCollector::new();
let key = "backend1:8080";
// Open connections with timing
collector.backend_connection_opened(key, Duration::from_millis(15));
collector.backend_connection_opened(key, Duration::from_millis(25));
assert_eq!(collector.backend_active.get(key).unwrap().load(Ordering::Relaxed), 2);
assert_eq!(collector.backend_total.get(key).unwrap().load(Ordering::Relaxed), 2);
assert_eq!(collector.backend_connect_count.get(key).unwrap().load(Ordering::Relaxed), 2);
// 15ms + 25ms = 40ms = 40_000us
assert_eq!(collector.backend_connect_time_us.get(key).unwrap().load(Ordering::Relaxed), 40_000);
// Close one
collector.backend_connection_closed(key);
assert_eq!(collector.backend_active.get(key).unwrap().load(Ordering::Relaxed), 1);
// total stays
assert_eq!(collector.backend_total.get(key).unwrap().load(Ordering::Relaxed), 2);
// Record errors
collector.backend_connect_error(key);
collector.backend_handshake_error(key);
collector.backend_request_error(key);
collector.backend_h2_failure(key);
collector.backend_pool_hit(key);
collector.backend_pool_hit(key);
collector.backend_pool_miss(key);
assert_eq!(collector.backend_connect_errors.get(key).unwrap().load(Ordering::Relaxed), 1);
assert_eq!(collector.backend_handshake_errors.get(key).unwrap().load(Ordering::Relaxed), 1);
assert_eq!(collector.backend_request_errors.get(key).unwrap().load(Ordering::Relaxed), 1);
assert_eq!(collector.backend_h2_failures.get(key).unwrap().load(Ordering::Relaxed), 1);
assert_eq!(collector.backend_pool_hits.get(key).unwrap().load(Ordering::Relaxed), 2);
assert_eq!(collector.backend_pool_misses.get(key).unwrap().load(Ordering::Relaxed), 1);
// Protocol
collector.set_backend_protocol(key, "h1");
assert_eq!(collector.backend_protocol.get(key).unwrap().value(), "h1");
collector.set_backend_protocol(key, "h2");
assert_eq!(collector.backend_protocol.get(key).unwrap().value(), "h2");
}
#[test]
fn test_backend_metrics_in_snapshot() {
let collector = MetricsCollector::new();
collector.backend_connection_opened("b1:443", Duration::from_millis(10));
collector.backend_connection_opened("b2:8080", Duration::from_millis(20));
collector.set_backend_protocol("b1:443", "h2");
collector.set_backend_protocol("b2:8080", "h1");
collector.backend_connect_error("b1:443");
let snapshot = collector.snapshot();
assert_eq!(snapshot.backends.len(), 2);
let b1 = snapshot.backends.get("b1:443").unwrap();
assert_eq!(b1.active_connections, 1);
assert_eq!(b1.total_connections, 1);
assert_eq!(b1.protocol, "h2");
assert_eq!(b1.connect_errors, 1);
assert_eq!(b1.total_connect_time_us, 10_000);
assert_eq!(b1.connect_count, 1);
let b2 = snapshot.backends.get("b2:8080").unwrap();
assert_eq!(b2.protocol, "h1");
assert_eq!(b2.connect_errors, 0);
}
#[test]
fn test_retain_backends_prunes_stale() {
let collector = MetricsCollector::new();
collector.backend_connection_opened("active:443", Duration::from_millis(5));
collector.backend_connection_opened("stale:8080", Duration::from_millis(10));
collector.set_backend_protocol("active:443", "h1");
collector.set_backend_protocol("stale:8080", "h2");
collector.backend_connect_error("stale:8080");
let active = HashSet::from(["active:443".to_string()]);
collector.retain_backends(&active);
// active:443 should still exist
assert!(collector.backend_total.get("active:443").is_some());
assert!(collector.backend_protocol.get("active:443").is_some());
// stale:8080 should be fully removed
assert!(collector.backend_active.get("stale:8080").is_none());
assert!(collector.backend_total.get("stale:8080").is_none());
assert!(collector.backend_protocol.get("stale:8080").is_none());
assert!(collector.backend_connect_errors.get("stale:8080").is_none());
assert!(collector.backend_connect_time_us.get("stale:8080").is_none());
assert!(collector.backend_connect_count.get("stale:8080").is_none());
assert!(collector.backend_pool_hits.get("stale:8080").is_none());
assert!(collector.backend_pool_misses.get("stale:8080").is_none());
assert!(collector.backend_h2_failures.get("stale:8080").is_none());
}
#[test]
fn test_backend_connection_closed_saturates() {
let collector = MetricsCollector::new();
let key = "b:80";
// Close without opening — should not underflow
collector.backend_connection_closed(key);
// No entry created
assert!(collector.backend_active.get(key).is_none());
// Open one, close two — should saturate at 0
collector.backend_connection_opened(key, Duration::from_millis(1));
collector.backend_connection_closed(key);
collector.backend_connection_closed(key);
assert_eq!(collector.backend_active.get(key).unwrap().load(Ordering::Relaxed), 0);
}
}

View File

@@ -1,155 +0,0 @@
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::time::{Duration, Instant};
/// Per-connection tracking record with atomics for lock-free updates.
///
/// Each field uses atomics so that the forwarding tasks can update
/// bytes_received / bytes_sent / last_activity without holding any lock,
/// while the zombie scanner reads them concurrently.
pub struct ConnectionRecord {
/// Unique connection ID assigned by the ConnectionTracker.
pub id: u64,
/// Wall-clock instant when this connection was created.
pub created_at: Instant,
/// Milliseconds since `created_at` when the last activity occurred.
/// Updated atomically by the forwarding loops.
pub last_activity: AtomicU64,
/// Total bytes received from the client (inbound).
pub bytes_received: AtomicU64,
/// Total bytes sent to the client (outbound / from backend).
pub bytes_sent: AtomicU64,
/// True once the client side of the connection has closed.
pub client_closed: AtomicBool,
/// True once the backend side of the connection has closed.
pub backend_closed: AtomicBool,
/// Whether this connection uses TLS (affects zombie thresholds).
pub is_tls: AtomicBool,
/// Whether this connection has keep-alive semantics.
pub has_keep_alive: AtomicBool,
}
impl ConnectionRecord {
/// Create a new connection record with the given ID.
/// All counters start at zero, all flags start as false.
pub fn new(id: u64) -> Self {
Self {
id,
created_at: Instant::now(),
last_activity: AtomicU64::new(0),
bytes_received: AtomicU64::new(0),
bytes_sent: AtomicU64::new(0),
client_closed: AtomicBool::new(false),
backend_closed: AtomicBool::new(false),
is_tls: AtomicBool::new(false),
has_keep_alive: AtomicBool::new(false),
}
}
/// Update `last_activity` to reflect the current elapsed time.
pub fn touch(&self) {
let elapsed_ms = self.created_at.elapsed().as_millis() as u64;
self.last_activity.store(elapsed_ms, Ordering::Relaxed);
}
/// Record `n` bytes received from the client (inbound).
pub fn record_bytes_in(&self, n: u64) {
self.bytes_received.fetch_add(n, Ordering::Relaxed);
self.touch();
}
/// Record `n` bytes sent to the client (outbound / from backend).
pub fn record_bytes_out(&self, n: u64) {
self.bytes_sent.fetch_add(n, Ordering::Relaxed);
self.touch();
}
/// How long since the last activity on this connection.
pub fn idle_duration(&self) -> Duration {
let last_ms = self.last_activity.load(Ordering::Relaxed);
let age_ms = self.created_at.elapsed().as_millis() as u64;
Duration::from_millis(age_ms.saturating_sub(last_ms))
}
/// Total age of this connection (time since creation).
pub fn age(&self) -> Duration {
self.created_at.elapsed()
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::thread;
#[test]
fn test_new_record() {
let record = ConnectionRecord::new(42);
assert_eq!(record.id, 42);
assert_eq!(record.bytes_received.load(Ordering::Relaxed), 0);
assert_eq!(record.bytes_sent.load(Ordering::Relaxed), 0);
assert!(!record.client_closed.load(Ordering::Relaxed));
assert!(!record.backend_closed.load(Ordering::Relaxed));
assert!(!record.is_tls.load(Ordering::Relaxed));
assert!(!record.has_keep_alive.load(Ordering::Relaxed));
}
#[test]
fn test_record_bytes() {
let record = ConnectionRecord::new(1);
record.record_bytes_in(100);
record.record_bytes_in(200);
assert_eq!(record.bytes_received.load(Ordering::Relaxed), 300);
record.record_bytes_out(50);
record.record_bytes_out(75);
assert_eq!(record.bytes_sent.load(Ordering::Relaxed), 125);
}
#[test]
fn test_touch_updates_activity() {
let record = ConnectionRecord::new(1);
assert_eq!(record.last_activity.load(Ordering::Relaxed), 0);
// Sleep briefly so elapsed time is nonzero
thread::sleep(Duration::from_millis(10));
record.touch();
let activity = record.last_activity.load(Ordering::Relaxed);
assert!(activity >= 10, "last_activity should be at least 10ms, got {}", activity);
}
#[test]
fn test_idle_duration() {
let record = ConnectionRecord::new(1);
// Initially idle_duration ~ age since last_activity is 0
thread::sleep(Duration::from_millis(20));
let idle = record.idle_duration();
assert!(idle >= Duration::from_millis(20));
// After touch, idle should be near zero
record.touch();
let idle = record.idle_duration();
assert!(idle < Duration::from_millis(10));
}
#[test]
fn test_age() {
let record = ConnectionRecord::new(1);
thread::sleep(Duration::from_millis(20));
let age = record.age();
assert!(age >= Duration::from_millis(20));
}
#[test]
fn test_flags() {
let record = ConnectionRecord::new(1);
record.client_closed.store(true, Ordering::Relaxed);
record.is_tls.store(true, Ordering::Relaxed);
record.has_keep_alive.store(true, Ordering::Relaxed);
assert!(record.client_closed.load(Ordering::Relaxed));
assert!(!record.backend_closed.load(Ordering::Relaxed));
assert!(record.is_tls.load(Ordering::Relaxed));
assert!(record.has_keep_alive.load(Ordering::Relaxed));
}
}

View File

@@ -2,24 +2,9 @@ use dashmap::DashMap;
use std::collections::VecDeque;
use std::net::IpAddr;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio_util::sync::CancellationToken;
use tracing::{debug, warn};
use super::connection_record::ConnectionRecord;
/// Thresholds for zombie detection (non-TLS connections).
const HALF_ZOMBIE_TIMEOUT_PLAIN: Duration = Duration::from_secs(30);
/// Thresholds for zombie detection (TLS connections).
const HALF_ZOMBIE_TIMEOUT_TLS: Duration = Duration::from_secs(300);
/// Stuck connection timeout (non-TLS): received data but never sent any.
const STUCK_TIMEOUT_PLAIN: Duration = Duration::from_secs(60);
/// Stuck connection timeout (TLS): received data but never sent any.
const STUCK_TIMEOUT_TLS: Duration = Duration::from_secs(300);
/// Tracks active connections per IP and enforces per-IP limits and rate limiting.
/// Also maintains per-connection records for zombie detection.
pub struct ConnectionTracker {
/// Active connection counts per IP
active: DashMap<IpAddr, AtomicU64>,
@@ -29,10 +14,6 @@ pub struct ConnectionTracker {
max_per_ip: Option<u64>,
/// Maximum new connections per minute per IP (None = unlimited)
rate_limit_per_minute: Option<u64>,
/// Per-connection tracking records for zombie detection
connections: DashMap<u64, Arc<ConnectionRecord>>,
/// Monotonically increasing connection ID counter
next_id: AtomicU64,
} }
impl ConnectionTracker { impl ConnectionTracker {
@@ -42,8 +23,6 @@ impl ConnectionTracker {
timestamps: DashMap::new(), timestamps: DashMap::new(),
max_per_ip, max_per_ip,
rate_limit_per_minute, rate_limit_per_minute,
connections: DashMap::new(),
next_id: AtomicU64::new(1),
} }
} }
@@ -112,118 +91,27 @@ impl ConnectionTracker {
.unwrap_or(0) .unwrap_or(0)
} }
/// Prune stale timestamp entries for IPs that have no active connections
/// and no recent timestamps. This cleans up entries left by rate-limited IPs
/// that never had connection_opened called.
pub fn cleanup_stale_timestamps(&self) {
if self.rate_limit_per_minute.is_none() {
return; // No rate limiting — timestamps map should be empty
}
let now = Instant::now();
let one_minute = Duration::from_secs(60);
self.timestamps.retain(|ip, timestamps| {
timestamps.retain(|t| now.duration_since(*t) < one_minute);
// Keep if there are active connections or recent timestamps
!timestamps.is_empty() || self.active.contains_key(ip)
});
}
/// Get the total number of tracked IPs. /// Get the total number of tracked IPs.
pub fn tracked_ips(&self) -> usize { pub fn tracked_ips(&self) -> usize {
self.active.len() self.active.len()
} }
/// Register a new connection and return its tracking record.
///
/// The returned `Arc<ConnectionRecord>` should be passed to the forwarding
/// loop so it can update bytes / activity atomics in real time.
pub fn register_connection(&self, is_tls: bool) -> Arc<ConnectionRecord> {
let id = self.next_id.fetch_add(1, Ordering::Relaxed);
let record = Arc::new(ConnectionRecord::new(id));
record.is_tls.store(is_tls, Ordering::Relaxed);
self.connections.insert(id, Arc::clone(&record));
record
}
/// Remove a connection record when the connection is fully closed.
pub fn unregister_connection(&self, id: u64) {
self.connections.remove(&id);
}
/// Scan all tracked connections and return IDs of zombie connections.
///
/// A connection is considered a zombie in any of these cases:
/// - **Full zombie**: both `client_closed` and `backend_closed` are true.
/// - **Half zombie**: one side closed for longer than the threshold
/// (5 min for TLS, 30s for non-TLS).
/// - **Stuck**: `bytes_received > 0` but `bytes_sent == 0` for longer
/// than the stuck threshold (5 min for TLS, 60s for non-TLS).
pub fn scan_zombies(&self) -> Vec<u64> {
let mut zombies = Vec::new();
for entry in self.connections.iter() {
let record = entry.value();
let id = *entry.key();
let is_tls = record.is_tls.load(Ordering::Relaxed);
let client_closed = record.client_closed.load(Ordering::Relaxed);
let backend_closed = record.backend_closed.load(Ordering::Relaxed);
let idle = record.idle_duration();
let bytes_in = record.bytes_received.load(Ordering::Relaxed);
let bytes_out = record.bytes_sent.load(Ordering::Relaxed);
// Full zombie: both sides closed
if client_closed && backend_closed {
zombies.push(id);
continue;
}
// Half zombie: one side closed for too long
let half_timeout = if is_tls {
HALF_ZOMBIE_TIMEOUT_TLS
} else {
HALF_ZOMBIE_TIMEOUT_PLAIN
};
if (client_closed || backend_closed) && idle >= half_timeout {
zombies.push(id);
continue;
}
// Stuck: received data but never sent anything for too long
let stuck_timeout = if is_tls {
STUCK_TIMEOUT_TLS
} else {
STUCK_TIMEOUT_PLAIN
};
if bytes_in > 0 && bytes_out == 0 && idle >= stuck_timeout {
zombies.push(id);
}
}
zombies
}
/// Start a background task that periodically scans for zombie connections.
///
/// The scanner runs every 10 seconds and logs any zombies it finds.
/// It stops when the provided `CancellationToken` is cancelled.
pub fn start_zombie_scanner(self: &Arc<Self>, cancel: CancellationToken) {
let tracker = Arc::clone(self);
tokio::spawn(async move {
let interval = Duration::from_secs(10);
loop {
tokio::select! {
_ = cancel.cancelled() => {
debug!("Zombie scanner shutting down");
break;
}
_ = tokio::time::sleep(interval) => {
let zombies = tracker.scan_zombies();
if !zombies.is_empty() {
warn!(
"Cleaning up {} zombie connection(s): {:?}",
zombies.len(),
zombies
);
for id in &zombies {
tracker.unregister_connection(*id);
}
}
}
}
}
});
}
/// Get the total number of tracked connections (with records).
pub fn total_connections(&self) -> usize {
self.connections.len()
}
} }
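
The register/unregister pair and the background scanner are meant to be wired together at the accept path; a rough sketch of that wiring under the API shown in this file. The `proxy_one_connection` body is a placeholder, not code from the diff.

use std::sync::Arc;
use std::sync::atomic::Ordering;
use tokio_util::sync::CancellationToken;

async fn run(tracker: Arc<ConnectionTracker>) {
    // One background scanner per tracker; it stops when the token is cancelled.
    let cancel = CancellationToken::new();
    tracker.start_zombie_scanner(cancel.clone());

    // Per accepted connection: create a record and hand it to the task.
    let record = tracker.register_connection(/* is_tls */ true);
    let tracker_for_task = Arc::clone(&tracker);
    tokio::spawn(async move {
        let id = record.id;
        proxy_one_connection(&record).await;
        // Explicit unregister on the normal path; the scanner also reaps the
        // record on its next pass once both close flags are set.
        tracker_for_task.unregister_connection(id);
    });

    // On shutdown, stop the scanner.
    cancel.cancel();
}

// Placeholder forwarding loop so the sketch is self-contained.
async fn proxy_one_connection(record: &ConnectionRecord) {
    record.touch();
    record.client_closed.store(true, Ordering::Relaxed);
    record.backend_closed.store(true, Ordering::Relaxed);
}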
#[cfg(test)] #[cfg(test)]
@@ -333,98 +221,27 @@ mod tests {
} }
#[test] #[test]
-    fn test_register_unregister_connection() {
-        let tracker = ConnectionTracker::new(None, None);
-        assert_eq!(tracker.total_connections(), 0);
-        let record1 = tracker.register_connection(false);
-        assert_eq!(tracker.total_connections(), 1);
-        assert!(!record1.is_tls.load(Ordering::Relaxed));
-        let record2 = tracker.register_connection(true);
-        assert_eq!(tracker.total_connections(), 2);
-        assert!(record2.is_tls.load(Ordering::Relaxed));
-        // IDs should be unique
-        assert_ne!(record1.id, record2.id);
-        tracker.unregister_connection(record1.id);
-        assert_eq!(tracker.total_connections(), 1);
-        tracker.unregister_connection(record2.id);
-        assert_eq!(tracker.total_connections(), 0);
-    }
+    fn test_cleanup_stale_timestamps() {
+        // Rate limit of 100/min so timestamps are tracked
+        let tracker = ConnectionTracker::new(None, Some(100));
+        let ip: IpAddr = "10.0.0.1".parse().unwrap();
+        // try_accept adds a timestamp entry
+        assert!(tracker.try_accept(&ip));
+        // Simulate: connection was rate-limited and never accepted,
+        // so no connection_opened / connection_closed pair
+        assert!(tracker.timestamps.get(&ip).is_some());
+        assert!(tracker.active.get(&ip).is_none()); // never opened
+        // Cleanup won't remove it yet because timestamp is recent
+        tracker.cleanup_stale_timestamps();
+        assert!(tracker.timestamps.get(&ip).is_some());
+        // After expiry (use 0-second window trick: create tracker with 0 rate)
+        // Actually, we can't fast-forward time easily, so just verify the cleanup
+        // doesn't panic and handles the no-rate-limit case
+        let tracker2 = ConnectionTracker::new(None, None);
+        tracker2.cleanup_stale_timestamps(); // should be a no-op
+    }
#[test]
fn test_full_zombie_detection() {
let tracker = ConnectionTracker::new(None, None);
let record = tracker.register_connection(false);
// Not a zombie initially
assert!(tracker.scan_zombies().is_empty());
// Set both sides closed -> full zombie
record.client_closed.store(true, Ordering::Relaxed);
record.backend_closed.store(true, Ordering::Relaxed);
let zombies = tracker.scan_zombies();
assert_eq!(zombies.len(), 1);
assert_eq!(zombies[0], record.id);
}
#[test]
fn test_half_zombie_not_triggered_immediately() {
let tracker = ConnectionTracker::new(None, None);
let record = tracker.register_connection(false);
record.touch(); // mark activity now
// Only one side closed, but just now -> not a zombie yet
record.client_closed.store(true, Ordering::Relaxed);
assert!(tracker.scan_zombies().is_empty());
}
#[test]
fn test_stuck_connection_not_triggered_immediately() {
let tracker = ConnectionTracker::new(None, None);
let record = tracker.register_connection(false);
record.touch(); // mark activity now
// Has received data but sent nothing -> but just started, not stuck yet
record.bytes_received.store(1000, Ordering::Relaxed);
assert!(tracker.scan_zombies().is_empty());
}
#[test]
fn test_unregister_removes_from_zombie_scan() {
let tracker = ConnectionTracker::new(None, None);
let record = tracker.register_connection(false);
let id = record.id;
// Make it a full zombie
record.client_closed.store(true, Ordering::Relaxed);
record.backend_closed.store(true, Ordering::Relaxed);
assert_eq!(tracker.scan_zombies().len(), 1);
// Unregister should remove it
tracker.unregister_connection(id);
assert!(tracker.scan_zombies().is_empty());
}
#[test]
fn test_total_connections() {
let tracker = ConnectionTracker::new(None, None);
assert_eq!(tracker.total_connections(), 0);
let r1 = tracker.register_connection(false);
let r2 = tracker.register_connection(true);
let r3 = tracker.register_connection(false);
assert_eq!(tracker.total_connections(), 3);
tracker.unregister_connection(r2.id);
assert_eq!(tracker.total_connections(), 2);
tracker.unregister_connection(r1.id);
tracker.unregister_connection(r3.id);
assert_eq!(tracker.total_connections(), 0);
} }
} }

View File

@@ -8,7 +8,6 @@ pub mod sni_parser;
pub mod forwarder; pub mod forwarder;
pub mod proxy_protocol; pub mod proxy_protocol;
pub mod tls_handler; pub mod tls_handler;
pub mod connection_record;
pub mod connection_tracker; pub mod connection_tracker;
pub mod socket_relay; pub mod socket_relay;
pub mod socket_opts; pub mod socket_opts;
@@ -18,7 +17,6 @@ pub use sni_parser::*;
pub use forwarder::*; pub use forwarder::*;
pub use proxy_protocol::*; pub use proxy_protocol::*;
pub use tls_handler::*; pub use tls_handler::*;
pub use connection_record::*;
pub use connection_tracker::*; pub use connection_tracker::*;
pub use socket_relay::*; pub use socket_relay::*;
pub use socket_opts::*; pub use socket_opts::*;

View File

@@ -1,6 +1,7 @@
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
use arc_swap::ArcSwap; use arc_swap::ArcSwap;
use dashmap::DashMap;
use tokio::net::TcpListener; use tokio::net::TcpListener;
use tokio_rustls::TlsAcceptor; use tokio_rustls::TlsAcceptor;
use tokio_util::sync::CancellationToken; use tokio_util::sync::CancellationToken;
@@ -41,6 +42,25 @@ impl Drop for ConnectionGuard {
} }
} }
/// RAII guard that calls ConnectionTracker::connection_closed on drop.
/// Ensures per-IP tracking is cleaned up on ALL exit paths — normal, error, or panic.
struct ConnectionTrackerGuard {
tracker: Arc<ConnectionTracker>,
ip: std::net::IpAddr,
}
impl ConnectionTrackerGuard {
fn new(tracker: Arc<ConnectionTracker>, ip: std::net::IpAddr) -> Self {
Self { tracker, ip }
}
}
impl Drop for ConnectionTrackerGuard {
fn drop(&mut self) {
self.tracker.connection_closed(&self.ip);
}
}
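
The point of the guard is that `drop` runs on every exit path, including panics inside the spawned task, so the per-IP count cannot leak. A self-contained illustration of the same RAII idea with generic names, not the proxy's types:

struct CountGuard<'a> {
    count: &'a std::sync::atomic::AtomicU64,
}

impl Drop for CountGuard<'_> {
    fn drop(&mut self) {
        // Runs on normal return, early `?` exits, and panics alike.
        self.count.fetch_sub(1, std::sync::atomic::Ordering::Relaxed);
    }
}

fn handle(count: &std::sync::atomic::AtomicU64) {
    count.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
    let _guard = CountGuard { count };
    // ... do work; the matching decrement is guaranteed when _guard drops.
}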
#[derive(Debug, Error)] #[derive(Debug, Error)]
pub enum ListenerError { pub enum ListenerError {
#[error("Failed to bind port {port}: {source}")] #[error("Failed to bind port {port}: {source}")]
@@ -99,10 +119,10 @@ pub struct ConnectionConfig {
impl Default for ConnectionConfig { impl Default for ConnectionConfig {
fn default() -> Self { fn default() -> Self {
Self { Self {
-            connection_timeout_ms: 30_000,
+            connection_timeout_ms: 60_000,
             initial_data_timeout_ms: 60_000,
-            socket_timeout_ms: 3_600_000,
-            max_connection_lifetime_ms: 86_400_000,
+            socket_timeout_ms: 60_000,
+            max_connection_lifetime_ms: 3_600_000,
graceful_shutdown_timeout_ms: 30_000, graceful_shutdown_timeout_ms: 30_000,
max_connections_per_ip: None, max_connections_per_ip: None,
connection_rate_limit_per_minute: None, connection_rate_limit_per_minute: None,
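
With `Default` implemented, callers can override just the timeouts they care about and inherit the rest; a small sketch assuming the field names in this hunk, with arbitrary values:

fn custom_config() -> ConnectionConfig {
    ConnectionConfig {
        // Cap idle sockets at two minutes instead of the default above.
        socket_timeout_ms: 120_000,
        // Allow at most 500 concurrent connections from a single IP.
        max_connections_per_ip: Some(500),
        ..ConnectionConfig::default()
    }
}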
@@ -143,18 +163,28 @@ pub struct TcpListenerManager {
socket_handler_relay: Arc<std::sync::RwLock<Option<String>>>, socket_handler_relay: Arc<std::sync::RwLock<Option<String>>>,
/// Global connection semaphore — limits total simultaneous connections. /// Global connection semaphore — limits total simultaneous connections.
conn_semaphore: Arc<tokio::sync::Semaphore>, conn_semaphore: Arc<tokio::sync::Semaphore>,
/// Per-route cancellation tokens (child of cancel_token).
/// When a route is removed, its token is cancelled, terminating all connections on that route.
route_cancels: Arc<DashMap<String, CancellationToken>>,
} }
impl TcpListenerManager { impl TcpListenerManager {
pub fn new(route_manager: Arc<RouteManager>) -> Self { pub fn new(route_manager: Arc<RouteManager>) -> Self {
let metrics = Arc::new(MetricsCollector::new()); let metrics = Arc::new(MetricsCollector::new());
let conn_config = ConnectionConfig::default(); let conn_config = ConnectionConfig::default();
let route_manager_swap = Arc::new(ArcSwap::from(route_manager));
let mut http_proxy_svc = HttpProxyService::with_connect_timeout( let mut http_proxy_svc = HttpProxyService::with_connect_timeout(
Arc::clone(&route_manager), Arc::clone(&route_manager_swap),
Arc::clone(&metrics), Arc::clone(&metrics),
std::time::Duration::from_millis(conn_config.connection_timeout_ms), std::time::Duration::from_millis(conn_config.connection_timeout_ms),
); );
http_proxy_svc.set_backend_tls_config(tls_handler::shared_backend_tls_config()); http_proxy_svc.set_backend_tls_config(tls_handler::shared_backend_tls_config());
http_proxy_svc.set_backend_tls_config_alpn(tls_handler::shared_backend_tls_config_alpn());
http_proxy_svc.set_connection_timeouts(
std::time::Duration::from_millis(conn_config.socket_timeout_ms),
std::time::Duration::from_millis(conn_config.socket_timeout_ms),
std::time::Duration::from_millis(conn_config.max_connection_lifetime_ms),
);
let http_proxy = Arc::new(http_proxy_svc); let http_proxy = Arc::new(http_proxy_svc);
let conn_tracker = Arc::new(ConnectionTracker::new( let conn_tracker = Arc::new(ConnectionTracker::new(
conn_config.max_connections_per_ip, conn_config.max_connections_per_ip,
@@ -163,7 +193,7 @@ impl TcpListenerManager {
let max_conns = conn_config.max_connections as usize; let max_conns = conn_config.max_connections as usize;
Self { Self {
listeners: HashMap::new(), listeners: HashMap::new(),
route_manager: Arc::new(ArcSwap::from(route_manager)), route_manager: route_manager_swap,
metrics, metrics,
tls_configs: Arc::new(ArcSwap::from(Arc::new(HashMap::new()))), tls_configs: Arc::new(ArcSwap::from(Arc::new(HashMap::new()))),
shared_tls_acceptor: Arc::new(ArcSwap::from(Arc::new(None))), shared_tls_acceptor: Arc::new(ArcSwap::from(Arc::new(None))),
@@ -173,18 +203,26 @@ impl TcpListenerManager {
cancel_token: CancellationToken::new(), cancel_token: CancellationToken::new(),
socket_handler_relay: Arc::new(std::sync::RwLock::new(None)), socket_handler_relay: Arc::new(std::sync::RwLock::new(None)),
conn_semaphore: Arc::new(tokio::sync::Semaphore::new(max_conns)), conn_semaphore: Arc::new(tokio::sync::Semaphore::new(max_conns)),
route_cancels: Arc::new(DashMap::new()),
} }
} }
/// Create with a metrics collector. /// Create with a metrics collector.
pub fn with_metrics(route_manager: Arc<RouteManager>, metrics: Arc<MetricsCollector>) -> Self { pub fn with_metrics(route_manager: Arc<RouteManager>, metrics: Arc<MetricsCollector>) -> Self {
let conn_config = ConnectionConfig::default(); let conn_config = ConnectionConfig::default();
let route_manager_swap = Arc::new(ArcSwap::from(route_manager));
let mut http_proxy_svc = HttpProxyService::with_connect_timeout( let mut http_proxy_svc = HttpProxyService::with_connect_timeout(
Arc::clone(&route_manager), Arc::clone(&route_manager_swap),
Arc::clone(&metrics), Arc::clone(&metrics),
std::time::Duration::from_millis(conn_config.connection_timeout_ms), std::time::Duration::from_millis(conn_config.connection_timeout_ms),
); );
http_proxy_svc.set_backend_tls_config(tls_handler::shared_backend_tls_config()); http_proxy_svc.set_backend_tls_config(tls_handler::shared_backend_tls_config());
http_proxy_svc.set_backend_tls_config_alpn(tls_handler::shared_backend_tls_config_alpn());
http_proxy_svc.set_connection_timeouts(
std::time::Duration::from_millis(conn_config.socket_timeout_ms),
std::time::Duration::from_millis(conn_config.socket_timeout_ms),
std::time::Duration::from_millis(conn_config.max_connection_lifetime_ms),
);
let http_proxy = Arc::new(http_proxy_svc); let http_proxy = Arc::new(http_proxy_svc);
let conn_tracker = Arc::new(ConnectionTracker::new( let conn_tracker = Arc::new(ConnectionTracker::new(
conn_config.max_connections_per_ip, conn_config.max_connections_per_ip,
@@ -193,7 +231,7 @@ impl TcpListenerManager {
let max_conns = conn_config.max_connections as usize; let max_conns = conn_config.max_connections as usize;
Self { Self {
listeners: HashMap::new(), listeners: HashMap::new(),
route_manager: Arc::new(ArcSwap::from(route_manager)), route_manager: route_manager_swap,
metrics, metrics,
tls_configs: Arc::new(ArcSwap::from(Arc::new(HashMap::new()))), tls_configs: Arc::new(ArcSwap::from(Arc::new(HashMap::new()))),
shared_tls_acceptor: Arc::new(ArcSwap::from(Arc::new(None))), shared_tls_acceptor: Arc::new(ArcSwap::from(Arc::new(None))),
@@ -203,6 +241,7 @@ impl TcpListenerManager {
cancel_token: CancellationToken::new(), cancel_token: CancellationToken::new(),
socket_handler_relay: Arc::new(std::sync::RwLock::new(None)), socket_handler_relay: Arc::new(std::sync::RwLock::new(None)),
conn_semaphore: Arc::new(tokio::sync::Semaphore::new(max_conns)), conn_semaphore: Arc::new(tokio::sync::Semaphore::new(max_conns)),
route_cancels: Arc::new(DashMap::new()),
} }
} }
@@ -213,6 +252,22 @@ impl TcpListenerManager {
config.connection_rate_limit_per_minute, config.connection_rate_limit_per_minute,
)); ));
self.conn_semaphore = Arc::new(tokio::sync::Semaphore::new(config.max_connections as usize)); self.conn_semaphore = Arc::new(tokio::sync::Semaphore::new(config.max_connections as usize));
// Rebuild http_proxy with updated timeouts (shares the same ArcSwap<RouteManager>)
let mut http_proxy_svc = HttpProxyService::with_connect_timeout(
Arc::clone(&self.route_manager),
Arc::clone(&self.metrics),
std::time::Duration::from_millis(config.connection_timeout_ms),
);
http_proxy_svc.set_backend_tls_config(tls_handler::shared_backend_tls_config());
http_proxy_svc.set_backend_tls_config_alpn(tls_handler::shared_backend_tls_config_alpn());
http_proxy_svc.set_connection_timeouts(
std::time::Duration::from_millis(config.socket_timeout_ms),
std::time::Duration::from_millis(config.socket_timeout_ms),
std::time::Duration::from_millis(config.max_connection_lifetime_ms),
);
self.http_proxy = Arc::new(http_proxy_svc);
self.conn_config = Arc::new(config); self.conn_config = Arc::new(config);
} }
@@ -269,12 +324,13 @@ impl TcpListenerManager {
let cancel = self.cancel_token.clone(); let cancel = self.cancel_token.clone();
let relay = Arc::clone(&self.socket_handler_relay); let relay = Arc::clone(&self.socket_handler_relay);
let semaphore = Arc::clone(&self.conn_semaphore); let semaphore = Arc::clone(&self.conn_semaphore);
let route_cancels = Arc::clone(&self.route_cancels);
let handle = tokio::spawn(async move { let handle = tokio::spawn(async move {
Self::accept_loop( Self::accept_loop(
listener, port, route_manager_swap, metrics, tls_configs, listener, port, route_manager_swap, metrics, tls_configs,
shared_tls_acceptor, http_proxy, conn_config, conn_tracker, cancel, relay, shared_tls_acceptor, http_proxy, conn_config, conn_tracker, cancel, relay,
semaphore, semaphore, route_cancels,
).await; ).await;
}); });
@@ -317,13 +373,15 @@ impl TcpListenerManager {
for (port, handle) in self.listeners.drain() { for (port, handle) in self.listeners.drain() {
let remaining = deadline.saturating_duration_since(tokio::time::Instant::now()); let remaining = deadline.saturating_duration_since(tokio::time::Instant::now());
let abort_handle = handle.abort_handle();
if remaining.is_zero() { if remaining.is_zero() {
handle.abort(); abort_handle.abort();
warn!("Force-stopped listener on port {} (timeout exceeded)", port); warn!("Force-stopped listener on port {} (timeout exceeded)", port);
} else { } else {
match tokio::time::timeout(remaining, handle).await { match tokio::time::timeout(remaining, handle).await {
Ok(_) => info!("Listener on port {} stopped gracefully", port), Ok(_) => info!("Listener on port {} stopped gracefully", port),
Err(_) => { Err(_) => {
abort_handle.abort();
warn!("Listener on port {} did not stop in time, aborting", port); warn!("Listener on port {} did not stop in time, aborting", port);
} }
} }
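
The `abort_handle()` is taken before the `JoinHandle` is consumed because `tokio::time::timeout(remaining, handle)` takes the handle by value; the separate `AbortHandle` is what still allows a forced abort once the deadline passes. A minimal sketch of the pattern with plain tokio types:

use std::time::Duration;
use tracing::{info, warn};

async fn stop_with_deadline(handle: tokio::task::JoinHandle<()>, remaining: Duration) {
    // AbortHandle stays usable even after the JoinHandle is moved below.
    let abort_handle = handle.abort_handle();
    match tokio::time::timeout(remaining, handle).await {
        Ok(_) => info!("task stopped gracefully"),
        Err(_) => {
            // Deadline elapsed: force the task to stop.
            abort_handle.abort();
            warn!("task did not stop in time, aborted");
        }
    }
}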
@@ -351,6 +409,30 @@ impl TcpListenerManager {
self.route_manager.store(route_manager); self.route_manager.store(route_manager);
} }
/// Cancel connections on routes that no longer exist in the active set.
/// Existing connections on removed routes are terminated via their per-route CancellationToken.
pub fn invalidate_removed_routes(&self, active_route_ids: &std::collections::HashSet<String>) {
self.route_cancels.retain(|id, token| {
if active_route_ids.contains(id) {
true
} else {
info!("Cancelling connections for removed route '{}'", id);
token.cancel();
false // remove cancelled token from map
}
});
}
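
The mechanism implied by this map: the first connection on a route lazily creates a child of the global token, every forwarding loop on that route selects on the child, and removing the route cancels only that child. A compact sketch of the same bookkeeping, independent of the proxy's structs; names are illustrative.

use dashmap::DashMap;
use std::collections::HashSet;
use tokio_util::sync::CancellationToken;

struct RouteCancels {
    global: CancellationToken,
    per_route: DashMap<String, CancellationToken>,
}

impl RouteCancels {
    /// Token used by connections on `route_id`; created lazily as a child
    /// of the global token so a full shutdown still cancels everything.
    fn token_for(&self, route_id: &str) -> CancellationToken {
        self.per_route
            .entry(route_id.to_string())
            .or_insert_with(|| self.global.child_token())
            .clone()
    }

    /// Cancel (and forget) tokens whose routes are no longer configured.
    fn invalidate_removed(&self, active: &HashSet<String>) {
        self.per_route.retain(|id, token| {
            if active.contains(id) {
                true
            } else {
                token.cancel();
                false
            }
        });
    }
}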
/// Prune HTTP proxy caches for route IDs that are no longer active.
pub fn prune_http_proxy_caches(&self, active_route_ids: &std::collections::HashSet<String>) {
self.http_proxy.prune_stale_routes(active_route_ids);
}
/// Get a reference to the connection tracker.
pub fn conn_tracker(&self) -> &Arc<ConnectionTracker> {
&self.conn_tracker
}
/// Get a reference to the metrics collector. /// Get a reference to the metrics collector.
pub fn metrics(&self) -> &Arc<MetricsCollector> { pub fn metrics(&self) -> &Arc<MetricsCollector> {
&self.metrics &self.metrics
@@ -370,6 +452,7 @@ impl TcpListenerManager {
cancel: CancellationToken, cancel: CancellationToken,
socket_handler_relay: Arc<std::sync::RwLock<Option<String>>>, socket_handler_relay: Arc<std::sync::RwLock<Option<String>>>,
conn_semaphore: Arc<tokio::sync::Semaphore>, conn_semaphore: Arc<tokio::sync::Semaphore>,
route_cancels: Arc<DashMap<String, CancellationToken>>,
) { ) {
loop { loop {
tokio::select! { tokio::select! {
@@ -424,18 +507,20 @@ impl TcpListenerManager {
let ct = Arc::clone(&conn_tracker); let ct = Arc::clone(&conn_tracker);
let cn = cancel.clone(); let cn = cancel.clone();
let sr = Arc::clone(&socket_handler_relay); let sr = Arc::clone(&socket_handler_relay);
let rc = Arc::clone(&route_cancels);
debug!("Accepted connection from {} on port {}", peer_addr, port); debug!("Accepted connection from {} on port {}", peer_addr, port);
tokio::spawn(async move { tokio::spawn(async move {
// Move permit into the task — auto-releases on drop // Move permit into the task — auto-releases on drop
let _permit = permit; let _permit = permit;
// RAII guard ensures connection_closed is called on all paths
let _ct_guard = ConnectionTrackerGuard::new(ct, ip);
let result = Self::handle_connection( let result = Self::handle_connection(
stream, port, peer_addr, rm, m, tc, sa, hp, cc, cn, sr, stream, port, peer_addr, rm, m, tc, sa, hp, cc, cn, sr, rc,
).await; ).await;
if let Err(e) = result { if let Err(e) = result {
debug!("Connection error from {}: {}", peer_addr, e); debug!("Connection error from {}: {}", peer_addr, e);
} }
ct.connection_closed(&ip);
}); });
} }
Err(e) => { Err(e) => {
@@ -461,6 +546,7 @@ impl TcpListenerManager {
conn_config: Arc<ConnectionConfig>, conn_config: Arc<ConnectionConfig>,
cancel: CancellationToken, cancel: CancellationToken,
socket_handler_relay: Arc<std::sync::RwLock<Option<String>>>, socket_handler_relay: Arc<std::sync::RwLock<Option<String>>>,
route_cancels: Arc<DashMap<String, CancellationToken>>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> { ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
use tokio::io::AsyncReadExt; use tokio::io::AsyncReadExt;
@@ -565,6 +651,14 @@ impl TcpListenerManager {
let target_port = target.port.resolve(port); let target_port = target.port.resolve(port);
let route_id = quick_match.route.id.as_deref(); let route_id = quick_match.route.id.as_deref();
// Resolve per-route cancel token (child of global cancel)
let conn_cancel = match route_id {
Some(id) => route_cancels.entry(id.to_string())
.or_insert_with(|| cancel.child_token())
.clone(),
None => cancel.clone(),
};
// Check route-level IP security // Check route-level IP security
if let Some(ref security) = quick_match.route.security { if let Some(ref security) = quick_match.route.security {
if !rustproxy_http::request_filter::RequestFilter::check_ip_security( if !rustproxy_http::request_filter::RequestFilter::check_ip_security(
@@ -619,7 +713,7 @@ impl TcpListenerManager {
let (_bytes_in, _bytes_out) = forwarder::forward_bidirectional_with_timeouts( let (_bytes_in, _bytes_out) = forwarder::forward_bidirectional_with_timeouts(
stream, backend_w, None, stream, backend_w, None,
inactivity_timeout, max_lifetime, cancel, inactivity_timeout, max_lifetime, conn_cancel,
Some(forwarder::ForwardMetricsCtx { Some(forwarder::ForwardMetricsCtx {
collector: Arc::clone(&metrics), collector: Arc::clone(&metrics),
route_id: route_id.map(|s| s.to_string()), route_id: route_id.map(|s| s.to_string()),
@@ -629,7 +723,7 @@ impl TcpListenerManager {
} else { } else {
let (_bytes_in, _bytes_out) = forwarder::forward_bidirectional_with_timeouts( let (_bytes_in, _bytes_out) = forwarder::forward_bidirectional_with_timeouts(
stream, backend, None, stream, backend, None,
inactivity_timeout, max_lifetime, cancel, inactivity_timeout, max_lifetime, conn_cancel,
Some(forwarder::ForwardMetricsCtx { Some(forwarder::ForwardMetricsCtx {
collector: Arc::clone(&metrics), collector: Arc::clone(&metrics),
route_id: route_id.map(|s| s.to_string()), route_id: route_id.map(|s| s.to_string()),
@@ -734,6 +828,16 @@ impl TcpListenerManager {
let route_id = route_match.route.id.as_deref(); let route_id = route_match.route.id.as_deref();
// Resolve per-route cancel token (child of global cancel).
// When this route is removed via updateRoutes, the token is cancelled,
// terminating all connections on this route.
let cancel = match route_id {
Some(id) => route_cancels.entry(id.to_string())
.or_insert_with(|| cancel.child_token())
.clone(),
None => cancel,
};
// Check route-level IP security for passthrough connections // Check route-level IP security for passthrough connections
if let Some(ref security) = route_match.route.security { if let Some(ref security) = route_match.route.security {
if !rustproxy_http::request_filter::RequestFilter::check_ip_security( if !rustproxy_http::request_filter::RequestFilter::check_ip_security(
@@ -761,7 +865,8 @@ impl TcpListenerManager {
stream, n, port, peer_addr, stream, n, port, peer_addr,
&route_match, domain.as_deref(), is_tls, &route_match, domain.as_deref(), is_tls,
&relay_socket_path, &relay_socket_path,
&metrics, route_id, Arc::clone(&metrics), route_id,
&conn_config, cancel.clone(),
).await; ).await;
} else { } else {
debug!("Socket-handler route matched but no relay path configured"); debug!("Socket-handler route matched but no relay path configured");
@@ -934,7 +1039,7 @@ impl TcpListenerManager {
let (_bytes_in, _bytes_out) = Self::forward_bidirectional_split_with_timeouts( let (_bytes_in, _bytes_out) = Self::forward_bidirectional_split_with_timeouts(
tls_read, tls_write, backend_read, backend_write, tls_read, tls_write, backend_read, backend_write,
inactivity_timeout, max_lifetime, inactivity_timeout, max_lifetime, cancel.clone(),
Some(forwarder::ForwardMetricsCtx { Some(forwarder::ForwardMetricsCtx {
collector: Arc::clone(&metrics), collector: Arc::clone(&metrics),
route_id: route_id.map(|s| s.to_string()), route_id: route_id.map(|s| s.to_string()),
@@ -993,7 +1098,7 @@ impl TcpListenerManager {
Self::handle_tls_reencrypt_tunnel( Self::handle_tls_reencrypt_tunnel(
buf_stream, &target_host, target_port, buf_stream, &target_host, target_port,
peer_addr, Arc::clone(&metrics), route_id, peer_addr, Arc::clone(&metrics), route_id,
&conn_config, &conn_config, cancel.clone(),
).await?; ).await?;
} }
Ok(()) Ok(())
@@ -1070,8 +1175,10 @@ impl TcpListenerManager {
domain: Option<&str>, domain: Option<&str>,
is_tls: bool, is_tls: bool,
relay_path: &str, relay_path: &str,
metrics: &MetricsCollector, metrics: Arc<MetricsCollector>,
route_id: Option<&str>, route_id: Option<&str>,
conn_config: &ConnectionConfig,
cancel: CancellationToken,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> { ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::UnixStream; use tokio::net::UnixStream;
@@ -1111,27 +1218,34 @@ impl TcpListenerManager {
// Forward initial data to the Unix socket // Forward initial data to the Unix socket
unix_stream.write_all(&initial_buf).await?; unix_stream.write_all(&initial_buf).await?;
-        // Bidirectional relay between TCP client and Unix socket handler
+        // Bidirectional relay with inactivity timeout, max lifetime, and cancellation.
+        // Split both streams and use the same watchdog pattern as other forwarding paths.
         let initial_len = initial_buf.len() as u64;
-        match tokio::io::copy_bidirectional(&mut stream, &mut unix_stream).await {
-            Ok((c2s, s2c)) => {
-                // Include initial data bytes that were forwarded before copy_bidirectional
-                let total_in = c2s + initial_len;
-                debug!("Socket handler relay complete for {}: {} bytes in, {} bytes out",
-                    route_key, total_in, s2c);
-                let ip = peer_addr.ip().to_string();
-                metrics.record_bytes(total_in, s2c, route_id, Some(&ip));
-            }
-            Err(e) => {
-                // Still record the initial data even on error
-                if initial_len > 0 {
-                    let ip = peer_addr.ip().to_string();
-                    metrics.record_bytes(initial_len, 0, route_id, Some(&ip));
-                }
-                debug!("Socket handler relay ended for {}: {}", route_key, e);
-            }
-        }
+        let inactivity_timeout = std::time::Duration::from_millis(conn_config.socket_timeout_ms);
+        let max_lifetime = std::time::Duration::from_millis(conn_config.max_connection_lifetime_ms);
+        let (tcp_read, tcp_write) = stream.into_split();
+        let (unix_read, unix_write) = unix_stream.into_split();
+        let ip_str = peer_addr.ip().to_string();
+        let (_bytes_in, _bytes_out) = Self::forward_bidirectional_split_with_timeouts(
+            tcp_read, tcp_write, unix_read, unix_write,
+            inactivity_timeout, max_lifetime, cancel,
+            Some(forwarder::ForwardMetricsCtx {
+                collector: Arc::clone(&metrics),
+                route_id: route_id.map(|s| s.to_string()),
+                source_ip: Some(ip_str.clone()),
+            }),
+        ).await;
+        // Include the initial data that was forwarded before the bidirectional relay
+        if initial_len > 0 {
+            metrics.record_bytes(initial_len, 0, route_id, Some(&ip_str));
+        }
+        debug!("Socket handler relay complete for {}: {} bytes in, {} bytes out",
+            route_key, _bytes_in + initial_len, _bytes_out);
Ok(()) Ok(())
} }
@@ -1146,6 +1260,7 @@ impl TcpListenerManager {
metrics: Arc<MetricsCollector>, metrics: Arc<MetricsCollector>,
route_id: Option<&str>, route_id: Option<&str>,
conn_config: &ConnectionConfig, conn_config: &ConnectionConfig,
cancel: CancellationToken,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> { ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Connect to backend over TLS with timeout // Connect to backend over TLS with timeout
let backend_tls = match tokio::time::timeout( let backend_tls = match tokio::time::timeout(
@@ -1190,7 +1305,7 @@ impl TcpListenerManager {
let (_bytes_in, _bytes_out) = Self::forward_bidirectional_split_with_timeouts( let (_bytes_in, _bytes_out) = Self::forward_bidirectional_split_with_timeouts(
client_read, client_write, backend_read, backend_write, client_read, client_write, backend_read, backend_write,
inactivity_timeout, max_lifetime, inactivity_timeout, max_lifetime, cancel,
Some(forwarder::ForwardMetricsCtx { Some(forwarder::ForwardMetricsCtx {
collector: metrics, collector: metrics,
route_id: route_id.map(|s| s.to_string()), route_id: route_id.map(|s| s.to_string()),
@@ -1265,6 +1380,7 @@ impl TcpListenerManager {
mut backend_write: W2, mut backend_write: W2,
inactivity_timeout: std::time::Duration, inactivity_timeout: std::time::Duration,
max_lifetime: std::time::Duration, max_lifetime: std::time::Duration,
cancel: CancellationToken,
metrics: Option<forwarder::ForwardMetricsCtx>, metrics: Option<forwarder::ForwardMetricsCtx>,
) -> (u64, u64) ) -> (u64, u64)
where where
@@ -1332,7 +1448,7 @@ impl TcpListenerManager {
total total
}); });
// Watchdog task: check for inactivity and max lifetime // Watchdog task: check for inactivity, max lifetime, and cancellation
let la_watch = Arc::clone(&last_activity); let la_watch = Arc::clone(&last_activity);
let c2b_handle = c2b.abort_handle(); let c2b_handle = c2b.abort_handle();
let b2c_handle = b2c.abort_handle(); let b2c_handle = b2c.abort_handle();
@@ -1340,29 +1456,37 @@ impl TcpListenerManager {
            let check_interval = std::time::Duration::from_secs(5);
            let mut last_seen = 0u64;
            loop {
-                tokio::time::sleep(check_interval).await;
-                // Check max lifetime
-                if start.elapsed() >= max_lifetime {
-                    debug!("Connection exceeded max lifetime, closing");
-                    c2b_handle.abort();
-                    b2c_handle.abort();
-                    break;
-                }
-                // Check inactivity
-                let current = la_watch.load(Ordering::Relaxed);
-                if current == last_seen {
-                    // No activity since last check
-                    let elapsed_since_activity = start.elapsed().as_millis() as u64 - current;
-                    if elapsed_since_activity >= inactivity_timeout.as_millis() as u64 {
-                        debug!("Connection inactive for {}ms, closing", elapsed_since_activity);
-                        c2b_handle.abort();
-                        b2c_handle.abort();
-                        break;
-                    }
-                }
-                last_seen = current;
+                tokio::select! {
+                    _ = cancel.cancelled() => {
+                        debug!("Split-stream connection cancelled by shutdown");
+                        c2b_handle.abort();
+                        b2c_handle.abort();
+                        break;
+                    }
+                    _ = tokio::time::sleep(check_interval) => {
+                        // Check max lifetime
+                        if start.elapsed() >= max_lifetime {
+                            debug!("Connection exceeded max lifetime, closing");
+                            c2b_handle.abort();
+                            b2c_handle.abort();
+                            break;
+                        }
+                        // Check inactivity
+                        let current = la_watch.load(Ordering::Relaxed);
+                        if current == last_seen {
+                            // No activity since last check
+                            let elapsed_since_activity = start.elapsed().as_millis() as u64 - current;
+                            if elapsed_since_activity >= inactivity_timeout.as_millis() as u64 {
+                                debug!("Connection inactive for {}ms, closing", elapsed_since_activity);
+                                c2b_handle.abort();
+                                b2c_handle.abort();
+                                break;
+                            }
+                        }
+                        last_seen = current;
+                    }
+                }
            }
}); });
@@ -1372,3 +1496,13 @@ impl TcpListenerManager {
(bytes_in, bytes_out) (bytes_in, bytes_out)
} }
} }
/// Safety net: cancel and abort all listener tasks if dropped without graceful_stop().
impl Drop for TcpListenerManager {
fn drop(&mut self) {
self.cancel_token.cancel();
for (_, handle) in self.listeners.drain() {
handle.abort();
}
}
}

View File

@@ -98,10 +98,24 @@ pub fn build_shared_tls_acceptor(resolver: CertResolver) -> Result<TlsAcceptor,
} }
/// Build a TLS acceptor from PEM-encoded cert and key data. /// Build a TLS acceptor from PEM-encoded cert and key data.
/// Advertises both h2 and http/1.1 via ALPN (for client-facing connections).
pub fn build_tls_acceptor(cert_pem: &str, key_pem: &str) -> Result<TlsAcceptor, Box<dyn std::error::Error + Send + Sync>> { pub fn build_tls_acceptor(cert_pem: &str, key_pem: &str) -> Result<TlsAcceptor, Box<dyn std::error::Error + Send + Sync>> {
build_tls_acceptor_with_config(cert_pem, key_pem, None) build_tls_acceptor_with_config(cert_pem, key_pem, None)
} }
/// Build a TLS acceptor for backend servers that only speak HTTP/1.1.
/// Does NOT advertise h2 in ALPN, preventing false h2 auto-detection.
pub fn build_tls_acceptor_h1_only(cert_pem: &str, key_pem: &str) -> Result<TlsAcceptor, Box<dyn std::error::Error + Send + Sync>> {
ensure_crypto_provider();
let certs = load_certs(cert_pem)?;
let key = load_private_key(key_pem)?;
let mut config = ServerConfig::builder()
.with_no_client_auth()
.with_single_cert(certs, key)?;
config.alpn_protocols = vec![b"http/1.1".to_vec()];
Ok(TlsAcceptor::from(Arc::new(config)))
}
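
A short usage sketch for the h1-only acceptor, assuming PEM strings are already in hand; after the handshake the negotiated ALPN value can be inspected to confirm h2 was never selected. Only tokio and tokio-rustls are assumed.

use tokio::net::TcpListener;

async fn accept_h1_only(cert_pem: &str, key_pem: &str) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let acceptor = build_tls_acceptor_h1_only(cert_pem, key_pem)?;
    let listener = TcpListener::bind("127.0.0.1:0").await?;
    let (tcp, _peer) = listener.accept().await?;
    let tls = acceptor.accept(tcp).await?;
    // Server side: ALPN is either http/1.1 or None here, never h2.
    let (_, conn) = tls.get_ref();
    assert_ne!(conn.alpn_protocol(), Some(b"h2".as_ref()));
    Ok(())
}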
/// Build a TLS acceptor with optional RouteTls configuration for version/cipher tuning. /// Build a TLS acceptor with optional RouteTls configuration for version/cipher tuning.
pub fn build_tls_acceptor_with_config( pub fn build_tls_acceptor_with_config(
cert_pem: &str, cert_pem: &str,
@@ -204,6 +218,25 @@ pub fn shared_backend_tls_config() -> Arc<rustls::ClientConfig> {
}).clone() }).clone()
} }
/// Get or create a shared backend TLS `ClientConfig` with ALPN `h2` + `http/1.1`.
///
/// Used for auto-detection mode: the backend server picks its preferred protocol
/// via ALPN, and the proxy reads the negotiated result to decide h1 vs h2 forwarding.
static SHARED_CLIENT_CONFIG_ALPN: OnceLock<Arc<rustls::ClientConfig>> = OnceLock::new();
pub fn shared_backend_tls_config_alpn() -> Arc<rustls::ClientConfig> {
SHARED_CLIENT_CONFIG_ALPN.get_or_init(|| {
ensure_crypto_provider();
let mut config = rustls::ClientConfig::builder()
.dangerous()
.with_custom_certificate_verifier(Arc::new(InsecureVerifier))
.with_no_client_auth();
config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
info!("Built shared backend TLS client config with ALPN h2+http/1.1 for auto-detection");
Arc::new(config)
}).clone()
}
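
On the backend side, auto-detection then amounts to reading the negotiated ALPN protocol after the handshake and choosing the forwarding path. A sketch assuming only `shared_backend_tls_config_alpn` from this file plus a recent tokio-rustls/rustls where `ServerName` lives in `rustls::pki_types`; the enum is illustrative, not a type from the diff.

use tokio::net::TcpStream;
use tokio_rustls::TlsConnector;

enum BackendProtocol { H1, H2 }

async fn detect_backend_protocol(host: &str, port: u16) -> std::io::Result<BackendProtocol> {
    let connector = TlsConnector::from(shared_backend_tls_config_alpn());
    let server_name = rustls::pki_types::ServerName::try_from(host.to_string())
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, e))?;
    let tcp = TcpStream::connect((host, port)).await?;
    let tls = connector.connect(server_name, tcp).await?;
    // The backend picks its preferred protocol; read the result back.
    let (_, conn) = tls.get_ref();
    Ok(match conn.alpn_protocol() {
        Some(p) if p == b"h2" => BackendProtocol::H2,
        _ => BackendProtocol::H1,
    })
}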
/// Connect to a backend with TLS (for terminate-and-reencrypt mode). /// Connect to a backend with TLS (for terminate-and-reencrypt mode).
/// Uses the shared backend TLS config for session resumption. /// Uses the shared backend TLS config for session resumption.
pub async fn connect_tls( pub async fn connect_tls(

View File

@@ -39,6 +39,7 @@ hyper = { workspace = true }
hyper-util = { workspace = true } hyper-util = { workspace = true }
http-body-util = { workspace = true } http-body-util = { workspace = true }
bytes = { workspace = true } bytes = { workspace = true }
mimalloc = { workspace = true }
[dev-dependencies] [dev-dependencies]
rcgen = { workspace = true } rcgen = { workspace = true }

View File

@@ -51,6 +51,7 @@ use rustproxy_passthrough::{TcpListenerManager, TlsCertConfig, ConnectionConfig}
use rustproxy_metrics::{MetricsCollector, Metrics, Statistics}; use rustproxy_metrics::{MetricsCollector, Metrics, Statistics};
use rustproxy_tls::{CertManager, CertStore, CertBundle, CertMetadata, CertSource}; use rustproxy_tls::{CertManager, CertStore, CertBundle, CertMetadata, CertSource};
use rustproxy_nftables::{NftManager, rule_builder}; use rustproxy_nftables::{NftManager, rule_builder};
use tokio_util::sync::CancellationToken;
/// Certificate status. /// Certificate status.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
@@ -79,6 +80,8 @@ pub struct RustProxy {
socket_handler_relay: Arc<std::sync::RwLock<Option<String>>>, socket_handler_relay: Arc<std::sync::RwLock<Option<String>>>,
/// Dynamically loaded certificates (via loadCertificate IPC), independent of CertManager. /// Dynamically loaded certificates (via loadCertificate IPC), independent of CertManager.
loaded_certs: HashMap<String, TlsCertConfig>, loaded_certs: HashMap<String, TlsCertConfig>,
/// Cancellation token for cooperative shutdown of background tasks.
cancel_token: CancellationToken,
} }
impl RustProxy { impl RustProxy {
@@ -121,6 +124,7 @@ impl RustProxy {
started_at: None, started_at: None,
socket_handler_relay: Arc::new(std::sync::RwLock::new(None)), socket_handler_relay: Arc::new(std::sync::RwLock::new(None)),
loaded_certs: HashMap::new(), loaded_certs: HashMap::new(),
cancel_token: CancellationToken::new(),
}) })
} }
@@ -299,18 +303,26 @@ impl RustProxy {
self.started = true; self.started = true;
self.started_at = Some(Instant::now()); self.started_at = Some(Instant::now());
// Start the throughput sampling task // Start the throughput sampling task with cooperative cancellation
let metrics = Arc::clone(&self.metrics); let metrics = Arc::clone(&self.metrics);
let conn_tracker = self.listener_manager.as_ref().unwrap().conn_tracker().clone();
let interval_ms = self.options.metrics.as_ref() let interval_ms = self.options.metrics.as_ref()
.and_then(|m| m.sample_interval_ms) .and_then(|m| m.sample_interval_ms)
.unwrap_or(1000); .unwrap_or(1000);
let sampling_cancel = self.cancel_token.clone();
self.sampling_handle = Some(tokio::spawn(async move { self.sampling_handle = Some(tokio::spawn(async move {
let mut interval = tokio::time::interval( let mut interval = tokio::time::interval(
std::time::Duration::from_millis(interval_ms) std::time::Duration::from_millis(interval_ms)
); );
loop { loop {
-                interval.tick().await;
-                metrics.sample_all();
+                tokio::select! {
+                    _ = sampling_cancel.cancelled() => break,
+                    _ = interval.tick() => {
+                        metrics.sample_all();
+                        // Periodically clean up stale rate-limit timestamp entries
+                        conn_tracker.cleanup_stale_timestamps();
+                    }
+                }
} }
})); }));
@@ -457,51 +469,59 @@ impl RustProxy {
.unwrap_or(80); .unwrap_or(80);
let interval = std::time::Duration::from_secs(check_interval_hours as u64 * 3600); let interval = std::time::Duration::from_secs(check_interval_hours as u64 * 3600);
let renewal_cancel = self.cancel_token.clone();
let handle = tokio::spawn(async move { let handle = tokio::spawn(async move {
loop { loop {
-                tokio::time::sleep(interval).await;
-                debug!("Certificate renewal check triggered (interval: {}h)", check_interval_hours);
+                tokio::select! {
+                    _ = renewal_cancel.cancelled() => {
+                        debug!("Renewal timer shutting down");
+                        break;
+                    }
+                    _ = tokio::time::sleep(interval) => {
+                        debug!("Certificate renewal check triggered (interval: {}h)", check_interval_hours);
                // Check which domains need renewal
                let domains = {
                    let cm = cm_arc.lock().await;
                    cm.check_renewals()
                };
                if domains.is_empty() {
                    debug!("No certificates need renewal");
                    continue;
                }
                info!("Renewing {} certificate(s)", domains.len());
                // Start challenge server for renewals
                let mut cs = challenge_server::ChallengeServer::new();
                if let Err(e) = cs.start(acme_port).await {
                    error!("Failed to start challenge server for renewal: {}", e);
                    continue;
                }
                for domain in &domains {
                    let cs_ref = &cs;
                    let mut cm = cm_arc.lock().await;
                    let result = cm.renew_domain(domain, |token, key_auth| {
                        cs_ref.set_challenge(token, key_auth);
                        async {}
                    }).await;
                    match result {
                        Ok(_bundle) => {
                            info!("Successfully renewed certificate for {}", domain);
                        }
                        Err(e) => {
                            error!("Failed to renew certificate for {}: {}", domain, e);
                        }
                    }
                }
                cs.stop().await;
+                    }
+                }
            }
}); });
@@ -516,14 +536,17 @@ impl RustProxy {
info!("Stopping RustProxy..."); info!("Stopping RustProxy...");
-        // Stop sampling task
+        // Signal all background tasks to stop cooperatively
+        self.cancel_token.cancel();
+        // Await sampling task (cooperative shutdown)
         if let Some(handle) = self.sampling_handle.take() {
-            handle.abort();
+            let _ = handle.await;
         }
-        // Stop renewal timer
+        // Await renewal timer (cooperative shutdown)
         if let Some(handle) = self.renewal_handle.take() {
-            handle.abort();
+            let _ = handle.await;
         }
// Stop challenge server if running // Stop challenge server if running
@@ -545,6 +568,8 @@ impl RustProxy {
} }
self.listener_manager = None; self.listener_manager = None;
self.started = false; self.started = false;
// Reset cancel token so proxy can be restarted
self.cancel_token = CancellationToken::new();
info!("RustProxy stopped"); info!("RustProxy stopped");
Ok(()) Ok(())
@@ -578,6 +603,31 @@ impl RustProxy {
.collect(); .collect();
self.metrics.retain_routes(&active_route_ids); self.metrics.retain_routes(&active_route_ids);
// Prune per-backend metrics for backends no longer in any route target.
// For PortSpec::Preserve routes, expand across all listening ports since
// the actual runtime port depends on the incoming connection.
let listening_ports = self.get_listening_ports();
let active_backends: HashSet<String> = routes.iter()
.filter_map(|r| r.action.targets.as_ref())
.flat_map(|targets| targets.iter())
.flat_map(|target| {
let hosts: Vec<String> = target.host.to_vec().into_iter().map(|s| s.to_string()).collect();
match &target.port {
rustproxy_config::PortSpec::Fixed(p) => {
hosts.into_iter().map(|h| format!("{}:{}", h, p)).collect::<Vec<_>>()
}
_ => {
// Preserve/special: expand across all listening ports
let lp = &listening_ports;
hosts.into_iter()
.flat_map(|h| lp.iter().map(move |p| format!("{}:{}", h, *p)))
.collect::<Vec<_>>()
}
}
})
.collect();
self.metrics.retain_backends(&active_backends);
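
Concretely, a fixed-port target contributes one `host:port` key per host, while a port-preserving target fans out across every listening port. A tiny sketch of the resulting key set, with hosts and ports invented for the example:

use std::collections::HashSet;

fn example_backend_keys() -> HashSet<String> {
    let listening_ports = [80u16, 443];
    // Fixed target 10.0.0.5:8080 yields exactly one key.
    let fixed: Vec<String> = vec![format!("{}:{}", "10.0.0.5", 8080)];
    // Port-preserving target 10.0.0.6 yields one key per listening port.
    let preserve: Vec<String> = listening_ports
        .iter()
        .map(|p| format!("{}:{}", "10.0.0.6", p))
        .collect();
    // Expected keys: {"10.0.0.5:8080", "10.0.0.6:80", "10.0.0.6:443"}
    fixed.into_iter().chain(preserve).collect()
}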
// Atomically swap the route table // Atomically swap the route table
let new_manager = Arc::new(new_manager); let new_manager = Arc::new(new_manager);
self.route_table.store(Arc::clone(&new_manager)); self.route_table.store(Arc::clone(&new_manager));
@@ -585,6 +635,10 @@ impl RustProxy {
// Update listener manager // Update listener manager
if let Some(ref mut listener) = self.listener_manager { if let Some(ref mut listener) = self.listener_manager {
listener.update_route_manager(Arc::clone(&new_manager)); listener.update_route_manager(Arc::clone(&new_manager));
// Cancel connections on routes that were removed or disabled
listener.invalidate_removed_routes(&active_route_ids);
// Prune HTTP proxy caches (rate limiters, regex cache, round-robin counters)
listener.prune_http_proxy_caches(&active_route_ids);
// Update TLS configs // Update TLS configs
let mut tls_configs = Self::extract_tls_configs(&routes); let mut tls_configs = Self::extract_tls_configs(&routes);
@@ -983,3 +1037,21 @@ impl RustProxy {
configs configs
} }
} }
/// Safety net: abort background tasks if RustProxy is dropped without calling stop().
/// Normal shutdown should still use stop() for graceful behavior.
impl Drop for RustProxy {
fn drop(&mut self) {
self.cancel_token.cancel();
if let Some(handle) = self.sampling_handle.take() {
handle.abort();
}
if let Some(handle) = self.renewal_handle.take() {
handle.abort();
}
// Cancel the listener manager's token and abort accept loops
if let Some(ref mut listener) = self.listener_manager {
listener.stop_all();
}
}
}

View File

@@ -1,3 +1,6 @@
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
use clap::Parser; use clap::Parser;
use tracing_subscriber::EnvFilter; use tracing_subscriber::EnvFilter;
use anyhow::Result; use anyhow::Result;

View File

@@ -195,7 +195,10 @@ pub async fn start_tls_http_backend(
) -> JoinHandle<()> { ) -> JoinHandle<()> {
use std::sync::Arc; use std::sync::Arc;
-    let acceptor = rustproxy_passthrough::build_tls_acceptor(cert_pem, key_pem)
+    // Use h1-only acceptor: test backends speak raw HTTP/1.1 text,
+    // so they must NOT advertise h2 via ALPN (which would cause
+    // auto-detect to attempt h2 binary framing and fail).
+    let acceptor = rustproxy_passthrough::build_tls_acceptor_h1_only(cert_pem, key_pem)
.expect("Failed to build TLS acceptor"); .expect("Failed to build TLS acceptor");
let acceptor = Arc::new(acceptor); let acceptor = Arc::new(acceptor);
let name = backend_name.to_string(); let name = backend_name.to_string();

View File

@@ -0,0 +1,70 @@
import * as net from 'net';
/**
* Finds `count` free ports by binding to port 0 and reading the OS-assigned port.
* All servers are opened simultaneously to guarantee uniqueness.
* Returns an array of ports that were free (and mutually distinct) at allocation time.
*/
export async function findFreePorts(count: number): Promise<number[]> {
const servers: net.Server[] = [];
const ports: number[] = [];
// Open all servers simultaneously on port 0
await Promise.all(
Array.from({ length: count }, () =>
new Promise<void>((resolve, reject) => {
const server = net.createServer();
server.listen(0, '127.0.0.1', () => {
const addr = server.address() as net.AddressInfo;
ports.push(addr.port);
servers.push(server);
resolve();
});
server.on('error', reject);
})
)
);
// Close all servers
await Promise.all(
servers.map(
(server) => new Promise<void>((resolve) => server.close(() => resolve()))
)
);
return ports;
}
/**
* Verifies that all given ports are free (not listening).
* Useful as a cleanup assertion at the end of tests.
* Throws if any port is still in use.
*/
export async function assertPortsFree(ports: number[]): Promise<void> {
const results = await Promise.all(
ports.map(
(port) =>
new Promise<{ port: number; free: boolean }>((resolve) => {
const client = net.connect({ port, host: '127.0.0.1' });
client.on('connect', () => {
client.destroy();
resolve({ port, free: false });
});
client.on('error', () => {
resolve({ port, free: true });
});
client.setTimeout(1000, () => {
client.destroy();
resolve({ port, free: true });
});
})
)
);
const occupied = results.filter((r) => !r.free);
if (occupied.length > 0) {
throw new Error(
`Ports still in use after cleanup: ${occupied.map((r) => r.port).join(', ')}`
);
}
}

View File

@@ -1,9 +1,12 @@
import { tap, expect } from '@git.zone/tstest/tapbundle'; import { tap, expect } from '@git.zone/tstest/tapbundle';
import { SmartProxy, SocketHandlers } from '../ts/index.js'; import { SmartProxy, SocketHandlers } from '../ts/index.js';
import * as net from 'net'; import * as net from 'net';
import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
// Test that HTTP-01 challenges are properly processed when the initial data arrives // Test that HTTP-01 challenges are properly processed when the initial data arrives
tap.test('should correctly handle HTTP-01 challenge requests with initial data chunk', async (tapTest) => { tap.test('should correctly handle HTTP-01 challenge requests with initial data chunk', async (tapTest) => {
const [PORT] = await findFreePorts(1);
// Prepare test data // Prepare test data
const challengeToken = 'test-acme-http01-challenge-token'; const challengeToken = 'test-acme-http01-challenge-token';
const challengeResponse = 'mock-response-for-challenge'; const challengeResponse = 'mock-response-for-challenge';
@@ -37,7 +40,7 @@ tap.test('should correctly handle HTTP-01 challenge requests with initial data c
routes: [{ routes: [{
name: 'acme-challenge-route', name: 'acme-challenge-route',
match: { match: {
ports: 47700, ports: PORT,
path: '/.well-known/acme-challenge/*' path: '/.well-known/acme-challenge/*'
}, },
action: { action: {
@@ -60,7 +63,7 @@ tap.test('should correctly handle HTTP-01 challenge requests with initial data c
// Connect to the proxy and send the HTTP-01 challenge request // Connect to the proxy and send the HTTP-01 challenge request
await new Promise<void>((resolve, reject) => { await new Promise<void>((resolve, reject) => {
testClient.connect(47700, 'localhost', () => { testClient.connect(PORT, 'localhost', () => {
// Send HTTP request for the challenge token // Send HTTP request for the challenge token
testClient.write( testClient.write(
`GET ${challengePath} HTTP/1.1\r\n` + `GET ${challengePath} HTTP/1.1\r\n` +
@@ -86,10 +89,13 @@ tap.test('should correctly handle HTTP-01 challenge requests with initial data c
// Cleanup // Cleanup
testClient.destroy(); testClient.destroy();
await proxy.stop(); await proxy.stop();
await assertPortsFree([PORT]);
}); });
// Test that non-existent challenge tokens return 404 // Test that non-existent challenge tokens return 404
tap.test('should return 404 for non-existent challenge tokens', async (tapTest) => { tap.test('should return 404 for non-existent challenge tokens', async (tapTest) => {
const [PORT] = await findFreePorts(1);
// Create a socket handler that behaves like a real ACME handler // Create a socket handler that behaves like a real ACME handler
const acmeHandler = SocketHandlers.httpServer((req, res) => { const acmeHandler = SocketHandlers.httpServer((req, res) => {
if (req.url?.startsWith('/.well-known/acme-challenge/')) { if (req.url?.startsWith('/.well-known/acme-challenge/')) {
@@ -113,7 +119,7 @@ tap.test('should return 404 for non-existent challenge tokens', async (tapTest)
routes: [{ routes: [{
name: 'acme-challenge-route', name: 'acme-challenge-route',
match: { match: {
ports: 47701, ports: PORT,
path: '/.well-known/acme-challenge/*' path: '/.well-known/acme-challenge/*'
}, },
action: { action: {
@@ -135,7 +141,7 @@ tap.test('should return 404 for non-existent challenge tokens', async (tapTest)
// Connect and send a request for a non-existent token // Connect and send a request for a non-existent token
await new Promise<void>((resolve, reject) => { await new Promise<void>((resolve, reject) => {
testClient.connect(47701, 'localhost', () => { testClient.connect(PORT, 'localhost', () => {
testClient.write( testClient.write(
'GET /.well-known/acme-challenge/invalid-token HTTP/1.1\r\n' + 'GET /.well-known/acme-challenge/invalid-token HTTP/1.1\r\n' +
'Host: test.example.com\r\n' + 'Host: test.example.com\r\n' +
@@ -157,6 +163,7 @@ tap.test('should return 404 for non-existent challenge tokens', async (tapTest)
// Cleanup // Cleanup
testClient.destroy(); testClient.destroy();
await proxy.stop(); await proxy.stop();
await assertPortsFree([PORT]);
}); });
export default tap.start(); export default tap.start();

View File

@@ -5,6 +5,7 @@ import * as fs from 'fs';
import * as path from 'path'; import * as path from 'path';
import { SmartProxy } from '../ts/proxies/smart-proxy/smart-proxy.js'; import { SmartProxy } from '../ts/proxies/smart-proxy/smart-proxy.js';
import type { IRouteConfig } from '../ts/proxies/smart-proxy/models/route-types.js'; import type { IRouteConfig } from '../ts/proxies/smart-proxy/models/route-types.js';
import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
// Setup test infrastructure
const testCertPath = path.join(process.cwd(), 'test', 'helpers', 'test-cert.pem');
@@ -13,8 +14,14 @@ const testKeyPath = path.join(process.cwd(), 'test', 'helpers', 'test-key.pem');
let testServer: net.Server;
let tlsTestServer: tls.Server;
let smartProxy: SmartProxy;
+let PROXY_TCP_PORT: number;
+let PROXY_TLS_PORT: number;
+let TCP_SERVER_PORT: number;
+let TLS_SERVER_PORT: number;
tap.test('setup test servers', async () => {
+[PROXY_TCP_PORT, PROXY_TLS_PORT, TCP_SERVER_PORT, TLS_SERVER_PORT] = await findFreePorts(4);
// Create TCP test server
testServer = net.createServer((socket) => {
socket.write('Connected to TCP test server\n');
@@ -24,8 +31,8 @@ tap.test('setup test servers', async () => {
});
await new Promise<void>((resolve) => {
-testServer.listen(47712, '127.0.0.1', () => {
+testServer.listen(TCP_SERVER_PORT, '127.0.0.1', () => {
-console.log('TCP test server listening on port 47712');
+console.log(`TCP test server listening on port ${TCP_SERVER_PORT}`);
resolve();
});
});
@@ -45,8 +52,8 @@ tap.test('setup test servers', async () => {
);
await new Promise<void>((resolve) => {
-tlsTestServer.listen(47713, '127.0.0.1', () => {
+tlsTestServer.listen(TLS_SERVER_PORT, '127.0.0.1', () => {
-console.log('TLS test server listening on port 47713');
+console.log(`TLS test server listening on port ${TLS_SERVER_PORT}`);
resolve();
});
});
@@ -60,13 +67,13 @@ tap.test('should forward TCP connections correctly', async () => {
{
name: 'tcp-forward',
match: {
-ports: 47710,
+ports: PROXY_TCP_PORT,
},
action: {
type: 'forward',
targets: [{
host: '127.0.0.1',
-port: 47712,
+port: TCP_SERVER_PORT,
}],
},
},
@@ -77,7 +84,7 @@ tap.test('should forward TCP connections correctly', async () => {
// Test TCP forwarding
const client = await new Promise<net.Socket>((resolve, reject) => {
-const socket = net.connect(47710, '127.0.0.1', () => {
+const socket = net.connect(PROXY_TCP_PORT, '127.0.0.1', () => {
console.log('Connected to proxy');
resolve(socket);
});
@@ -106,7 +113,7 @@ tap.test('should handle TLS passthrough correctly', async () => {
{
name: 'tls-passthrough',
match: {
-ports: 47711,
+ports: PROXY_TLS_PORT,
domains: 'test.example.com',
},
action: {
@@ -116,7 +123,7 @@ tap.test('should handle TLS passthrough correctly', async () => {
},
targets: [{
host: '127.0.0.1',
-port: 47713,
+port: TLS_SERVER_PORT,
}],
},
},
@@ -129,7 +136,7 @@ tap.test('should handle TLS passthrough correctly', async () => {
const client = await new Promise<tls.TLSSocket>((resolve, reject) => {
const socket = tls.connect(
{
-port: 47711,
+port: PROXY_TLS_PORT,
host: '127.0.0.1',
servername: 'test.example.com',
rejectUnauthorized: false,
@@ -164,7 +171,7 @@ tap.test('should handle SNI-based forwarding', async () => {
{
name: 'domain-a',
match: {
-ports: 47711,
+ports: PROXY_TLS_PORT,
domains: 'a.example.com',
},
action: {
@@ -174,14 +181,14 @@ tap.test('should handle SNI-based forwarding', async () => {
},
targets: [{
host: '127.0.0.1',
-port: 47713,
+port: TLS_SERVER_PORT,
}],
},
},
{
name: 'domain-b',
match: {
-ports: 47711,
+ports: PROXY_TLS_PORT,
domains: 'b.example.com',
},
action: {
@@ -191,7 +198,7 @@ tap.test('should handle SNI-based forwarding', async () => {
},
targets: [{
host: '127.0.0.1',
-port: 47713,
+port: TLS_SERVER_PORT,
}],
},
},
@@ -204,7 +211,7 @@ tap.test('should handle SNI-based forwarding', async () => {
const clientA = await new Promise<tls.TLSSocket>((resolve, reject) => {
const socket = tls.connect(
{
-port: 47711,
+port: PROXY_TLS_PORT,
host: '127.0.0.1',
servername: 'a.example.com',
rejectUnauthorized: false,
@@ -231,7 +238,7 @@ tap.test('should handle SNI-based forwarding', async () => {
const clientB = await new Promise<tls.TLSSocket>((resolve, reject) => {
const socket = tls.connect(
{
-port: 47711,
+port: PROXY_TLS_PORT,
host: '127.0.0.1',
servername: 'b.example.com',
rejectUnauthorized: false,
@@ -261,6 +268,7 @@ tap.test('should handle SNI-based forwarding', async () => {
tap.test('cleanup', async () => {
testServer.close();
tlsTestServer.close();
+await assertPortsFree([PROXY_TCP_PORT, PROXY_TLS_PORT, TCP_SERVER_PORT, TLS_SERVER_PORT]);
});
export default tap.start();
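
Note: the test diffs in this release all import `findFreePorts` and `assertPortsFree` from `./helpers/port-allocator.js`, a helper that is not shown in this compare view. A minimal sketch of what such a helper could look like, assuming it asks the OS for ephemeral ports and later verifies each port can be bound again; the names and behaviour here are assumptions, not the actual file:

```typescript
import * as net from 'net';

// Hypothetical sketch of test/helpers/port-allocator.ts.
export async function findFreePorts(count: number): Promise<number[]> {
  const ports: number[] = [];
  const servers: net.Server[] = [];
  // Bind to port 0 so the OS picks a free ephemeral port; keep the probe
  // servers open until all ports are collected so no port is returned twice.
  for (let i = 0; i < count; i++) {
    const server = net.createServer();
    await new Promise<void>((resolve, reject) => {
      server.once('error', reject);
      server.listen(0, '127.0.0.1', () => resolve());
    });
    ports.push((server.address() as net.AddressInfo).port);
    servers.push(server);
  }
  await Promise.all(servers.map((s) => new Promise<void>((r) => s.close(() => r()))));
  return ports;
}

export async function assertPortsFree(ports: number[]): Promise<void> {
  // A port counts as free if it can be bound and released again.
  for (const port of ports) {
    await new Promise<void>((resolve, reject) => {
      const probe = net.createServer();
      probe.once('error', (err) => reject(new Error(`Port ${port} still in use: ${err.message}`)));
      probe.listen(port, '127.0.0.1', () => probe.close(() => resolve()));
    });
  }
}
```

Closing each probe server before returning keeps the helper self-contained; the small window between releasing a probe and the test binding the port is usually acceptable in CI, which is presumably why the tests also assert the ports are free again after cleanup.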

View File

@@ -1,9 +1,12 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as net from 'net';
import { SmartProxy } from '../ts/proxies/smart-proxy/smart-proxy.js';
+import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
// Test to verify port forwarding works correctly
tap.test('forward connections should not be immediately closed', async (t) => {
+const [PROXY_PORT, SERVER_PORT] = await findFreePorts(2);
// Create a backend server that accepts connections
const testServer = net.createServer((socket) => {
console.log('Client connected to test server');
@@ -21,8 +24,8 @@ tap.test('forward connections should not be immediately closed', async (t) => {
// Listen on a non-privileged port
await new Promise<void>((resolve) => {
-testServer.listen(47721, '127.0.0.1', () => {
+testServer.listen(SERVER_PORT, '127.0.0.1', () => {
-console.log('Test server listening on port 47721');
+console.log(`Test server listening on port ${SERVER_PORT}`);
resolve();
});
});
@@ -34,13 +37,13 @@ tap.test('forward connections should not be immediately closed', async (t) => {
{
name: 'forward-test',
match: {
-ports: 47720,
+ports: PROXY_PORT,
},
action: {
type: 'forward',
targets: [{
host: '127.0.0.1',
-port: 47721,
+port: SERVER_PORT,
}],
},
},
@@ -51,7 +54,7 @@ tap.test('forward connections should not be immediately closed', async (t) => {
// Create a client connection through the proxy
const client = net.createConnection({
-port: 47720,
+port: PROXY_PORT,
host: '127.0.0.1',
});
@@ -105,6 +108,7 @@ tap.test('forward connections should not be immediately closed', async (t) => {
client.end();
await smartProxy.stop();
testServer.close();
+await assertPortsFree([PROXY_PORT, SERVER_PORT]);
});
export default tap.start();

View File

@@ -1,10 +1,13 @@
import { tap, expect } from '@git.zone/tstest/tapbundle';
import { SmartProxy } from '../ts/index.js';
import * as http from 'http';
+import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
tap.test('should forward HTTP connections on port 8080', async (tapTest) => {
+const [PROXY_PORT, TARGET_PORT] = await findFreePorts(2);
// Create a mock HTTP server to act as our target
-const targetPort = 47732;
+const targetPort = TARGET_PORT;
let receivedRequest = false;
let receivedPath = '';
@@ -36,7 +39,7 @@ tap.test('should forward HTTP connections on port 8080', async (tapTest) => {
routes: [{
name: 'test-route',
match: {
-ports: 47730
+ports: PROXY_PORT
// Remove domain restriction for HTTP connections
// Domain matching happens after HTTP headers are received
},
@@ -46,16 +49,16 @@ tap.test('should forward HTTP connections on port 8080', async (tapTest) => {
}
}]
});
await proxy.start();
// Give the proxy a moment to fully initialize
await new Promise(resolve => setTimeout(resolve, 500));
// Make an HTTP request to port 8080
const options = {
hostname: 'localhost',
-port: 47730,
+port: PROXY_PORT,
path: '/.well-known/acme-challenge/test-token',
method: 'GET',
headers: {
@@ -97,14 +100,17 @@ tap.test('should forward HTTP connections on port 8080', async (tapTest) => {
await new Promise<void>((resolve) => {
targetServer.close(() => resolve());
});
// Wait a bit to ensure port is fully released
await new Promise(resolve => setTimeout(resolve, 500));
+await assertPortsFree([PROXY_PORT, TARGET_PORT]);
});
tap.test('should handle basic HTTP request forwarding', async (tapTest) => {
+const [PROXY_PORT, TARGET_PORT] = await findFreePorts(2);
// Create a simple target server
-const targetPort = 47733;
+const targetPort = TARGET_PORT;
let receivedRequest = false;
const targetServer = http.createServer((req, res) => {
@@ -126,7 +132,7 @@ tap.test('should handle basic HTTP request forwarding', async (tapTest) => {
routes: [{
name: 'simple-forward',
match: {
-ports: 47731
+ports: PROXY_PORT
// Remove domain restriction for HTTP connections
},
action: {
@@ -142,7 +148,7 @@ tap.test('should handle basic HTTP request forwarding', async (tapTest) => {
// Make request
const options = {
hostname: 'localhost',
-port: 47731,
+port: PROXY_PORT,
path: '/test',
method: 'GET',
headers: {
@@ -184,9 +190,10 @@ tap.test('should handle basic HTTP request forwarding', async (tapTest) => {
await new Promise<void>((resolve) => {
targetServer.close(() => resolve());
});
// Wait a bit to ensure port is fully released
await new Promise(resolve => setTimeout(resolve, 500));
+await assertPortsFree([PROXY_PORT, TARGET_PORT]);
});
export default tap.start();

View File

@@ -2,15 +2,17 @@ import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as net from 'net';
import * as tls from 'tls';
import { SmartProxy } from '../ts/index.js';
+import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
let testProxy: SmartProxy;
let targetServer: net.Server;
-const ECHO_PORT = 47200;
+let ECHO_PORT: number;
-const PROXY_PORT = 47201;
+let PROXY_PORT: number;
// Create a simple echo server as target
tap.test('setup test environment', async () => {
+[ECHO_PORT, PROXY_PORT] = await findFreePorts(2);
// Create target server that echoes data back
targetServer = net.createServer((socket) => {
console.log('Target server: client connected');
@@ -148,6 +150,8 @@ tap.test('cleanup', async () => {
resolve();
});
});
+await assertPortsFree([ECHO_PORT, PROXY_PORT]);
});
export default tap.start();

View File

@@ -2,14 +2,16 @@ import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as plugins from '../ts/plugins.js';
import { SmartProxy } from '../ts/index.js';
import * as net from 'net';
+import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
let smartProxyInstance: SmartProxy;
let echoServer: net.Server;
-const echoServerPort = 47300;
+let echoServerPort: number;
-const proxyPort = 47301;
+let proxyPort: number;
// Create an echo server for testing
tap.test('should create echo server for testing', async () => {
+[echoServerPort, proxyPort] = await findFreePorts(2);
echoServer = net.createServer((socket) => {
socket.on('data', (data) => {
socket.write(data); // Echo back the data
@@ -267,6 +269,8 @@ tap.test('should clean up resources', async () => {
resolve();
});
});
+await assertPortsFree([echoServerPort, proxyPort]);
});
export default tap.start();

View File

@@ -7,15 +7,16 @@ import * as net from 'net';
import * as tls from 'tls';
import * as fs from 'fs';
import * as path from 'path';
+import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
// ---------------------------------------------------------------------------
-// Port assignments (47600-47620 range to avoid conflicts)
+// Port assignments (dynamically allocated to avoid conflicts)
// ---------------------------------------------------------------------------
-const HTTP_ECHO_PORT = 47600; // backend HTTP echo server
+let HTTP_ECHO_PORT: number;
-const PROXY_HTTP_PORT = 47601; // SmartProxy plain HTTP forwarding
+let PROXY_HTTP_PORT: number;
-const PROXY_HTTPS_PORT = 47602; // SmartProxy TLS-terminate HTTPS forwarding
+let PROXY_HTTPS_PORT: number;
-const TCP_ECHO_PORT = 47603; // backend TCP echo server
+let TCP_ECHO_PORT: number;
-const PROXY_TCP_PORT = 47604; // SmartProxy plain TCP forwarding
+let PROXY_TCP_PORT: number;
// ---------------------------------------------------------------------------
// Shared state
@@ -88,6 +89,8 @@ async function waitForMetrics(
// 1. Setup backend servers
// ===========================================================================
tap.test('setup - backend servers', async () => {
+[HTTP_ECHO_PORT, PROXY_HTTP_PORT, PROXY_HTTPS_PORT, TCP_ECHO_PORT, PROXY_TCP_PORT] = await findFreePorts(5);
// HTTP echo server: POST → echo:<body>, GET → ok
httpEchoServer = http.createServer((req, res) => {
if (req.method === 'POST') {
@@ -467,6 +470,8 @@ tap.test('cleanup', async () => {
resolve();
});
});
+await assertPortsFree([HTTP_ECHO_PORT, PROXY_HTTP_PORT, PROXY_HTTPS_PORT, TCP_ECHO_PORT, PROXY_TCP_PORT]);
});
export default tap.start();

View File

@@ -1,17 +1,19 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as net from 'net';
import { SmartProxy } from '../ts/proxies/smart-proxy/smart-proxy.js';
+import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
let echoServer: net.Server;
let proxy: SmartProxy;
-const ECHO_PORT = 47400;
+let ECHO_PORT: number;
-const PROXY_PORT_1 = 47401;
+let PROXY_PORT_1: number;
-const PROXY_PORT_2 = 47402;
+let PROXY_PORT_2: number;
tap.test('port forwarding should not immediately close connections', async (tools) => {
// Set a timeout for this test
tools.timeout(10000); // 10 seconds
+[ECHO_PORT, PROXY_PORT_1, PROXY_PORT_2] = await findFreePorts(3);
// Create an echo server
echoServer = await new Promise<net.Server>((resolve, reject) => {
const server = net.createServer((socket) => {
@@ -96,6 +98,7 @@ tap.test('cleanup', async () => {
});
});
}
+await assertPortsFree([ECHO_PORT, PROXY_PORT_1, PROXY_PORT_2]);
});
export default tap.start();

View File

@@ -9,13 +9,14 @@ import {
createPortOffset
} from '../ts/proxies/smart-proxy/utils/route-helpers.js';
import type { IRouteConfig, IRouteContext } from '../ts/proxies/smart-proxy/models/route-types.js';
+import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
// Test server and client utilities
let testServers: Array<{ server: net.Server; port: number }> = [];
let smartProxy: SmartProxy;
-const TEST_PORT_START = 47750;
+let TEST_PORTS: number[]; // 3 test server ports
-const PROXY_PORT_START = 48750;
+let PROXY_PORTS: number[]; // 6 proxy ports
const TEST_DATA = 'Hello through dynamic port mapper!';
// Cleanup function to close all servers and proxies
@@ -101,53 +102,60 @@ function createTestClient(port: number, data: string): Promise<string> {
// Set up test environment
tap.test('setup port mapping test environment', async () => {
+const allPorts = await findFreePorts(9);
+TEST_PORTS = allPorts.slice(0, 3);
+PROXY_PORTS = allPorts.slice(3, 9);
// Create multiple test servers on different ports
await Promise.all([
-createTestServer(TEST_PORT_START), // Server on port 47750
+createTestServer(TEST_PORTS[0]),
-createTestServer(TEST_PORT_START + 1), // Server on port 47751
+createTestServer(TEST_PORTS[1]),
-createTestServer(TEST_PORT_START + 2), // Server on port 47752
+createTestServer(TEST_PORTS[2]),
]);
+// Compute dynamic offset between proxy and test ports
+const portOffset = TEST_PORTS[1] - PROXY_PORTS[1];
// Create a SmartProxy with dynamic port mapping routes
smartProxy = new SmartProxy({
routes: [
// Simple function that returns the same port (identity mapping)
createPortMappingRoute({
-sourcePortRange: PROXY_PORT_START,
+sourcePortRange: PROXY_PORTS[0],
targetHost: 'localhost',
-portMapper: (context) => TEST_PORT_START,
+portMapper: (context) => TEST_PORTS[0],
name: 'Identity Port Mapping'
}),
-// Offset port mapping from 48751 to 47751 (offset -1000)
+// Offset port mapping using dynamic offset
createOffsetPortMappingRoute({
-ports: PROXY_PORT_START + 1,
+ports: PROXY_PORTS[1],
targetHost: 'localhost',
-offset: -1000,
+offset: portOffset,
-name: 'Offset Port Mapping (-1000)'
+name: `Offset Port Mapping (${portOffset})`
}),
// Dynamic route with conditional port mapping
createDynamicRoute({
-ports: [PROXY_PORT_START + 2, PROXY_PORT_START + 3],
+ports: [PROXY_PORTS[2], PROXY_PORTS[3]],
targetHost: (context) => {
// Dynamic host selection based on port
-return context.port === PROXY_PORT_START + 2 ? 'localhost' : '127.0.0.1';
+return context.port === PROXY_PORTS[2] ? 'localhost' : '127.0.0.1';
},
portMapper: (context) => {
// Port mapping logic based on incoming port
-if (context.port === PROXY_PORT_START + 2) {
+if (context.port === PROXY_PORTS[2]) {
-return TEST_PORT_START;
+return TEST_PORTS[0];
} else {
-return TEST_PORT_START + 2;
+return TEST_PORTS[2];
}
},
name: 'Dynamic Host and Port Mapping'
}),
// Smart load balancer for domain-based routing
createSmartLoadBalancer({
-ports: PROXY_PORT_START + 4,
+ports: PROXY_PORTS[4],
domainTargets: {
'test1.example.com': 'localhost',
'test2.example.com': '127.0.0.1'
@@ -155,9 +163,9 @@ tap.test('setup port mapping test environment', async () => {
portMapper: (context) => {
// Use different backend ports based on domain
if (context.domain === 'test1.example.com') {
-return TEST_PORT_START;
+return TEST_PORTS[0];
} else {
-return TEST_PORT_START + 1;
+return TEST_PORTS[1];
}
},
defaultTarget: 'localhost',
@@ -165,44 +173,45 @@ tap.test('setup port mapping test environment', async () => {
})
]
});
// Start the SmartProxy
await smartProxy.start();
});
-// Test 1: Simple identity port mapping (48750 -> 47750)
+// Test 1: Simple identity port mapping
tap.test('should map port using identity function', async () => {
-const response = await createTestClient(PROXY_PORT_START, TEST_DATA);
+const response = await createTestClient(PROXY_PORTS[0], TEST_DATA);
-expect(response).toEqual(`Server ${TEST_PORT_START} says: ${TEST_DATA}`);
+expect(response).toEqual(`Server ${TEST_PORTS[0]} says: ${TEST_DATA}`);
});
-// Test 2: Offset port mapping (48751 -> 47751)
+// Test 2: Offset port mapping
tap.test('should map port using offset function', async () => {
-const response = await createTestClient(PROXY_PORT_START + 1, TEST_DATA);
+const response = await createTestClient(PROXY_PORTS[1], TEST_DATA);
-expect(response).toEqual(`Server ${TEST_PORT_START + 1} says: ${TEST_DATA}`);
+expect(response).toEqual(`Server ${TEST_PORTS[1]} says: ${TEST_DATA}`);
});
// Test 3: Dynamic port and host mapping (conditional logic)
tap.test('should map port using dynamic logic', async () => {
-const response = await createTestClient(PROXY_PORT_START + 2, TEST_DATA);
+const response = await createTestClient(PROXY_PORTS[2], TEST_DATA);
-expect(response).toEqual(`Server ${TEST_PORT_START} says: ${TEST_DATA}`);
+expect(response).toEqual(`Server ${TEST_PORTS[0]} says: ${TEST_DATA}`);
});
// Test 4: Test reuse of createPortOffset helper
tap.test('should use createPortOffset helper for port mapping', async () => {
-// Test the createPortOffset helper
+// Test the createPortOffset helper with dynamic offset
-const offsetFn = createPortOffset(-1000);
+const portOffset = TEST_PORTS[1] - PROXY_PORTS[1];
+const offsetFn = createPortOffset(portOffset);
const context = {
-port: PROXY_PORT_START + 1,
+port: PROXY_PORTS[1],
clientIp: '127.0.0.1',
serverIp: '127.0.0.1',
isTls: false,
timestamp: Date.now(),
connectionId: 'test-connection'
} as IRouteContext;
const mappedPort = offsetFn(context);
-expect(mappedPort).toEqual(TEST_PORT_START + 1);
+expect(mappedPort).toEqual(TEST_PORTS[1]);
});
// Test 5: Test error handling for invalid port mapping functions
@@ -210,7 +219,7 @@ tap.test('should handle errors in port mapping functions', async () => {
// Create a route with a function that throws an error
const errorRoute: IRouteConfig = {
match: {
-ports: PROXY_PORT_START + 5
+ports: PROXY_PORTS[5]
},
action: {
type: 'forward',
@@ -229,7 +238,7 @@ tap.test('should handle errors in port mapping functions', async () => {
// The connection should fail or timeout
try {
-await createTestClient(PROXY_PORT_START + 5, TEST_DATA);
+await createTestClient(PROXY_PORTS[5], TEST_DATA);
// Connection should not succeed
expect(false).toBeTrue();
} catch (error) {
@@ -254,6 +263,8 @@ tap.test('cleanup port mapping test environment', async () => {
testServers = [];
smartProxy = null as any;
}
+await assertPortsFree([...TEST_PORTS, ...PROXY_PORTS]);
});
export default tap.start();
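
Note: the fixed -1000 offset is replaced by an offset computed from whatever ports the allocator returned. Per the test expectation above, `createPortOffset(offset)` maps an incoming port p to p + offset, so picking `offset = TEST_PORTS[1] - PROXY_PORTS[1]` guarantees traffic arriving on `PROXY_PORTS[1]` reaches `TEST_PORTS[1]` regardless of the allocated values. A quick check of the arithmetic with illustrative numbers:

```typescript
// Illustrative values only; real ports come from findFreePorts().
const proxyPort = 48123;
const testPort = 47981;
const offset = testPort - proxyPort; // -142
// createPortOffset(offset) maps an incoming port p to p + offset:
console.log(proxyPort + offset === testPort); // true
```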

View File

@@ -1,11 +1,19 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as net from 'net';
import { SmartProxy } from '../ts/proxies/smart-proxy/index.js';
+import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
let testServer: net.Server;
let smartProxy: SmartProxy;
-const TEST_SERVER_PORT = 47770;
+let TEST_SERVER_PORT: number;
-const PROXY_PORT = 47771;
+let PROXY_PORT: number;
+let CUSTOM_HOST_PORT: number;
+let CUSTOM_IP_PROXY_PORT: number;
+let CUSTOM_IP_TARGET_PORT: number;
+let CHAIN_DEFAULT_1_PORT: number;
+let CHAIN_DEFAULT_2_PORT: number;
+let CHAIN_PRESERVED_1_PORT: number;
+let CHAIN_PRESERVED_2_PORT: number;
const TEST_DATA = 'Hello through port proxy!';
// Track all created servers and proxies for proper cleanup
@@ -64,6 +72,7 @@ function createTestClient(port: number, data: string): Promise<string> {
// SETUP: Create a test server and a PortProxy instance.
tap.test('setup port proxy test environment', async () => {
+[TEST_SERVER_PORT, PROXY_PORT, CUSTOM_HOST_PORT, CUSTOM_IP_PROXY_PORT, CUSTOM_IP_TARGET_PORT, CHAIN_DEFAULT_1_PORT, CHAIN_DEFAULT_2_PORT, CHAIN_PRESERVED_1_PORT, CHAIN_PRESERVED_2_PORT] = await findFreePorts(9);
testServer = await createTestServer(TEST_SERVER_PORT);
smartProxy = new SmartProxy({
routes: [
@@ -110,7 +119,7 @@ tap.test('should forward TCP connections to custom host', async () => {
{
name: 'custom-host-route',
match: {
-ports: PROXY_PORT + 1
+ports: CUSTOM_HOST_PORT
},
action: {
type: 'forward',
@@ -128,9 +137,9 @@ tap.test('should forward TCP connections to custom host', async () => {
}
});
allProxies.push(customHostProxy); // Track this proxy
await customHostProxy.start();
-const response = await createTestClient(PROXY_PORT + 1, TEST_DATA);
+const response = await createTestClient(CUSTOM_HOST_PORT, TEST_DATA);
expect(response).toEqual(`Echo: ${TEST_DATA}`);
await customHostProxy.stop();
@@ -143,8 +152,8 @@ tap.test('should forward TCP connections to custom host', async () => {
// Modified to work in Docker/CI environments without needing 127.0.0.2
tap.test('should forward connections to custom IP', async () => {
// Set up ports that are FAR apart to avoid any possible confusion
-const forcedProxyPort = PROXY_PORT + 2; // 4003 - The port that our proxy listens on
+const forcedProxyPort = CUSTOM_IP_PROXY_PORT;
-const targetServerPort = TEST_SERVER_PORT + 200; // 4200 - Target test server on different port
+const targetServerPort = CUSTOM_IP_TARGET_PORT;
// Create a test server listening on a unique port on 127.0.0.1 (works in all environments)
const testServer2 = await createTestServer(targetServerPort, '127.0.0.1');
@@ -252,13 +261,13 @@ tap.test('should support optional source IP preservation in chained proxies', as
{
name: 'first-proxy-default-route',
match: {
-ports: PROXY_PORT + 4
+ports: CHAIN_DEFAULT_1_PORT
},
action: {
type: 'forward',
targets: [{
host: 'localhost',
-port: PROXY_PORT + 5
+port: CHAIN_DEFAULT_2_PORT
}]
}
}
@@ -274,7 +283,7 @@ tap.test('should support optional source IP preservation in chained proxies', as
{
name: 'second-proxy-default-route',
match: {
-ports: PROXY_PORT + 5
+ports: CHAIN_DEFAULT_2_PORT
},
action: {
type: 'forward',
@@ -296,7 +305,7 @@ tap.test('should support optional source IP preservation in chained proxies', as
await secondProxyDefault.start();
await firstProxyDefault.start();
-const response1 = await createTestClient(PROXY_PORT + 4, TEST_DATA);
+const response1 = await createTestClient(CHAIN_DEFAULT_1_PORT, TEST_DATA);
expect(response1).toEqual(`Echo: ${TEST_DATA}`);
await firstProxyDefault.stop();
await secondProxyDefault.stop();
@@ -313,13 +322,13 @@ tap.test('should support optional source IP preservation in chained proxies', as
{
name: 'first-proxy-preserved-route',
match: {
-ports: PROXY_PORT + 6
+ports: CHAIN_PRESERVED_1_PORT
},
action: {
type: 'forward',
targets: [{
host: 'localhost',
-port: PROXY_PORT + 7
+port: CHAIN_PRESERVED_2_PORT
}]
}
}
@@ -337,7 +346,7 @@ tap.test('should support optional source IP preservation in chained proxies', as
{
name: 'second-proxy-preserved-route',
match: {
-ports: PROXY_PORT + 7
+ports: CHAIN_PRESERVED_2_PORT
},
action: {
type: 'forward',
@@ -361,7 +370,7 @@ tap.test('should support optional source IP preservation in chained proxies', as
await secondProxyPreserved.start();
await firstProxyPreserved.start();
-const response2 = await createTestClient(PROXY_PORT + 6, TEST_DATA);
+const response2 = await createTestClient(CHAIN_PRESERVED_1_PORT, TEST_DATA);
expect(response2).toEqual(`Echo: ${TEST_DATA}`);
await firstProxyPreserved.stop();
await secondProxyPreserved.stop();
@@ -446,6 +455,8 @@ tap.test('cleanup port proxy test environment', async () => {
// Verify all resources are cleaned up
expect(allProxies.length).toEqual(0);
expect(allServers.length).toEqual(0);
+await assertPortsFree([TEST_SERVER_PORT, PROXY_PORT, CUSTOM_HOST_PORT, CUSTOM_IP_PROXY_PORT, CUSTOM_IP_TARGET_PORT, CHAIN_DEFAULT_1_PORT, CHAIN_DEFAULT_2_PORT, CHAIN_PRESERVED_1_PORT, CHAIN_PRESERVED_2_PORT]);
});
export default tap.start();

View File

@@ -7,10 +7,15 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import { SmartProxy } from '../ts/proxies/smart-proxy/index.js';
import type { IRouteConfig } from '../ts/proxies/smart-proxy/models/route-types.js';
+import { findFreePorts } from './helpers/port-allocator.js';
-// Use unique high ports for each test to avoid conflicts
+let testPorts: number[];
-let testPort = 20000;
+let portIndex = 0;
-const getNextPort = () => testPort++;
+const getNextPort = () => testPorts[portIndex++];
+tap.test('setup - allocate ports', async () => {
+testPorts = await findFreePorts(16);
+});
// --------------------------------- Single Route, No Domain Restriction ---------------------------------

View File

@@ -1,12 +1,15 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as net from 'net';
import { SmartProxy } from '../ts/index.js';
+import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
tap.test('should handle async handler that sets up listeners after delay', async () => {
+const [PORT] = await findFreePorts(1);
const proxy = new SmartProxy({
routes: [{
name: 'delayed-setup-handler',
-match: { ports: 7777 },
+match: { ports: PORT },
action: {
type: 'socket-handler',
socketHandler: async (socket, context) => {
@@ -41,7 +44,7 @@ tap.test('should handle async handler that sets up listeners after delay', async
});
await new Promise<void>((resolve, reject) => {
-client.connect(7777, 'localhost', () => {
+client.connect(PORT, 'localhost', () => {
// Send initial data immediately - this tests the race condition
client.write('initial-message\n');
resolve();
@@ -78,6 +81,7 @@ tap.test('should handle async handler that sets up listeners after delay', async
expect(response).toContain('RECEIVED: test-message');
await proxy.stop();
+await assertPortsFree([PORT]);
});
export default tap.start();

View File

@@ -2,15 +2,19 @@ import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as net from 'net';
import { SmartProxy } from '../ts/index.js';
import type { IRouteConfig } from '../ts/index.js';
+import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
let proxy: SmartProxy;
+let PORT: number;
tap.test('setup socket handler test', async () => {
+[PORT] = await findFreePorts(1);
// Create a simple socket handler route
const routes: IRouteConfig[] = [{
name: 'echo-handler',
match: {
-ports: 47780
+ports: PORT
// No domains restriction - matches all connections
},
action: {
@@ -43,11 +47,11 @@ tap.test('should handle socket with custom function', async () => {
let response = '';
await new Promise<void>((resolve, reject) => {
-client.connect(47780, 'localhost', () => {
+client.connect(PORT, 'localhost', () => {
console.log('Client connected to proxy');
resolve();
});
client.on('error', reject);
});
@@ -78,7 +82,7 @@ tap.test('should handle async socket handler', async () => {
// Update route with async handler
await proxy.updateRoutes([{
name: 'async-handler',
-match: { ports: 47780 },
+match: { ports: PORT },
action: {
type: 'socket-handler',
socketHandler: async (socket, context) => {
@@ -108,12 +112,12 @@ tap.test('should handle async socket handler', async () => {
});
await new Promise<void>((resolve, reject) => {
-client.connect(47780, 'localhost', () => {
+client.connect(PORT, 'localhost', () => {
// Send initial data to trigger the handler
client.write('test data\n');
resolve();
});
client.on('error', reject);
});
@@ -131,7 +135,7 @@ tap.test('should handle errors in socket handler', async () => {
// Update route with error-throwing handler
await proxy.updateRoutes([{
name: 'error-handler',
-match: { ports: 47780 },
+match: { ports: PORT },
action: {
type: 'socket-handler',
socketHandler: (socket, context) => {
@@ -148,12 +152,12 @@ tap.test('should handle errors in socket handler', async () => {
});
await new Promise<void>((resolve, reject) => {
-client.connect(47780, 'localhost', () => {
+client.connect(PORT, 'localhost', () => {
// Connection established - send data to trigger handler
client.write('trigger\n');
resolve();
});
client.on('error', () => {
// Ignore client errors - we expect the connection to be closed
});
@@ -168,6 +172,7 @@
tap.test('cleanup', async () => {
await proxy.stop();
+await assertPortsFree([PORT]);
});
export default tap.start();

View File

@@ -8,24 +8,25 @@ import * as https from 'https';
import * as fs from 'fs';
import * as path from 'path';
import { fileURLToPath } from 'url';
+import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// ────────────────────────────────────────────────────────────────────────────
-// Port assignments (unique to avoid conflicts with other tests)
+// Port assignments (dynamically allocated to avoid conflicts)
// ────────────────────────────────────────────────────────────────────────────
-const TCP_ECHO_PORT = 47500;
+let TCP_ECHO_PORT: number;
-const HTTP_ECHO_PORT = 47501;
+let HTTP_ECHO_PORT: number;
-const TLS_ECHO_PORT = 47502;
+let TLS_ECHO_PORT: number;
-const PROXY_TCP_PORT = 47510;
+let PROXY_TCP_PORT: number;
-const PROXY_HTTP_PORT = 47511;
+let PROXY_HTTP_PORT: number;
-const PROXY_TLS_PASS_PORT = 47512;
+let PROXY_TLS_PASS_PORT: number;
-const PROXY_TLS_TERM_PORT = 47513;
+let PROXY_TLS_TERM_PORT: number;
-const PROXY_SOCKET_PORT = 47514;
+let PROXY_SOCKET_PORT: number;
-const PROXY_MULTI_A_PORT = 47515;
+let PROXY_MULTI_A_PORT: number;
-const PROXY_MULTI_B_PORT = 47516;
+let PROXY_MULTI_B_PORT: number;
-const PROXY_TP_HTTP_PORT = 47517;
+let PROXY_TP_HTTP_PORT: number;
// ────────────────────────────────────────────────────────────────────────────
// Test certificates
@@ -49,6 +50,8 @@ async function pollMetrics(proxy: SmartProxy): Promise<void> {
// Setup: backend servers
// ════════════════════════════════════════════════════════════════════════════
tap.test('setup - TCP echo server', async () => {
+[TCP_ECHO_PORT, HTTP_ECHO_PORT, TLS_ECHO_PORT, PROXY_TCP_PORT, PROXY_HTTP_PORT, PROXY_TLS_PASS_PORT, PROXY_TLS_TERM_PORT, PROXY_SOCKET_PORT, PROXY_MULTI_A_PORT, PROXY_MULTI_B_PORT, PROXY_TP_HTTP_PORT] = await findFreePorts(11);
tcpEchoServer = net.createServer((socket) => {
socket.on('data', (data) => socket.write(data));
socket.on('error', () => {});
@@ -700,6 +703,7 @@ tap.test('cleanup - close backend servers', async () => {
await new Promise<void>((resolve) => httpEchoServer.close(() => resolve()));
await new Promise<void>((resolve) => tlsEchoServer.close(() => resolve()));
console.log('All backend servers closed');
+await assertPortsFree([TCP_ECHO_PORT, HTTP_ECHO_PORT, TLS_ECHO_PORT, PROXY_TCP_PORT, PROXY_HTTP_PORT, PROXY_TLS_PASS_PORT, PROXY_TLS_TERM_PORT, PROXY_SOCKET_PORT, PROXY_MULTI_A_PORT, PROXY_MULTI_B_PORT, PROXY_TP_HTTP_PORT]);
});
export default tap.start();

View File

@@ -3,6 +3,6 @@
*/
export const commitinfo = {
name: '@push.rocks/smartproxy',
-version: '25.7.9',
+version: '25.10.3',
description: 'A powerful proxy package with unified route-based configuration for high traffic management. Features include SSL/TLS support, flexible routing patterns, WebSocket handling, advanced security options, and automatic ACME certificate management.'
}

View File

@@ -354,17 +354,17 @@ export class LogDeduplicator {
// Global instance for connection-related log deduplication
export const connectionLogDeduplicator = new LogDeduplicator(5000); // 5 second batches
-// Ensure logs are flushed on process exit
+// Ensure logs are flushed on process exit.
+// Only use beforeExit — do NOT call process.exit() from SIGINT/SIGTERM handlers
+// as that kills the host process's graceful shutdown (e.g., dcrouter connection draining).
process.on('beforeExit', () => {
connectionLogDeduplicator.flushAll();
});
process.on('SIGINT', () => {
connectionLogDeduplicator.cleanup();
-process.exit(0);
});
process.on('SIGTERM', () => {
connectionLogDeduplicator.cleanup();
-process.exit(0);
});
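
Note: with `process.exit()` removed from the library's SIGINT/SIGTERM handlers, shutdown timing is now owned by the host process. A sketch of what that host-side handling could look like, assuming a started SmartProxy instance named `proxy` (its `stop()` method is the same one the test cleanups above call; the rest is an assumption about the host application):

```typescript
// Sketch: the host decides when the process exits; the library only flushes logs.
const shutdown = async (signal: string) => {
  console.log(`${signal} received, draining connections...`);
  await proxy.stop(); // presumably honours gracefulShutdownTimeout
  process.exit(0);
};
process.on('SIGINT', () => void shutdown('SIGINT'));
process.on('SIGTERM', () => void shutdown('SIGTERM'));
```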

View File

@@ -18,8 +18,8 @@ export class ProtocolDetector {
private fragmentManager: DetectionFragmentManager;
private tlsDetector: TlsDetector;
private httpDetector: HttpDetector;
-private connectionProtocols: Map<string, 'tls' | 'http'> = new Map();
+private connectionProtocols: Map<string, { protocol: 'tls' | 'http'; createdAt: number }> = new Map();
constructor() {
this.fragmentManager = new DetectionFragmentManager();
this.tlsDetector = new TlsDetector();
@@ -124,8 +124,9 @@ export class ProtocolDetector {
const connectionId = DetectionFragmentManager.createConnectionId(context);
// Check if we already know the protocol for this connection
-const knownProtocol = this.connectionProtocols.get(connectionId);
+const knownEntry = this.connectionProtocols.get(connectionId);
+const knownProtocol = knownEntry?.protocol;
if (knownProtocol === 'http') {
const result = this.httpDetector.detectWithContext(buffer, context, options);
if (result) {
@@ -163,7 +164,7 @@ export class ProtocolDetector {
if (!knownProtocol) {
// First peek to determine protocol type
if (this.tlsDetector.canHandle(buffer)) {
-this.connectionProtocols.set(connectionId, 'tls');
+this.connectionProtocols.set(connectionId, { protocol: 'tls', createdAt: Date.now() });
// Handle TLS with fragment accumulation
const handler = this.fragmentManager.getHandler('tls');
const fragmentResult = handler.addFragment(connectionId, buffer);
@@ -189,7 +190,7 @@ export class ProtocolDetector {
}
if (this.httpDetector.canHandle(buffer)) {
-this.connectionProtocols.set(connectionId, 'http');
+this.connectionProtocols.set(connectionId, { protocol: 'http', createdAt: Date.now() });
const result = this.httpDetector.detectWithContext(buffer, context, options);
if (result) {
if (result.isComplete) {
@@ -221,6 +222,14 @@ export class ProtocolDetector {
private cleanupInstance(): void {
this.fragmentManager.cleanup();
+// Remove stale connectionProtocols entries (abandoned handshakes, port scanners)
+const maxAge = 30_000; // 30 seconds
+const now = Date.now();
+for (const [id, entry] of this.connectionProtocols) {
+if (now - entry.createdAt > maxAge) {
+this.connectionProtocols.delete(id);
+}
+}
}
/**
@@ -242,8 +251,7 @@ export class ProtocolDetector {
* @param _maxAge Maximum age in milliseconds (default: 30 seconds)
*/
static cleanupConnections(_maxAge: number = 30000): void {
-// Cleanup is now handled internally by the fragment manager
-this.getInstance().fragmentManager.cleanup();
+this.getInstance().cleanupInstance();
}
/**

View File

@@ -112,12 +112,12 @@ export interface ISmartProxyOptions {
maxVersion?: string;
// Timeout settings
-connectionTimeout?: number; // Timeout for establishing connection to backend (ms), default: 30000 (30s)
+connectionTimeout?: number; // Timeout for establishing connection to backend (ms), default: 60000 (60s)
initialDataTimeout?: number; // Timeout for initial data/SNI (ms), default: 60000 (60s)
-socketTimeout?: number; // Socket inactivity timeout (ms), default: 3600000 (1h)
+socketTimeout?: number; // Socket inactivity timeout (ms), default: 60000 (60s)
inactivityCheckInterval?: number; // How often to check for inactive connections (ms), default: 60000 (60s)
-maxConnectionLifetime?: number; // Default max connection lifetime (ms), default: 86400000 (24h)
+maxConnectionLifetime?: number; // Max connection lifetime (ms), default: 3600000 (1h)
-inactivityTimeout?: number; // Inactivity timeout (ms), default: 14400000 (4h)
+inactivityTimeout?: number; // Inactivity timeout (ms), default: 75000 (75s)
gracefulShutdownTimeout?: number; // (ms) maximum time to wait for connections to close during shutdown

View File

@@ -67,6 +67,13 @@ export interface IMetrics {
connections(): number;
};
+// Backend metrics
+backends: {
+byBackend(): Map<string, IBackendMetrics>;
+protocols(): Map<string, string>;
+topByErrors(limit?: number): Array<{ backend: string; errors: number }>;
+};
// Performance metrics
percentiles: {
connectionDuration(): { p50: number; p95: number; p99: number };
@@ -98,6 +105,21 @@ export interface IMetricsConfig {
prometheusPrefix: string; // Default: smartproxy_
}
+/**
+ * Per-backend metrics
+ */
+export interface IBackendMetrics {
+protocol: string;
+activeConnections: number;
+totalConnections: number;
+connectErrors: number;
+handshakeErrors: number;
+requestErrors: number;
+avgConnectTimeMs: number;
+poolHitRate: number;
+h2Failures: number;
+}
/**
* Internal interface for connection byte tracking
*/

View File

@@ -262,7 +262,7 @@ export interface IRouteAction {
// Additional options for backend-specific settings
options?: {
-backendProtocol?: 'http1' | 'http2';
+backendProtocol?: 'http1' | 'http2' | 'auto';
[key: string]: any;
};
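
Note: a route opting into the new `'auto'` value might look like the sketch below. The route shape mirrors the forward routes used in the test diffs above, but the exact detection behaviour behind `'auto'` is not spelled out in this hunk, so treat this as illustrative rather than the documented API:

```typescript
import { SmartProxy } from '@push.rocks/smartproxy'; // package name from ts/00_commitinfo_data.ts above

// Sketch: let the proxy decide http1 vs http2 toward the backend per route.
const proxy = new SmartProxy({
  routes: [{
    name: 'auto-backend-protocol',
    match: { ports: 8080, domains: 'app.example.com' },
    action: {
      type: 'forward',
      targets: [{ host: '127.0.0.1', port: 3000 }],
      options: { backendProtocol: 'auto' },
    },
  }],
});
```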

View File

@@ -1,4 +1,4 @@
-import type { IMetrics, IThroughputData, IThroughputHistoryPoint } from './models/metrics-types.js';
+import type { IMetrics, IBackendMetrics, IThroughputData, IThroughputHistoryPoint } from './models/metrics-types.js';
import type { RustProxyBridge } from './rust-proxy-bridge.js';
/**
@@ -169,6 +169,55 @@ export class RustMetricsAdapter implements IMetrics {
},
};
+public backends = {
+byBackend: (): Map<string, IBackendMetrics> => {
+const result = new Map<string, IBackendMetrics>();
+if (this.cache?.backends) {
+for (const [key, bm] of Object.entries(this.cache.backends)) {
+const m = bm as any;
+const totalTimeUs = m.totalConnectTimeUs ?? 0;
+const count = m.connectCount ?? 0;
+const poolHits = m.poolHits ?? 0;
+const poolMisses = m.poolMisses ?? 0;
+const poolTotal = poolHits + poolMisses;
+result.set(key, {
+protocol: m.protocol ?? 'unknown',
+activeConnections: m.activeConnections ?? 0,
+totalConnections: m.totalConnections ?? 0,
+connectErrors: m.connectErrors ?? 0,
+handshakeErrors: m.handshakeErrors ?? 0,
+requestErrors: m.requestErrors ?? 0,
+avgConnectTimeMs: count > 0 ? (totalTimeUs / count) / 1000 : 0,
+poolHitRate: poolTotal > 0 ? poolHits / poolTotal : 0,
+h2Failures: m.h2Failures ?? 0,
+});
+}
+}
+return result;
+},
+protocols: (): Map<string, string> => {
+const result = new Map<string, string>();
+if (this.cache?.backends) {
+for (const [key, bm] of Object.entries(this.cache.backends)) {
+result.set(key, (bm as any).protocol ?? 'unknown');
+}
+}
+return result;
+},
+topByErrors: (limit: number = 10): Array<{ backend: string; errors: number }> => {
+const result: Array<{ backend: string; errors: number }> = [];
+if (this.cache?.backends) {
+for (const [key, bm] of Object.entries(this.cache.backends)) {
+const m = bm as any;
+const errors = (m.connectErrors ?? 0) + (m.handshakeErrors ?? 0) + (m.requestErrors ?? 0);
+if (errors > 0) result.push({ backend: key, errors });
+}
+}
+result.sort((a, b) => b.errors - a.errors);
+return result.slice(0, limit);
+},
+};
public percentiles = {
connectionDuration: (): { p50: number; p95: number; p99: number } => {
return { p50: 0, p95: 0, p99: 0 };
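
Note: assuming calling code has access to an `IMetrics` instance backed by this adapter (how the instance is obtained from the proxy is not shown in this diff), the new per-backend view could be consumed along these lines:

```typescript
import type { IMetrics } from './models/metrics-types.js'; // path as used inside the package

// Hedged usage sketch of the new backends metrics surface.
function reportBackends(metrics: IMetrics): void {
  for (const [backend, m] of metrics.backends.byBackend()) {
    console.log(
      `${backend} [${m.protocol}] active=${m.activeConnections} ` +
      `avgConnect=${m.avgConnectTimeMs.toFixed(1)}ms poolHit=${(m.poolHitRate * 100).toFixed(0)}%`,
    );
  }
  // Surface the noisiest backends first, e.g. to feed an alerting hook.
  for (const { backend, errors } of metrics.backends.topByErrors(5)) {
    console.warn(`backend ${backend}: ${errors} errors`);
  }
}
```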

View File

@@ -47,16 +47,16 @@ export class SmartProxy extends plugins.EventEmitter {
// Apply defaults
this.settings = {
...settingsArg,
-initialDataTimeout: settingsArg.initialDataTimeout || 120000,
+initialDataTimeout: settingsArg.initialDataTimeout || 60_000,
-socketTimeout: settingsArg.socketTimeout || 3600000,
+socketTimeout: settingsArg.socketTimeout || 60_000,
-maxConnectionLifetime: settingsArg.maxConnectionLifetime || 86400000,
+maxConnectionLifetime: settingsArg.maxConnectionLifetime || 3_600_000,
-inactivityTimeout: settingsArg.inactivityTimeout || 14400000,
+inactivityTimeout: settingsArg.inactivityTimeout || 75_000,
-gracefulShutdownTimeout: settingsArg.gracefulShutdownTimeout || 30000,
+gracefulShutdownTimeout: settingsArg.gracefulShutdownTimeout || 30_000,
maxConnectionsPerIP: settingsArg.maxConnectionsPerIP || 100,
connectionRateLimitPerMinute: settingsArg.connectionRateLimitPerMinute || 300,
-keepAliveTreatment: settingsArg.keepAliveTreatment || 'extended',
+keepAliveTreatment: settingsArg.keepAliveTreatment || 'standard',
-keepAliveInactivityMultiplier: settingsArg.keepAliveInactivityMultiplier || 6,
+keepAliveInactivityMultiplier: settingsArg.keepAliveInactivityMultiplier || 4,
-extendedKeepAliveLifetime: settingsArg.extendedKeepAliveLifetime || 7 * 24 * 60 * 60 * 1000,
+extendedKeepAliveLifetime: settingsArg.extendedKeepAliveLifetime || 3_600_000,
};
// Normalize ACME options
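
Note: these defaults are now much tighter (60 s socket timeout, 1 h max lifetime, standard keep-alive), so deployments that relied on the old long-lived behaviour need to opt back in explicitly. A sketch using the option names from ISmartProxyOptions and the previous default values visible in this diff:

```typescript
// Sketch: restore the former long-lived connection behaviour explicitly.
const proxy = new SmartProxy({
  socketTimeout: 3_600_000,          // 1 h instead of the new 60 s default
  inactivityTimeout: 14_400_000,     // 4 h instead of 75 s
  maxConnectionLifetime: 86_400_000, // 24 h instead of 1 h
  keepAliveTreatment: 'extended',
  routes: [], // add real routes here
});
```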

View File

@@ -92,6 +92,16 @@ export class SocketHandlerServer {
let metadataBuffer = '';
let metadataParsed = false;
+// 10s timeout for metadata parsing phase — if Rust connects but never
+// sends the JSON metadata line, don't hold the socket open indefinitely.
+socket.setTimeout(10_000);
+socket.on('timeout', () => {
+if (!metadataParsed) {
+logger.log('warn', 'Socket handler metadata timeout, closing', { component: 'socket-handler-server' });
+socket.destroy();
+}
+});
const onData = (chunk: Buffer) => {
if (metadataParsed) return;
@@ -108,6 +118,7 @@
}
metadataParsed = true;
+socket.setTimeout(0); // Clear metadata timeout
socket.removeListener('data', onData);
socket.pause(); // Prevent data loss between handler removal and pipe setup
@@ -254,11 +265,30 @@ export class SocketHandlerServer {
// Connect to the resolved target
const backend = plugins.net.connect(port, host, () => {
+// Connection established — set idle timeout on both sides (5 min)
+socket.setTimeout(300_000);
+backend.setTimeout(300_000);
// Pipe bidirectionally
socket.pipe(backend);
backend.pipe(socket);
});
+// Connect timeout: if backend doesn't connect within 30s, destroy both
+backend.setTimeout(30_000);
+backend.on('timeout', () => {
+logger.log('warn', `Dynamic forward timeout to ${host}:${port}`, { component: 'socket-handler-server' });
+backend.destroy();
+socket.destroy();
+});
+socket.on('timeout', () => {
+logger.log('debug', `Dynamic forward client idle timeout`, { component: 'socket-handler-server' });
+socket.destroy();
+backend.destroy();
+});
backend.on('error', (err) => {
logger.log('error', `Dynamic forward backend error: ${err.message}`, { component: 'socket-handler-server' });
socket.destroy();