Compare commits
12 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 0f6752b9a7 |  |
|  | b8b7490d44 |  |
|  | 8c2042a2f5 |  |
|  | 3514260316 |  |
|  | f171cc8c5d |  |
|  | c7722c30f3 |  |
|  | 0ae882731a |  |
|  | 53d73c7dc6 |  |
|  | b4b8bd925d |  |
|  | 5ac44b898b |  |
|  | 9b4393b5ac |  |
|  | 02b4ed8018 |  |
changelog.md (+48)
@@ -1,5 +1,53 @@
# Changelog

## 2026-02-19 - 25.7.8 - fix(no-changes)

no changes detected; nothing to release

- Current package version: 25.7.7
- Git diff: no changes
- No files modified; no release necessary

## 2026-02-19 - 25.7.7 - fix(proxy)

restrict PROXY protocol parsing to configured trusted proxy IPs and parse PROXY headers before metrics/fast-path so client IPs reflect the real source

- Add proxy_ips: Vec<std::net::IpAddr> to ConnectionConfig with a default empty Vec
- Populate proxy_ips from options.proxy_ips strings in rust/crates/rustproxy/src/lib.rs, parsing each to IpAddr
- Only peek for and parse PROXY v1 headers when the remote IP is contained in proxy_ips (prevents untrusted clients from injecting PROXY headers)
- Move PROXY protocol parsing earlier so metrics and fast-path logic use the effective (real client) IP after PROXY parsing
- If proxy_ips is empty, behavior remains unchanged (no PROXY parsing)

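Illustrative note (not part of the changelog file or the diffs below): the trust gate described in this entry reduces to a containment check against the configured list before any PROXY v1 peek happens. A minimal sketch with simplified, hypothetical names (the real logic lives in the tcp_listener.rs changes further down in this comparison):

```rust
use std::net::{IpAddr, SocketAddr};

// Sketch only: stand-in for the check in the passthrough accept loop.
fn should_parse_proxy_header(trusted: &[IpAddr], peer: &SocketAddr) -> bool {
    // An empty list disables PROXY parsing entirely (previous behavior).
    !trusted.is_empty() && trusted.contains(&peer.ip())
}

fn main() {
    let trusted: Vec<IpAddr> = vec!["10.0.0.5".parse().unwrap()];
    let from_proxy: SocketAddr = "10.0.0.5:43120".parse().unwrap();
    let from_client: SocketAddr = "203.0.113.7:55010".parse().unwrap();
    assert!(should_parse_proxy_header(&trusted, &from_proxy));
    assert!(!should_parse_proxy_header(&trusted, &from_client));
}
```
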
## 2026-02-19 - 25.7.6 - fix(throughput)

add tests for per-IP connection tracking and throughput history; assert per-IP eviction after connection close to prevent memory leak

- Adds runtime assertions for per-IP TCP connection tracking (m.connections.byIP) while a connection is active
- Adds checks for throughput history (m.throughput.history) to ensure history length and timestamps are recorded
- Asserts that per-IP tracking data is evicted after connection close (byIP.size === 0) to verify memory leak fix
- Reorders test checks so per-IP and history metrics are validated during the active connection and totals are validated after close

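Illustrative note (not part of the diff): the eviction these tests assert comes down to removing a per-IP map entry when its connection counter reaches zero. A minimal sketch of that pattern using dashmap, with stand-in names rather than the real MetricsCollector fields:

```rust
use dashmap::DashMap;
use std::sync::atomic::{AtomicU64, Ordering};

// Sketch only: simplified per-IP counter map with eviction on last close.
struct PerIpConnections {
    active: DashMap<String, AtomicU64>,
}

impl PerIpConnections {
    fn opened(&self, ip: &str) {
        self.active
            .entry(ip.to_string())
            .or_insert_with(|| AtomicU64::new(0))
            .fetch_add(1, Ordering::Relaxed);
    }

    fn closed(&self, ip: &str) {
        if let Some(counter) = self.active.get(ip) {
            let prev = counter.fetch_sub(1, Ordering::Relaxed);
            drop(counter); // release the shard guard before removing
            if prev <= 1 {
                // Last connection for this IP is gone: evict the entry.
                self.active.remove(ip);
            }
        }
    }
}
```

The TypeScript assertion (byIP.size === 0 after close) observes exactly this removal from the JS side.
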
## 2026-02-19 - 25.7.5 - fix(rustproxy)

prune stale per-route metrics, add per-route rate limiter caching and regex cache, and improve connection tracking cleanup to prevent memory growth

- Prune per-route metrics for routes removed from configuration via MetricsCollector::retain_routes invoked during route table updates
- Introduce per-route shared RateLimiter instances (DashMap) with a request-count-triggered periodic cleanup to avoid stale limiters
- Cache compiled URL-rewrite regexes (regex_cache) to avoid recompiling patterns on every request and insert compiled regex on first use
- Improve upstream connection tracking to remove zero-count entries and guard against underflow, preventing unbounded DashMap growth
- Evict per-IP metrics and timestamps when the last connection for an IP closes so per-IP DashMap entries are fully freed
- Add unit tests validating connection tracking cleanup, per-IP eviction, and route-metrics retention behavior

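Illustrative note (not part of the diff): both the per-route rate limiters and the rewrite-regex cache follow the same get-or-create pattern on a DashMap. A minimal sketch of the regex variant, with hypothetical names (the real field stores Regex values directly; the Arc is only for this sketch):

```rust
use dashmap::DashMap;
use regex::Regex;
use std::sync::Arc;

// Sketch only: compile a pattern at most once per process, then reuse it.
struct RewriteCache {
    compiled: DashMap<String, Arc<Regex>>,
}

impl RewriteCache {
    fn get_or_compile(&self, pattern: &str) -> Option<Arc<Regex>> {
        if let Some(re) = self.compiled.get(pattern) {
            return Some(re.value().clone());
        }
        // First use: compile and cache for subsequent requests.
        // Two threads may race here; the last insert wins, which is harmless.
        let re = Arc::new(Regex::new(pattern).ok()?);
        self.compiled.insert(pattern.to_string(), re.clone());
        Some(re)
    }
}
```
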
## 2026-02-19 - 25.7.4 - fix(smart-proxy)

include proxy IPs in smart proxy configuration

- Add proxyIps: this.settings.proxyIPs to proxy options in ts/proxies/smart-proxy/smart-proxy.ts
- Ensures proxy IPs from settings are passed into the proxy implementation (enables proxy IP filtering/whitelisting)

## 2026-02-16 - 25.7.3 - fix(metrics)

centralize connection-closed reporting via ConnectionGuard and remove duplicate explicit metrics.connection_closed calls

- Removed numerous explicit metrics.connection_closed calls from rust/crates/rustproxy-http/src/proxy_service.rs so connection teardown and byte counting are handled by the connection guard / counting body instead of ad-hoc calls.
- Simplified ConnectionGuard in rust/crates/rustproxy-passthrough/src/tcp_listener.rs: removed the disarm flag and disarm() method so Drop always reports connection_closed.
- Stopped disarming the TCP-level guard when handing connections off to HTTP proxy paths (HTTP/WebSocket/streaming flows) to avoid missing or double-reporting metrics.
- Fixes incorrect/duplicate connection-closed metric emission and ensures consistent byte/connection accounting during streaming and WebSocket upgrades.

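Illustrative note (not part of the diff): the simplified guard is an ordinary RAII value whose Drop always reports the close, so every exit path is counted exactly once. A minimal sketch with a stand-in Metrics trait instead of the real MetricsCollector:

```rust
use std::sync::Arc;

// Sketch only: stand-in for the rustproxy-metrics collector.
trait Metrics {
    fn connection_closed(&self, route_id: Option<&str>, source_ip: Option<&str>);
}

struct ConnectionGuard<M: Metrics> {
    metrics: Arc<M>,
    route_id: Option<String>,
    source_ip: Option<String>,
}

impl<M: Metrics> Drop for ConnectionGuard<M> {
    fn drop(&mut self) {
        // No disarm flag: dropping the guard always reports the close,
        // regardless of which return path or error tore the connection down.
        self.metrics
            .connection_closed(self.route_id.as_deref(), self.source_ip.as_deref());
    }
}
```
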
## 2026-02-16 - 25.7.2 - fix(rustproxy-http)

preserve original Host header when proxying and add X-Forwarded-* headers; add TLS WebSocket echo backend helper and integration test for terminate-and-reencrypt websocket

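Illustrative note (not part of the diff): preserving the client's Host header and adding X-Forwarded-* headers is plain header manipulation on the upstream request. A minimal sketch using the http crate, with a hypothetical helper name and none of the streaming-body plumbing of the real proxy:

```rust
use http::header::{HeaderName, HeaderValue, HOST};
use http::Request;

// Sketch only: hypothetical helper, not the real rustproxy-http code.
fn add_forwarding_headers(
    upstream_req: &mut Request<Vec<u8>>,
    original_host: &str,
    client_ip: &str,
    client_scheme: &str,
) {
    let headers = upstream_req.headers_mut();
    // Keep the Host the client actually asked for instead of the backend's address.
    headers.insert(HOST, HeaderValue::from_str(original_host).unwrap());
    headers.insert(
        HeaderName::from_static("x-forwarded-for"),
        HeaderValue::from_str(client_ip).unwrap(),
    );
    headers.insert(
        HeaderName::from_static("x-forwarded-proto"),
        HeaderValue::from_str(client_scheme).unwrap(),
    );
}
```
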
@@ -1,6 +1,6 @@
{
  "name": "@push.rocks/smartproxy",
  "version": "25.7.2",
  "version": "25.7.8",
  "private": false,
  "description": "A powerful proxy package with unified route-based configuration for high traffic management. Features include SSL/TLS support, flexible routing patterns, WebSocket handling, advanced security options, and automatic ACME certificate management.",
  "main": "dist_ts/index.js",

@@ -9,6 +9,7 @@ use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};

use bytes::Bytes;
use dashmap::DashMap;
use http_body_util::{BodyExt, Full, combinators::BoxBody};
use hyper::body::Incoming;
use hyper::{Request, Response, StatusCode};

@@ -23,6 +24,7 @@ use std::task::{Context, Poll};

use rustproxy_routing::RouteManager;
use rustproxy_metrics::MetricsCollector;
use rustproxy_security::RateLimiter;

use crate::counting_body::{CountingBody, Direction};
use crate::request_filter::RequestFilter;

@@ -164,6 +166,12 @@ pub struct HttpProxyService {
    upstream_selector: UpstreamSelector,
    /// Timeout for connecting to upstream backends.
    connect_timeout: std::time::Duration,
    /// Per-route rate limiters (keyed by route ID).
    route_rate_limiters: Arc<DashMap<String, Arc<RateLimiter>>>,
    /// Request counter for periodic rate limiter cleanup.
    request_counter: AtomicU64,
    /// Cache of compiled URL rewrite regexes (keyed by pattern string).
    regex_cache: DashMap<String, Regex>,
}

impl HttpProxyService {

@@ -173,6 +181,9 @@ impl HttpProxyService {
            metrics,
            upstream_selector: UpstreamSelector::new(),
            connect_timeout: DEFAULT_CONNECT_TIMEOUT,
            route_rate_limiters: Arc::new(DashMap::new()),
            request_counter: AtomicU64::new(0),
            regex_cache: DashMap::new(),
        }
    }

@@ -187,6 +198,9 @@ impl HttpProxyService {
            metrics,
            upstream_selector: UpstreamSelector::new(),
            connect_timeout,
            route_rate_limiters: Arc::new(DashMap::new()),
            request_counter: AtomicU64::new(0),
            regex_cache: DashMap::new(),
        }
    }

@@ -309,20 +323,37 @@ impl HttpProxyService {
        let route_id = route_match.route.id.as_deref();
        let ip_str = peer_addr.ip().to_string();
        self.metrics.record_http_request();
        self.metrics.connection_opened(route_id, Some(&ip_str));

        // Apply request filters (IP check, rate limiting, auth)
        if let Some(ref security) = route_match.route.security {
            if let Some(response) = RequestFilter::apply(security, &req, &peer_addr) {
                self.metrics.connection_closed(route_id, Some(&ip_str));
            // Look up or create a shared rate limiter for this route
            let rate_limiter = security.rate_limit.as_ref()
                .filter(|rl| rl.enabled)
                .map(|rl| {
                    let route_key = route_id.unwrap_or("__default__").to_string();
                    self.route_rate_limiters
                        .entry(route_key)
                        .or_insert_with(|| Arc::new(RateLimiter::new(rl.max_requests, rl.window)))
                        .clone()
                });
            if let Some(response) = RequestFilter::apply_with_rate_limiter(
                security, &req, &peer_addr, rate_limiter.as_ref(),
            ) {
                return Ok(response);
            }
        }

        // Periodic rate limiter cleanup (every 1000 requests)
        let count = self.request_counter.fetch_add(1, Ordering::Relaxed);
        if count % 1000 == 0 {
            for entry in self.route_rate_limiters.iter() {
                entry.value().cleanup();
            }
        }

        // Check for test response (returns immediately, no upstream needed)
        if let Some(ref advanced) = route_match.route.action.advanced {
            if let Some(ref test_response) = advanced.test_response {
                self.metrics.connection_closed(route_id, Some(&ip_str));
                return Ok(Self::build_test_response(test_response));
            }
        }

@@ -330,7 +361,6 @@ impl HttpProxyService {
        // Check for static file serving
        if let Some(ref advanced) = route_match.route.action.advanced {
            if let Some(ref static_files) = advanced.static_files {
                self.metrics.connection_closed(route_id, Some(&ip_str));
                return Ok(Self::serve_static_file(&path, static_files));
            }
        }

@@ -339,7 +369,6 @@ impl HttpProxyService {
        let target = match route_match.target {
            Some(t) => t,
            None => {
                self.metrics.connection_closed(route_id, Some(&ip_str));
                return Ok(error_response(StatusCode::BAD_GATEWAY, "No target available"));
            }
        };

@@ -384,7 +413,7 @@ impl HttpProxyService {
                Some(q) => format!("{}?{}", path, q),
                None => path.clone(),
            };
            Self::apply_url_rewrite(&raw_path, &route_match.route)
            self.apply_url_rewrite(&raw_path, &route_match.route)
        };

        // Build upstream request - stream body instead of buffering

@@ -459,13 +488,11 @@ impl HttpProxyService {
            Ok(Err(e)) => {
                error!("Failed TLS connect to upstream {}:{}: {}", upstream.host, upstream.port, e);
                self.upstream_selector.connection_ended(&upstream_key);
                self.metrics.connection_closed(route_id, Some(&ip_str));
                return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend TLS unavailable"));
            }
            Err(_) => {
                error!("Upstream TLS connect timeout for {}:{}", upstream.host, upstream.port);
                self.upstream_selector.connection_ended(&upstream_key);
                self.metrics.connection_closed(route_id, Some(&ip_str));
                return Ok(error_response(StatusCode::GATEWAY_TIMEOUT, "Backend TLS connect timeout"));
            }
        }

@@ -481,13 +508,11 @@ impl HttpProxyService {
            Ok(Err(e)) => {
                error!("Failed to connect to upstream {}:{}: {}", upstream.host, upstream.port, e);
                self.upstream_selector.connection_ended(&upstream_key);
                self.metrics.connection_closed(route_id, Some(&ip_str));
                return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend unavailable"));
            }
            Err(_) => {
                error!("Upstream connect timeout for {}:{}", upstream.host, upstream.port);
                self.upstream_selector.connection_ended(&upstream_key);
                self.metrics.connection_closed(route_id, Some(&ip_str));
                return Ok(error_response(StatusCode::GATEWAY_TIMEOUT, "Backend connect timeout"));
            }
        }

@@ -523,7 +548,6 @@ impl HttpProxyService {
            Ok(h) => h,
            Err(e) => {
                error!("Upstream handshake failed: {}", e);
                self.metrics.connection_closed(route_id, Some(source_ip));
                return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend handshake failed"));
            }
        };

@@ -559,7 +583,6 @@ impl HttpProxyService {
            Ok(resp) => resp,
            Err(e) => {
                error!("Upstream request failed: {}", e);
                self.metrics.connection_closed(route_id, Some(source_ip));
                return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend request failed"));
            }
        };

@@ -585,7 +608,6 @@ impl HttpProxyService {
            Ok(h) => h,
            Err(e) => {
                error!("HTTP/2 upstream handshake failed: {}", e);
                self.metrics.connection_closed(route_id, Some(source_ip));
                return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend H2 handshake failed"));
            }
        };

@@ -620,7 +642,6 @@ impl HttpProxyService {
            Ok(resp) => resp,
            Err(e) => {
                error!("HTTP/2 upstream request failed: {}", e);
                self.metrics.connection_closed(route_id, Some(source_ip));
                return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend H2 request failed"));
            }
        };

@@ -631,8 +652,7 @@ impl HttpProxyService {
    /// Build the client-facing response from an upstream response, streaming the body.
    ///
    /// The response body is wrapped in a `CountingBody` that counts bytes as they
    /// stream from upstream to client. When the body is fully consumed (or dropped),
    /// it reports byte counts to the metrics collector and calls `connection_closed`.
    /// stream from upstream to client.
    async fn build_streaming_response(
        &self,
        upstream_response: Response<Incoming>,

@@ -661,11 +681,6 @@ impl HttpProxyService {
            Direction::Out,
        );

        // Close the connection metric now — the HTTP request/response cycle is done
        // from the proxy's perspective once we hand the streaming body to hyper.
        // Bytes will still be counted as they flow.
        self.metrics.connection_closed(route_id, Some(source_ip));

        let body: BoxBody<Bytes, hyper::Error> = BoxBody::new(counting_body);

        Ok(response.body(body).unwrap())

@@ -697,7 +712,6 @@ impl HttpProxyService {
            .unwrap_or("");
        if !allowed_origins.is_empty() && !allowed_origins.iter().any(|o| o == "*" || o == origin) {
            self.upstream_selector.connection_ended(upstream_key);
            self.metrics.connection_closed(route_id, Some(source_ip));
            return Ok(error_response(StatusCode::FORBIDDEN, "Origin not allowed"));
        }
    }

@@ -715,13 +729,11 @@ impl HttpProxyService {
            Ok(Err(e)) => {
                error!("WebSocket: failed TLS connect upstream {}:{}: {}", upstream.host, upstream.port, e);
                self.upstream_selector.connection_ended(upstream_key);
                self.metrics.connection_closed(route_id, Some(source_ip));
                return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend TLS unavailable"));
            }
            Err(_) => {
                error!("WebSocket: upstream TLS connect timeout for {}:{}", upstream.host, upstream.port);
                self.upstream_selector.connection_ended(upstream_key);
                self.metrics.connection_closed(route_id, Some(source_ip));
                return Ok(error_response(StatusCode::GATEWAY_TIMEOUT, "Backend TLS connect timeout"));
            }
        }

@@ -737,13 +749,11 @@ impl HttpProxyService {
            Ok(Err(e)) => {
                error!("WebSocket: failed to connect upstream {}:{}: {}", upstream.host, upstream.port, e);
                self.upstream_selector.connection_ended(upstream_key);
                self.metrics.connection_closed(route_id, Some(source_ip));
                return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend unavailable"));
            }
            Err(_) => {
                error!("WebSocket: upstream connect timeout for {}:{}", upstream.host, upstream.port);
                self.upstream_selector.connection_ended(upstream_key);
                self.metrics.connection_closed(route_id, Some(source_ip));
                return Ok(error_response(StatusCode::GATEWAY_TIMEOUT, "Backend connect timeout"));
            }
        }

@@ -836,7 +846,6 @@ impl HttpProxyService {
        if let Err(e) = upstream_stream.write_all(raw_request.as_bytes()).await {
            error!("WebSocket: failed to send upgrade request to upstream: {}", e);
            self.upstream_selector.connection_ended(upstream_key);
            self.metrics.connection_closed(route_id, Some(source_ip));
            return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend write failed"));
        }

@@ -847,7 +856,6 @@ impl HttpProxyService {
                Ok(0) => {
                    error!("WebSocket: upstream closed before completing handshake");
                    self.upstream_selector.connection_ended(upstream_key);
                    self.metrics.connection_closed(route_id, Some(source_ip));
                    return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend closed"));
                }
                Ok(_) => {

@@ -861,14 +869,12 @@ impl HttpProxyService {
                    if response_buf.len() > 8192 {
                        error!("WebSocket: upstream response headers too large");
                        self.upstream_selector.connection_ended(upstream_key);
                        self.metrics.connection_closed(route_id, Some(source_ip));
                        return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend response too large"));
                    }
                }
                Err(e) => {
                    error!("WebSocket: failed to read upstream response: {}", e);
                    self.upstream_selector.connection_ended(upstream_key);
                    self.metrics.connection_closed(route_id, Some(source_ip));
                    return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend read failed"));
                }
            }

@@ -886,7 +892,6 @@ impl HttpProxyService {
        if status_code != 101 {
            debug!("WebSocket: upstream rejected upgrade with status {}", status_code);
            self.upstream_selector.connection_ended(upstream_key);
            self.metrics.connection_closed(route_id, Some(source_ip));
            return Ok(error_response(
                StatusCode::from_u16(status_code).unwrap_or(StatusCode::BAD_GATEWAY),
                "WebSocket upgrade rejected by backend",

@@ -930,9 +935,6 @@ impl HttpProxyService {
                Err(e) => {
                    debug!("WebSocket: client upgrade failed: {}", e);
                    upstream_selector.connection_ended(&upstream_key_owned);
                    if let Some(ref rid) = route_id_owned {
                        metrics.connection_closed(Some(rid.as_str()), Some(&source_ip_owned));
                    }
                    return;
                }
            };

@@ -1037,7 +1039,6 @@ impl HttpProxyService {
            upstream_selector.connection_ended(&upstream_key_owned);
            if let Some(ref rid) = route_id_owned {
                metrics.record_bytes(bytes_in, bytes_out, Some(rid.as_str()), Some(&source_ip_owned));
                metrics.connection_closed(Some(rid.as_str()), Some(&source_ip_owned));
            }
        });

@@ -1067,8 +1068,8 @@ impl HttpProxyService {
        response.body(BoxBody::new(body)).unwrap()
    }

    /// Apply URL rewriting rules from route config.
    fn apply_url_rewrite(path: &str, route: &rustproxy_config::RouteConfig) -> String {
    /// Apply URL rewriting rules from route config, using the compiled regex cache.
    fn apply_url_rewrite(&self, path: &str, route: &rustproxy_config::RouteConfig) -> String {
        let rewrite = match route.action.advanced.as_ref()
            .and_then(|a| a.url_rewrite.as_ref())
        {

@@ -1087,10 +1088,20 @@ impl HttpProxyService {
            (path.to_string(), String::new())
        };

        // Look up or compile the regex, caching for future requests
        let cached = self.regex_cache.get(&rewrite.pattern);
        if let Some(re) = cached {
            let result = re.replace_all(&subject, rewrite.target.as_str());
            return format!("{}{}", result, suffix);
        }

        // Not cached — compile and insert
        match Regex::new(&rewrite.pattern) {
            Ok(re) => {
                let result = re.replace_all(&subject, rewrite.target.as_str());
                format!("{}{}", result, suffix)
                let out = format!("{}{}", result, suffix);
                self.regex_cache.insert(rewrite.pattern.clone(), re);
                out
            }
            Err(e) => {
                warn!("Invalid URL rewrite pattern '{}': {}", rewrite.pattern, e);

@@ -1217,6 +1228,9 @@ impl Default for HttpProxyService {
            metrics: Arc::new(MetricsCollector::new()),
            upstream_selector: UpstreamSelector::new(),
            connect_timeout: DEFAULT_CONNECT_TIMEOUT,
            route_rate_limiters: Arc::new(DashMap::new()),
            request_counter: AtomicU64::new(0),
            regex_cache: DashMap::new(),
        }
    }
}

@@ -115,10 +115,18 @@ impl UpstreamSelector {
    /// Record that a connection to the given host has ended.
    pub fn connection_ended(&self, host: &str) {
        if let Some(counter) = self.active_connections.get(host) {
            let prev = counter.value().fetch_sub(1, Ordering::Relaxed);
            // Guard against underflow (shouldn't happen, but be safe)
            let prev = counter.value().load(Ordering::Relaxed);
            if prev == 0 {
                counter.value().store(0, Ordering::Relaxed);
                // Already at zero — just clean up the entry
                drop(counter);
                self.active_connections.remove(host);
                return;
            }
            counter.value().fetch_sub(1, Ordering::Relaxed);
            // Clean up zero-count entries to prevent memory growth
            if prev <= 1 {
                drop(counter);
                self.active_connections.remove(host);
            }
        }
    }

@@ -204,6 +212,31 @@ mod tests {
        assert_eq!(r4.host, "a");
    }

    #[test]
    fn test_connection_tracking_cleanup() {
        let selector = UpstreamSelector::new();

        selector.connection_started("backend:8080");
        selector.connection_started("backend:8080");
        assert_eq!(
            selector.active_connections.get("backend:8080").unwrap().load(Ordering::Relaxed),
            2
        );

        selector.connection_ended("backend:8080");
        assert_eq!(
            selector.active_connections.get("backend:8080").unwrap().load(Ordering::Relaxed),
            1
        );

        // Last connection ends — entry should be removed entirely
        selector.connection_ended("backend:8080");
        assert!(selector.active_connections.get("backend:8080").is_none());

        // Ending on a non-existent key should not panic
        selector.connection_ended("nonexistent:9999");
    }

    #[test]
    fn test_ip_hash_consistent() {
        let selector = UpstreamSelector::new();

@@ -1,5 +1,6 @@
use dashmap::DashMap;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Mutex;

@@ -196,6 +197,12 @@ impl MetricsCollector {
            if val <= 1 {
                drop(counter);
                self.ip_connections.remove(ip);
                // Evict all per-IP tracking data for this IP
                self.ip_total_connections.remove(ip);
                self.ip_bytes_in.remove(ip);
                self.ip_bytes_out.remove(ip);
                self.ip_pending_tp.remove(ip);
                self.ip_throughput.remove(ip);
            }
        }
    }

@@ -342,6 +349,17 @@ impl MetricsCollector {
        }
    }

    /// Remove per-route metrics for route IDs that are no longer active.
    /// Call this after `update_routes()` to prune stale entries.
    pub fn retain_routes(&self, active_route_ids: &HashSet<String>) {
        self.route_connections.retain(|k, _| active_route_ids.contains(k));
        self.route_total_connections.retain(|k, _| active_route_ids.contains(k));
        self.route_bytes_in.retain(|k, _| active_route_ids.contains(k));
        self.route_bytes_out.retain(|k, _| active_route_ids.contains(k));
        self.route_pending_tp.retain(|k, _| active_route_ids.contains(k));
        self.route_throughput.retain(|k, _| active_route_ids.contains(k));
    }

    /// Get current active connection count.
    pub fn active_connections(&self) -> u64 {
        self.active_connections.load(Ordering::Relaxed)

@@ -633,6 +651,42 @@ mod tests {
        assert!(collector.ip_connections.get("1.2.3.4").is_none());
    }

    #[test]
    fn test_per_ip_full_eviction_on_last_close() {
        let collector = MetricsCollector::with_retention(60);

        // Open connections from two IPs
        collector.connection_opened(Some("route-a"), Some("10.0.0.1"));
        collector.connection_opened(Some("route-a"), Some("10.0.0.1"));
        collector.connection_opened(Some("route-b"), Some("10.0.0.2"));

        // Record bytes to populate per-IP DashMaps
        collector.record_bytes(100, 200, Some("route-a"), Some("10.0.0.1"));
        collector.record_bytes(300, 400, Some("route-b"), Some("10.0.0.2"));
        collector.sample_all();

        // Verify per-IP data exists
        assert!(collector.ip_total_connections.get("10.0.0.1").is_some());
        assert!(collector.ip_bytes_in.get("10.0.0.1").is_some());
        assert!(collector.ip_throughput.get("10.0.0.1").is_some());

        // Close all connections for 10.0.0.1
        collector.connection_closed(Some("route-a"), Some("10.0.0.1"));
        collector.connection_closed(Some("route-a"), Some("10.0.0.1"));

        // All per-IP data for 10.0.0.1 should be evicted
        assert!(collector.ip_connections.get("10.0.0.1").is_none());
        assert!(collector.ip_total_connections.get("10.0.0.1").is_none());
        assert!(collector.ip_bytes_in.get("10.0.0.1").is_none());
        assert!(collector.ip_bytes_out.get("10.0.0.1").is_none());
        assert!(collector.ip_pending_tp.get("10.0.0.1").is_none());
        assert!(collector.ip_throughput.get("10.0.0.1").is_none());

        // 10.0.0.2 should still have data
        assert!(collector.ip_connections.get("10.0.0.2").is_some());
        assert!(collector.ip_total_connections.get("10.0.0.2").is_some());
    }

    #[test]
    fn test_http_request_tracking() {
        let collector = MetricsCollector::with_retention(60);

@@ -650,6 +704,35 @@
        assert_eq!(snapshot.http_requests_per_sec, 3);
    }

    #[test]
    fn test_retain_routes_prunes_stale() {
        let collector = MetricsCollector::with_retention(60);

        // Create metrics for 3 routes
        collector.connection_opened(Some("route-a"), None);
        collector.connection_opened(Some("route-b"), None);
        collector.connection_opened(Some("route-c"), None);
        collector.record_bytes(100, 200, Some("route-a"), None);
        collector.record_bytes(100, 200, Some("route-b"), None);
        collector.record_bytes(100, 200, Some("route-c"), None);
        collector.sample_all();

        // Now "route-b" is removed from config
        let active = HashSet::from(["route-a".to_string(), "route-c".to_string()]);
        collector.retain_routes(&active);

        // route-b entries should be gone
        assert!(collector.route_connections.get("route-b").is_none());
        assert!(collector.route_total_connections.get("route-b").is_none());
        assert!(collector.route_bytes_in.get("route-b").is_none());
        assert!(collector.route_bytes_out.get("route-b").is_none());
        assert!(collector.route_throughput.get("route-b").is_none());

        // route-a and route-c should still exist
        assert!(collector.route_total_connections.get("route-a").is_some());
        assert!(collector.route_total_connections.get("route-c").is_some());
    }

    #[test]
    fn test_throughput_history_in_snapshot() {
        let collector = MetricsCollector::with_retention(60);

@@ -95,10 +95,11 @@ impl ConnectionTracker {
    pub fn connection_closed(&self, ip: &IpAddr) {
        if let Some(counter) = self.active.get(ip) {
            let prev = counter.value().fetch_sub(1, Ordering::Relaxed);
            // Clean up zero entries
            // Clean up zero entries to prevent memory growth
            if prev <= 1 {
                drop(counter);
                self.active.remove(ip);
                self.timestamps.remove(ip);
            }
        }
    }

@@ -205,10 +206,13 @@ impl ConnectionTracker {
            let zombies = tracker.scan_zombies();
            if !zombies.is_empty() {
                warn!(
                    "Detected {} zombie connection(s): {:?}",
                    "Cleaning up {} zombie connection(s): {:?}",
                    zombies.len(),
                    zombies
                );
                for id in &zombies {
                    tracker.unregister_connection(*id);
                }
            }
        }
    }

@@ -304,6 +308,30 @@ mod tests {
        assert_eq!(tracker.tracked_ips(), 1);
    }

    #[test]
    fn test_timestamps_cleaned_on_last_close() {
        let tracker = ConnectionTracker::new(None, Some(100));
        let ip: IpAddr = "10.0.0.1".parse().unwrap();

        // try_accept populates the timestamps map (when rate limiting is enabled)
        assert!(tracker.try_accept(&ip));
        tracker.connection_opened(&ip);
        assert!(tracker.try_accept(&ip));
        tracker.connection_opened(&ip);

        // Timestamps should exist
        assert!(tracker.timestamps.get(&ip).is_some());

        // Close one connection — timestamps should still exist
        tracker.connection_closed(&ip);
        assert!(tracker.timestamps.get(&ip).is_some());

        // Close last connection — timestamps should be cleaned up
        tracker.connection_closed(&ip);
        assert!(tracker.timestamps.get(&ip).is_none());
        assert!(tracker.active.get(&ip).is_none());
    }

    #[test]
    fn test_register_unregister_connection() {
        let tracker = ConnectionTracker::new(None, None);

@@ -22,7 +22,6 @@ struct ConnectionGuard {
    metrics: Arc<MetricsCollector>,
    route_id: Option<String>,
    source_ip: Option<String>,
    disarmed: bool,
}

impl ConnectionGuard {

@@ -31,22 +30,13 @@ impl ConnectionGuard {
            metrics,
            route_id: route_id.map(|s| s.to_string()),
            source_ip: source_ip.map(|s| s.to_string()),
            disarmed: false,
        }
    }

    /// Disarm the guard — prevents the Drop from running.
    /// Use when handing off to a path that manages its own cleanup (e.g., HTTP proxy).
    fn disarm(mut self) {
        self.disarmed = true;
    }
}

impl Drop for ConnectionGuard {
    fn drop(&mut self) {
        if !self.disarmed {
            self.metrics.connection_closed(self.route_id.as_deref(), self.source_ip.as_deref());
        }
        self.metrics.connection_closed(self.route_id.as_deref(), self.source_ip.as_deref());
    }
}

@@ -94,6 +84,9 @@ pub struct ConnectionConfig {
    pub accept_proxy_protocol: bool,
    /// Whether to send PROXY protocol
    pub send_proxy_protocol: bool,
    /// Trusted IPs that may send PROXY protocol headers.
    /// When non-empty, only connections from these IPs will have PROXY headers parsed.
    pub proxy_ips: Vec<std::net::IpAddr>,
}

impl Default for ConnectionConfig {

@@ -111,6 +104,7 @@ impl Default for ConnectionConfig {
            extended_keep_alive_lifetime_ms: None,
            accept_proxy_protocol: false,
            send_proxy_protocol: false,
            proxy_ips: Vec::new(),
        }
    }
}

@@ -425,7 +419,41 @@ impl TcpListenerManager {

        stream.set_nodelay(true)?;

        // Extract source IP once for all metric calls
        // --- PROXY protocol: must happen BEFORE ip_str and fast path ---
        // Only parse PROXY headers from trusted proxy IPs (security).
        // Non-proxy connections skip the peek entirely (no latency cost).
        let mut effective_peer_addr = peer_addr;
        if !conn_config.proxy_ips.is_empty() && conn_config.proxy_ips.contains(&peer_addr.ip()) {
            // Trusted proxy IP — peek for PROXY protocol header
            let mut proxy_peek = vec![0u8; 256];
            let pn = match tokio::time::timeout(
                std::time::Duration::from_millis(conn_config.initial_data_timeout_ms),
                stream.peek(&mut proxy_peek),
            ).await {
                Ok(Ok(n)) => n,
                Ok(Err(e)) => return Err(e.into()),
                Err(_) => return Err("Initial data timeout (proxy protocol peek)".into()),
            };

            if pn > 0 && crate::proxy_protocol::is_proxy_protocol_v1(&proxy_peek[..pn]) {
                match crate::proxy_protocol::parse_v1(&proxy_peek[..pn]) {
                    Ok((header, consumed)) => {
                        debug!("PROXY protocol: real client {} -> {}", header.source_addr, header.dest_addr);
                        effective_peer_addr = header.source_addr;
                        // Consume the proxy protocol header bytes
                        let mut discard = vec![0u8; consumed];
                        stream.read_exact(&mut discard).await?;
                    }
                    Err(e) => {
                        debug!("Failed to parse PROXY protocol header: {}", e);
                        // Not a PROXY protocol header, continue normally
                    }
                }
            }
        }
        let peer_addr = effective_peer_addr;

        // Extract source IP once for all metric calls (reflects real client IP after PROXY parsing)
        let ip_str = peer_addr.ip().to_string();

        // === Fast path: try port-only matching before peeking at data ===

@@ -558,37 +586,6 @@ impl TcpListenerManager {
        }
        // === End fast path ===

        // Handle PROXY protocol if configured
        let mut effective_peer_addr = peer_addr;
        if conn_config.accept_proxy_protocol {
            let mut proxy_peek = vec![0u8; 256];
            let pn = match tokio::time::timeout(
                std::time::Duration::from_millis(conn_config.initial_data_timeout_ms),
                stream.peek(&mut proxy_peek),
            ).await {
                Ok(Ok(n)) => n,
                Ok(Err(e)) => return Err(e.into()),
                Err(_) => return Err("Initial data timeout (proxy protocol peek)".into()),
            };

            if pn > 0 && crate::proxy_protocol::is_proxy_protocol_v1(&proxy_peek[..pn]) {
                match crate::proxy_protocol::parse_v1(&proxy_peek[..pn]) {
                    Ok((header, consumed)) => {
                        debug!("PROXY protocol: real client {} -> {}", header.source_addr, header.dest_addr);
                        effective_peer_addr = header.source_addr;
                        // Consume the proxy protocol header bytes
                        let mut discard = vec![0u8; consumed];
                        stream.read_exact(&mut discard).await?;
                    }
                    Err(e) => {
                        debug!("Failed to parse PROXY protocol header: {}", e);
                        // Not a PROXY protocol header, continue normally
                    }
                }
            }
        }
        let peer_addr = effective_peer_addr;

        // Peek at initial bytes with timeout
        let mut peek_buf = vec![0u8; 4096];
        let n = match tokio::time::timeout(

@@ -844,8 +841,6 @@ impl TcpListenerManager {
                "TLS Terminate + HTTP: {} -> {}:{} (domain: {:?})",
                peer_addr, target_host, target_port, domain
            );
            // HTTP proxy manages its own per-request metrics — disarm TCP-level guard
            _conn_guard.disarm();
            http_proxy.handle_io(buf_stream, peer_addr, port, cancel.clone()).await;
        } else {
            debug!(

@@ -917,7 +912,6 @@ impl TcpListenerManager {
                "TLS Terminate+Reencrypt + HTTP: {} (domain: {:?})",
                peer_addr, domain
            );
            _conn_guard.disarm();
            http_proxy.handle_io(buf_stream, peer_addr, port, cancel.clone()).await;
        } else {
            // Non-HTTP: TLS-to-TLS tunnel (existing behavior for raw TCP protocols)

@@ -937,8 +931,6 @@ impl TcpListenerManager {
        if is_http {
            // Plain HTTP - use HTTP proxy for request-level routing
            debug!("HTTP proxy: {} on port {}", peer_addr, port);
            // HTTP proxy manages its own per-request metrics — disarm TCP-level guard
            _conn_guard.disarm();
            http_proxy.handle_connection(stream, peer_addr, port, cancel.clone()).await;
            Ok(())
        } else {

@@ -27,7 +27,7 @@
pub mod challenge_server;
pub mod management;

use std::collections::HashMap;
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use std::time::Instant;

@@ -217,6 +217,10 @@ impl RustProxy {
            extended_keep_alive_lifetime_ms: options.extended_keep_alive_lifetime,
            accept_proxy_protocol: options.accept_proxy_protocol.unwrap_or(false),
            send_proxy_protocol: options.send_proxy_protocol.unwrap_or(false),
            proxy_ips: options.proxy_ips.as_deref().unwrap_or(&[])
                .iter()
                .filter_map(|s| s.parse::<std::net::IpAddr>().ok())
                .collect(),
        }
    }

@@ -565,6 +569,12 @@ impl RustProxy {
            vec![]
        };

        // Prune per-route metrics for route IDs that no longer exist
        let active_route_ids: HashSet<String> = routes.iter()
            .filter_map(|r| r.id.clone())
            .collect();
        self.metrics.retain_routes(&active_route_ids);

        // Atomically swap the route table
        let new_manager = Arc::new(new_manager);
        self.route_table.store(Arc::clone(&new_manager));

@@ -151,11 +151,28 @@ tap.test('TCP forward - real-time byte tracking', async (tools) => {
  console.log(`TCP forward (during) — recent throughput: in=${tpDuring.in}, out=${tpDuring.out}`);
  expect(tpDuring.in + tpDuring.out).toBeGreaterThan(0);

  // ── v25.2.0: Per-IP tracking (TCP connections) ──
  // Must check WHILE connection is active — per-IP data is evicted on last close
  const byIP = mDuring.connections.byIP();
  console.log('TCP forward — connections byIP:', Array.from(byIP.entries()));
  expect(byIP.size).toBeGreaterThan(0);

  const topIPs = mDuring.connections.topIPs(10);
  console.log('TCP forward — topIPs:', topIPs);
  expect(topIPs.length).toBeGreaterThan(0);
  expect(topIPs[0].ip).toBeTruthy();

  // ── v25.2.0: Throughput history ──
  const history = mDuring.throughput.history(10);
  console.log('TCP forward — throughput history length:', history.length);
  expect(history.length).toBeGreaterThan(0);
  expect(history[0].timestamp).toBeGreaterThan(0);

  // Close connection
  client.destroy();
  await tools.delayFor(500);

  // Final check
  // Final check — totals persist even after connection close
  await pollMetrics(proxy);
  const m = proxy.getMetrics();
  const bytesIn = m.totals.bytesIn();

@@ -168,21 +185,10 @@ tap.test('TCP forward - real-time byte tracking', async (tools) => {
  const byRoute = m.throughput.byRoute();
  console.log('TCP forward — throughput byRoute:', Array.from(byRoute.entries()));

  // ── v25.2.0: Per-IP tracking (TCP connections) ──
  const byIP = m.connections.byIP();
  console.log('TCP forward — connections byIP:', Array.from(byIP.entries()));
  expect(byIP.size).toBeGreaterThan(0);

  const topIPs = m.connections.topIPs(10);
  console.log('TCP forward — topIPs:', topIPs);
  expect(topIPs.length).toBeGreaterThan(0);
  expect(topIPs[0].ip).toBeTruthy();

  // ── v25.2.0: Throughput history ──
  const history = m.throughput.history(10);
  console.log('TCP forward — throughput history length:', history.length);
  expect(history.length).toBeGreaterThan(0);
  expect(history[0].timestamp).toBeGreaterThan(0);
  // After close, per-IP data should be evicted (memory leak fix)
  const byIPAfter = m.connections.byIP();
  console.log('TCP forward — connections byIP after close:', Array.from(byIPAfter.entries()));
  expect(byIPAfter.size).toEqual(0);

  await proxy.stop();
  await tools.delayFor(200);

@@ -3,6 +3,6 @@
 */
export const commitinfo = {
  name: '@push.rocks/smartproxy',
  version: '25.7.2',
  version: '25.7.8',
  description: 'A powerful proxy package with unified route-based configuration for high traffic management. Features include SSL/TLS support, flexible routing patterns, WebSocket handling, advanced security options, and automatic ACME certificate management.'
}

@@ -409,6 +409,7 @@ export class SmartProxy extends plugins.EventEmitter {
      keepAliveTreatment: this.settings.keepAliveTreatment,
      keepAliveInactivityMultiplier: this.settings.keepAliveInactivityMultiplier,
      extendedKeepAliveLifetime: this.settings.extendedKeepAliveLifetime,
      proxyIps: this.settings.proxyIPs,
      acceptProxyProtocol: this.settings.acceptProxyProtocol,
      sendProxyProtocol: this.settings.sendProxyProtocol,
      metrics: this.settings.metrics,