fix(rustproxy): prune stale per-route metrics, add per-route rate limiter caching and regex cache, and improve connection tracking cleanup to prevent memory growth

2026-02-19 08:48:46 +00:00
parent b4b8bd925d
commit 53d73c7dc6
7 changed files with 219 additions and 12 deletions
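The per-route caching in this commit leans on the `RateLimiter` type from `rustproxy_security`, whose source is not among the hunks shown below; the diff only establishes that `RateLimiter::new(max_requests, window)` and `cleanup()` exist. A minimal sketch of a plausible shape, assuming a fixed-window limiter keyed by client; the `check` method, the bucket layout, and the exact parameter types are guesses, not part of this commit:

```rust
use std::collections::HashMap;
use std::sync::Mutex;
use std::time::{Duration, Instant};

/// Hypothetical fixed-window limiter with the two calls the diff relies on:
/// `new(max_requests, window)` and `cleanup()`.
pub struct RateLimiter {
    max_requests: u64,
    window: Duration,
    // Per-client counters: (window start, requests seen in that window).
    buckets: Mutex<HashMap<String, (Instant, u64)>>,
}

impl RateLimiter {
    pub fn new(max_requests: u64, window: Duration) -> Self {
        Self {
            max_requests,
            window,
            buckets: Mutex::new(HashMap::new()),
        }
    }

    /// Returns true if the request identified by `key` (e.g. a peer IP) is allowed.
    pub fn check(&self, key: &str) -> bool {
        let now = Instant::now();
        let mut buckets = self.buckets.lock().unwrap();
        let entry = buckets.entry(key.to_string()).or_insert((now, 0));
        if now.duration_since(entry.0) > self.window {
            *entry = (now, 0); // window expired, start a new one
        }
        entry.1 += 1;
        entry.1 <= self.max_requests
    }

    /// Drops buckets whose window has expired; the proxy now calls this every
    /// 1000 requests so per-route limiters cannot grow without bound.
    pub fn cleanup(&self) {
        let now = Instant::now();
        let mut buckets = self.buckets.lock().unwrap();
        buckets.retain(|_, (start, _)| now.duration_since(*start) <= self.window);
    }
}
```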

View File

@@ -9,6 +9,7 @@ use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use bytes::Bytes;
use dashmap::DashMap;
use http_body_util::{BodyExt, Full, combinators::BoxBody};
use hyper::body::Incoming;
use hyper::{Request, Response, StatusCode};
@@ -23,6 +24,7 @@ use std::task::{Context, Poll};
use rustproxy_routing::RouteManager;
use rustproxy_metrics::MetricsCollector;
use rustproxy_security::RateLimiter;
use crate::counting_body::{CountingBody, Direction};
use crate::request_filter::RequestFilter;
@@ -164,6 +166,12 @@ pub struct HttpProxyService {
upstream_selector: UpstreamSelector,
/// Timeout for connecting to upstream backends.
connect_timeout: std::time::Duration,
/// Per-route rate limiters (keyed by route ID).
route_rate_limiters: Arc<DashMap<String, Arc<RateLimiter>>>,
/// Request counter for periodic rate limiter cleanup.
request_counter: AtomicU64,
/// Cache of compiled URL rewrite regexes (keyed by pattern string).
regex_cache: DashMap<String, Regex>,
}
impl HttpProxyService {
@@ -173,6 +181,9 @@ impl HttpProxyService {
metrics,
upstream_selector: UpstreamSelector::new(),
connect_timeout: DEFAULT_CONNECT_TIMEOUT,
route_rate_limiters: Arc::new(DashMap::new()),
request_counter: AtomicU64::new(0),
regex_cache: DashMap::new(),
}
}
@@ -187,6 +198,9 @@ impl HttpProxyService {
metrics,
upstream_selector: UpstreamSelector::new(),
connect_timeout,
route_rate_limiters: Arc::new(DashMap::new()),
request_counter: AtomicU64::new(0),
regex_cache: DashMap::new(),
}
}
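The per-route limiter map is keyed by route ID and populated, in the next hunk, from three fields of the route's security config: `rate_limit.enabled`, `rate_limit.max_requests`, and `rate_limit.window`. Those structs live in `rustproxy_config` and are not part of this diff; a minimal sketch of the assumed shape (every name beyond those three fields, and the `Duration` type for the window, is a guess):

```rust
use std::time::Duration;

/// Hypothetical mirror of the config the proxy reads; only the three fields
/// used by this commit are grounded in the diff.
pub struct SecurityConfig {
    pub rate_limit: Option<RateLimitConfig>,
    // ... IP filtering and auth settings would also live here (not shown).
}

pub struct RateLimitConfig {
    pub enabled: bool,
    pub max_requests: u64,
    pub window: Duration,
}
```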
@@ -312,11 +326,31 @@ impl HttpProxyService {
// Apply request filters (IP check, rate limiting, auth)
if let Some(ref security) = route_match.route.security {
- if let Some(response) = RequestFilter::apply(security, &req, &peer_addr) {
// Look up or create a shared rate limiter for this route
let rate_limiter = security.rate_limit.as_ref()
.filter(|rl| rl.enabled)
.map(|rl| {
let route_key = route_id.unwrap_or("__default__").to_string();
self.route_rate_limiters
.entry(route_key)
.or_insert_with(|| Arc::new(RateLimiter::new(rl.max_requests, rl.window)))
.clone()
});
if let Some(response) = RequestFilter::apply_with_rate_limiter(
security, &req, &peer_addr, rate_limiter.as_ref(),
) {
return Ok(response);
}
}
// Periodic rate limiter cleanup (every 1000 requests)
let count = self.request_counter.fetch_add(1, Ordering::Relaxed);
if count % 1000 == 0 {
for entry in self.route_rate_limiters.iter() {
entry.value().cleanup();
}
}
// Check for test response (returns immediately, no upstream needed)
if let Some(ref advanced) = route_match.route.action.advanced {
if let Some(ref test_response) = advanced.test_response {
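`RequestFilter::apply_with_rate_limiter` replaces the old `RequestFilter::apply` call and presumably lives in one of the five changed files not shown in this view. The call site pins down its shape: it takes the route's security config, the request, the peer address, and an optional shared limiter, and returns a ready-made rejection response when the request should be blocked. A simplified, self-contained sketch of that shape; the stub types, the body, and the `Full<Bytes>` response type are assumptions:

```rust
use std::net::SocketAddr;
use std::sync::Arc;

use bytes::Bytes;
use http_body_util::Full;
use hyper::body::Incoming;
use hyper::{Request, Response, StatusCode};

// Local stand-ins so the sketch compiles on its own; the real types live in
// rustproxy_config and rustproxy_security.
pub struct SecurityConfig {
    pub allowed_ips: Vec<std::net::IpAddr>,
}
pub struct RateLimiter;
impl RateLimiter {
    pub fn check(&self, _key: &str) -> bool {
        true
    }
}

pub struct RequestFilter;

impl RequestFilter {
    /// Returns Some(response) when the request must be rejected, None to continue.
    /// The limiter is created once per route by the caller and passed in, so its
    /// window state persists across requests instead of resetting on every call.
    pub fn apply_with_rate_limiter(
        security: &SecurityConfig,
        _req: &Request<Incoming>,
        peer_addr: &SocketAddr,
        rate_limiter: Option<&Arc<RateLimiter>>,
    ) -> Option<Response<Full<Bytes>>> {
        // IP allow-list check, standing in for whatever the old apply() did.
        if !security.allowed_ips.is_empty()
            && !security.allowed_ips.contains(&peer_addr.ip())
        {
            return Some(status_response(StatusCode::FORBIDDEN));
        }
        // Rate limiting against the shared, per-route limiter.
        if let Some(limiter) = rate_limiter {
            if !limiter.check(&peer_addr.ip().to_string()) {
                return Some(status_response(StatusCode::TOO_MANY_REQUESTS));
            }
        }
        None
    }
}

fn status_response(status: StatusCode) -> Response<Full<Bytes>> {
    Response::builder()
        .status(status)
        .body(Full::new(Bytes::new()))
        .unwrap()
}
```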
@@ -379,7 +413,7 @@ impl HttpProxyService {
Some(q) => format!("{}?{}", path, q),
None => path.clone(),
};
- Self::apply_url_rewrite(&raw_path, &route_match.route)
self.apply_url_rewrite(&raw_path, &route_match.route)
};
// Build upstream request - stream body instead of buffering
@@ -1034,8 +1068,8 @@ impl HttpProxyService {
response.body(BoxBody::new(body)).unwrap()
}
- /// Apply URL rewriting rules from route config.
- fn apply_url_rewrite(path: &str, route: &rustproxy_config::RouteConfig) -> String {
/// Apply URL rewriting rules from route config, using the compiled regex cache.
fn apply_url_rewrite(&self, path: &str, route: &rustproxy_config::RouteConfig) -> String {
let rewrite = match route.action.advanced.as_ref()
.and_then(|a| a.url_rewrite.as_ref())
{
@@ -1054,10 +1088,20 @@ impl HttpProxyService {
(path.to_string(), String::new())
};
// Look up or compile the regex, caching for future requests
let cached = self.regex_cache.get(&rewrite.pattern);
if let Some(re) = cached {
let result = re.replace_all(&subject, rewrite.target.as_str());
return format!("{}{}", result, suffix);
}
// Not cached — compile and insert
match Regex::new(&rewrite.pattern) {
Ok(re) => {
let result = re.replace_all(&subject, rewrite.target.as_str());
format!("{}{}", result, suffix)
let out = format!("{}{}", result, suffix);
self.regex_cache.insert(rewrite.pattern.clone(), re);
out
}
Err(e) => {
warn!("Invalid URL rewrite pattern '{}': {}", rewrite.pattern, e);
@@ -1184,6 +1228,9 @@ impl Default for HttpProxyService {
metrics: Arc::new(MetricsCollector::new()),
upstream_selector: UpstreamSelector::new(),
connect_timeout: DEFAULT_CONNECT_TIMEOUT,
route_rate_limiters: Arc::new(DashMap::new()),
request_counter: AtomicU64::new(0),
regex_cache: DashMap::new(),
}
}
}

View File

@@ -115,10 +115,18 @@ impl UpstreamSelector {
/// Record that a connection to the given host has ended.
pub fn connection_ended(&self, host: &str) {
if let Some(counter) = self.active_connections.get(host) {
- let prev = counter.value().fetch_sub(1, Ordering::Relaxed);
// Guard against underflow (shouldn't happen, but be safe)
let prev = counter.value().load(Ordering::Relaxed);
if prev == 0 {
- counter.value().store(0, Ordering::Relaxed);
// Already at zero — just clean up the entry
drop(counter);
self.active_connections.remove(host);
return;
}
counter.value().fetch_sub(1, Ordering::Relaxed);
// Clean up zero-count entries to prevent memory growth
if prev <= 1 {
drop(counter);
self.active_connections.remove(host);
}
}
}
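The decrement is now a `load` followed by a separate `fetch_sub`, so two racing `connection_ended` calls for the same host can read the same `prev`; in the balanced case the worst outcome is a zero-count entry that lingers until a later call takes the `prev == 0` branch, so cleanup still converges. If a single-step decrement-and-clamp were ever wanted, `fetch_update` with `saturating_sub` is one way to get it; a sketch, not part of this commit:

```rust
use std::sync::atomic::{AtomicU64, Ordering};

/// Hypothetical alternative: decrement and clamp at zero in one atomic step,
/// returning the value after the decrement so the caller can decide whether
/// to drop the map entry.
fn decrement_clamped(counter: &AtomicU64) -> u64 {
    counter
        .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |v| Some(v.saturating_sub(1)))
        // The closure never returns None, so fetch_update always yields Ok(previous).
        .map(|prev| prev.saturating_sub(1))
        .unwrap_or(0)
}
```

The caller would then remove the DashMap entry whenever the returned value is zero.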
@@ -204,6 +212,31 @@ mod tests {
assert_eq!(r4.host, "a");
}
#[test]
fn test_connection_tracking_cleanup() {
let selector = UpstreamSelector::new();
selector.connection_started("backend:8080");
selector.connection_started("backend:8080");
assert_eq!(
selector.active_connections.get("backend:8080").unwrap().load(Ordering::Relaxed),
2
);
selector.connection_ended("backend:8080");
assert_eq!(
selector.active_connections.get("backend:8080").unwrap().load(Ordering::Relaxed),
1
);
// Last connection ends — entry should be removed entirely
selector.connection_ended("backend:8080");
assert!(selector.active_connections.get("backend:8080").is_none());
// Ending on a non-existent key should not panic
selector.connection_ended("nonexistent:9999");
}
#[test]
fn test_ip_hash_consistent() {
let selector = UpstreamSelector::new();