fix(rustproxy-http,rustproxy-metrics): reduce per-frame metrics overhead by batching body byte accounting

2026-03-17 12:22:51 +00:00
parent 8f6bb30367
commit 92d7113c6c
4 changed files with 92 additions and 49 deletions
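Note: only the rustproxy-metrics side of the change is rendered below; the batching half named in the commit title presumably lives in rustproxy-http's CountingBody, in one of the other changed files not shown on this page. As a rough sketch of what per-frame batching can look like (every name below except record_bytes is hypothetical, not the actual rustproxy-http code):

// Hypothetical sketch: accumulate per-frame byte counts locally and flush
// them to the collector in batches, so record_bytes() runs once per
// threshold crossing instead of once per body frame.
struct ByteBatch {
    pending: u64,          // bytes seen since the last flush
    flush_threshold: u64,  // e.g. 64 * 1024
}

impl ByteBatch {
    fn new(flush_threshold: u64) -> Self {
        Self { pending: 0, flush_threshold }
    }

    /// Called once per body frame; returns a batch to report when full.
    fn add(&mut self, frame_len: u64) -> Option<u64> {
        self.pending += frame_len;
        (self.pending >= self.flush_threshold).then(|| std::mem::take(&mut self.pending))
    }

    /// Called at end-of-stream (or in Drop) so trailing bytes are not lost.
    fn take_remainder(&mut self) -> Option<u64> {
        (self.pending > 0).then(|| std::mem::take(&mut self.pending))
    }
}

Each flushed batch would then be reported as record_bytes(batch, 0, ...) or record_bytes(0, batch, ...) depending on direction, which is what lets the collector below assume one direction is always zero per call.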

@@ -259,40 +259,49 @@ impl MetricsCollector {
     /// Called per-chunk in the TCP copy loop. Only touches AtomicU64 counters —
     /// no Mutex is taken. The throughput trackers are fed during `sample_all()`.
     pub fn record_bytes(&self, bytes_in: u64, bytes_out: u64, route_id: Option<&str>, source_ip: Option<&str>) {
-        self.total_bytes_in.fetch_add(bytes_in, Ordering::Relaxed);
-        self.total_bytes_out.fetch_add(bytes_out, Ordering::Relaxed);
-        // Accumulate into lock-free pending throughput counters
-        self.global_pending_tp_in.fetch_add(bytes_in, Ordering::Relaxed);
-        self.global_pending_tp_out.fetch_add(bytes_out, Ordering::Relaxed);
+        // Short-circuit: only touch counters for the direction that has data.
+        // CountingBody always calls with one direction zero — skipping the zero
+        // direction avoids ~50% of DashMap shard-locked reads per call.
+        if bytes_in > 0 {
+            self.total_bytes_in.fetch_add(bytes_in, Ordering::Relaxed);
+            self.global_pending_tp_in.fetch_add(bytes_in, Ordering::Relaxed);
+        }
+        if bytes_out > 0 {
+            self.total_bytes_out.fetch_add(bytes_out, Ordering::Relaxed);
+            self.global_pending_tp_out.fetch_add(bytes_out, Ordering::Relaxed);
+        }
+        // Per-route tracking: use get() first (zero-alloc fast path for existing entries),
+        // fall back to entry() with to_string() only on the rare first-chunk miss.
         if let Some(route_id) = route_id {
-            if let Some(counter) = self.route_bytes_in.get(route_id) {
-                counter.fetch_add(bytes_in, Ordering::Relaxed);
-            } else {
-                self.route_bytes_in.entry(route_id.to_string())
-                    .or_insert_with(|| AtomicU64::new(0))
-                    .fetch_add(bytes_in, Ordering::Relaxed);
+            if bytes_in > 0 {
+                if let Some(counter) = self.route_bytes_in.get(route_id) {
+                    counter.fetch_add(bytes_in, Ordering::Relaxed);
+                } else {
+                    self.route_bytes_in.entry(route_id.to_string())
+                        .or_insert_with(|| AtomicU64::new(0))
+                        .fetch_add(bytes_in, Ordering::Relaxed);
+                }
             }
-            if let Some(counter) = self.route_bytes_out.get(route_id) {
-                counter.fetch_add(bytes_out, Ordering::Relaxed);
-            } else {
-                self.route_bytes_out.entry(route_id.to_string())
-                    .or_insert_with(|| AtomicU64::new(0))
-                    .fetch_add(bytes_out, Ordering::Relaxed);
+            if bytes_out > 0 {
+                if let Some(counter) = self.route_bytes_out.get(route_id) {
+                    counter.fetch_add(bytes_out, Ordering::Relaxed);
+                } else {
+                    self.route_bytes_out.entry(route_id.to_string())
+                        .or_insert_with(|| AtomicU64::new(0))
+                        .fetch_add(bytes_out, Ordering::Relaxed);
+                }
             }
             // Accumulate into per-route pending throughput counters (lock-free)
             if let Some(entry) = self.route_pending_tp.get(route_id) {
-                entry.0.fetch_add(bytes_in, Ordering::Relaxed);
-                entry.1.fetch_add(bytes_out, Ordering::Relaxed);
+                if bytes_in > 0 { entry.0.fetch_add(bytes_in, Ordering::Relaxed); }
+                if bytes_out > 0 { entry.1.fetch_add(bytes_out, Ordering::Relaxed); }
            } else {
                 let entry = self.route_pending_tp.entry(route_id.to_string())
                     .or_insert_with(|| (AtomicU64::new(0), AtomicU64::new(0)));
-                entry.0.fetch_add(bytes_in, Ordering::Relaxed);
-                entry.1.fetch_add(bytes_out, Ordering::Relaxed);
+                if bytes_in > 0 { entry.0.fetch_add(bytes_in, Ordering::Relaxed); }
+                if bytes_out > 0 { entry.1.fetch_add(bytes_out, Ordering::Relaxed); }
             }
         }
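A note on the get()-first pattern kept in the hunk above: here is a standalone sketch of it, under the assumption that the maps are dashmap::DashMap<String, AtomicU64> (the diff itself never names the map crate):

use std::sync::atomic::{AtomicU64, Ordering};
use dashmap::DashMap; // assumed crate; the diff only shows get()/entry() calls

// get() takes a shard read lock and allocates nothing, so it is the hot
// path for every chunk after the first; entry() allocates the String key
// and takes a shard write lock, but runs only on the first miss.
fn bump(map: &DashMap<String, AtomicU64>, key: &str, n: u64) {
    if let Some(counter) = map.get(key) {
        counter.fetch_add(n, Ordering::Relaxed);
        return;
    }
    map.entry(key.to_string())
        .or_insert_with(|| AtomicU64::new(0))
        .fetch_add(n, Ordering::Relaxed);
}

If two threads miss at the same time, both fall through to entry(), which resolves the race: one insert wins and both fetch_add calls land on the same counter.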
@@ -302,30 +311,34 @@ impl MetricsCollector {
         // This prevents orphaned entries when record_bytes races with
         // connection_closed (which evicts all per-IP data on last close).
         if self.ip_connections.contains_key(ip) {
-            if let Some(counter) = self.ip_bytes_in.get(ip) {
-                counter.fetch_add(bytes_in, Ordering::Relaxed);
-            } else {
-                self.ip_bytes_in.entry(ip.to_string())
-                    .or_insert_with(|| AtomicU64::new(0))
-                    .fetch_add(bytes_in, Ordering::Relaxed);
+            if bytes_in > 0 {
+                if let Some(counter) = self.ip_bytes_in.get(ip) {
+                    counter.fetch_add(bytes_in, Ordering::Relaxed);
+                } else {
+                    self.ip_bytes_in.entry(ip.to_string())
+                        .or_insert_with(|| AtomicU64::new(0))
+                        .fetch_add(bytes_in, Ordering::Relaxed);
+                }
             }
-            if let Some(counter) = self.ip_bytes_out.get(ip) {
-                counter.fetch_add(bytes_out, Ordering::Relaxed);
-            } else {
-                self.ip_bytes_out.entry(ip.to_string())
-                    .or_insert_with(|| AtomicU64::new(0))
-                    .fetch_add(bytes_out, Ordering::Relaxed);
+            if bytes_out > 0 {
+                if let Some(counter) = self.ip_bytes_out.get(ip) {
+                    counter.fetch_add(bytes_out, Ordering::Relaxed);
+                } else {
+                    self.ip_bytes_out.entry(ip.to_string())
+                        .or_insert_with(|| AtomicU64::new(0))
+                        .fetch_add(bytes_out, Ordering::Relaxed);
+                }
             }
             // Accumulate into per-IP pending throughput counters (lock-free)
             if let Some(entry) = self.ip_pending_tp.get(ip) {
-                entry.0.fetch_add(bytes_in, Ordering::Relaxed);
-                entry.1.fetch_add(bytes_out, Ordering::Relaxed);
+                if bytes_in > 0 { entry.0.fetch_add(bytes_in, Ordering::Relaxed); }
+                if bytes_out > 0 { entry.1.fetch_add(bytes_out, Ordering::Relaxed); }
            } else {
                 let entry = self.ip_pending_tp.entry(ip.to_string())
                     .or_insert_with(|| (AtomicU64::new(0), AtomicU64::new(0)));
-                entry.0.fetch_add(bytes_in, Ordering::Relaxed);
-                entry.1.fetch_add(bytes_out, Ordering::Relaxed);
+                if bytes_in > 0 { entry.0.fetch_add(bytes_in, Ordering::Relaxed); }
+                if bytes_out > 0 { entry.1.fetch_add(bytes_out, Ordering::Relaxed); }
             }
         }
     }
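The doc comment on record_bytes says the throughput trackers are fed during sample_all(), which sits outside these hunks. A plausible sketch of the draining step it implies, with swap(0) as the assumed mechanism:

use std::sync::atomic::{AtomicU64, Ordering};

// swap(0) atomically takes everything accumulated since the last sample,
// so bytes recorded concurrently are neither lost nor counted twice.
fn drain_pending(pending: &AtomicU64) -> u64 {
    pending.swap(0, Ordering::Relaxed)
}

The sampler would call this once per pending counter (global, per-route, per-IP) on each tick and feed the deltas to the throughput trackers under whatever lock they need, keeping the per-chunk path lock-free.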