feat(rustproxy): introduce a Rust-powered proxy engine and workspace with core crates for proxy functionality, ACME/TLS support, passthrough and HTTP proxies, metrics, nftables integration, routing/security, management IPC, tests, and README updates
This commit is contained in:
15
rust/crates/rustproxy-metrics/Cargo.toml
Normal file
15
rust/crates/rustproxy-metrics/Cargo.toml
Normal file
@@ -0,0 +1,15 @@
|
||||
[package]
name = "rustproxy-metrics"
version.workspace = true
edition.workspace = true
license.workspace = true
authors.workspace = true
description = "Metrics and throughput tracking for RustProxy"

[dependencies]
# Concurrent maps backing the per-route and per-key counters
# (collector.rs, log_dedup.rs).
dashmap = { workspace = true }
# Structured logging; the log deduplicator emits summaries via tracing::info!.
tracing = { workspace = true }
# Serialization of Metrics/RouteMetrics/Statistics snapshots.
serde = { workspace = true }
serde_json = { workspace = true }
# Async runtime for the background flush task; tokio-util provides
# the CancellationToken used to stop it.
tokio = { workspace = true }
tokio-util = { workspace = true }
|
||||
251
rust/crates/rustproxy-metrics/src/collector.rs
Normal file
251
rust/crates/rustproxy-metrics/src/collector.rs
Normal file
@@ -0,0 +1,251 @@
|
||||
use dashmap::DashMap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
|
||||
/// Aggregated metrics snapshot.
///
/// Serialized with camelCase field names (e.g. `activeConnections`) for
/// JSON consumers.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Metrics {
    /// Connections currently open across all routes.
    pub active_connections: u64,
    /// Connections accepted since startup (monotonically increasing).
    pub total_connections: u64,
    /// Total bytes received since startup.
    pub bytes_in: u64,
    /// Total bytes sent since startup.
    pub bytes_out: u64,
    /// Inbound throughput; `MetricsCollector::snapshot` leaves this at 0,
    /// so it must be filled in by the caller.
    pub throughput_in_bytes_per_sec: u64,
    /// Outbound throughput; also left at 0 by `snapshot` (caller-filled).
    pub throughput_out_bytes_per_sec: u64,
    /// Per-route metrics keyed by route id.
    pub routes: std::collections::HashMap<String, RouteMetrics>,
}
|
||||
|
||||
/// Per-route metrics.
///
/// One entry per route id inside [`Metrics::routes`]; serialized in
/// camelCase like the parent snapshot.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RouteMetrics {
    /// Connections currently open on this route.
    pub active_connections: u64,
    /// Connections ever attributed to this route.
    pub total_connections: u64,
    /// Bytes received on this route.
    pub bytes_in: u64,
    /// Bytes sent on this route.
    pub bytes_out: u64,
    /// Inbound throughput; left at 0 by `MetricsCollector::snapshot`.
    pub throughput_in_bytes_per_sec: u64,
    /// Outbound throughput; left at 0 by `MetricsCollector::snapshot`.
    pub throughput_out_bytes_per_sec: u64,
}
|
||||
|
||||
/// Statistics snapshot.
///
/// Not constructed anywhere in this module; presumably assembled by a
/// management/reporting layer elsewhere in the crate — TODO confirm producer.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Statistics {
    /// Connections currently open.
    pub active_connections: u64,
    /// Connections accepted since startup.
    pub total_connections: u64,
    /// Number of configured routes.
    pub routes_count: u64,
    /// Ports the proxy is currently listening on.
    pub listening_ports: Vec<u16>,
    /// Seconds since the process/collector started.
    pub uptime_seconds: u64,
}
|
||||
|
||||
/// Metrics collector tracking connections and throughput.
///
/// All counters are lock-free: global totals are atomics and per-route
/// counters live in concurrent `DashMap`s, so every method takes `&self`
/// and is safe to call from many tasks at once.
pub struct MetricsCollector {
    /// Connections currently open (incremented on open, decremented on close).
    active_connections: AtomicU64,
    /// Connections ever opened (never decremented).
    total_connections: AtomicU64,
    /// Total bytes received across all routes.
    total_bytes_in: AtomicU64,
    /// Total bytes sent across all routes.
    total_bytes_out: AtomicU64,
    /// Per-route active connection counts
    route_connections: DashMap<String, AtomicU64>,
    /// Per-route total connection counts
    route_total_connections: DashMap<String, AtomicU64>,
    /// Per-route byte counters
    route_bytes_in: DashMap<String, AtomicU64>,
    route_bytes_out: DashMap<String, AtomicU64>,
}
|
||||
|
||||
impl MetricsCollector {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
active_connections: AtomicU64::new(0),
|
||||
total_connections: AtomicU64::new(0),
|
||||
total_bytes_in: AtomicU64::new(0),
|
||||
total_bytes_out: AtomicU64::new(0),
|
||||
route_connections: DashMap::new(),
|
||||
route_total_connections: DashMap::new(),
|
||||
route_bytes_in: DashMap::new(),
|
||||
route_bytes_out: DashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Record a new connection.
|
||||
pub fn connection_opened(&self, route_id: Option<&str>) {
|
||||
self.active_connections.fetch_add(1, Ordering::Relaxed);
|
||||
self.total_connections.fetch_add(1, Ordering::Relaxed);
|
||||
|
||||
if let Some(route_id) = route_id {
|
||||
self.route_connections
|
||||
.entry(route_id.to_string())
|
||||
.or_insert_with(|| AtomicU64::new(0))
|
||||
.fetch_add(1, Ordering::Relaxed);
|
||||
self.route_total_connections
|
||||
.entry(route_id.to_string())
|
||||
.or_insert_with(|| AtomicU64::new(0))
|
||||
.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
/// Record a connection closing.
|
||||
pub fn connection_closed(&self, route_id: Option<&str>) {
|
||||
self.active_connections.fetch_sub(1, Ordering::Relaxed);
|
||||
|
||||
if let Some(route_id) = route_id {
|
||||
if let Some(counter) = self.route_connections.get(route_id) {
|
||||
let val = counter.load(Ordering::Relaxed);
|
||||
if val > 0 {
|
||||
counter.fetch_sub(1, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Record bytes transferred.
|
||||
pub fn record_bytes(&self, bytes_in: u64, bytes_out: u64, route_id: Option<&str>) {
|
||||
self.total_bytes_in.fetch_add(bytes_in, Ordering::Relaxed);
|
||||
self.total_bytes_out.fetch_add(bytes_out, Ordering::Relaxed);
|
||||
|
||||
if let Some(route_id) = route_id {
|
||||
self.route_bytes_in
|
||||
.entry(route_id.to_string())
|
||||
.or_insert_with(|| AtomicU64::new(0))
|
||||
.fetch_add(bytes_in, Ordering::Relaxed);
|
||||
self.route_bytes_out
|
||||
.entry(route_id.to_string())
|
||||
.or_insert_with(|| AtomicU64::new(0))
|
||||
.fetch_add(bytes_out, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
/// Get current active connection count.
|
||||
pub fn active_connections(&self) -> u64 {
|
||||
self.active_connections.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
/// Get total connection count.
|
||||
pub fn total_connections(&self) -> u64 {
|
||||
self.total_connections.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
/// Get total bytes received.
|
||||
pub fn total_bytes_in(&self) -> u64 {
|
||||
self.total_bytes_in.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
/// Get total bytes sent.
|
||||
pub fn total_bytes_out(&self) -> u64 {
|
||||
self.total_bytes_out.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
/// Get a full metrics snapshot including per-route data.
|
||||
pub fn snapshot(&self) -> Metrics {
|
||||
let mut routes = std::collections::HashMap::new();
|
||||
|
||||
// Collect per-route metrics
|
||||
for entry in self.route_total_connections.iter() {
|
||||
let route_id = entry.key().clone();
|
||||
let total = entry.value().load(Ordering::Relaxed);
|
||||
let active = self.route_connections
|
||||
.get(&route_id)
|
||||
.map(|c| c.load(Ordering::Relaxed))
|
||||
.unwrap_or(0);
|
||||
let bytes_in = self.route_bytes_in
|
||||
.get(&route_id)
|
||||
.map(|c| c.load(Ordering::Relaxed))
|
||||
.unwrap_or(0);
|
||||
let bytes_out = self.route_bytes_out
|
||||
.get(&route_id)
|
||||
.map(|c| c.load(Ordering::Relaxed))
|
||||
.unwrap_or(0);
|
||||
|
||||
routes.insert(route_id, RouteMetrics {
|
||||
active_connections: active,
|
||||
total_connections: total,
|
||||
bytes_in,
|
||||
bytes_out,
|
||||
throughput_in_bytes_per_sec: 0,
|
||||
throughput_out_bytes_per_sec: 0,
|
||||
});
|
||||
}
|
||||
|
||||
Metrics {
|
||||
active_connections: self.active_connections(),
|
||||
total_connections: self.total_connections(),
|
||||
bytes_in: self.total_bytes_in(),
|
||||
bytes_out: self.total_bytes_out(),
|
||||
throughput_in_bytes_per_sec: 0,
|
||||
throughput_out_bytes_per_sec: 0,
|
||||
routes,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for MetricsCollector {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // A freshly constructed collector reports zero everywhere.
    #[test]
    fn test_initial_state_zeros() {
        let collector = MetricsCollector::new();
        assert_eq!(collector.active_connections(), 0);
        assert_eq!(collector.total_connections(), 0);
    }

    // Opening a connection bumps both the active and the total counters.
    #[test]
    fn test_connection_opened_increments() {
        let collector = MetricsCollector::new();
        collector.connection_opened(None);
        assert_eq!(collector.active_connections(), 1);
        assert_eq!(collector.total_connections(), 1);
        collector.connection_opened(None);
        assert_eq!(collector.active_connections(), 2);
        assert_eq!(collector.total_connections(), 2);
    }

    // Closing decrements only the active count; totals are monotonic.
    #[test]
    fn test_connection_closed_decrements() {
        let collector = MetricsCollector::new();
        collector.connection_opened(None);
        collector.connection_opened(None);
        assert_eq!(collector.active_connections(), 2);
        collector.connection_closed(None);
        assert_eq!(collector.active_connections(), 1);
        // total_connections should stay at 2
        assert_eq!(collector.total_connections(), 2);
    }

    // Route-attributed opens/closes still feed the global counters.
    #[test]
    fn test_route_specific_tracking() {
        let collector = MetricsCollector::new();
        collector.connection_opened(Some("route-a"));
        collector.connection_opened(Some("route-a"));
        collector.connection_opened(Some("route-b"));

        assert_eq!(collector.active_connections(), 3);
        assert_eq!(collector.total_connections(), 3);

        collector.connection_closed(Some("route-a"));
        assert_eq!(collector.active_connections(), 2);
    }

    // Bytes accumulate globally regardless of route attribution, and
    // per-route only when a route id is supplied.
    #[test]
    fn test_record_bytes() {
        let collector = MetricsCollector::new();
        collector.record_bytes(100, 200, Some("route-a"));
        collector.record_bytes(50, 75, Some("route-a"));
        collector.record_bytes(25, 30, None);

        let total_in = collector.total_bytes_in.load(Ordering::Relaxed);
        let total_out = collector.total_bytes_out.load(Ordering::Relaxed);
        assert_eq!(total_in, 175);
        assert_eq!(total_out, 305);

        // Route-specific bytes
        let route_in = collector.route_bytes_in.get("route-a").unwrap();
        assert_eq!(route_in.load(Ordering::Relaxed), 150);
    }
}
|
||||
11
rust/crates/rustproxy-metrics/src/lib.rs
Normal file
11
rust/crates/rustproxy-metrics/src/lib.rs
Normal file
@@ -0,0 +1,11 @@
|
||||
//! # rustproxy-metrics
//!
//! Metrics and throughput tracking for RustProxy.
//!
//! Modules:
//! - [`throughput`]: circular-buffer 1 Hz throughput sampling.
//! - [`collector`]: connection and byte counters with per-route breakdown.
//! - [`log_dedup`]: time-windowed deduplication of repetitive log events.

pub mod throughput;
pub mod collector;
pub mod log_dedup;

// Flat re-exports so consumers can write e.g.
// `use rustproxy_metrics::MetricsCollector;` without naming the submodule.
pub use throughput::*;
pub use collector::*;
pub use log_dedup::*;
|
||||
219
rust/crates/rustproxy-metrics/src/log_dedup.rs
Normal file
219
rust/crates/rustproxy-metrics/src/log_dedup.rs
Normal file
@@ -0,0 +1,219 @@
|
||||
use dashmap::DashMap;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::time::{Duration, Instant};
|
||||
use tracing::info;
|
||||
|
||||
/// An aggregated event during the deduplication window.
///
/// Only the *first* message for a key is retained; later duplicates just
/// increment `count` (see `LogDeduplicator::log`).
struct AggregatedEvent {
    /// Category label, echoed in the emitted log line.
    category: String,
    /// Message of the first occurrence; representative for the whole batch.
    first_message: String,
    /// Number of occurrences accumulated since the last flush.
    count: AtomicU64,
    /// When the first occurrence was seen (used for the summary duration).
    first_seen: Instant,
    // Recorded but currently unread — kept for future rate calculations.
    #[allow(dead_code)]
    last_seen: Instant,
}
|
||||
|
||||
/// Log deduplicator that batches similar events over a time window.
///
/// Events are grouped by a composite key of `category:key`. Within each
/// deduplication window (`flush_interval`) identical events are counted
/// instead of being emitted individually. When the window expires (or the
/// batch reaches `max_batch_size`) a single summary line is written via
/// `tracing::info!`.
pub struct LogDeduplicator {
    /// In-flight batches keyed by `"category:key"`.
    events: DashMap<String, AggregatedEvent>,
    /// How often the background task drains `events` (5s by default).
    flush_interval: Duration,
    /// Per-key occurrence count that forces an immediate flush (default 100).
    max_batch_size: u64,
    // Declared but not yet consulted anywhere in this module.
    #[allow(dead_code)]
    rapid_threshold: u64, // events/sec that triggers immediate flush
}
|
||||
|
||||
impl LogDeduplicator {
    /// Create a deduplicator with the defaults: 5s flush window, 100-event
    /// batch cap, and a (currently unused) rapid threshold of 50 events/sec.
    pub fn new() -> Self {
        Self {
            events: DashMap::new(),
            flush_interval: Duration::from_secs(5),
            max_batch_size: 100,
            rapid_threshold: 50,
        }
    }

    /// Log an event, deduplicating by `category` + `key`.
    ///
    /// If the batch for this composite key reaches `max_batch_size` the
    /// accumulated events are flushed immediately.
    pub fn log(&self, category: &str, key: &str, message: &str) {
        let map_key = format!("{}:{}", category, key);
        let now = Instant::now();

        // Only the first occurrence's message is stored; duplicates merely
        // bump the counter and their messages are discarded.
        let entry = self.events.entry(map_key).or_insert_with(|| AggregatedEvent {
            category: category.to_string(),
            first_message: message.to_string(),
            count: AtomicU64::new(0),
            first_seen: now,
            last_seen: now,
        });

        // fetch_add returns the previous value, so +1 gives the count
        // including this event.
        let count = entry.count.fetch_add(1, Ordering::Relaxed) + 1;

        // Check if we should flush (batch size exceeded)
        if count >= self.max_batch_size {
            // The entry guard holds a DashMap shard lock; it must be
            // released before flush() walks the map or we'd deadlock on
            // our own lock.
            drop(entry);
            // NOTE(review): this drains *all* keys, not just the saturated
            // one, cutting other keys' windows short — confirm intended.
            self.flush();
        }
    }

    /// Flush all accumulated events, emitting summary log lines.
    pub fn flush(&self) {
        // Collect and remove all events
        // retain() is used as a drain-with-visit: each entry is logged and
        // then removed (the closure always returns false). Note the log
        // lines are emitted while the shard lock is held.
        self.events.retain(|_key, event| {
            let count = event.count.load(Ordering::Relaxed);
            if count > 0 {
                let elapsed = event.first_seen.elapsed();
                if count == 1 {
                    // Single occurrence: emit it verbatim, no summary noise.
                    info!("[{}] {}", event.category, event.first_message);
                } else {
                    info!(
                        "[SUMMARY] {} {} events in {:.1}s: {}",
                        count,
                        event.category,
                        elapsed.as_secs_f64(),
                        event.first_message
                    );
                }
            }
            false // remove all entries after flushing
        });
    }

    /// Start a background flush task that periodically drains accumulated
    /// events. The task runs until the supplied `CancellationToken` is
    /// cancelled, at which point it performs one final flush before exiting.
    pub fn start_flush_task(self: &Arc<Self>, cancel: tokio_util::sync::CancellationToken) {
        // The task holds its own Arc so the deduplicator outlives the caller.
        let dedup = Arc::clone(self);
        let interval = self.flush_interval;
        tokio::spawn(async move {
            loop {
                tokio::select! {
                    _ = cancel.cancelled() => {
                        // Final drain so no batched events are lost on shutdown.
                        dedup.flush();
                        break;
                    }
                    _ = tokio::time::sleep(interval) => {
                        dedup.flush();
                    }
                }
            }
        });
    }
}
|
||||
|
||||
impl Default for LogDeduplicator {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // A lone event is kept with count 1 and its original message.
    #[test]
    fn test_single_event_emitted_as_is() {
        let dedup = LogDeduplicator::new();
        dedup.log("conn", "open", "connection opened from 1.2.3.4");
        // One event should exist
        assert_eq!(dedup.events.len(), 1);
        let entry = dedup.events.get("conn:open").unwrap();
        assert_eq!(entry.count.load(Ordering::Relaxed), 1);
        assert_eq!(entry.first_message, "connection opened from 1.2.3.4");
        // Release the shard guard before flush() to avoid self-deadlock.
        drop(entry);
        dedup.flush();
        // After flush, map should be empty
        assert_eq!(dedup.events.len(), 0);
    }

    // Repeats of the same category:key collapse into one counted entry.
    #[test]
    fn test_duplicate_events_aggregated() {
        let dedup = LogDeduplicator::new();
        for _ in 0..10 {
            dedup.log("conn", "timeout", "connection timed out");
        }
        assert_eq!(dedup.events.len(), 1);
        let entry = dedup.events.get("conn:timeout").unwrap();
        assert_eq!(entry.count.load(Ordering::Relaxed), 10);
        drop(entry);
        dedup.flush();
        assert_eq!(dedup.events.len(), 0);
    }

    // Distinct category/key combinations are tracked independently.
    #[test]
    fn test_different_keys_separate() {
        let dedup = LogDeduplicator::new();
        dedup.log("conn", "open", "opened");
        dedup.log("conn", "close", "closed");
        dedup.log("tls", "handshake", "TLS handshake");
        assert_eq!(dedup.events.len(), 3);
        dedup.flush();
        assert_eq!(dedup.events.len(), 0);
    }

    // flush() fully resets state; the next log starts a fresh batch whose
    // first_message is the new event's message.
    #[test]
    fn test_flush_clears_events() {
        let dedup = LogDeduplicator::new();
        dedup.log("a", "b", "msg1");
        dedup.log("a", "b", "msg2");
        dedup.flush();
        assert_eq!(dedup.events.len(), 0);
        // Logging after flush creates a new entry
        dedup.log("a", "b", "msg3");
        assert_eq!(dedup.events.len(), 1);
        let entry = dedup.events.get("a:b").unwrap();
        assert_eq!(entry.count.load(Ordering::Relaxed), 1);
        assert_eq!(entry.first_message, "msg3");
    }

    // Hitting max_batch_size inside log() triggers an immediate flush.
    #[test]
    fn test_max_batch_triggers_flush() {
        let dedup = LogDeduplicator::new();
        // max_batch_size defaults to 100
        for i in 0..100 {
            dedup.log("flood", "key", &format!("event {}", i));
        }
        // After hitting max_batch_size the events map should have been flushed
        assert_eq!(dedup.events.len(), 0);
    }

    // Default::default() must match the documented defaults of new().
    #[test]
    fn test_default_trait() {
        let dedup = LogDeduplicator::default();
        assert_eq!(dedup.flush_interval, Duration::from_secs(5));
        assert_eq!(dedup.max_batch_size, 100);
    }

    // The spawned flush task drains events on its interval and stops when
    // the token is cancelled. Uses a short interval to keep the test fast.
    #[tokio::test]
    async fn test_background_flush_task() {
        let dedup = Arc::new(LogDeduplicator {
            events: DashMap::new(),
            flush_interval: Duration::from_millis(50),
            max_batch_size: 100,
            rapid_threshold: 50,
        });

        let cancel = tokio_util::sync::CancellationToken::new();
        dedup.start_flush_task(cancel.clone());

        // Log some events
        dedup.log("bg", "test", "background flush test");
        assert_eq!(dedup.events.len(), 1);

        // Wait for the background task to flush
        tokio::time::sleep(Duration::from_millis(100)).await;
        assert_eq!(dedup.events.len(), 0);

        // Cancel the task
        cancel.cancel();
        tokio::time::sleep(Duration::from_millis(20)).await;
    }
}
|
||||
173
rust/crates/rustproxy-metrics/src/throughput.rs
Normal file
173
rust/crates/rustproxy-metrics/src/throughput.rs
Normal file
@@ -0,0 +1,173 @@
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::time::{Instant, SystemTime, UNIX_EPOCH};
|
||||
|
||||
/// A single throughput sample covering one sampling interval (~1 second).
#[derive(Debug, Clone, Copy)]
pub struct ThroughputSample {
    /// Wall-clock time the sample was taken, in ms since the Unix epoch.
    pub timestamp_ms: u64,
    /// Bytes received during the interval.
    pub bytes_in: u64,
    /// Bytes sent during the interval.
    pub bytes_out: u64,
}

/// Circular buffer for 1Hz throughput sampling.
/// Matches smartproxy's ThroughputTracker.
///
/// `record_bytes` only touches atomic accumulators and can be called from
/// `&self`; `sample` takes `&mut self` and is expected to be driven by a
/// single owner at roughly 1 Hz.
pub struct ThroughputTracker {
    /// Circular buffer of samples
    samples: Vec<ThroughputSample>,
    /// Current write index (next slot to overwrite once full)
    write_index: usize,
    /// Number of valid samples (saturates at `capacity`)
    count: usize,
    /// Maximum number of samples to retain; always >= 1 (see `new`)
    capacity: usize,
    /// Accumulated bytes since last sample
    pending_bytes_in: AtomicU64,
    pending_bytes_out: AtomicU64,
    /// When the tracker was created
    created_at: Instant,
}

impl ThroughputTracker {
    /// Create a new tracker with the given capacity (seconds of retention).
    ///
    /// `retention_seconds == 0` is clamped to 1: a zero capacity would make
    /// the `% capacity` in [`Self::sample`] panic with a divide-by-zero.
    pub fn new(retention_seconds: usize) -> Self {
        let capacity = retention_seconds.max(1);
        Self {
            samples: Vec::with_capacity(capacity),
            write_index: 0,
            count: 0,
            capacity,
            pending_bytes_in: AtomicU64::new(0),
            pending_bytes_out: AtomicU64::new(0),
            created_at: Instant::now(),
        }
    }

    /// Record bytes (called from data flow callbacks).
    pub fn record_bytes(&self, bytes_in: u64, bytes_out: u64) {
        self.pending_bytes_in.fetch_add(bytes_in, Ordering::Relaxed);
        self.pending_bytes_out.fetch_add(bytes_out, Ordering::Relaxed);
    }

    /// Take a sample (called at 1Hz).
    ///
    /// Drains the pending byte counters into a new sample, overwriting the
    /// oldest slot once the buffer is full.
    pub fn sample(&mut self) {
        // swap(0) atomically drains the accumulators, so bytes recorded
        // concurrently are never lost or double-counted.
        let bytes_in = self.pending_bytes_in.swap(0, Ordering::Relaxed);
        let bytes_out = self.pending_bytes_out.swap(0, Ordering::Relaxed);
        let timestamp_ms = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_millis() as u64;

        let sample = ThroughputSample {
            timestamp_ms,
            bytes_in,
            bytes_out,
        };

        if self.samples.len() < self.capacity {
            // Still filling the buffer for the first time.
            self.samples.push(sample);
        } else {
            // Buffer full: overwrite the oldest slot.
            self.samples[self.write_index] = sample;
        }
        // Safe: capacity is clamped to >= 1 in new().
        self.write_index = (self.write_index + 1) % self.capacity;
        self.count = (self.count + 1).min(self.capacity);
    }

    /// Get throughput over the last N seconds.
    ///
    /// Returns `(bytes_in_per_sec, bytes_out_per_sec)` averaged (truncating
    /// integer division) over `window_seconds`, clamped to the number of
    /// samples actually taken. `(0, 0)` when no samples exist yet.
    pub fn throughput(&self, window_seconds: usize) -> (u64, u64) {
        let window = window_seconds.min(self.count);
        if window == 0 {
            return (0, 0);
        }

        let mut total_in = 0u64;
        let mut total_out = 0u64;

        // Walk backwards from the most recent sample, wrapping around the
        // circular buffer when the logical index would go negative.
        for i in 0..window {
            let idx = if self.write_index >= i + 1 {
                self.write_index - i - 1
            } else {
                self.capacity - (i + 1 - self.write_index)
            };
            if idx < self.samples.len() {
                total_in += self.samples[idx].bytes_in;
                total_out += self.samples[idx].bytes_out;
            }
        }

        (total_in / window as u64, total_out / window as u64)
    }

    /// Get instant throughput (last 1 second).
    pub fn instant(&self) -> (u64, u64) {
        self.throughput(1)
    }

    /// Get recent throughput (last 10 seconds).
    pub fn recent(&self) -> (u64, u64) {
        self.throughput(10)
    }

    /// How long this tracker has been alive.
    pub fn uptime(&self) -> std::time::Duration {
        self.created_at.elapsed()
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // A tracker with no samples reports zero throughput.
    #[test]
    fn test_empty_throughput() {
        let tracker = ThroughputTracker::new(60);
        let (bytes_in, bytes_out) = tracker.throughput(10);
        assert_eq!(bytes_in, 0);
        assert_eq!(bytes_out, 0);
    }

    // One record + one sample: instant() returns exactly those bytes.
    #[test]
    fn test_single_sample() {
        let mut tracker = ThroughputTracker::new(60);
        tracker.record_bytes(1000, 2000);
        tracker.sample();
        let (bytes_in, bytes_out) = tracker.instant();
        assert_eq!(bytes_in, 1000);
        assert_eq!(bytes_out, 2000);
    }

    // More samples than capacity: the buffer wraps and remains usable.
    #[test]
    fn test_circular_buffer_wrap() {
        let mut tracker = ThroughputTracker::new(3); // Small capacity
        for i in 0..5 {
            tracker.record_bytes(i * 100, i * 200);
            tracker.sample();
        }
        // Should still work after wrapping
        let (bytes_in, bytes_out) = tracker.throughput(3);
        assert!(bytes_in > 0);
        assert!(bytes_out > 0);
    }

    // throughput(n) is the truncating integer mean of the last n samples.
    #[test]
    fn test_window_averaging() {
        let mut tracker = ThroughputTracker::new(60);
        // Record 3 samples of different sizes
        tracker.record_bytes(100, 200);
        tracker.sample();
        tracker.record_bytes(200, 400);
        tracker.sample();
        tracker.record_bytes(300, 600);
        tracker.sample();

        // Average over 3 samples: (100+200+300)/3 = 200, (200+400+600)/3 = 400
        let (avg_in, avg_out) = tracker.throughput(3);
        assert_eq!(avg_in, 200);
        assert_eq!(avg_out, 400);
    }

    // uptime() reflects wall time elapsed since construction.
    #[test]
    fn test_uptime_positive() {
        let tracker = ThroughputTracker::new(60);
        std::thread::sleep(std::time::Duration::from_millis(10));
        assert!(tracker.uptime().as_millis() >= 10);
    }
}
|
||||
Reference in New Issue
Block a user