Compare commits
24 Commits
| SHA1 |
|---|
| 8cefe9d66a |
| d5e08c83fc |
| 1247f48856 |
| e3bae4c399 |
| 0930f7e10c |
| aa9e6dfd94 |
| 211d5cf835 |
| 2ce1899337 |
| 2e2ffc4485 |
| da26816af5 |
| d598bffec3 |
| a9dbccfaff |
| 386859a2bd |
| 2b58615d24 |
| 95adf56e52 |
| c96a493fb6 |
| b92587cc16 |
| b3dc0a6db2 |
| de3b8d3f58 |
| 75089ec975 |
| b106db932b |
| fb0c0dcc31 |
| 61b67b91a0 |
| fc64f5a95e |
changelog.md +78
@@ -1,5 +1,83 @@
# Changelog

## 2026-03-16 - 25.11.6 - fix(rustproxy-http,rustproxy-passthrough)
improve upstream connection cleanup and graceful tunnel shutdown

- Evict pooled HTTP/2 connections when their driver exits and shorten the maximum pooled H2 age to reduce reuse of stale upstream connections.
- Strip hop-by-hop headers from backend responses before forwarding to HTTP/2 clients to avoid invalid H2 response handling.
- Replace immediate task aborts in WebSocket and TCP tunnel watchdogs with cancellation-driven graceful shutdown plus timed fallback aborts.
- Use non-blocking semaphore acquisition in the TCP listener so connection limits do not stall the accept loop for the entire port.
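As context for the second bullet: hop-by-hop headers are connection-scoped under RFC 9110 §7.6.1 and are invalid in HTTP/2 responses. A minimal sketch of the stripping step, assuming the `http` crate; the helper name is illustrative, not the codebase's actual function:

```rust
use http::{header, HeaderMap};

/// Remove hop-by-hop headers before re-framing a response for an HTTP/2
/// client. Hypothetical helper; the real function in the proxy may differ.
fn strip_hop_by_hop(headers: &mut HeaderMap) {
    // Headers listed in `Connection` are hop-by-hop by declaration.
    let named: Vec<header::HeaderName> = headers
        .get_all(header::CONNECTION)
        .iter()
        .filter_map(|v| v.to_str().ok())
        .flat_map(|v| v.split(','))
        .filter_map(|name| name.trim().parse().ok())
        .collect();
    for name in named {
        headers.remove(name);
    }
    // Well-known hop-by-hop headers are always removed.
    for name in [
        header::CONNECTION,
        header::PROXY_AUTHENTICATE,
        header::PROXY_AUTHORIZATION,
        header::TE,
        header::TRAILER,
        header::TRANSFER_ENCODING,
        header::UPGRADE,
    ] {
        headers.remove(name);
    }
    headers.remove("keep-alive"); // `http` has no constant for Keep-Alive
}
```
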
## 2026-03-16 - 25.11.5 - fix(repo)
no changes to commit

## 2026-03-15 - 25.11.4 - fix(rustproxy-http)
report streamed HTTP and WebSocket bytes per chunk for real-time throughput metrics

- Update CountingBody to record bytes immediately on each data frame instead of aggregating until completion or drop
- Record WebSocket tunnel traffic inside both copy loops and remove the final aggregate byte report to keep throughput metrics current
## 2026-03-15 - 25.11.3 - fix(repo)
no changes to commit

## 2026-03-15 - 25.11.2 - fix(rustproxy-http)
avoid reusing HTTP/1 senders during streaming responses and relax HTTP/2 keep-alive timeouts

- Stop returning HTTP/1 senders to the connection pool before upstream response bodies finish streaming to prevent unsafe reuse on active connections.
- Increase HTTP/2 keep-alive timeout from 5 seconds to 30 seconds in proxy connection builders to better support longer-lived backend streams.
- Improve reliability for large streaming payloads and backend fallback request handling.
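The first bullet's fix has a general shape: a sender may only go back to the pool once the response body is done. A sketch of that shape as an `http_body` wrapper; `ReturnOnEnd` and its `on_end` callback are hypothetical names, not the codebase's actual mechanism:

```rust
use std::pin::Pin;
use std::task::{Context, Poll};

use http_body::{Body, Frame};

/// Hypothetical wrapper: fires `on_end` only once the response body is
/// fully consumed, at which point the pooled sender is safe to reuse.
struct ReturnOnEnd<B> {
    inner: Pin<Box<B>>,
    on_end: Option<Box<dyn FnOnce() + Send>>,
}

impl<B: Body> Body for ReturnOnEnd<B> {
    type Data = B::Data;
    type Error = B::Error;

    fn poll_frame(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> {
        // All fields are Unpin (Pin<Box<B>> always is), so get_mut is fine.
        let this = self.get_mut();
        match this.inner.as_mut().poll_frame(cx) {
            Poll::Ready(None) => {
                // End of stream: the connection is idle again, return the sender.
                if let Some(cb) = this.on_end.take() {
                    cb();
                }
                Poll::Ready(None)
            }
            // On error the callback is dropped unfired, so a broken
            // connection is never put back into the pool.
            other => other,
        }
    }
}
```
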
## 2026-03-15 - 25.11.1 - fix(rustproxy-http)
keep connection idle tracking alive during streaming and tune HTTP/2 connection lifetimes

- Propagate connection activity tracking through HTTP/1, HTTP/2, and WebSocket forwarding so active request and response body streams do not trigger the idle watchdog.
- Update CountingBody to refresh connection activity timestamps while data frames are polled during uploads and downloads.
- Increase pooled HTTP/2 max age and set explicit HTTP/2 connection window sizes to improve long-lived streaming behavior.
## 2026-03-15 - 25.11.0 - feat(rustproxy-http)
add HTTP/2 Extended CONNECT WebSocket proxy support

- Enable HTTP/2 CONNECT protocol support on the Hyper auto connection builder
- Detect WebSocket requests for both HTTP/1 Upgrade and HTTP/2 Extended CONNECT flows
- Translate HTTP/2 WebSocket requests to an HTTP/1.1 backend handshake and return RFC-compliant client responses
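For reference, detecting the two WebSocket flows might look as follows, assuming hyper 1.x, where the HTTP/2 `:protocol` pseudo-header (RFC 8441 Extended CONNECT) is surfaced as the `hyper::ext::Protocol` request extension; this is a sketch, not the proxy's actual detection code:

```rust
use http::{header, Method, Request, Version};

/// Sketch: true if the request is a WebSocket handshake in either flow.
fn is_websocket_request<B>(req: &Request<B>) -> bool {
    if req.version() == Version::HTTP_2 {
        // RFC 8441 Extended CONNECT: `:method = CONNECT`, `:protocol = websocket`
        return req.method() == Method::CONNECT
            && req
                .extensions()
                .get::<hyper::ext::Protocol>()
                .map(|p| p.as_str().eq_ignore_ascii_case("websocket"))
                .unwrap_or(false);
    }
    // RFC 6455 HTTP/1.1 flow: `Connection: upgrade` + `Upgrade: websocket`
    req.headers()
        .get(header::UPGRADE)
        .and_then(|v| v.to_str().ok())
        .map(|v| v.eq_ignore_ascii_case("websocket"))
        .unwrap_or(false)
}
```
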
## 2026-03-12 - 25.10.7 - fix(rustproxy-http)
remove Host header from HTTP/2 upstream requests while preserving it for HTTP/1 retries

- strips the Host header before sending HTTP/2 upstream requests so :authority from the URI is used instead
- avoids 400 responses from nginx caused by sending both Host and :authority headers
- keeps a cloned header set for bodyless request retries so HTTP/1 fallback still retains the Host header
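A minimal sketch of the header handling described above, using the `http` crate; the helper and its use in the retry path are illustrative:

```rust
use http::{header, HeaderMap, Request};

/// Sketch: drop Host for the HTTP/2 attempt (hyper derives :authority from
/// the URI), but keep a copy so an HTTP/1 fallback retry still sends Host.
fn prepare_h2_headers<B>(req: &mut Request<B>) -> HeaderMap {
    let retry_headers = req.headers().clone(); // retained for HTTP/1 retries
    req.headers_mut().remove(header::HOST);
    retry_headers
}
```
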
## 2026-03-12 - 25.10.6 - fix(rustproxy-http)
use the requested domain as HTTP/2 authority instead of the backend host and port

- build HTTP/2 absolute URIs from the client-facing domain so the :authority pseudo-header matches the Host header
- remove backend port from generated HTTP/2 request URIs and fall back to the upstream host only when no domain is available
- apply the authority handling consistently across pooled, inline, and generic upstream request paths
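A sketch of the authority handling, assuming `http::Uri`; the function and parameter names are illustrative:

```rust
use http::Uri;

/// Sketch: build the HTTP/2 upstream URI from the client-facing domain so
/// the derived :authority matches the Host the client sent, falling back to
/// the upstream host only when no domain is known.
fn h2_upstream_uri(
    domain: Option<&str>,
    upstream_host: &str,
    path_and_query: &str,
) -> Result<Uri, http::Error> {
    let authority = domain.unwrap_or(upstream_host); // note: no backend port
    Uri::builder()
        .scheme("https")
        .authority(authority)
        .path_and_query(path_and_query)
        .build()
}
```
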
## 2026-03-12 - 25.10.5 - fix(rustproxy-http)
configure HTTP/2 client builders with a Tokio timer for keep-alive handling

- Adds TokioTimer to all HTTP/2 client builder instances in proxy_service.
- Ensures configured HTTP/2 keep-alive interval and timeout settings have the required timer runtime support.
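A sketch of a builder wired up this way, assuming hyper 1.x with `hyper-util`, `http-body-util`, and `bytes`; hyper's keep-alive machinery needs an explicit runtime timer, which is what `TokioTimer` supplies. The intervals here are illustrative:

```rust
use std::time::Duration;

use bytes::Bytes;
use http_body_util::Full;
use hyper_util::rt::{TokioExecutor, TokioIo, TokioTimer};
use tokio::net::TcpStream;

/// Sketch: open an HTTP/2 connection with timer-backed keep-alive.
async fn h2_connect(
    tcp: TcpStream,
) -> hyper::Result<hyper::client::conn::http2::SendRequest<Full<Bytes>>> {
    let (sender, conn) = hyper::client::conn::http2::Builder::new(TokioExecutor::new())
        .timer(TokioTimer::new()) // without this, keep-alive has no clock
        .keep_alive_interval(Duration::from_secs(10))
        .keep_alive_timeout(Duration::from_secs(30))
        .handshake(TokioIo::new(tcp))
        .await?;
    // Drive the connection; the task resolves when the peer closes or errors.
    tokio::spawn(conn);
    Ok(sender)
}
```
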
## 2026-03-12 - 25.10.4 - fix(rustproxy-http)
stabilize upstream HTTP/2 forwarding and fallback behavior

- Remove hop-by-hop headers before forwarding requests to HTTP/2 backends to comply with RFC 9113.
- Use ALPN-enabled TLS configuration whenever HTTP/2 is possible, including explicit H2 connections and retries.
- Add HTTP/2 handshake timeouts, tuned connection settings, and fallback to HTTP/1 when H2 negotiation times out or fails.
- Register pooled HTTP/2 senders only after a successful first request to avoid reusing broken connections.
- Build absolute URIs for HTTP/2 upstream requests so pseudo-headers such as scheme and authority are derived correctly.
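A sketch of the ALPN side, assuming rustls 0.23 via tokio-rustls; the handshake timeout from the third bullet would then wrap the subsequent HTTP/2 handshake in `tokio::time::timeout`, dropping to HTTP/1 on expiry. `roots` and the surrounding plumbing are placeholders:

```rust
use std::sync::Arc;

use tokio::net::TcpStream;
use tokio_rustls::rustls::pki_types::ServerName;
use tokio_rustls::{client::TlsStream, rustls, TlsConnector};

/// Sketch: connect with ALPN and report whether HTTP/2 was negotiated.
async fn connect_alpn(
    roots: rustls::RootCertStore,
    server_name: ServerName<'static>,
    tcp: TcpStream,
) -> std::io::Result<(TlsStream<TcpStream>, bool)> {
    let mut cfg = rustls::ClientConfig::builder()
        .with_root_certificates(roots)
        .with_no_client_auth();
    // Offer h2 first so capable backends negotiate HTTP/2; http/1.1 stays
    // available as the ALPN fallback.
    cfg.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec()];

    let tls = TlsConnector::from(Arc::new(cfg)).connect(server_name, tcp).await?;
    // The negotiated protocol selects the upstream codec; anything else means HTTP/1.
    let is_h2 = tls.get_ref().1.alpn_protocol() == Some(&b"h2"[..]);
    Ok((tls, is_h2))
}
```
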
## 2026-03-12 - 25.10.3 - fix(rustproxy-http)
include request domain in backend proxy error and protocol detection logs

- Adds domain context to backend TCP/TLS connect, handshake, request failure, retry, and fallback log entries in the Rust HTTP proxy service.
- Propagates the resolved host/domain through H1, H2, pooled, and fallback forwarding paths so backend-level diagnostics can be correlated with the original request domain.
## 2026-03-12 - 25.10.2 - fix(repo)
no code changes to release

package.json
@@ -1,6 +1,6 @@
 {
   "name": "@push.rocks/smartproxy",
-  "version": "25.10.2",
+  "version": "25.11.6",
   "private": false,
   "description": "A powerful proxy package with unified route-based configuration for high traffic management. Features include SSL/TLS support, flexible routing patterns, WebSocket handling, advanced security options, and automatic ACME certificate management.",
   "main": "dist_ts/index.js",
@@ -20,6 +20,7 @@ const IDLE_TIMEOUT: Duration = Duration::from_secs(90);
const EVICTION_INTERVAL: Duration = Duration::from_secs(30);

/// Maximum age for pooled HTTP/2 connections before proactive eviction.
/// Prevents staleness from backends that close idle connections (e.g. nginx GOAWAY).
/// 120s is well within typical server GOAWAY windows (nginx: ~60s idle, envoy: ~60s).
const MAX_H2_AGE: Duration = Duration::from_secs(120);

/// Identifies a unique backend endpoint.
@@ -11,20 +11,22 @@ use rustproxy_metrics::MetricsCollector;

 /// Wraps any `http_body::Body` and counts data bytes passing through.
 ///
-/// When the body is fully consumed or dropped, accumulated byte counts
-/// are reported to the `MetricsCollector`.
+/// Each chunk is reported to the `MetricsCollector` immediately so that
+/// the throughput tracker (sampled at 1 Hz) reflects real-time data flow.
 ///
 /// The inner body is pinned on the heap to support `!Unpin` types like `hyper::body::Incoming`.
 pub struct CountingBody<B> {
     inner: Pin<Box<B>>,
-    counted_bytes: AtomicU64,
     metrics: Arc<MetricsCollector>,
     route_id: Option<String>,
     source_ip: Option<String>,
     /// Whether we count bytes as "in" (request body) or "out" (response body).
     direction: Direction,
-    /// Whether we've already reported the bytes (to avoid double-reporting on drop).
-    reported: bool,
+    /// Optional connection-level activity tracker. When set, poll_frame updates this
+    /// to keep the idle watchdog alive during active body streaming (uploads/downloads).
+    connection_activity: Option<Arc<AtomicU64>>,
+    /// Start instant for computing elapsed ms for connection_activity.
+    activity_start: Option<std::time::Instant>,
 }

 /// Which direction the bytes flow.
@@ -47,42 +49,36 @@ impl<B> CountingBody<B> {
     ) -> Self {
         Self {
             inner: Box::pin(inner),
-            counted_bytes: AtomicU64::new(0),
             metrics,
             route_id,
             source_ip,
             direction,
-            reported: false,
+            connection_activity: None,
+            activity_start: None,
         }
     }

-    /// Report accumulated bytes to the metrics collector.
-    fn report(&mut self) {
-        if self.reported {
-            return;
-        }
-        self.reported = true;
-
-        let bytes = self.counted_bytes.load(Ordering::Relaxed);
-        if bytes == 0 {
-            return;
-        }
+    /// Set the connection-level activity tracker. When set, each data frame
+    /// updates this timestamp to prevent the idle watchdog from killing the
+    /// connection during active body streaming.
+    pub fn with_connection_activity(mut self, activity: Arc<AtomicU64>, start: std::time::Instant) -> Self {
+        self.connection_activity = Some(activity);
+        self.activity_start = Some(start);
+        self
+    }
+
+    /// Report a chunk of bytes immediately to the metrics collector.
+    #[inline]
+    fn report_chunk(&self, len: u64) {
         let route_id = self.route_id.as_deref();
         let source_ip = self.source_ip.as_deref();
         match self.direction {
-            Direction::In => self.metrics.record_bytes(bytes, 0, route_id, source_ip),
-            Direction::Out => self.metrics.record_bytes(0, bytes, route_id, source_ip),
+            Direction::In => self.metrics.record_bytes(len, 0, route_id, source_ip),
+            Direction::Out => self.metrics.record_bytes(0, len, route_id, source_ip),
         }
     }
 }

-impl<B> Drop for CountingBody<B> {
-    fn drop(&mut self) {
-        self.report();
-    }
-}
-
 // CountingBody is Unpin because inner is Pin<Box<B>> (always Unpin).
 impl<B> Unpin for CountingBody<B> {}

@@ -102,16 +98,18 @@ where
         match this.inner.as_mut().poll_frame(cx) {
             Poll::Ready(Some(Ok(frame))) => {
                 if let Some(data) = frame.data_ref() {
-                    this.counted_bytes.fetch_add(data.len() as u64, Ordering::Relaxed);
+                    let len = data.len() as u64;
+                    // Report bytes immediately so the 1 Hz throughput sampler sees them
+                    this.report_chunk(len);
+                    // Keep the connection-level idle watchdog alive during body streaming
+                    if let (Some(activity), Some(start)) = (&this.connection_activity, &this.activity_start) {
+                        activity.store(start.elapsed().as_millis() as u64, Ordering::Relaxed);
+                    }
                 }
                 Poll::Ready(Some(Ok(frame)))
             }
             Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))),
-            Poll::Ready(None) => {
-                // Body is fully consumed — report now
-                this.report();
-                Poll::Ready(None)
-            }
+            Poll::Ready(None) => Poll::Ready(None),
             Poll::Pending => Poll::Pending,
         }
     }
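For orientation, how the wrapper might compose at a call site, assuming a constructor that mirrors the struct fields above (the actual signature in the codebase may differ):

```rust
use std::sync::{atomic::AtomicU64, Arc};
use std::time::Instant;

// Sketch only: CountingBody, MetricsCollector, and Direction are the types
// from the diff above; all argument names here are placeholders.
fn wrap_response_body<B>(
    upstream_body: B,              // e.g. hyper::body::Incoming
    metrics: Arc<MetricsCollector>,
    route_id: Option<String>,
    source_ip: Option<String>,
    last_activity: Arc<AtomicU64>, // shared with the idle watchdog
    start: Instant,
) -> CountingBody<B> {
    CountingBody::new(upstream_body, metrics, route_id, source_ip, Direction::Out)
        // every polled data frame now also refreshes the idle watchdog
        .with_connection_activity(last_activity, start)
}
```
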
File diff suppressed because it is too large.
@@ -97,16 +97,25 @@ pub async fn forward_bidirectional_with_timeouts(
     let last_activity = Arc::new(AtomicU64::new(0));
     let start = std::time::Instant::now();

+    // Per-connection cancellation token: the watchdog cancels this instead of
+    // aborting tasks, so the copy loops can shut down gracefully (TCP FIN instead
+    // of RST, TLS close_notify if the stream is TLS-wrapped).
+    let conn_cancel = CancellationToken::new();
+
     let la1 = Arc::clone(&last_activity);
     let initial_len = initial_data.map_or(0u64, |d| d.len() as u64);
     let metrics_c2b = metrics.clone();
+    let cc1 = conn_cancel.clone();
     let c2b = tokio::spawn(async move {
         let mut buf = vec![0u8; 65536];
         let mut total = initial_len;
         loop {
-            let n = match client_read.read(&mut buf).await {
-                Ok(0) | Err(_) => break,
-                Ok(n) => n,
+            let n = tokio::select! {
+                result = client_read.read(&mut buf) => match result {
+                    Ok(0) | Err(_) => break,
+                    Ok(n) => n,
+                },
+                _ = cc1.cancelled() => break,
             };
             if backend_write.write_all(&buf[..n]).await.is_err() {
                 break;
@@ -117,19 +126,27 @@ pub async fn forward_bidirectional_with_timeouts(
                 ctx.collector.record_bytes(n as u64, 0, ctx.route_id.as_deref(), ctx.source_ip.as_deref());
             }
         }
-        let _ = backend_write.shutdown().await;
+        // Graceful shutdown with timeout (sends TCP FIN / TLS close_notify)
+        let _ = tokio::time::timeout(
+            std::time::Duration::from_secs(2),
+            backend_write.shutdown(),
+        ).await;
         total
     });

     let la2 = Arc::clone(&last_activity);
     let metrics_b2c = metrics;
+    let cc2 = conn_cancel.clone();
     let b2c = tokio::spawn(async move {
         let mut buf = vec![0u8; 65536];
         let mut total = 0u64;
         loop {
-            let n = match backend_read.read(&mut buf).await {
-                Ok(0) | Err(_) => break,
-                Ok(n) => n,
+            let n = tokio::select! {
+                result = backend_read.read(&mut buf) => match result {
+                    Ok(0) | Err(_) => break,
+                    Ok(n) => n,
+                },
+                _ = cc2.cancelled() => break,
             };
             if client_write.write_all(&buf[..n]).await.is_err() {
                 break;
@@ -140,14 +157,20 @@ pub async fn forward_bidirectional_with_timeouts(
                 ctx.collector.record_bytes(0, n as u64, ctx.route_id.as_deref(), ctx.source_ip.as_deref());
             }
         }
-        let _ = client_write.shutdown().await;
+        // Graceful shutdown with timeout (sends TCP FIN / TLS close_notify)
+        let _ = tokio::time::timeout(
+            std::time::Duration::from_secs(2),
+            client_write.shutdown(),
+        ).await;
         total
     });

-    // Watchdog: inactivity, max lifetime, and cancellation
+    // Watchdog: inactivity, max lifetime, and cancellation.
+    // First cancels the per-connection token for graceful shutdown (FIN/close_notify),
+    // then falls back to abort if the tasks are stuck (e.g. on a blocked write_all).
     let la_watch = Arc::clone(&last_activity);
-    let c2b_handle = c2b.abort_handle();
-    let b2c_handle = b2c.abort_handle();
+    let c2b_abort = c2b.abort_handle();
+    let b2c_abort = b2c.abort_handle();
     let watchdog = tokio::spawn(async move {
         let check_interval = std::time::Duration::from_secs(5);
         let mut last_seen = 0u64;
@@ -155,16 +178,12 @@ pub async fn forward_bidirectional_with_timeouts(
             tokio::select! {
                 _ = cancel.cancelled() => {
                     debug!("Connection cancelled by shutdown");
-                    c2b_handle.abort();
-                    b2c_handle.abort();
                     break;
                 }
                 _ = tokio::time::sleep(check_interval) => {
                     // Check max lifetime
                     if start.elapsed() >= max_lifetime {
                         debug!("Connection exceeded max lifetime, closing");
-                        c2b_handle.abort();
-                        b2c_handle.abort();
                         break;
                     }
@@ -174,8 +193,6 @@ pub async fn forward_bidirectional_with_timeouts(
                     let elapsed_since_activity = start.elapsed().as_millis() as u64 - current;
                     if elapsed_since_activity >= inactivity_timeout.as_millis() as u64 {
                         debug!("Connection inactive for {}ms, closing", elapsed_since_activity);
-                        c2b_handle.abort();
-                        b2c_handle.abort();
                         break;
                     }
                 }
@@ -183,6 +200,13 @@ pub async fn forward_bidirectional_with_timeouts(
                 }
             }
         }
+        // Phase 1: Signal copy loops to exit gracefully (allows FIN/close_notify)
+        conn_cancel.cancel();
+        // Phase 2: Wait for graceful shutdown (2s shutdown timeout + 2s margin)
+        tokio::time::sleep(std::time::Duration::from_secs(4)).await;
+        // Phase 3: Force-abort if still stuck (e.g. blocked on write_all)
+        c2b_abort.abort();
+        b2c_abort.abort();
     });

     let bytes_in = c2b.await.unwrap_or(0);
@@ -465,21 +465,19 @@ impl TcpListenerManager {
                 Ok((stream, peer_addr)) => {
                     let ip = peer_addr.ip();

-                    // Global connection limit — acquire semaphore permit with timeout
-                    let permit = match tokio::time::timeout(
-                        std::time::Duration::from_secs(5),
-                        conn_semaphore.clone().acquire_owned(),
-                    ).await {
-                        Ok(Ok(permit)) => permit,
-                        Ok(Err(_)) => {
-                            // Semaphore closed — shouldn't happen, but be safe
-                            debug!("Connection semaphore closed, dropping connection from {}", peer_addr);
+                    // Global connection limit — non-blocking check.
+                    // MUST NOT block the accept loop: a blocking acquire would stall
+                    // ALL connections to this port (not just the one over limit), because
+                    // listener.accept() is not polled while we await the semaphore.
+                    let permit = match conn_semaphore.clone().try_acquire_owned() {
+                        Ok(permit) => permit,
+                        Err(tokio::sync::TryAcquireError::NoPermits) => {
+                            debug!("Global connection limit reached, dropping connection from {}", peer_addr);
                             drop(stream);
                             continue;
                         }
-                        Err(_) => {
-                            // Timeout — global limit reached
-                            debug!("Global connection limit reached, dropping connection from {}", peer_addr);
+                        Err(tokio::sync::TryAcquireError::Closed) => {
+                            debug!("Connection semaphore closed, dropping connection from {}", peer_addr);
                             drop(stream);
                             continue;
                         }
@@ -1396,15 +1394,24 @@ impl TcpListenerManager {
     let last_activity = Arc::new(AtomicU64::new(0));
     let start = std::time::Instant::now();

+    // Per-connection cancellation token: the watchdog cancels this instead of
+    // aborting tasks, so the copy loops can shut down gracefully (TLS close_notify
+    // for terminate/reencrypt mode, TCP FIN for passthrough mode).
+    let conn_cancel = CancellationToken::new();
+
     let la1 = Arc::clone(&last_activity);
     let metrics_c2b = metrics.clone();
+    let cc1 = conn_cancel.clone();
     let c2b = tokio::spawn(async move {
         let mut buf = vec![0u8; 65536];
         let mut total = 0u64;
         loop {
-            let n = match client_read.read(&mut buf).await {
-                Ok(0) | Err(_) => break,
-                Ok(n) => n,
+            let n = tokio::select! {
+                result = client_read.read(&mut buf) => match result {
+                    Ok(0) | Err(_) => break,
+                    Ok(n) => n,
+                },
+                _ = cc1.cancelled() => break,
             };
             if backend_write.write_all(&buf[..n]).await.is_err() {
                 break;
@@ -1418,19 +1425,27 @@ impl TcpListenerManager {
                 ctx.collector.record_bytes(n as u64, 0, ctx.route_id.as_deref(), ctx.source_ip.as_deref());
             }
         }
-        let _ = backend_write.shutdown().await;
+        // Graceful shutdown with timeout (sends TLS close_notify / TCP FIN)
+        let _ = tokio::time::timeout(
+            std::time::Duration::from_secs(2),
+            backend_write.shutdown(),
+        ).await;
         total
     });

     let la2 = Arc::clone(&last_activity);
     let metrics_b2c = metrics;
+    let cc2 = conn_cancel.clone();
     let b2c = tokio::spawn(async move {
         let mut buf = vec![0u8; 65536];
         let mut total = 0u64;
         loop {
-            let n = match backend_read.read(&mut buf).await {
-                Ok(0) | Err(_) => break,
-                Ok(n) => n,
+            let n = tokio::select! {
+                result = backend_read.read(&mut buf) => match result {
+                    Ok(0) | Err(_) => break,
+                    Ok(n) => n,
+                },
+                _ = cc2.cancelled() => break,
             };
             if client_write.write_all(&buf[..n]).await.is_err() {
                 break;
@@ -1444,14 +1459,20 @@ impl TcpListenerManager {
                 ctx.collector.record_bytes(0, n as u64, ctx.route_id.as_deref(), ctx.source_ip.as_deref());
             }
         }
-        let _ = client_write.shutdown().await;
+        // Graceful shutdown with timeout (sends TLS close_notify / TCP FIN)
+        let _ = tokio::time::timeout(
+            std::time::Duration::from_secs(2),
+            client_write.shutdown(),
+        ).await;
         total
     });

-    // Watchdog task: check for inactivity, max lifetime, and cancellation
+    // Watchdog task: check for inactivity, max lifetime, and cancellation.
+    // First cancels the per-connection token for graceful shutdown (close_notify/FIN),
+    // then falls back to abort if the tasks are stuck (e.g. on a blocked write_all).
     let la_watch = Arc::clone(&last_activity);
-    let c2b_handle = c2b.abort_handle();
-    let b2c_handle = b2c.abort_handle();
+    let c2b_abort = c2b.abort_handle();
+    let b2c_abort = b2c.abort_handle();
     let watchdog = tokio::spawn(async move {
         let check_interval = std::time::Duration::from_secs(5);
         let mut last_seen = 0u64;
@@ -1459,16 +1480,12 @@ impl TcpListenerManager {
             tokio::select! {
                 _ = cancel.cancelled() => {
                     debug!("Split-stream connection cancelled by shutdown");
-                    c2b_handle.abort();
-                    b2c_handle.abort();
                     break;
                 }
                 _ = tokio::time::sleep(check_interval) => {
                     // Check max lifetime
                     if start.elapsed() >= max_lifetime {
                         debug!("Connection exceeded max lifetime, closing");
-                        c2b_handle.abort();
-                        b2c_handle.abort();
                         break;
                     }
@@ -1479,8 +1496,6 @@ impl TcpListenerManager {
                     let elapsed_since_activity = start.elapsed().as_millis() as u64 - current;
                     if elapsed_since_activity >= inactivity_timeout.as_millis() as u64 {
                         debug!("Connection inactive for {}ms, closing", elapsed_since_activity);
-                        c2b_handle.abort();
-                        b2c_handle.abort();
                         break;
                     }
                 }
@@ -1488,6 +1503,13 @@ impl TcpListenerManager {
                 }
             }
         }
+        // Phase 1: Signal copy loops to exit gracefully (allows close_notify/FIN)
+        conn_cancel.cancel();
+        // Phase 2: Wait for graceful shutdown (2s shutdown timeout + 2s margin)
+        tokio::time::sleep(std::time::Duration::from_secs(4)).await;
+        // Phase 3: Force-abort if still stuck (e.g. blocked on write_all)
+        c2b_abort.abort();
+        b2c_abort.abort();
     });

     let bytes_in = c2b.await.unwrap_or(0);
@@ -3,6 +3,6 @@
  */
 export const commitinfo = {
   name: '@push.rocks/smartproxy',
-  version: '25.10.2',
+  version: '25.11.6',
   description: 'A powerful proxy package with unified route-based configuration for high traffic management. Features include SSL/TLS support, flexible routing patterns, WebSocket handling, advanced security options, and automatic ACME certificate management.'
 }