Compare commits

..

138 Commits

Author SHA1 Message Date
9e1103e7a7 v25.15.0
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-19 18:55:31 +00:00
2b990527ac feat(readme): document UDP, QUIC, and HTTP/3 support in the README 2026-03-19 18:55:31 +00:00
9595f0a9fc v25.14.1
Some checks failed
Default (tags) / security (push) Failing after 0s
Default (tags) / test (push) Failing after 0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-19 16:21:37 +00:00
0fb3988123 fix(deps): update build and runtime dependencies and align route validation test expectations 2026-03-19 16:21:37 +00:00
53938df8db v25.14.0
Some checks failed
Default (tags) / security (push) Failing after 0s
Default (tags) / test (push) Failing after 2s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-19 16:09:51 +00:00
e890bda8fc feat(udp,http3): add UDP datagram handler relay support and stream HTTP/3 request bodies to backends 2026-03-19 16:09:51 +00:00
bbe8b729ea v25.13.0
Some checks failed
Default (tags) / security (push) Failing after 0s
Default (tags) / test (push) Failing after 0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-19 15:06:27 +00:00
4fb91cd868 feat(smart-proxy): add UDP transport support with QUIC/HTTP3 routing and datagram handler relay 2026-03-19 15:06:27 +00:00
cfa958cf3d v25.12.0
Some checks failed
Default (tags) / security (push) Failing after 0s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-19 12:41:26 +00:00
db2e586da2 feat(proxy-protocol): add PROXY protocol v2 support to the Rust passthrough listener and streamline TypeScript proxy protocol exports 2026-03-19 12:41:26 +00:00
91832c368d v25.11.24
Some checks failed
Default (tags) / security (push) Failing after 0s
Default (tags) / test (push) Failing after 0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-17 16:47:57 +00:00
c9d0fccb2d fix(rustproxy-http): improve async static file serving, websocket handshake buffering, and shared metric metadata handling 2026-03-17 16:47:57 +00:00
5dccbbc9d1 v25.11.23
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-17 12:22:51 +00:00
92d7113c6c fix(rustproxy-http,rustproxy-metrics): reduce per-frame metrics overhead by batching body byte accounting 2026-03-17 12:22:51 +00:00
8f6bb30367 v25.11.22
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-17 12:12:24 +00:00
ef9bac80ff fix(rustproxy-http): reuse healthy HTTP/2 upstream connections after requests with bodies 2026-03-17 12:12:24 +00:00
9c78701038 v25.11.21
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-17 11:33:34 +00:00
26fd9409a7 fix(rustproxy-http): reuse pooled HTTP/2 connections for requests with and without bodies 2026-03-17 11:33:34 +00:00
cfff128499 v25.11.20
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-17 01:32:35 +00:00
3baff354bd fix(rustproxy-http): avoid downgrading cached backend protocol on H2 stream errors 2026-03-17 01:32:35 +00:00
c2eacd1b30 v25.11.19
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 2s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-16 20:53:39 +00:00
1fdbfcf0aa fix(rustproxy-http): avoid reusing pooled HTTP/2 connections for requests with bodies to prevent upload flow-control stalls 2026-03-16 20:53:39 +00:00
9b184acc8c v25.11.18
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-16 17:42:14 +00:00
b475968f4e fix(repo): no changes to commit 2026-03-16 17:42:14 +00:00
878eab6e88 v25.11.17
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-16 14:30:43 +00:00
77abe0804d fix(rustproxy-http): prevent stale HTTP/2 connection drivers from evicting newer pooled connections 2026-03-16 14:30:43 +00:00
ae0342d018 v25.11.16
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-16 13:58:22 +00:00
365981d9cf fix(repo): no changes to commit 2026-03-16 13:58:22 +00:00
2cc0ff0030 v25.11.15
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-16 13:54:56 +00:00
72935e7ee0 fix(rustproxy-http): implement vectored write support for backend streams 2026-03-16 13:54:56 +00:00
61db285e04 v25.11.14
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-16 13:44:56 +00:00
d165829022 fix(rustproxy-http): forward vectored write support in ShutdownOnDrop AsyncWrite wrapper 2026-03-16 13:44:56 +00:00
5e6cf391ab v25.11.13
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-16 13:17:02 +00:00
2b1a21c599 fix(rustproxy-http): remove hot-path debug logging from HTTP/1 connection pool hits 2026-03-16 13:17:02 +00:00
b8e1c9f3cf v25.11.12
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-16 13:12:24 +00:00
c65369540c fix(rustproxy-http): remove connection pool hit logging and keep logging limited to actual failures 2026-03-16 13:12:24 +00:00
59e108edbd v25.11.11
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 2s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-16 13:01:32 +00:00
1e2ca68fc7 fix(rustproxy-http): improve HTTP/2 proxy error logging with warning-level connection failures and debug error details 2026-03-16 13:01:32 +00:00
4c76a9f9f3 v25.11.10
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-16 12:29:15 +00:00
8e76c42cea fix(rustproxy-http): validate pooled HTTP/2 connections asynchronously before reuse and evict stale senders 2026-03-16 12:29:15 +00:00
b1f4181139 v25.11.9
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-16 09:38:55 +00:00
a1b8d40011 fix(rustproxy-routing): reduce hot-path allocations in routing, metrics, and proxy protocol handling 2026-03-16 09:38:55 +00:00
246b44913e v25.11.8
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-16 08:58:11 +00:00
b3d4949225 fix(rustproxy-http): prevent premature idle timeouts during streamed HTTP responses and ensure TLS close_notify is sent on dropped connections 2026-03-16 08:58:11 +00:00
0475e6b442 v25.11.7
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-16 03:01:16 +00:00
8cdb95a853 fix(rustproxy): prevent TLS route reload certificate mismatches and tighten passthrough connection handling 2026-03-16 03:01:16 +00:00
8cefe9d66a v25.11.6
Some checks failed
Default (tags) / security (push) Failing after 0s
Default (tags) / test (push) Failing after 0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-16 02:05:47 +00:00
d5e08c83fc fix(rustproxy-http,rustproxy-passthrough): improve upstream connection cleanup and graceful tunnel shutdown 2026-03-16 02:05:47 +00:00
1247f48856 v25.11.5
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-16 00:03:10 +00:00
e3bae4c399 fix(repo): no changes to commit 2026-03-16 00:03:10 +00:00
0930f7e10c v25.11.4
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-15 21:44:32 +00:00
aa9e6dfd94 fix(rustproxy-http): report streamed HTTP and WebSocket bytes per chunk for real-time throughput metrics 2026-03-15 21:44:32 +00:00
211d5cf835 v25.11.3
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-15 17:00:33 +00:00
2ce1899337 fix(repo): no changes to commit 2026-03-15 17:00:33 +00:00
2e2ffc4485 v25.11.2
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-15 16:58:41 +00:00
da26816af5 fix(rustproxy-http): avoid reusing HTTP/1 senders during streaming responses and relax HTTP/2 keep-alive timeouts 2026-03-15 16:58:41 +00:00
d598bffec3 v25.11.1
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-15 16:24:41 +00:00
a9dbccfaff fix(rustproxy-http): keep connection idle tracking alive during streaming and tune HTTP/2 connection lifetimes 2026-03-15 16:24:41 +00:00
386859a2bd v25.11.0
Some checks failed
Default (tags) / security (push) Failing after 1s
Default (tags) / test (push) Failing after 1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-15 16:00:26 +00:00
2b58615d24 feat(rustproxy-http): add HTTP/2 Extended CONNECT WebSocket proxy support 2026-03-15 16:00:26 +00:00
95adf56e52 v25.10.7
Some checks failed
Default (tags) / security (push) Successful in 1m4s
Default (tags) / test (push) Failing after 4m5s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-12 22:41:20 +00:00
c96a493fb6 fix(rustproxy-http): remove Host header from HTTP/2 upstream requests while preserving it for HTTP/1 retries 2026-03-12 22:41:20 +00:00
b92587cc16 v25.10.6
Some checks failed
Default (tags) / security (push) Successful in 52s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-12 22:06:11 +00:00
b3dc0a6db2 fix(rustproxy-http): use the requested domain as HTTP/2 authority instead of the backend host and port 2026-03-12 22:06:11 +00:00
de3b8d3f58 v25.10.5
Some checks failed
Default (tags) / security (push) Successful in 1m12s
Default (tags) / test (push) Failing after 4m5s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-12 21:53:04 +00:00
75089ec975 fix(rustproxy-http): configure HTTP/2 client builders with a Tokio timer for keep-alive handling 2026-03-12 21:53:04 +00:00
b106db932b v25.10.4
Some checks failed
Default (tags) / security (push) Successful in 42s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-12 21:41:54 +00:00
fb0c0dcc31 fix(rustproxy-http): stabilize upstream HTTP/2 forwarding and fallback behavior 2026-03-12 21:41:54 +00:00
61b67b91a0 v25.10.3
Some checks failed
Default (tags) / security (push) Successful in 42s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-12 21:04:58 +00:00
fc64f5a95e fix(rustproxy-http): include request domain in backend proxy error and protocol detection logs 2026-03-12 21:04:58 +00:00
90b83a9dbe v25.10.2
Some checks failed
Default (tags) / security (push) Successful in 41s
Default (tags) / test (push) Failing after 4m1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-12 20:11:55 +00:00
508621e231 fix(repo): no code changes to release 2026-03-12 20:11:55 +00:00
9ef21dcb41 v25.10.1
Some checks failed
Default (tags) / security (push) Successful in 44s
Default (tags) / test (push) Failing after 4m2s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-12 20:00:56 +00:00
0acd907431 fix(repo): no changes to commit 2026-03-12 20:00:56 +00:00
80276a70e8 v25.10.0
Some checks failed
Default (tags) / security (push) Successful in 48s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-12 15:16:11 +00:00
0d4399d7f1 feat(metrics): add per-backend connection, error, protocol, and pool metrics with stale backend pruning 2026-03-12 15:16:11 +00:00
0380a957d0 v25.9.3
Some checks failed
Default (tags) / security (push) Successful in 41s
Default (tags) / test (push) Failing after 4m1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-11 11:28:57 +00:00
5271447264 fix(rustproxy-http): Evict stale HTTP/2 pooled senders and retry bodyless requests with fresh backend connections to avoid 502s 2026-03-11 11:28:57 +00:00
be9898805f v25.9.2
Some checks failed
Default (tags) / security (push) Successful in 41s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-08 15:24:18 +00:00
d4aa46aed7 fix(protocol-cache): Include requested_host in protocol detection cache key to avoid cache oscillation when multiple frontend domains share the same backend 2026-03-08 15:24:18 +00:00
4f1c5c919f v25.9.1
Some checks failed
Default (tags) / security (push) Successful in 48s
Default (tags) / test (push) Failing after 4m3s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-03 16:14:16 +00:00
d51b2c5890 fix(rustproxy): Cancel connections for routes removed/disabled by adding per-route cancellation tokens and make RouteManager swappable (ArcSwap) for runtime updates 2026-03-03 16:14:16 +00:00
bb471a8cc9 v25.9.0
Some checks failed
Default (tags) / security (push) Successful in 41s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-03-03 11:04:01 +00:00
c52128f12d feat(rustproxy-http): add HTTP/2 auto-detection via ALPN with TTL-backed protocol cache and h1-only/h2 ALPN client configs 2026-03-03 11:04:01 +00:00
e69de246e9 v25.8.5
Some checks failed
Default (tags) / security (push) Successful in 43s
Default (tags) / test (push) Failing after 4m1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-26 21:31:38 +00:00
5126049ae6 fix(release): bump patch version (no source changes) 2026-02-26 21:31:38 +00:00
8db621657f fix(proxy): close connection buildup vectors in HTTP idle, WebSocket, socket relay, and TLS forwarding paths
- Add HTTP keep-alive idle timeout (60s default) with periodic watchdog that
  skips active requests (panic-safe via RAII ActiveRequestGuard)
- Make WebSocket inactivity/max-lifetime timeouts configurable from ConnectionConfig
  instead of hardcoded 1h/24h
- Replace bare copy_bidirectional in socket handler relay with timeout+cancel-aware
  split forwarding (inactivity, max lifetime, graceful shutdown)
- Add CancellationToken to forward_bidirectional_split_with_timeouts so TLS-terminated
  TCP connections respond to graceful shutdown
- Fix graceful_stop to actually abort listener tasks that exceed the shutdown deadline
  (previously they detached and ran forever)
- Add 10s metadata parsing timeout on TS socket-handler-server to prevent stuck sockets
2026-02-26 21:29:19 +00:00
ef060d5e79 v25.8.4
Some checks failed
Default (tags) / security (push) Successful in 40s
Default (tags) / test (push) Failing after 4m1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-26 17:32:35 +00:00
cd7f3f7f75 fix(proxy): adjust default proxy timeouts and keep-alive behavior to shorter, more consistent values 2026-02-26 17:32:35 +00:00
8df18728d4 v25.8.3
Some checks failed
Default (tags) / security (push) Successful in 29s
Default (tags) / test (push) Failing after 4m2s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-26 17:01:57 +00:00
bedecc6b6b fix(smartproxy): no code or dependency changes detected; no version bump required 2026-02-26 17:01:57 +00:00
b5f166bc92 v25.8.2
Some checks failed
Default (tags) / security (push) Successful in 31s
Default (tags) / test (push) Failing after 4m1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-26 16:58:30 +00:00
94266222fe fix(connection): improve connection handling and timeouts 2026-02-26 16:58:30 +00:00
697d51a9d4 v25.8.1
Some checks failed
Default (tags) / security (push) Successful in 42s
Default (tags) / test (push) Failing after 4m4s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-25 00:12:41 +00:00
7e5fe2bec3 fix(allocator): switch global allocator from tikv-jemallocator to mimalloc 2026-02-25 00:12:41 +00:00
f592bf627f v25.8.0
Some checks failed
Default (tags) / security (push) Successful in 42s
Default (tags) / test (push) Failing after 4m1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-24 23:22:49 +00:00
6114a00fb8 feat(rustproxy): use tikv-jemallocator as the global allocator to reduce glibc fragmentation and slow RSS growth; add allocator dependency and enable it in rustproxy, update lockfile, and run tsrust before tests 2026-02-24 23:22:49 +00:00
98089b0351 v25.7.10
Some checks failed
Default (tags) / security (push) Successful in 41s
Default (tags) / test (push) Failing after 4m2s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-24 20:56:37 +00:00
33cd5330c4 fix(rustproxy): Use cooperative cancellation for background tasks, prune stale caches and metric entries, and switch tests to dynamic port allocation to avoid port conflicts 2026-02-24 20:56:37 +00:00
755c81c042 v25.7.9
Some checks failed
Default (tags) / security (push) Successful in 44s
Default (tags) / test (push) Failing after 4m0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-21 13:27:55 +00:00
9368226ce0 fix(tests): use high non-privileged ports in tests to avoid conflicts and CI failures 2026-02-21 13:27:55 +00:00
d4739045cd feat: enhance HTTP/2 support by ensuring Host header is set and adding multiplexed request tests 2026-02-20 18:30:57 +00:00
9521f2e044 feat: add TCP keepalive options and connection pooling for improved performance
- Added `socket2` dependency for socket options.
- Introduced `keep_alive`, `keep_alive_initial_delay_ms`, and `max_connections` fields in `ConnectionConfig`.
- Implemented TCP keepalive settings in `TcpListenerManager` for both client and backend connections.
- Created a new `ConnectionPool` for managing idle HTTP/1.1 and HTTP/2 connections to reduce overhead.
- Enhanced TLS configuration to support ALPN for HTTP/2.
- Added performance tests for connection pooling, stability, and concurrent connections.
2026-02-20 18:16:09 +00:00
0f6752b9a7 v25.7.8
Some checks failed
Default (tags) / security (push) Successful in 12m17s
Default (tags) / test (push) Failing after 4m14s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-19 14:21:05 +00:00
b8b7490d44 fix(no-changes): no changes detected; nothing to release 2026-02-19 14:21:05 +00:00
8c2042a2f5 v25.7.7
Some checks failed
Default (tags) / security (push) Successful in 12m19s
Default (tags) / test (push) Failing after 4m16s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-19 14:03:31 +00:00
3514260316 fix(proxy): restrict PROXY protocol parsing to configured trusted proxy IPs and parse PROXY headers before metrics/fast-path so client IPs reflect the real source 2026-02-19 14:03:31 +00:00
f171cc8c5d v25.7.6
Some checks failed
Default (tags) / security (push) Successful in 12m20s
Default (tags) / test (push) Failing after 4m18s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-19 09:12:50 +00:00
c7722c30f3 fix(throughput): add tests for per-IP connection tracking and throughput history; assert per-IP eviction after connection close to prevent memory leak 2026-02-19 09:12:50 +00:00
0ae882731a v25.7.5
Some checks failed
Default (tags) / security (push) Successful in 12m22s
Default (tags) / test (push) Failing after 4m16s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-19 08:48:46 +00:00
53d73c7dc6 fix(rustproxy): prune stale per-route metrics, add per-route rate limiter caching and regex cache, and improve connection tracking cleanup to prevent memory growth 2026-02-19 08:48:46 +00:00
b4b8bd925d v25.7.4
Some checks failed
Default (tags) / security (push) Successful in 12m5s
Default (tags) / test (push) Failing after 4m5s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-19 08:07:34 +00:00
5ac44b898b fix(smart-proxy): include proxy IPs in smart proxy configuration 2026-02-19 08:07:34 +00:00
9b4393b5ac v25.7.3
Some checks failed
Default (tags) / security (push) Successful in 33s
Default (tags) / test (push) Failing after 4m1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-16 14:35:26 +00:00
02b4ed8018 fix(metrics): centralize connection-closed reporting via ConnectionGuard and remove duplicate explicit metrics.connection_closed calls 2026-02-16 14:35:26 +00:00
e4e4b4f1ec v25.7.2
Some checks failed
Default (tags) / security (push) Successful in 33s
Default (tags) / test (push) Failing after 4m4s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-16 13:43:22 +00:00
d361a21543 fix(rustproxy-http): preserve original Host header when proxying and add X-Forwarded-* headers; add TLS WebSocket echo backend helper and integration test for terminate-and-reencrypt websocket 2026-02-16 13:43:22 +00:00
106713a546 v25.7.1
Some checks failed
Default (tags) / security (push) Successful in 34s
Default (tags) / test (push) Failing after 4m6s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-16 13:29:45 +00:00
101675b5f8 fix(proxy): use TLS to backends for terminate-and-reencrypt routes 2026-02-16 13:29:45 +00:00
9fac17bc39 v25.7.0
Some checks failed
Default (tags) / security (push) Successful in 30s
Default (tags) / test (push) Failing after 4m1s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-16 12:11:49 +00:00
2e3cf515a4 feat(routes): add protocol-based route matching and ensure terminate-and-reencrypt routes pass HTTP through the full HTTP proxy; update docs and tests 2026-02-16 12:11:49 +00:00
754d32fd34 v25.6.0
Some checks failed
Default (tags) / security (push) Successful in 1m39s
Default (tags) / test (push) Failing after 5m7s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-16 12:02:36 +00:00
f0b7c27996 feat(rustproxy): add protocol-based routing and backend TLS re-encryption support 2026-02-16 12:02:36 +00:00
db932e8acc v25.5.0
Some checks failed
Default (tags) / security (push) Successful in 1m1s
Default (tags) / test (push) Failing after 5m5s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-16 03:00:39 +00:00
455d5bb757 feat(tls): add shared TLS acceptor with SNI resolver and session resumption; prefer shared acceptor and fall back to per-connection when routes specify custom TLS versions 2026-02-16 03:00:39 +00:00
fa2a27df6d v25.4.0
Some checks failed
Default (tags) / security (push) Successful in 30s
Default (tags) / test (push) Failing after 5m5s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-16 01:37:43 +00:00
7b2ccbdd11 feat(rustproxy): support dynamically loaded TLS certificates via loadCertificate IPC and include them in listener TLS configs for rebuilds and hot-swap 2026-02-16 01:37:43 +00:00
8409984fcc v25.3.1
Some checks failed
Default (tags) / security (push) Successful in 1m44s
Default (tags) / test (push) Failing after 5m8s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2026-02-15 15:05:03 +00:00
af10d189a3 fix(plugins): remove unused dependencies and simplify plugin exports 2026-02-15 15:05:03 +00:00
0b4d180cdf v25.3.0
Some checks failed
Default (tags) / security (push) Has been cancelled
Default (tags) / test (push) Has been cancelled
Default (tags) / release (push) Has been cancelled
Default (tags) / metadata (push) Has been cancelled
2026-02-14 14:02:25 +00:00
7b3545d1b5 feat(smart-proxy): add background concurrent certificate provisioning with per-domain timeouts and concurrency control 2026-02-14 14:02:25 +00:00
e837419d5d v25.2.2
Some checks failed
Default (tags) / security (push) Has been cancelled
Default (tags) / test (push) Has been cancelled
Default (tags) / release (push) Has been cancelled
Default (tags) / metadata (push) Has been cancelled
2026-02-14 12:42:20 +00:00
487a603fa3 fix(smart-proxy): start metrics polling before certificate provisioning to avoid blocking metrics collection 2026-02-14 12:42:20 +00:00
d6fdd3fc86 v25.2.1
Some checks failed
Default (tags) / security (push) Has been cancelled
Default (tags) / test (push) Has been cancelled
Default (tags) / release (push) Has been cancelled
Default (tags) / metadata (push) Has been cancelled
2026-02-14 12:28:42 +00:00
344f224c89 fix(smartproxy): no changes detected in git diff 2026-02-14 12:28:42 +00:00
6bbd2b3ee1 test(metrics): add v25.2.0 end-to-end assertions for per-IP, history, and HTTP request metrics 2026-02-14 12:24:48 +00:00
c44216df28 v25.2.0
Some checks failed
Default (tags) / security (push) Has been cancelled
Default (tags) / test (push) Has been cancelled
Default (tags) / release (push) Has been cancelled
Default (tags) / metadata (push) Has been cancelled
2026-02-14 11:15:17 +00:00
f80cdcf41c feat(metrics): add per-IP and HTTP-request metrics, propagate source IP through proxy paths, and expose new metrics to the TS adapter 2026-02-14 11:15:17 +00:00
86 changed files with 13093 additions and 5042 deletions

View File

@@ -1,5 +1,495 @@
# Changelog
## 2026-03-19 - 25.15.0 - feat(readme)
document UDP, QUIC, and HTTP/3 support in the README
- Adds README examples for UDP datagram handlers, QUIC/HTTP3 forwarding, and dual-stack TCP/UDP routes
- Expands configuration and API reference sections to cover transport matching, UDP/QUIC options, backend transport selection, and UDP metrics
- Updates architecture and feature descriptions to reflect UDP, QUIC, HTTP/3, and datagram handler capabilities
## 2026-03-19 - 25.14.1 - fix(deps)
update build and runtime dependencies and align route validation test expectations
- split the test preparation step into a dedicated test:before script while keeping test execution separate
- bump development tooling and runtime package versions in package.json
- adjust the route validation test to match the current generic handler error message
## 2026-03-19 - 25.14.0 - feat(udp,http3)
add UDP datagram handler relay support and stream HTTP/3 request bodies to backends
- establish a persistent Unix socket relay for UDP datagram handlers and process handler replies back to clients
- update route validation and smart proxy route reload logic to support datagramHandler routes
- record UDP, QUIC, and HTTP/3 byte metrics more accurately, including request bytes in and UDP session cleanup connection tracking
- add integration tests for UDP forwarding, datagram handlers, and UDP metrics
## 2026-03-19 - 25.13.0 - feat(smart-proxy)
add UDP transport support with QUIC/HTTP3 routing and datagram handler relay
- adds UDP listener and session tracking infrastructure in the Rust proxy, including UDP metrics and hot-reload support for transport-specific ports
- introduces QUIC and HTTP/3 support in routing and HTTP handling, including Alt-Svc advertisement and QUIC TLS configuration
- extends route configuration types in Rust and TypeScript with transport, UDP, QUIC, backend transport, and mixed port range support
- adds a TypeScript datagram handler relay server and bridge command so UDP socket-handler routes can dispatch datagrams to application callbacks
- updates nftables rule generation so protocol=all creates both TCP and UDP rules
## 2026-03-19 - 25.12.0 - feat(proxy-protocol)
add PROXY protocol v2 support to the Rust passthrough listener and streamline TypeScript proxy protocol exports
- detect and parse PROXY protocol v2 headers in the Rust TCP listener, including TCP and UDP address families
- add Rust v2 header generation, incomplete-header handling, and broader parser test coverage
- remove deprecated TypeScript proxy protocol parser exports and tests, leaving shared type definitions only
## 2026-03-17 - 25.11.24 - fix(rustproxy-http)
improve async static file serving, websocket handshake buffering, and shared metric metadata handling
- convert static file serving to async filesystem operations and await directory/file checks
- preserve and forward bytes read past the WebSocket handshake header terminator to avoid dropping buffered upstream data
- reuse Arc<str> values for route and source identifiers across counting bodies and metric reporting
- standardize backend key propagation across H1/H2 forwarding, retry, and fallback paths for consistent logging and metrics
## 2026-03-17 - 25.11.23 - fix(rustproxy-http,rustproxy-metrics)
reduce per-frame metrics overhead by batching body byte accounting
- Buffer HTTP body byte counts and flush them every 64 KB, at end of stream, and on drop to keep totals accurate while preserving throughput sampling.
- Skip zero-value counter updates in metrics collection to avoid unnecessary atomic and DashMap operations for the unused direction.
## 2026-03-17 - 25.11.22 - fix(rustproxy-http)
reuse healthy HTTP/2 upstream connections after requests with bodies
- Registers successful HTTP/2 connections in the pool regardless of whether the proxied request included a body
- Continues to avoid pooling upstream connections that returned 502 Bad Gateway responses
## 2026-03-17 - 25.11.21 - fix(rustproxy-http)
reuse pooled HTTP/2 connections for requests with and without bodies
- remove the bodyless-request restriction from HTTP/2 pool checkout
- always return successful HTTP/2 senders to the connection pool after requests
## 2026-03-17 - 25.11.20 - fix(rustproxy-http)
avoid downgrading cached backend protocol on H2 stream errors
- Treat HTTP/2 stream-level failures as retryable request errors instead of evidence that the backend only supports HTTP/1.1
- Keep protocol cache entries unchanged after successful H2 handshakes so future requests continue using HTTP/2
- Lower log severity for this fallback path from warning to debug while still recording backend H2 failure metrics
## 2026-03-16 - 25.11.19 - fix(rustproxy-http)
avoid reusing pooled HTTP/2 connections for requests with bodies to prevent upload flow-control stalls
- Limit HTTP/2 pool checkout to bodyless requests such as GET, HEAD, and DELETE
- Skip re-registering HTTP/2 connections in the pool after requests that send a body
- Prevent stalled uploads caused by depleted connection-level flow control windows on reused HTTP/2 connections
## 2026-03-16 - 25.11.18 - fix(repo)
no changes to commit
## 2026-03-16 - 25.11.17 - fix(rustproxy-http)
prevent stale HTTP/2 connection drivers from evicting newer pooled connections
- add generation IDs to pooled HTTP/2 senders so pool removal only affects the matching connection
- update HTTP/2 proxy and retry paths to register generation-tagged connections and skip eviction before registration completes
## 2026-03-16 - 25.11.16 - fix(repo)
no changes to commit
## 2026-03-16 - 25.11.15 - fix(rustproxy-http)
implement vectored write support for backend streams
- Add poll_write_vectored forwarding for both plain and TLS backend stream variants
- Expose is_write_vectored so the proxy can correctly report vectored write capability
## 2026-03-16 - 25.11.14 - fix(rustproxy-http)
forward vectored write support in ShutdownOnDrop AsyncWrite wrapper
- Implements poll_write_vectored by delegating to the wrapped writer
- Exposes is_write_vectored so the wrapper preserves underlying AsyncWrite capabilities
## 2026-03-16 - 25.11.13 - fix(rustproxy-http)
remove hot-path debug logging from HTTP/1 connection pool hits
- Stops emitting debug logs when reusing HTTP/1 idle connections in the connection pool.
- Keeps pool hit behavior unchanged while reducing overhead on a frequently executed path.
## 2026-03-16 - 25.11.12 - fix(rustproxy-http)
remove connection pool hit logging and keep logging limited to actual failures
- Removes debug and warning logs for HTTP/2 connection pool hits and age checks.
- Keeps pool behavior unchanged while reducing noisy per-request logging in the Rust HTTP proxy layer.
## 2026-03-16 - 25.11.11 - fix(rustproxy-http)
improve HTTP/2 proxy error logging with warning-level connection failures and debug error details
- Adds debug-formatted error fields to HTTP/2 handshake, retry, fallback, and request failure logs
- Promotes upstream HTTP/2 connection error logs from debug to warn to improve operational visibility
## 2026-03-16 - 25.11.10 - fix(rustproxy-http)
validate pooled HTTP/2 connections asynchronously before reuse and evict stale senders
- Add an async ready() check with a 500ms timeout before reusing pooled HTTP/2 senders to catch GOAWAY/RST states before forwarding requests
- Return connection age from the HTTP/2 pool checkout path and log warnings for older pooled connections
- Evict pooled HTTP/2 senders when they are closed, exceed max age, fail readiness validation, or time out during readiness checks
## 2026-03-16 - 25.11.9 - fix(rustproxy-routing)
reduce hot-path allocations in routing, metrics, and proxy protocol handling
- skip HTTP header map construction unless a route on the current port uses header matching
- reuse computed client IP strings during HTTP route matching to avoid redundant allocations
- optimize per-route and per-IP metric updates with get-first lookups to avoid unnecessary String creation on existing entries
- replace heap-allocated PROXY protocol peek and discard buffers with stack-allocated buffers in the TCP listener
- improve domain matcher case-insensitive wildcard checks while preserving glob fallback behavior
## 2026-03-16 - 25.11.8 - fix(rustproxy-http)
prevent premature idle timeouts during streamed HTTP responses and ensure TLS close_notify is sent on dropped connections
- track active streaming response bodies so the HTTP idle watchdog does not close connections mid-transfer
- add a ShutdownOnDrop wrapper for TLS-terminated HTTP connections to send shutdown on drop and avoid improperly terminated TLS sessions
- apply the shutdown wrapper in passthrough TLS terminate and terminate+reencrypt HTTP handling
## 2026-03-16 - 25.11.7 - fix(rustproxy)
prevent TLS route reload certificate mismatches and tighten passthrough connection handling
- Load updated TLS configs before swapping the route manager so newly visible routes always have their certificates available.
- Add timeouts when peeking initial decrypted data after TLS handshake to avoid leaked idle connections.
- Raise dropped, blocked, unmatched, and errored passthrough connection events from debug to warn for better operational visibility.
## 2026-03-16 - 25.11.6 - fix(rustproxy-http,rustproxy-passthrough)
improve upstream connection cleanup and graceful tunnel shutdown
- Evict pooled HTTP/2 connections when their driver exits and shorten the maximum pooled H2 age to reduce reuse of stale upstream connections.
- Strip hop-by-hop headers from backend responses before forwarding to HTTP/2 clients to avoid invalid H2 response handling.
- Replace immediate task aborts in WebSocket and TCP tunnel watchdogs with cancellation-driven graceful shutdown plus timed fallback aborts.
- Use non-blocking semaphore acquisition in the TCP listener so connection limits do not stall the accept loop for the entire port.
## 2026-03-16 - 25.11.5 - fix(repo)
no changes to commit
## 2026-03-15 - 25.11.4 - fix(rustproxy-http)
report streamed HTTP and WebSocket bytes per chunk for real-time throughput metrics
- Update CountingBody to record bytes immediately on each data frame instead of aggregating until completion or drop
- Record WebSocket tunnel traffic inside both copy loops and remove the final aggregate byte report to keep throughput metrics current
## 2026-03-15 - 25.11.3 - fix(repo)
no changes to commit
## 2026-03-15 - 25.11.2 - fix(rustproxy-http)
avoid reusing HTTP/1 senders during streaming responses and relax HTTP/2 keep-alive timeouts
- Stop returning HTTP/1 senders to the connection pool before upstream response bodies finish streaming to prevent unsafe reuse on active connections.
- Increase HTTP/2 keep-alive timeout from 5 seconds to 30 seconds in proxy connection builders to better support longer-lived backend streams.
- Improves reliability for large streaming payloads and backend fallback request handling.
## 2026-03-15 - 25.11.1 - fix(rustproxy-http)
keep connection idle tracking alive during streaming and tune HTTP/2 connection lifetimes
- Propagate connection activity tracking through HTTP/1, HTTP/2, and WebSocket forwarding so active request and response body streams do not trigger the idle watchdog.
- Update CountingBody to refresh connection activity timestamps while data frames are polled during uploads and downloads.
- Increase pooled HTTP/2 max age and set explicit HTTP/2 connection window sizes to improve long-lived streaming behavior.
## 2026-03-15 - 25.11.0 - feat(rustproxy-http)
add HTTP/2 Extended CONNECT WebSocket proxy support
- Enable HTTP/2 CONNECT protocol support on the Hyper auto connection builder
- Detect WebSocket requests for both HTTP/1 Upgrade and HTTP/2 Extended CONNECT flows
- Translate HTTP/2 WebSocket requests to an HTTP/1.1 backend handshake and return RFC-compliant client responses
## 2026-03-12 - 25.10.7 - fix(rustproxy-http)
remove Host header from HTTP/2 upstream requests while preserving it for HTTP/1 retries
- strips the Host header before sending HTTP/2 upstream requests so :authority from the URI is used instead
- avoids 400 responses from nginx caused by sending both Host and :authority headers
- keeps a cloned header set for bodyless request retries so HTTP/1 fallback still retains the Host header
## 2026-03-12 - 25.10.6 - fix(rustproxy-http)
use the requested domain as HTTP/2 authority instead of the backend host and port
- build HTTP/2 absolute URIs from the client-facing domain so the :authority pseudo-header matches the Host header
- remove backend port from generated HTTP/2 request URIs and fall back to the upstream host only when no domain is available
- apply the authority handling consistently across pooled, inline, and generic upstream request paths
## 2026-03-12 - 25.10.5 - fix(rustproxy-http)
configure HTTP/2 client builders with a Tokio timer for keep-alive handling
- Adds TokioTimer to all HTTP/2 client builder instances in proxy_service.
- Ensures configured HTTP/2 keep-alive interval and timeout settings have the required timer runtime support.
## 2026-03-12 - 25.10.4 - fix(rustproxy-http)
stabilize upstream HTTP/2 forwarding and fallback behavior
- Remove hop-by-hop headers before forwarding requests to HTTP/2 backends to comply with RFC 9113.
- Use ALPN-enabled TLS configuration whenever HTTP/2 is possible, including explicit H2 connections and retries.
- Add HTTP/2 handshake timeouts, tuned connection settings, and fallback to HTTP/1 when H2 negotiation times out or fails.
- Register pooled HTTP/2 senders only after a successful first request to avoid reusing broken connections.
- Build absolute URIs for HTTP/2 upstream requests so pseudo-headers such as scheme and authority are derived correctly.
## 2026-03-12 - 25.10.3 - fix(rustproxy-http)
include request domain in backend proxy error and protocol detection logs
- Adds domain context to backend TCP/TLS connect, handshake, request failure, retry, and fallback log entries in the Rust HTTP proxy service.
- Propagates the resolved host/domain through H1, H2, pooled, and fallback forwarding paths so backend-level diagnostics can be correlated with the original request domain.
## 2026-03-12 - 25.10.2 - fix(repo)
no code changes to release
## 2026-03-12 - 25.10.1 - fix(repo)
no changes to commit
## 2026-03-12 - 25.10.0 - feat(metrics)
add per-backend connection, error, protocol, and pool metrics with stale backend pruning
- tracks backend connection lifecycle, connect timing, protocol detection, pool hit/miss rates, handshake/request errors, and h2 fallback failures in Rust metrics
- exposes backend metrics through the TypeScript metrics adapter with backend listings, protocol lookup, and top error summaries
- prunes backend metrics for backends no longer referenced by active routes, including preserved-port targets expanded across listening ports
## 2026-03-11 - 25.9.3 - fix(rustproxy-http)
Evict stale HTTP/2 pooled senders and retry bodyless requests with fresh backend connections to avoid 502s
- Introduce MAX_H2_AGE (120s) and evict HTTP/2 senders older than this or closed
- Check MAX_H2_AGE on checkout and during background eviction to prevent reuse of stale h2 connections
- Add connection_pool.remove_h2() to explicitly remove dead H2 senders from the pool
- When a pooled H2 request returns a 502 and the original request had an empty body, retry using a fresh H2 connection (retry_h2_with_fresh_connection)
- On H2 auto-detect failures, retry as HTTP/1.1 for bodyless requests via forward_h1_empty_body; return 502 for requests with bodies
- Evict dead H2 senders on backend request failures in reconnect_backend so subsequent attempts create fresh connections
## 2026-03-08 - 25.9.2 - fix(protocol-cache)
Include requested_host in protocol detection cache key to avoid cache oscillation when multiple frontend domains share the same backend
- Add ProtocolCacheKey.requested_host: Option<String> to distinguish cache entries by incoming request Host/:authority
- Update protocol cache lookups/inserts in proxy_service to populate requested_host
- Enhance debug logging to show requested_host on cache hits
- Fixes repeated ALPN probing / cache oscillation when different frontend domains share a backend with differing HTTP/2 support
## 2026-03-03 - 25.9.1 - fix(rustproxy)
Cancel connections for routes removed/disabled by adding per-route cancellation tokens and make RouteManager swappable (ArcSwap) for runtime updates
- Add per-route CancellationToken map (DashMap) to TcpListenerManager and call token.cancel() when routes are removed (invalidate_removed_routes)
- Propagate Arc<ArcSwap<RouteManager>> into HttpProxyService and passthrough listener so the route manager can be hot-swapped without restarting listeners
- Use per-route child cancellation tokens in accept/connection handling and forwarders to terminate existing connections when a route is removed
- Prune HTTP proxy caches and retain/cleanup per-route tokens when routes are active/removed
- Update test.test.sni-requirement.node.ts to allocate unique free ports via findFreePorts to avoid port conflicts during tests
## 2026-03-03 - 25.9.0 - feat(rustproxy-http)
add HTTP/2 auto-detection via ALPN with TTL-backed protocol cache and h1-only/h2 ALPN client configs
- Add protocol_cache module: bounded, TTL-based cache (5min TTL), max entries (4096), background cleanup task and clear() to discard stale detections.
- Introduce BackendProtocol::Auto and expose 'auto' in TypeScript route types to allow ALPN-based protocol auto-detection.
- Add build_tls_acceptor_h1_only() to create a TLS acceptor that advertises only http/1.1 (used for backends/tests that speak plain HTTP/1.1).
- Add shared_backend_tls_config_alpn() and default_backend_tls_config_with_alpn() to provide client TLS configs advertising h2+http/1.1 for auto-detection.
- Wire backend_tls_config_alpn and protocol_cache into proxy_service, tcp_listener and passthrough paths; add set_backend_tls_config_alpn() and prune protocol_cache on route updates.
- Update passthrough tests to use h1-only acceptor to avoid false HTTP/2 detection when backends speak plain HTTP/1.1.
- Include reconnection/fallback handling and ensure ALPN-enabled client config is used for auto-detection mode.
## 2026-02-26 - 25.8.5 - fix(release)
bump patch version (no source changes)
- No changes detected in git diff
- Current version: 25.8.4
- Recommend patch bump to 25.8.5 to record release without code changes
## 2026-02-26 - 25.8.4 - fix(proxy)
adjust default proxy timeouts and keep-alive behavior to more consistent values (most shortened, connection timeout lengthened)
- Increase connection timeout default from 30,000ms to 60,000ms (30s -> 60s).
- Reduce socket timeout default from 3,600,000ms to 60,000ms (1h -> 60s).
- Reduce max connection lifetime default from 86,400,000ms to 3,600,000ms (24h -> 1h).
- Change inactivity timeout default from 14,400,000ms to 75,000ms (4h -> 75s).
- Update keep-alive defaults: keepAliveTreatment 'extended' -> 'standard', keepAliveInactivityMultiplier 6 -> 4, extendedKeepAliveLifetime 604800000 -> 3,600,000ms (7d -> 1h).
- Apply these consistent default values across Rust crates (rustproxy-config, rustproxy-passthrough) and the TypeScript smart-proxy implementation.
- Update unit test expectations to match the new defaults.
## 2026-02-26 - 25.8.3 - fix(smartproxy)
no code or dependency changes detected; no version bump required
- No files changed in the provided diff (No changes).
- package.json version remains 25.8.2.
- No dependency or source updates detected; skip release.
## 2026-02-26 - 25.8.2 - fix(connection)
improve connection handling and timeouts
- Flush logs on process beforeExit and avoid calling process.exit in SIGINT/SIGTERM handlers to preserve host graceful shutdown
- Store protocol entries with a createdAt timestamp in ProtocolDetector and remove stale entries older than 30s to prevent leaked state from abandoned handshakes or port scanners
- Add backend connect timeout (30s) and idle timeouts (5 minutes) for dynamic forwards; destroy sockets on timeout and emit logs for timeout events
## 2026-02-25 - 25.8.1 - fix(allocator)
switch global allocator from tikv-jemallocator to mimalloc
- Replaced tikv-jemallocator with mimalloc in rust/Cargo.toml workspace dependencies.
- Updated rust/crates/rustproxy/Cargo.toml to use mimalloc as a workspace dependency.
- Updated rust/Cargo.lock: added mimalloc and libmimalloc-sys entries and removed tikv-jemallocator and tikv-jemalloc-sys entries.
- Changed the global allocator in crates/rustproxy/src/main.rs from tikv_jemallocator::Jemalloc to mimalloc::MiMalloc.
- Impact: runtime memory allocator is changed which may affect memory usage and performance; no public API changes but recommend testing memory/performance in deployments.
## 2026-02-24 - 25.8.0 - feat(rustproxy)
use tikv-jemallocator as the global allocator to reduce glibc fragmentation and slow RSS growth; add allocator dependency and enable it in rustproxy, update lockfile, and run tsrust before tests
- Added tikv-jemallocator dependency to rust/Cargo.toml and rust/crates/rustproxy/Cargo.toml
- Enabled tikv_jemallocator as the global allocator in rust/crates/rustproxy/src/main.rs
- Updated rust/Cargo.lock with tikv-jemallocator and tikv-jemalloc-sys entries
- Modified package.json test script to run tsrust before tstest
## 2026-02-24 - 25.7.10 - fix(rustproxy)
Use cooperative cancellation for background tasks, prune stale caches and metric entries, and switch tests to dynamic port allocation to avoid port conflicts
- Introduce tokio_util::sync::CancellationToken to coordinate graceful shutdown of sampling and renewal tasks; await handles on stop and reset the token so the proxy can be restarted.
- Add safety Drop impls (RustProxy, TcpListenerManager) as a last-resort abort path when stop() is not called.
- MetricsCollector: avoid creating per-IP metric entries when the IP has no active connections; prune orphaned per-IP metric maps during sampling; add tests covering late record_bytes races and pruning behavior.
- Passthrough/ConnectionTracker: remove per-connection record/zombie-scanner complexity, add cleanup_stale_timestamps to prune rate-limit timestamp entries, and add an RAII ConnectionTrackerGuard to guarantee connection_closed is invoked.
- HTTP proxy improvements: add prune_stale_routes and reset_round_robin to clear caches (rate limiters, regex cache, round-robin counters) on route updates.
- Tests: add test/helpers/port-allocator.ts and update many tests to use findFreePorts/assertPortsFree (dynamic ports + post-test port assertions) to avoid flakiness and port collisions in CI.
## 2026-02-21 - 25.7.9 - fix(tests)
use high non-privileged ports in tests to avoid conflicts and CI failures
- Updated multiple test files to use high-range, non-privileged ports instead of well-known or conflicting ports.
- Files changed: test/test.acme-http01-challenge.ts, test/test.connection-forwarding.ts, test/test.forwarding-regression.ts, test/test.http-port8080-forwarding.ts, test/test.port-mapping.ts, test/test.smartproxy.ts, test/test.socket-handler.ts.
- Notable port remappings: 8080/8081 -> 47730/47731 (and other proxy ports like 47710), 8443 -> 47711, 7001/7002 -> 47712/47713, 9090 -> 47721, 8181/8182 -> 47732/47733, 9999 -> 47780, TEST_PORT_START/PROXY_PORT_START -> 47750/48750, and TEST_SERVER_PORT/PROXY_PORT -> 47770/47771.
## 2026-02-19 - 25.7.8 - fix(no-changes)
no changes detected; nothing to release
- Current package version: 25.7.7
- Git diff: no changes
- No files modified; no release necessary
## 2026-02-19 - 25.7.7 - fix(proxy)
restrict PROXY protocol parsing to configured trusted proxy IPs and parse PROXY headers before metrics/fast-path so client IPs reflect the real source
- Add proxy_ips: Vec<std::net::IpAddr> to ConnectionConfig with a default empty Vec
- Populate proxy_ips from options.proxy_ips strings in rust/crates/rustproxy/src/lib.rs, parsing each to IpAddr
- Only peek for and parse PROXY v1 headers when the remote IP is contained in proxy_ips (prevents untrusted clients from injecting PROXY headers)
- Move PROXY protocol parsing earlier so metrics and fast-path logic use the effective (real client) IP after PROXY parsing
- If proxy_ips is empty, behavior remains unchanged (no PROXY parsing)
## 2026-02-19 - 25.7.6 - fix(throughput)
add tests for per-IP connection tracking and throughput history; assert per-IP eviction after connection close to prevent memory leak
- Adds runtime assertions for per-IP TCP connection tracking (m.connections.byIP) while a connection is active
- Adds checks for throughput history (m.throughput.history) to ensure history length and timestamps are recorded
- Asserts that per-IP tracking data is evicted after connection close (byIP.size === 0) to verify memory leak fix
- Reorders test checks so per-IP and history metrics are validated during the active connection and totals are validated after close
## 2026-02-19 - 25.7.5 - fix(rustproxy)
prune stale per-route metrics, add per-route rate limiter caching and regex cache, and improve connection tracking cleanup to prevent memory growth
- Prune per-route metrics for routes removed from configuration via MetricsCollector::retain_routes invoked during route table updates
- Introduce per-route shared RateLimiter instances (DashMap) with a request-count-triggered periodic cleanup to avoid stale limiters
- Cache compiled URL-rewrite regexes (regex_cache) to avoid recompiling patterns on every request and insert compiled regex on first use
- Improve upstream connection tracking to remove zero-count entries and guard against underflow, preventing unbounded DashMap growth
- Evict per-IP metrics and timestamps when the last connection for an IP closes so per-IP DashMap entries are fully freed
- Add unit tests validating connection tracking cleanup, per-IP eviction, and route-metrics retention behavior
## 2026-02-19 - 25.7.4 - fix(smart-proxy)
include proxy IPs in smart proxy configuration
- Add proxyIps: this.settings.proxyIPs to proxy options in ts/proxies/smart-proxy/smart-proxy.ts
- Ensures proxy IPs from settings are passed into the proxy implementation (enables proxy IP filtering/whitelisting)
## 2026-02-16 - 25.7.3 - fix(metrics)
centralize connection-closed reporting via ConnectionGuard and remove duplicate explicit metrics.connection_closed calls
- Removed numerous explicit metrics.connection_closed calls from rust/crates/rustproxy-http/src/proxy_service.rs so connection teardown and byte counting are handled by the connection guard / counting body instead of ad-hoc calls.
- Simplified ConnectionGuard in rust/crates/rustproxy-passthrough/src/tcp_listener.rs: removed the disarm flag and disarm() method so Drop always reports connection_closed.
- Stopped disarming the TCP-level guard when handing connections off to HTTP proxy paths (HTTP/WebSocket/streaming flows) to avoid missing or double-reporting metrics.
- Fixes incorrect/duplicate connection-closed metric emission and ensures consistent byte/connection accounting during streaming and WebSocket upgrades.
## 2026-02-16 - 25.7.2 - fix(rustproxy-http)
preserve original Host header when proxying and add X-Forwarded-* headers; add TLS WebSocket echo backend helper and integration test for terminate-and-reencrypt websocket
- Preserve the client's original Host header instead of replacing it with backend host:port when proxying requests.
- Add standard reverse-proxy headers: X-Forwarded-For (appends client IP), X-Forwarded-Host, and X-Forwarded-Proto for upstream requests.
- Ensure raw TCP/HTTP upstream requests copy original headers and skip X-Forwarded-* (which are added explicitly).
- Add start_tls_ws_echo_backend test helper to start a TLS WebSocket echo backend for tests.
- Add integration test test_terminate_and_reencrypt_websocket to verify WS upgrade through terminate-and-reencrypt TLS path.
- Rename unused parameter upstream to _upstream in proxy_service functions to avoid warnings.
## 2026-02-16 - 25.7.1 - fix(proxy)
use TLS to backends for terminate-and-reencrypt routes
- Set upstream.use_tls = true when a route's TLS mode is TerminateAndReencrypt so the proxy re-encrypts to backend servers.
- Add start_tls_http_backend test helper and update integration tests to run TLS-enabled backend servers validating re-encryption behavior.
- Make the selected upstream mutable to allow toggling the use_tls flag during request handling.
## 2026-02-16 - 25.7.0 - feat(routes)
add protocol-based route matching and ensure terminate-and-reencrypt routes send HTTP traffic through the full HTTP proxy; update docs and tests
- Introduce a new 'protocol' match field for routes (supports 'http' and 'tcp') and preserve it through cloning/merging.
- Add Rust integration test verifying terminate-and-reencrypt decrypts TLS and routes HTTP traffic via the HTTP proxy (per-request Host/path routing) instead of raw tunneling.
- Add TypeScript unit tests covering protocol field validation, preservation, interaction with terminate-and-reencrypt, cloning, merging, and matching behavior.
- Update README with a Protocol-Specific Routing section and clarify terminate-and-reencrypt behavior (HTTP routed via HTTP proxy; non-HTTP uses raw TLS-to-TLS tunnel).
- Example config: include health check thresholds (unhealthyThreshold and healthyThreshold) in the sample healthCheck settings.
## 2026-02-16 - 25.6.0 - feat(rustproxy)
add protocol-based routing and backend TLS re-encryption support
- Introduce optional route_match.protocol ("http" | "tcp") in Rust and TypeScript route types to allow protocol-restricted routing.
- RouteManager: respect protocol field during matching and treat TLS connections without SNI as not matching domain-restricted routes (except wildcard-only routes).
- HTTP proxy: add BackendStream abstraction to unify plain TCP and tokio-rustls TLS backend streams, and support connecting to upstreams over TLS (upstream.use_tls) with an InsecureBackendVerifier for internal/self-signed backends.
- WebSocket and HTTP forwarding updated to use BackendStream so upstream TLS is handled transparently.
- Passthrough listener: perform post-termination protocol detection for TerminateAndReencrypt; route HTTP flows into HttpProxyService and handle non-HTTP as TLS-to-TLS tunnel.
- Add tests for protocol matching, TLS/no-SNI behavior, and other routing edge cases.
- Add rustls and tokio-rustls dependencies (Cargo.toml/Cargo.lock updates).
## 2026-02-16 - 25.5.0 - feat(tls)
add shared TLS acceptor with SNI resolver and session resumption; prefer shared acceptor and fall back to per-connection when routes specify custom TLS versions
- Add CertResolver that pre-parses PEM certs/keys into CertifiedKey instances for SNI-based lookup and cheap runtime resolution
- Introduce build_shared_tls_acceptor to create a shared ServerConfig with session cache (4096) and Ticketer for session ticket resumption
- Add ArcSwap<Option<TlsAcceptor>> shared_tls_acceptor to tcp_listener for hot-reloadable, pre-built acceptor and update accept loop/handlers to use it
- set_tls_configs now attempts to build and store the shared TLS acceptor, falling back to per-connection acceptors on failure; raw PEM configs are still retained for route-level fallbacks
- Add get_tls_acceptor helper: prefer shared acceptor for performance and session resumption, but build per-connection acceptor when a route requests custom TLS versions
## 2026-02-16 - 25.4.0 - feat(rustproxy)
support dynamically loaded TLS certificates via loadCertificate IPC and include them in listener TLS configs for rebuilds and hot-swap
- Adds loaded_certs: HashMap<String, TlsCertConfig> to RustProxy to store certificates loaded at runtime
- Merge loaded_certs into tls_configs in rebuild and listener hot-swap paths so dynamically loaded certs are served immediately
- Persist loaded certificates on loadCertificate so future rebuilds include them
## 2026-02-15 - 25.3.1 - fix(plugins)
remove unused dependencies and simplify plugin exports
- Removed multiple dependencies from package.json to reduce dependency footprint: @push.rocks/lik, @push.rocks/smartacme, @push.rocks/smartdelay, @push.rocks/smartfile, @push.rocks/smartnetwork, @push.rocks/smartpromise, @push.rocks/smartrequest, @push.rocks/smartrx, @push.rocks/smartstring, @push.rocks/taskbuffer, @types/minimatch, @types/ws, pretty-ms, ws
- ts/plugins.ts: stopped importing/exporting node:https and many push.rocks and third-party modules; plugins now only re-export core node modules (without https), tsclass, smartcrypto, smartlog (+ destination-local), smartrust, and minimatch
- Intended effect: trim surface area and remove unused/optional integrations; patch-level change (no feature/API additions)
## 2026-02-14 - 25.3.0 - feat(smart-proxy)
add background concurrent certificate provisioning with per-domain timeouts and concurrency control
- Add ISmartProxyOptions settings: certProvisionTimeout (ms) and certProvisionConcurrency (default 4)
- Run certProvisionFunction as fire-and-forget background tasks (stores promise on start/route-update and awaited on stop)
- Provision certificates in parallel with a concurrency limit using a new ConcurrencySemaphore utility
- Introduce per-domain timeout handling (default 300000ms) via withTimeout and surface timeout errors as certificate-failed events
- Refactor provisioning into provisionSingleDomain to isolate domain handling, ACME fallback preserved
- Run provisioning outside route update mutex so route updates are not blocked by slow provisioning
## 2026-02-14 - 25.2.2 - fix(smart-proxy)
start metrics polling before certificate provisioning to avoid blocking metrics collection
- Start metrics polling immediately after Rust engine startup so metrics are available without waiting for certificate provisioning.
- Run certProvisionFunction after startup because ACME/DNS-01 provisioning can hang or be slow and must not block observability.
- Code change in ts/proxies/smart-proxy/smart-proxy.ts: metricsAdapter.startPolling() moved to run before provisionCertificatesViaCallback().
## 2026-02-14 - 25.2.1 - fix(smartproxy)
no changes detected in git diff
- The provided diff contains no file changes; no code or documentation updates to release.
## 2026-02-14 - 25.2.0 - feat(metrics)
add per-IP and HTTP-request metrics, propagate source IP through proxy paths, and expose new metrics to the TS adapter
- Add per-IP tracking and IpMetrics in MetricsCollector (active/total connections, bytes, throughput).
- Add HTTP request counters and tracking (total_http_requests, http_requests_per_sec, recent counters and tests).
- Include throughput history (ThroughputSample serialization, retention and snapshotting) and expose history in snapshots.
- Propagate source IP through HTTP and passthrough code paths: CountingBody.record_bytes and MetricsCollector methods now accept source_ip; connection_opened/closed calls include source IP.
- Introduce ForwardMetricsCtx to carry metrics context (collector, route_id, source_ip) into passthrough forwarding routines; update ConnectionGuard to include source_ip.
- TypeScript adapter (rust-metrics-adapter.ts) updated to return per-IP counts, top IPs, per-IP throughput, throughput history mapping, and HTTP request rates/total where available.
- Numerous unit tests added for per-IP tracking, HTTP request tracking, throughput history and ThroughputTracker.history behavior.
## 2026-02-13 - 25.1.0 - feat(metrics)
add real-time throughput sampling and byte-counting metrics

281
deno.lock generated
View File

@@ -5,29 +5,15 @@
"npm:@git.zone/tsrun@^2.0.1": "2.0.1",
"npm:@git.zone/tsrust@^1.3.0": "1.3.0",
"npm:@git.zone/tstest@^3.1.8": "3.1.8_@push.rocks+smartserve@2.0.1_typescript@5.9.3",
"npm:@push.rocks/lik@^6.2.2": "6.2.2",
"npm:@push.rocks/smartacme@8": "8.0.0_@push.rocks+smartserve@2.0.1",
"npm:@push.rocks/smartcrypto@^2.0.4": "2.0.4",
"npm:@push.rocks/smartdelay@^3.0.5": "3.0.5",
"npm:@push.rocks/smartfile@^13.1.2": "13.1.2",
"npm:@push.rocks/smartlog@^3.1.10": "3.1.10",
"npm:@push.rocks/smartnetwork@^4.4.0": "4.4.0",
"npm:@push.rocks/smartpromise@^4.2.3": "4.2.3",
"npm:@push.rocks/smartrequest@^5.0.1": "5.0.1",
"npm:@push.rocks/smartrust@^1.2.1": "1.2.1",
"npm:@push.rocks/smartrx@^3.0.10": "3.0.10",
"npm:@push.rocks/smartserve@^2.0.1": "2.0.1",
"npm:@push.rocks/smartstring@^4.1.0": "4.1.0",
"npm:@push.rocks/taskbuffer@^4.2.0": "4.2.0",
"npm:@tsclass/tsclass@^9.3.0": "9.3.0",
"npm:@types/minimatch@6": "6.0.0",
"npm:@types/node@^25.2.3": "25.2.3",
"npm:@types/ws@^8.18.1": "8.18.1",
"npm:minimatch@^10.2.0": "10.2.0",
"npm:pretty-ms@^9.3.0": "9.3.0",
"npm:typescript@^5.9.3": "5.9.3",
"npm:why-is-node-running@^3.2.2": "3.2.2",
"npm:ws@^8.19.0": "8.19.0"
"npm:why-is-node-running@^3.2.2": "3.2.2"
},
"npm": {
"@api.global/typedrequest-interfaces@2.0.2": {
@@ -117,7 +103,7 @@
"@push.rocks/smartsitemap",
"@push.rocks/smartstream",
"@push.rocks/smarttime",
"@push.rocks/taskbuffer@3.5.0",
"@push.rocks/taskbuffer",
"@push.rocks/webrequest@3.0.37",
"@push.rocks/webstore",
"@tsclass/tsclass@9.3.0",
@@ -164,19 +150,6 @@
],
"tarball": "https://verdaccio.lossless.digital/@api.global/typedsocket/-/typedsocket-3.1.1.tgz"
},
"@apiclient.xyz/cloudflare@6.4.3": {
"integrity": "sha512-ztegUdUO3Zd4mUoTSylKlCEKPBMHEcggrLelR+7CiblM4beHMwopMVlryBmiCY7bOVbUSPoK0xsVTF7VIy3p/A==",
"dependencies": [
"@push.rocks/smartdelay",
"@push.rocks/smartlog",
"@push.rocks/smartpromise",
"@push.rocks/smartrequest@5.0.1",
"@push.rocks/smartstring",
"@tsclass/tsclass@9.3.0",
"cloudflare"
],
"tarball": "https://verdaccio.lossless.digital/@apiclient.xyz/cloudflare/-/cloudflare-6.4.3.tgz"
},
"@aws-crypto/crc32@5.2.0": {
"integrity": "sha512-nLbCWqQNgUiwwtFsen1AdzAtvuLRsQS8rYgMuxCrdKf9kOssamGLuPwyTY9wyYblNr9+1XM8v6zoDTPPSIeANg==",
"dependencies": [
@@ -1590,7 +1563,7 @@
"@push.rocks/smartpromise",
"@push.rocks/smartstring",
"@push.rocks/smartunique",
"@push.rocks/taskbuffer@3.5.0",
"@push.rocks/taskbuffer",
"@tsclass/tsclass@9.3.0"
],
"tarball": "https://verdaccio.lossless.digital/@push.rocks/levelcache/-/levelcache-3.2.0.tgz"
@@ -1603,7 +1576,7 @@
"@push.rocks/smartpromise",
"@push.rocks/smartrx",
"@push.rocks/smarttime",
"@types/minimatch@5.1.2",
"@types/minimatch",
"@types/symbol-tree",
"symbol-tree"
],
@@ -1632,7 +1605,7 @@
"@push.rocks/smartpath@6.0.0",
"@push.rocks/smartpromise",
"@push.rocks/smartrx",
"@push.rocks/taskbuffer@3.5.0",
"@push.rocks/taskbuffer",
"@tsclass/tsclass@9.3.0"
],
"tarball": "https://verdaccio.lossless.digital/@push.rocks/npmextra/-/npmextra-5.3.3.tgz"
@@ -1648,28 +1621,6 @@
],
"tarball": "https://verdaccio.lossless.digital/@push.rocks/qenv/-/qenv-6.1.3.tgz"
},
"@push.rocks/smartacme@8.0.0_@push.rocks+smartserve@2.0.1": {
"integrity": "sha512-Oq+m+LX4IG0p4qCGZLEwa6UlMo5Hfq7paRjpREwQNsaGSKl23xsjsEJLxjxkePwaXnaIkHEwU/5MtrEkg2uKEQ==",
"dependencies": [
"@api.global/typedserver@3.0.80_@push.rocks+smartserve@2.0.1",
"@apiclient.xyz/cloudflare",
"@push.rocks/lik",
"@push.rocks/smartdata",
"@push.rocks/smartdelay",
"@push.rocks/smartdns@6.2.2",
"@push.rocks/smartfile@11.2.7",
"@push.rocks/smartlog",
"@push.rocks/smartnetwork",
"@push.rocks/smartpromise",
"@push.rocks/smartrequest@2.1.0",
"@push.rocks/smartstring",
"@push.rocks/smarttime",
"@push.rocks/smartunique",
"@tsclass/tsclass@9.3.0",
"acme-client"
],
"tarball": "https://verdaccio.lossless.digital/@push.rocks/smartacme/-/smartacme-8.0.0.tgz"
},
"@push.rocks/smartarchive@4.2.4": {
"integrity": "sha512-uiqVAXPxmr8G5rv3uZvZFMOCt8l7cZC3nzvsy4YQqKf/VkPhKIEX+b7LkAeNlxPSYUiBQUkNRoawg9+5BaMcHg==",
"dependencies": [
@@ -1805,7 +1756,7 @@
"@push.rocks/smartstring",
"@push.rocks/smarttime",
"@push.rocks/smartunique",
"@push.rocks/taskbuffer@3.5.0",
"@push.rocks/taskbuffer",
"@tsclass/tsclass@9.3.0",
"mongodb"
],
@@ -1818,23 +1769,6 @@
],
"tarball": "https://verdaccio.lossless.digital/@push.rocks/smartdelay/-/smartdelay-3.0.5.tgz"
},
"@push.rocks/smartdns@6.2.2": {
"integrity": "sha512-MhJcHujbyIuwIIFdnXb2OScGtRjNsliLUS8GoAurFsKtcCOaA0ytfP+PNzkukyBufjb1nMiJF3rjhswXdHakAQ==",
"dependencies": [
"@push.rocks/smartdelay",
"@push.rocks/smartenv@5.0.13",
"@push.rocks/smartpromise",
"@push.rocks/smartrequest@2.1.0",
"@tsclass/tsclass@5.0.0",
"@types/dns-packet",
"@types/elliptic",
"acme-client",
"dns-packet",
"elliptic",
"minimatch@10.2.0"
],
"tarball": "https://verdaccio.lossless.digital/@push.rocks/smartdns/-/smartdns-6.2.2.tgz"
},
"@push.rocks/smartdns@7.8.0": {
"integrity": "sha512-5FX74AAgQSqWPZkpTsI/BbUKBQpZKSvs+UdX9IZpwcuPldI+K7D1WeE02mMAGd1Ncd/sYAMor5CTlhnG6L+QhQ==",
"dependencies": [
@@ -2095,7 +2029,7 @@
"@push.rocks/smartnetwork@4.4.0": {
"integrity": "sha512-OvFtz41cvQ7lcXwaIOhghNUUlNoMxvwKDctbDvMyuZyEH08SpLjhyv2FuKbKL/mgwA/WxakTbohoC8SW7t+kiw==",
"dependencies": [
"@push.rocks/smartdns@7.8.0",
"@push.rocks/smartdns",
"@push.rocks/smartping",
"@push.rocks/smartpromise",
"@push.rocks/smartstring",
@@ -2449,20 +2383,6 @@
],
"tarball": "https://verdaccio.lossless.digital/@push.rocks/taskbuffer/-/taskbuffer-3.5.0.tgz"
},
"@push.rocks/taskbuffer@4.2.0": {
"integrity": "sha512-ttoBe5y/WXkAo5/wSMcC/Y4Zbyw4XG8kwAsEaqnAPCxa3M9MI1oV/yM1e9gU1IH97HVPidzbTxRU5/PcHDdUsg==",
"dependencies": [
"@design.estate/dees-element",
"@push.rocks/lik",
"@push.rocks/smartdelay",
"@push.rocks/smartlog",
"@push.rocks/smartpromise",
"@push.rocks/smartrx",
"@push.rocks/smarttime",
"@push.rocks/smartunique"
],
"tarball": "https://verdaccio.lossless.digital/@push.rocks/taskbuffer/-/taskbuffer-4.2.0.tgz"
},
"@push.rocks/webrequest@3.0.37": {
"integrity": "sha512-fLN7kP6GeHFxE4UH4r9C9pjcQb0QkJxHeAMwXvbOqB9hh0MFNKhtGU7GoaTn8SVRGRMPc9UqZVNwo6u5l8Wn0A==",
"dependencies": [
@@ -3317,13 +3237,6 @@
],
"tarball": "https://verdaccio.lossless.digital/@tsclass/tsclass/-/tsclass-4.4.4.tgz"
},
"@tsclass/tsclass@5.0.0": {
"integrity": "sha512-2X66VCk0Oe1L01j6GQHC6F9Gj7lpZPPSUTDNax7e29lm4OqBTyAzTR3ePR8coSbWBwsmRV8awLRSrSI+swlqWA==",
"dependencies": [
"type-fest@4.41.0"
],
"tarball": "https://verdaccio.lossless.digital/@tsclass/tsclass/-/tsclass-5.0.0.tgz"
},
"@tsclass/tsclass@9.3.0": {
"integrity": "sha512-KD3oTUN3RGu67tgjNHgWWZGsdYipr1RUDxQ9MMKSgIJ6oNZ4q5m2rg0ibrgyHWkAjTPlHVa6kHP3uVOY+8bnHw==",
"dependencies": [
@@ -3338,13 +3251,6 @@
],
"tarball": "https://verdaccio.lossless.digital/@tybys/wasm-util/-/wasm-util-0.10.1.tgz"
},
"@types/bn.js@5.2.0": {
"integrity": "sha512-DLbJ1BPqxvQhIGbeu8VbUC1DiAiahHtAYvA0ZEAa4P31F7IaArc8z3C3BRQdWX4mtLQuABG4yzp76ZrS02Ui1Q==",
"dependencies": [
"@types/node@24.2.0"
],
"tarball": "https://verdaccio.lossless.digital/@types/bn.js/-/bn.js-5.2.0.tgz"
},
"@types/body-parser@1.19.6": {
"integrity": "sha512-HLFeCYgz89uk22N5Qg3dvGvsv46B8GLvKKo1zKG4NybA8U2DiEO3w9lqGg29t/tfLRJpJ6iQxnVw4OnB7MoM9g==",
"dependencies": [
@@ -3393,13 +3299,6 @@
],
"tarball": "https://verdaccio.lossless.digital/@types/dns-packet/-/dns-packet-5.6.5.tgz"
},
"@types/elliptic@6.4.18": {
"integrity": "sha512-UseG6H5vjRiNpQvrhy4VF/JXdA3V/Fp5amvveaL+fs28BZ6xIKJBPnUPRlEaZpysD9MbpfaLi8lbl7PGUAkpWw==",
"dependencies": [
"@types/bn.js"
],
"tarball": "https://verdaccio.lossless.digital/@types/elliptic/-/elliptic-6.4.18.tgz"
},
"@types/express-serve-static-core@5.1.1": {
"integrity": "sha512-v4zIMr/cX7/d2BpAEX3KNKL/JrT1s43s96lLvvdTmza1oEvDudCqK9aF/djc/SWgy8Yh0h30TZx5VpzqFCxk5A==",
"dependencies": [
@@ -3481,14 +3380,6 @@
"integrity": "sha512-K0VQKziLUWkVKiRVrx4a40iPaxTUefQmjtkQofBkYRcoaaL/8rhwDWww9qWbrgicNOgnpIsMxyNIUM4+n6dUIA==",
"tarball": "https://verdaccio.lossless.digital/@types/minimatch/-/minimatch-5.1.2.tgz"
},
"@types/minimatch@6.0.0": {
"integrity": "sha512-zmPitbQ8+6zNutpwgcQuLcsEpn/Cj54Kbn7L5pX0Os5kdWplB7xPgEh/g+SWOB/qmows2gpuCaPyduq8ZZRnxA==",
"dependencies": [
"minimatch@10.2.0"
],
"deprecated": true,
"tarball": "https://verdaccio.lossless.digital/@types/minimatch/-/minimatch-6.0.0.tgz"
},
"@types/ms@2.1.0": {
"integrity": "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==",
"tarball": "https://verdaccio.lossless.digital/@types/ms/-/ms-2.1.0.tgz"
@@ -3500,14 +3391,6 @@
],
"tarball": "https://verdaccio.lossless.digital/@types/mute-stream/-/mute-stream-0.0.4.tgz"
},
"@types/node-fetch@2.6.13": {
"integrity": "sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw==",
"dependencies": [
"@types/node@24.2.0",
"form-data"
],
"tarball": "https://verdaccio.lossless.digital/@types/node-fetch/-/node-fetch-2.6.13.tgz"
},
"@types/node-forge@1.3.14": {
"integrity": "sha512-mhVF2BnD4BO+jtOp7z1CdzaK4mbuK0LLQYAvdOLqHTavxFNq4zA1EmYkpnFjP8HOUzedfQkRnp0E2ulSAYSzAw==",
"dependencies": [
@@ -3515,13 +3398,6 @@
],
"tarball": "https://verdaccio.lossless.digital/@types/node-forge/-/node-forge-1.3.14.tgz"
},
"@types/node@18.19.130": {
"integrity": "sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg==",
"dependencies": [
"undici-types@5.26.5"
],
"tarball": "https://verdaccio.lossless.digital/@types/node/-/node-18.19.130.tgz"
},
"@types/node@22.19.11": {
"integrity": "sha512-BH7YwL6rA93ReqeQS1c4bsPpcfOmJasG+Fkr6Y59q83f9M1WcBRHR2vM+P9eOisYRcN3ujQoiZY8uk5W+1WL8w==",
"dependencies": [
@@ -3660,13 +3536,6 @@
"integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==",
"tarball": "https://verdaccio.lossless.digital/@ungap/structured-clone/-/structured-clone-1.3.0.tgz"
},
"abort-controller@3.0.0": {
"integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==",
"dependencies": [
"event-target-shim"
],
"tarball": "https://verdaccio.lossless.digital/abort-controller/-/abort-controller-3.0.0.tgz"
},
"accepts@1.3.8": {
"integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==",
"dependencies": [
@@ -3849,10 +3718,6 @@
"integrity": "sha512-RkaJzeJKDbaDWTIPiJwubyljaEPwpVWkm9Rt5h9Nd6h7tEXTJ3VB4qxdZBioV7JO5yLUaOKwz7vDOzlncUsegw==",
"tarball": "https://verdaccio.lossless.digital/basic-ftp/-/basic-ftp-5.1.0.tgz"
},
"bn.js@4.12.2": {
"integrity": "sha512-n4DSx829VRTRByMRGdjQ9iqsN0Bh4OolPsFnaZBLcbi8iXcB+kJ9s7EnRt4wILZNV3kPLHkRVfOc/HvhC3ovDw==",
"tarball": "https://verdaccio.lossless.digital/bn.js/-/bn.js-4.12.2.tgz"
},
"body-parser@2.2.2": {
"integrity": "sha512-oP5VkATKlNwcgvxi0vM0p/D3n2C3EReYVX+DNYs5TjZFn/oQt2j+4sVJtSMr18pdRr8wjTcBl6LoV+FUwzPmNA==",
"dependencies": [
@@ -3904,10 +3769,6 @@
],
"tarball": "https://verdaccio.lossless.digital/broadcast-channel/-/broadcast-channel-7.3.0.tgz"
},
"brorand@1.1.0": {
"integrity": "12c25efe40a45e3c323eb8675a0a0ce57b22371f",
"tarball": "https://verdaccio.lossless.digital/brorand/-/brorand-1.1.0.tgz"
},
"bson@6.10.4": {
"integrity": "sha512-WIsKqkSC0ABoBJuT1LEX+2HEvNmNKKgnTAyd0fL8qzK4SH2i9NXg+t08YtdZp/V9IZ33cxe3iV4yM0qg8lMQng==",
"tarball": "https://verdaccio.lossless.digital/bson/-/bson-6.10.4.tgz"
@@ -4051,19 +3912,6 @@
],
"tarball": "https://verdaccio.lossless.digital/cliui/-/cliui-8.0.1.tgz"
},
"cloudflare@5.2.0": {
"integrity": "sha512-dVzqDpPFYR9ApEC9e+JJshFJZXcw4HzM8W+3DHzO5oy9+8rLC53G7x6fEf9A7/gSuSCxuvndzui5qJKftfIM9A==",
"dependencies": [
"@types/node@18.19.130",
"@types/node-fetch",
"abort-controller",
"agentkeepalive",
"form-data-encoder@1.7.2",
"formdata-node",
"node-fetch"
],
"tarball": "https://verdaccio.lossless.digital/cloudflare/-/cloudflare-5.2.0.tgz"
},
"color-convert@2.0.1": {
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dependencies": [
@@ -4286,19 +4134,6 @@
"integrity": "590c61156b0ae2f4f0255732a158b266bc56b21d",
"tarball": "https://verdaccio.lossless.digital/ee-first/-/ee-first-1.1.1.tgz"
},
"elliptic@6.6.1": {
"integrity": "sha512-RaddvvMatK2LJHqFJ+YA4WysVN5Ita9E35botqIYspQ4TkRAlCicdzKOjlyv/1Za5RyTNn7di//eEV0uTAfe3g==",
"dependencies": [
"bn.js",
"brorand",
"hash.js",
"hmac-drbg",
"inherits",
"minimalistic-assert",
"minimalistic-crypto-utils"
],
"tarball": "https://verdaccio.lossless.digital/elliptic/-/elliptic-6.6.1.tgz"
},
"emoji-regex@8.0.0": {
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
"tarball": "https://verdaccio.lossless.digital/emoji-regex/-/emoji-regex-8.0.0.tgz"
@@ -4464,10 +4299,6 @@
"integrity": "41ae2eeb65efa62268aebfea83ac7d79299b0887",
"tarball": "https://verdaccio.lossless.digital/etag/-/etag-1.8.1.tgz"
},
"event-target-shim@5.0.1": {
"integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==",
"tarball": "https://verdaccio.lossless.digital/event-target-shim/-/event-target-shim-5.0.1.tgz"
},
"eventemitter3@4.0.7": {
"integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==",
"tarball": "https://verdaccio.lossless.digital/eventemitter3/-/eventemitter3-4.0.7.tgz"
@@ -4684,10 +4515,6 @@
],
"tarball": "https://verdaccio.lossless.digital/foreground-child/-/foreground-child-3.3.1.tgz"
},
"form-data-encoder@1.7.2": {
"integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==",
"tarball": "https://verdaccio.lossless.digital/form-data-encoder/-/form-data-encoder-1.7.2.tgz"
},
"form-data-encoder@2.1.4": {
"integrity": "sha512-yDYSgNMraqvnxiEXO4hi88+YZxaHC6QKzb5N84iRCTDeRO7ZALpir/lVmf/uXUhnwUr2O4HU8s/n6x+yNjQkHw==",
"tarball": "https://verdaccio.lossless.digital/form-data-encoder/-/form-data-encoder-2.1.4.tgz"
@@ -4707,14 +4534,6 @@
"integrity": "d6170107e9efdc4ed30c9dc39016df942b5cb58b",
"tarball": "https://verdaccio.lossless.digital/format/-/format-0.2.2.tgz"
},
"formdata-node@4.4.1": {
"integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==",
"dependencies": [
"node-domexception",
"web-streams-polyfill"
],
"tarball": "https://verdaccio.lossless.digital/formdata-node/-/formdata-node-4.4.1.tgz"
},
"forwarded@0.2.0": {
"integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==",
"tarball": "https://verdaccio.lossless.digital/forwarded/-/forwarded-0.2.0.tgz"
@@ -4846,7 +4665,7 @@
"cacheable-lookup",
"cacheable-request",
"decompress-response",
"form-data-encoder@2.1.4",
"form-data-encoder",
"get-stream@6.0.1",
"http2-wrapper",
"lowercase-keys",
@@ -4863,7 +4682,7 @@
"integrity": "sha512-KyrFvnl+J9US63TEzwoiJOQzZBJY7KgBushJA8X61DMbNsH+2ONkDuLDnCnwUiPTF42tLoEmrPyoqbenVA5zrg==",
"dependencies": [
"entities",
"webidl-conversions@7.0.0",
"webidl-conversions",
"whatwg-mimetype"
],
"tarball": "https://verdaccio.lossless.digital/happy-dom/-/happy-dom-15.11.7.tgz"
@@ -4886,14 +4705,6 @@
],
"tarball": "https://verdaccio.lossless.digital/has-tostringtag/-/has-tostringtag-1.0.2.tgz"
},
"hash.js@1.1.7": {
"integrity": "sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==",
"dependencies": [
"inherits",
"minimalistic-assert"
],
"tarball": "https://verdaccio.lossless.digital/hash.js/-/hash.js-1.1.7.tgz"
},
"hasown@2.0.2": {
"integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
"dependencies": [
@@ -4939,15 +4750,6 @@
"bin": true,
"tarball": "https://verdaccio.lossless.digital/he/-/he-1.2.0.tgz"
},
"hmac-drbg@1.0.1": {
"integrity": "d2745701025a6c775a6c545793ed502fc0c649a1",
"dependencies": [
"hash.js",
"minimalistic-assert",
"minimalistic-crypto-utils"
],
"tarball": "https://verdaccio.lossless.digital/hmac-drbg/-/hmac-drbg-1.0.1.tgz"
},
"html-minifier@4.0.0": {
"integrity": "sha512-aoGxanpFPLg7MkIl/DDFYtb0iWz7jMFGqFhvEDZga6/4QTjneiD8I/NXL1x5aaoCp7FSIT6h/OhykDdPsbtMig==",
"dependencies": [
@@ -5845,14 +5647,6 @@
"integrity": "sha512-UeX942qZpofn5L97h295SkS7j/ADf7Qac8gdRCMBPxi0/1m70aeB2owLFvWbyuMj1dowonlivlVRQVDx+6h+7Q==",
"tarball": "https://verdaccio.lossless.digital/mingo/-/mingo-7.2.0.tgz"
},
"minimalistic-assert@1.0.1": {
"integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==",
"tarball": "https://verdaccio.lossless.digital/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz"
},
"minimalistic-crypto-utils@1.0.1": {
"integrity": "f6c00c1c0b082246e5c4d99dfb8c7c083b2b582a",
"tarball": "https://verdaccio.lossless.digital/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz"
},
"minimatch@10.2.0": {
"integrity": "sha512-ugkC31VaVg9cF0DFVoADH12k6061zNZkZON+aX8AWsR9GhPcErkcMBceb6znR8wLERM2AkkOxy2nWRLpT9Jq5w==",
"dependencies": [
@@ -5890,7 +5684,7 @@
"integrity": "sha512-rMO7CGo/9BFwyZABcKAWL8UJwH/Kc2x0g72uhDWzG48URRax5TCIcJ7Rc3RZqffZzO/Gwff/jyKwCU9TN8gehA==",
"dependencies": [
"@types/whatwg-url",
"whatwg-url@14.2.0"
"whatwg-url"
],
"tarball": "https://verdaccio.lossless.digital/mongodb-connection-string-url/-/mongodb-connection-string-url-3.0.2.tgz"
},
@@ -5969,17 +5763,6 @@
],
"tarball": "https://verdaccio.lossless.digital/no-case/-/no-case-2.3.2.tgz"
},
"node-domexception@1.0.0": {
"integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==",
"tarball": "https://verdaccio.lossless.digital/node-domexception/-/node-domexception-1.0.0.tgz"
},
"node-fetch@2.7.0": {
"integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
"dependencies": [
"whatwg-url@5.0.0"
],
"tarball": "https://verdaccio.lossless.digital/node-fetch/-/node-fetch-2.7.0.tgz"
},
"node-forge@1.3.3": {
"integrity": "sha512-rLvcdSyRCyouf6jcOIPe/BgwG/d7hKjzMKOas33/pHEr6gbq18IK9zV7DiPvzsz0oBJPme6qr6H6kGZuI9/DZg==",
"tarball": "https://verdaccio.lossless.digital/node-forge/-/node-forge-1.3.3.tgz"
@@ -6913,10 +6696,6 @@
],
"tarball": "https://verdaccio.lossless.digital/token-types/-/token-types-6.1.2.tgz"
},
"tr46@0.0.3": {
"integrity": "8184fd347dac9cdc185992f3a6622e14b9d9ab6a",
"tarball": "https://verdaccio.lossless.digital/tr46/-/tr46-0.0.3.tgz"
},
"tr46@5.1.1": {
"integrity": "sha512-hdF5ZgjTqgAntKkklYw0R03MG2x/bSzTtkxmIRw/sTNV8YXsCJ1tfLAX23lhxhHJlEf3CRCOCGGWw3vI3GaSPw==",
"dependencies": [
@@ -7014,10 +6793,6 @@
"integrity": "sha512-rvKSBiC5zqCCiDZ9kAOszZcDvdAHwwIKJG33Ykj43OKcWsnmcBRL09YTU4nOeHZ8Y2a7l1MgTd08SBe9A8Qj6A==",
"tarball": "https://verdaccio.lossless.digital/uint8array-extras/-/uint8array-extras-1.5.0.tgz"
},
"undici-types@5.26.5": {
"integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
"tarball": "https://verdaccio.lossless.digital/undici-types/-/undici-types-5.26.5.tgz"
},
"undici-types@6.21.0": {
"integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==",
"tarball": "https://verdaccio.lossless.digital/undici-types/-/undici-types-6.21.0.tgz"
@@ -7134,18 +6909,10 @@
],
"tarball": "https://verdaccio.lossless.digital/vfile/-/vfile-6.0.3.tgz"
},
"web-streams-polyfill@4.0.0-beta.3": {
"integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==",
"tarball": "https://verdaccio.lossless.digital/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz"
},
"webdriver-bidi-protocol@0.4.0": {
"integrity": "sha512-U9VIlNRrq94d1xxR9JrCEAx5Gv/2W7ERSv8oWRoNe/QYbfccS0V3h/H6qeNeCRJxXGMhhnkqvwNrvPAYeuP9VA==",
"tarball": "https://verdaccio.lossless.digital/webdriver-bidi-protocol/-/webdriver-bidi-protocol-0.4.0.tgz"
},
"webidl-conversions@3.0.1": {
"integrity": "24534275e2a7bc6be7bc86611cc16ae0a5654871",
"tarball": "https://verdaccio.lossless.digital/webidl-conversions/-/webidl-conversions-3.0.1.tgz"
},
"webidl-conversions@7.0.0": {
"integrity": "sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==",
"tarball": "https://verdaccio.lossless.digital/webidl-conversions/-/webidl-conversions-7.0.0.tgz"
@@ -7157,19 +6924,11 @@
"whatwg-url@14.2.0": {
"integrity": "sha512-De72GdQZzNTUBBChsXueQUnPKDkg/5A5zp7pFDuQAj5UFoENpiACU0wlCvzpAGnTkj++ihpKwKyYewn/XNUbKw==",
"dependencies": [
"tr46@5.1.1",
"webidl-conversions@7.0.0"
"tr46",
"webidl-conversions"
],
"tarball": "https://verdaccio.lossless.digital/whatwg-url/-/whatwg-url-14.2.0.tgz"
},
"whatwg-url@5.0.0": {
"integrity": "966454e8765462e37644d3626f6742ce8b70965d",
"dependencies": [
"tr46@0.0.3",
"webidl-conversions@3.0.1"
],
"tarball": "https://verdaccio.lossless.digital/whatwg-url/-/whatwg-url-5.0.0.tgz"
},
"which@2.0.2": {
"integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
"dependencies": [
@@ -7295,29 +7054,15 @@
"npm:@git.zone/tsrun@^2.0.1",
"npm:@git.zone/tsrust@^1.3.0",
"npm:@git.zone/tstest@^3.1.8",
"npm:@push.rocks/lik@^6.2.2",
"npm:@push.rocks/smartacme@8",
"npm:@push.rocks/smartcrypto@^2.0.4",
"npm:@push.rocks/smartdelay@^3.0.5",
"npm:@push.rocks/smartfile@^13.1.2",
"npm:@push.rocks/smartlog@^3.1.10",
"npm:@push.rocks/smartnetwork@^4.4.0",
"npm:@push.rocks/smartpromise@^4.2.3",
"npm:@push.rocks/smartrequest@^5.0.1",
"npm:@push.rocks/smartrust@^1.2.1",
"npm:@push.rocks/smartrx@^3.0.10",
"npm:@push.rocks/smartserve@^2.0.1",
"npm:@push.rocks/smartstring@^4.1.0",
"npm:@push.rocks/taskbuffer@^4.2.0",
"npm:@tsclass/tsclass@^9.3.0",
"npm:@types/minimatch@6",
"npm:@types/node@^25.2.3",
"npm:@types/ws@^8.18.1",
"npm:minimatch@^10.2.0",
"npm:pretty-ms@^9.3.0",
"npm:typescript@^5.9.3",
"npm:why-is-node-running@^3.2.2",
"npm:ws@^8.19.0"
"npm:why-is-node-running@^3.2.2"
]
}
}

View File

@@ -1,6 +1,6 @@
{
"name": "@push.rocks/smartproxy",
"version": "25.1.0",
"version": "25.15.0",
"private": false,
"description": "A powerful proxy package with unified route-based configuration for high traffic management. Features include SSL/TLS support, flexible routing patterns, WebSocket handling, advanced security options, and automatic ACME certificate management.",
"main": "dist_ts/index.js",
@@ -9,41 +9,28 @@
"author": "Lossless GmbH",
"license": "MIT",
"scripts": {
"test:before": "(tsrust)",
"test": "(tstest test/**/test*.ts --verbose --timeout 60 --logfile)",
"build": "(tsbuild tsfolders --allowimplicitany) && (tsrust)",
"format": "(gitzone format)",
"buildDocs": "tsdoc"
},
"devDependencies": {
"@git.zone/tsbuild": "^4.1.2",
"@git.zone/tsbuild": "^4.3.0",
"@git.zone/tsrun": "^2.0.1",
"@git.zone/tsrust": "^1.3.0",
"@git.zone/tstest": "^3.1.8",
"@git.zone/tstest": "^3.5.0",
"@push.rocks/smartserve": "^2.0.1",
"@types/node": "^25.2.3",
"@types/node": "^25.5.0",
"typescript": "^5.9.3",
"why-is-node-running": "^3.2.2"
},
"dependencies": {
"@push.rocks/lik": "^6.2.2",
"@push.rocks/smartacme": "^8.0.0",
"@push.rocks/smartcrypto": "^2.0.4",
"@push.rocks/smartdelay": "^3.0.5",
"@push.rocks/smartfile": "^13.1.2",
"@push.rocks/smartlog": "^3.1.10",
"@push.rocks/smartnetwork": "^4.4.0",
"@push.rocks/smartpromise": "^4.2.3",
"@push.rocks/smartrequest": "^5.0.1",
"@push.rocks/smartrust": "^1.2.1",
"@push.rocks/smartrx": "^3.0.10",
"@push.rocks/smartstring": "^4.1.0",
"@push.rocks/taskbuffer": "^4.2.0",
"@tsclass/tsclass": "^9.3.0",
"@types/minimatch": "^6.0.0",
"@types/ws": "^8.18.1",
"minimatch": "^10.2.0",
"pretty-ms": "^9.3.0",
"ws": "^8.19.0"
"@push.rocks/smartlog": "^3.2.1",
"@push.rocks/smartrust": "^1.3.2",
"@tsclass/tsclass": "^9.5.0",
"minimatch": "^10.2.4"
},
"files": [
"ts/**/*",

5107
pnpm-lock.yaml generated

File diff suppressed because it is too large Load Diff

264
readme.md
View File

@@ -1,6 +1,6 @@
# @push.rocks/smartproxy 🚀
**A high-performance, Rust-powered proxy toolkit for Node.js** — unified route-based configuration for SSL/TLS termination, HTTP/HTTPS reverse proxying, WebSocket support, load balancing, custom protocol handlers, and kernel-level NFTables forwarding.
**A high-performance, Rust-powered proxy toolkit for Node.js** — unified route-based configuration for SSL/TLS termination, HTTP/HTTPS reverse proxying, WebSocket support, UDP/QUIC/HTTP3, load balancing, custom protocol handlers, and kernel-level NFTables forwarding.
## 📦 Installation
@@ -16,9 +16,9 @@ For reporting bugs, issues, or security vulnerabilities, please visit [community
## 🎯 What is SmartProxy?
SmartProxy is a production-ready proxy solution that takes the complexity out of traffic management. Under the hood, all networking — TCP, TLS, HTTP reverse proxy, connection tracking, security enforcement, and NFTables — is handled by a **Rust engine** for maximum performance, while you configure everything through a clean TypeScript API with full type safety.
SmartProxy is a production-ready proxy solution that takes the complexity out of traffic management. Under the hood, all networking — TCP, UDP, TLS, HTTP reverse proxy, QUIC/HTTP3, connection tracking, security enforcement, and NFTables — is handled by a **Rust engine** for maximum performance, while you configure everything through a clean TypeScript API with full type safety.
Whether you're building microservices, deploying edge infrastructure, or need a battle-tested reverse proxy with automatic Let's Encrypt certificates, SmartProxy has you covered.
Whether you're building microservices, deploying edge infrastructure, proxying UDP-based protocols, or need a battle-tested reverse proxy with automatic Let's Encrypt certificates, SmartProxy has you covered.
### ⚡ Key Features
@@ -27,13 +27,14 @@ Whether you're building microservices, deploying edge infrastructure, or need a
| 🦀 **Rust-Powered Engine** | All networking handled by a high-performance Rust binary via IPC |
| 🔀 **Unified Route-Based Config** | Clean match/action patterns for intuitive traffic routing |
| 🔒 **Automatic SSL/TLS** | Zero-config HTTPS with Let's Encrypt ACME integration |
| 🎯 **Flexible Matching** | Route by port, domain, path, client IP, TLS version, headers, or custom logic |
| 🎯 **Flexible Matching** | Route by port, domain, path, protocol, client IP, TLS version, headers, or custom logic |
| 🚄 **High-Performance** | Choose between user-space or kernel-level (NFTables) forwarding |
| 📡 **UDP & QUIC/HTTP3** | First-class UDP transport, datagram handlers, QUIC tunneling, and HTTP/3 support |
| ⚖️ **Load Balancing** | Round-robin, least-connections, IP-hash with health checks |
| 🛡️ **Enterprise Security** | IP filtering, rate limiting, basic auth, JWT auth, connection limits |
| 🔌 **WebSocket Support** | First-class WebSocket proxying with ping/pong keep-alive |
| 🎮 **Custom Protocols** | Socket handlers for implementing any protocol in TypeScript |
| 📊 **Live Metrics** | Real-time throughput, connection counts, and performance data |
| 🎮 **Custom Protocols** | Socket and datagram handlers for implementing any protocol in TypeScript |
| 📊 **Live Metrics** | Real-time throughput, connection counts, UDP sessions, and performance data |
| 🔧 **Dynamic Management** | Add/remove ports and routes at runtime without restarts |
| 🔄 **PROXY Protocol** | Full PROXY protocol v1/v2 support for preserving client information |
| 💾 **Consumer Cert Storage** | Bring your own persistence — SmartProxy never writes certs to disk |
@@ -89,7 +90,7 @@ SmartProxy uses a powerful **match/action** pattern that makes routing predictab
```
Every route consists of:
- **Match** — What traffic to capture (ports, domains, paths, IPs, headers)
- **Match** — What traffic to capture (ports, domains, paths, transport, protocol, IPs, headers)
- **Action** — What to do with it (`forward` or `socket-handler`)
- **Security** (optional) — IP allow/block lists, rate limits, authentication
- **Headers** (optional) — Request/response header manipulation with template variables
@@ -103,7 +104,7 @@ SmartProxy supports three TLS handling modes:
|------|-------------|----------|
| `passthrough` | Forward encrypted traffic as-is (SNI-based routing) | Backend handles TLS |
| `terminate` | Decrypt at proxy, forward plain HTTP to backend | Standard reverse proxy |
| `terminate-and-reencrypt` | Decrypt, then re-encrypt to backend | Zero-trust environments |
| `terminate-and-reencrypt` | Decrypt at proxy, re-encrypt to backend. HTTP traffic gets full per-request routing (Host header, path matching) via the HTTP proxy; non-HTTP traffic uses a raw TLS-to-TLS tunnel | Zero-trust / defense-in-depth environments |
## 💡 Common Use Cases
@@ -135,13 +136,13 @@ const proxy = new SmartProxy({
],
{
tls: { mode: 'terminate', certificate: 'auto' },
loadBalancing: {
algorithm: 'round-robin',
healthCheck: {
path: '/health',
interval: 30000,
timeout: 5000
}
algorithm: 'round-robin',
healthCheck: {
path: '/health',
interval: 30000,
timeout: 5000,
unhealthyThreshold: 3,
healthyThreshold: 2
}
}
)
@@ -197,7 +198,7 @@ apiRoute = addRateLimiting(apiRoute, {
const proxy = new SmartProxy({ routes: [apiRoute] });
```
### 🎮 Custom Protocol Handler
### 🎮 Custom Protocol Handler (TCP)
SmartProxy lets you implement any protocol with full socket control. Routes with JavaScript socket handlers are automatically relayed from the Rust engine back to your TypeScript code:
@@ -247,6 +248,98 @@ const proxy = new SmartProxy({ routes: [echoRoute, customRoute] });
| `SocketHandlers.httpBlock(status, message)` | HTTP block response |
| `SocketHandlers.block(message)` | Block with optional message |
### 📡 UDP Datagram Handler
Handle raw UDP datagrams with custom TypeScript logic — perfect for DNS, game servers, IoT protocols, or any UDP-based service:
```typescript
import { SmartProxy } from '@push.rocks/smartproxy';
import type { IRouteConfig, TDatagramHandler, IDatagramInfo } from '@push.rocks/smartproxy';
// Custom UDP echo handler
const udpHandler: TDatagramHandler = (datagram, info, reply) => {
console.log(`UDP from ${info.sourceIp}:${info.sourcePort} on port ${info.destPort}`);
reply(datagram); // Echo it back
};
const proxy = new SmartProxy({
routes: [{
name: 'udp-echo',
match: {
ports: 5353,
transport: 'udp' // 👈 Listen for UDP datagrams
},
action: {
type: 'socket-handler',
datagramHandler: udpHandler, // 👈 Process each datagram
udp: {
sessionTimeout: 60000, // Session idle timeout (ms)
maxSessionsPerIP: 100,
maxDatagramSize: 65535
}
}
}]
});
await proxy.start();
```
### 📡 QUIC / HTTP3 Forwarding
Forward QUIC traffic to backends with optional protocol translation (e.g., receive QUIC, forward as TCP/HTTP1):
```typescript
import { SmartProxy } from '@push.rocks/smartproxy';
import type { IRouteConfig } from '@push.rocks/smartproxy';
const quicRoute: IRouteConfig = {
name: 'quic-to-backend',
match: {
ports: 443,
transport: 'udp',
protocol: 'quic' // 👈 Match QUIC protocol
},
action: {
type: 'forward',
targets: [{
host: 'backend-server',
port: 8443,
backendTransport: 'tcp' // 👈 Translate QUIC → TCP for backend
}],
udp: {
quic: {
enableHttp3: true,
maxIdleTimeout: 30000,
maxConcurrentBidiStreams: 100,
altSvcPort: 443, // Advertise in Alt-Svc header
altSvcMaxAge: 86400
}
}
}
};
const proxy = new SmartProxy({ routes: [quicRoute] });
```
### 🔁 Dual-Stack TCP + UDP Route
Listen on both TCP and UDP with a single route — handle each transport with its own handler:
```typescript
const dualStackRoute: IRouteConfig = {
name: 'dual-stack-dns',
match: {
ports: 53,
transport: 'all' // 👈 Listen on both TCP and UDP
},
action: {
type: 'socket-handler',
socketHandler: handleTcpDns, // 👈 TCP connections
datagramHandler: handleUdpDns, // 👈 UDP datagrams
}
};
```
### ⚡ High-Performance NFTables Forwarding
For ultra-low latency on Linux, use kernel-level forwarding (requires root):
@@ -318,6 +411,42 @@ const proxy = new SmartProxy({
> **Note:** Routes with dynamic functions (host/port callbacks) are automatically relayed through the TypeScript socket handler server, since JavaScript functions can't be serialized to Rust.
### 🔀 Protocol-Specific Routing
Restrict routes to specific application-layer protocols. When `protocol` is set, the Rust engine detects the protocol after connection (or after TLS termination) and only matches routes that accept that protocol:
```typescript
// HTTP-only route (rejects raw TCP connections)
const httpOnlyRoute: IRouteConfig = {
name: 'http-api',
match: {
ports: 443,
domains: 'api.example.com',
protocol: 'http', // Only match HTTP/1.1, HTTP/2, and WebSocket upgrades
},
action: {
type: 'forward',
targets: [{ host: 'api-backend', port: 8080 }],
tls: { mode: 'terminate', certificate: 'auto' }
}
};
// Raw TCP route (rejects HTTP traffic)
const tcpOnlyRoute: IRouteConfig = {
name: 'database-proxy',
match: {
ports: 5432,
protocol: 'tcp', // Only match non-HTTP TCP streams
},
action: {
type: 'forward',
targets: [{ host: 'db-server', port: 5432 }]
}
};
```
> **Note:** Omitting `protocol` (the default) matches any protocol. For TLS routes, protocol detection happens *after* TLS termination — during the initial SNI-based route match, `protocol` is not yet known and the route is allowed to match. The protocol restriction is enforced after the proxy peeks at the decrypted data.
### 🔒 Security Controls
Comprehensive per-route security options:
@@ -383,6 +512,10 @@ console.log(`Bytes in: ${metrics.totals.bytesIn()}`);
console.log(`Requests/sec: ${metrics.requests.perSecond()}`);
console.log(`Throughput in: ${metrics.throughput.instant().in} bytes/sec`);
// UDP metrics
console.log(`UDP sessions: ${metrics.udp.activeSessions()}`);
console.log(`Datagrams in: ${metrics.udp.datagramsIn()}`);
// Get detailed statistics from the Rust engine
const stats = await proxy.getStatistics();
@@ -509,7 +642,7 @@ SmartProxy uses a hybrid **Rust + TypeScript** architecture:
```
┌─────────────────────────────────────────────────────┐
│ Your Application │
│ (TypeScript — routes, config, socket handlers) │
│ (TypeScript — routes, config, handlers)
└──────────────────┬──────────────────────────────────┘
│ IPC (JSON over stdin/stdout)
┌──────────────────▼──────────────────────────────────┐
@@ -520,22 +653,23 @@ SmartProxy uses a hybrid **Rust + TypeScript** architecture:
│ │ │ │ Proxy │ │ │ │ │ │
│ └─────────┘ └─────────┘ └─────────┘ └──────────┘ │
│ ┌─────────┐ ┌─────────┐ ┌─────────┐ ┌──────────┐ │
│ │ Security│ │ Metrics │ │ Connec- │ │ NFTables │ │
│ │ Enforce │ │ Collect │ │ tion │ │ Mgr │ │
│ │ │ │ │ │ Tracker │ │ │ │
│ │ UDP │ │ Security│ │ Metrics │ │ NFTables │ │
│ │ QUIC │ │ Enforce │ │ Collect │ │ Mgr │ │
│ │ HTTP/3 │ │ │ │ │ │ │ │
│ └─────────┘ └─────────┘ └─────────┘ └──────────┘ │
└──────────────────┬──────────────────────────────────┘
│ Unix Socket Relay
┌──────────────────▼──────────────────────────────────┐
TypeScript Socket Handler Server
│ (for JS-defined socket handlers & dynamic routes)
│ TypeScript Socket & Datagram Handler Servers
│ (for JS socket handlers, datagram handlers,
│ and dynamic routes) │
└─────────────────────────────────────────────────────┘
```
- **Rust Engine** handles all networking, TLS, HTTP proxying, connection management, security, and metrics
- **TypeScript** provides the npm API, configuration types, route helpers, validation, and socket handler callbacks
- **Rust Engine** handles all networking: TCP, UDP, TLS, QUIC, HTTP proxying, connection management, security, and metrics
- **TypeScript** provides the npm API, configuration types, route helpers, validation, and handler callbacks
- **IPC** — The TypeScript wrapper uses JSON commands/events over stdin/stdout to communicate with the Rust binary
- **Socket Relay** — A Unix domain socket server for routes requiring TypeScript-side handling (socket handlers, dynamic host/port functions)
- **Socket/Datagram Relay** — Unix domain socket servers for routes requiring TypeScript-side handling (socket handlers, datagram handlers, dynamic host/port functions)
## 🎯 Route Configuration Reference
@@ -543,21 +677,26 @@ SmartProxy uses a hybrid **Rust + TypeScript** architecture:
```typescript
interface IRouteMatch {
ports: number | number[] | Array<{ from: number; to: number }>; // Required — port(s) to listen on
domains?: string | string[]; // 'example.com', '*.example.com'
path?: string; // '/api/*', '/users/:id'
clientIp?: string[]; // ['10.0.0.0/8', '192.168.*']
tlsVersion?: string[]; // ['TLSv1.2', 'TLSv1.3']
ports: TPortRange; // Required — port(s) to listen on
transport?: 'tcp' | 'udp' | 'all'; // Transport protocol (default: 'tcp')
domains?: string | string[]; // 'example.com', '*.example.com'
path?: string; // '/api/*', '/users/:id'
clientIp?: string[]; // ['10.0.0.0/8', '192.168.*']
tlsVersion?: string[]; // ['TLSv1.2', 'TLSv1.3']
headers?: Record<string, string | RegExp>; // Match by HTTP headers
protocol?: 'http' | 'tcp' | 'udp' | 'quic' | 'http3'; // Application-layer protocol
}
// Port range supports single numbers, arrays, and ranges
type TPortRange = number | Array<number | { from: number; to: number }>;
```
### Action Types
| Type | Description |
|------|-------------|
| `forward` | Proxy to one or more backend targets (with optional TLS, WebSocket, load balancing) |
| `socket-handler` | Custom socket handling function in TypeScript |
| `forward` | Proxy to one or more backend targets (with optional TLS, WebSocket, load balancing, UDP/QUIC) |
| `socket-handler` | Custom socket/datagram handling function in TypeScript |
### Target Options
@@ -565,14 +704,15 @@ interface IRouteMatch {
interface IRouteTarget {
host: string | string[] | ((context: IRouteContext) => string | string[]);
port: number | 'preserve' | ((context: IRouteContext) => number);
tls?: IRouteTls; // Per-target TLS override
priority?: number; // Target priority
match?: ITargetMatch; // Sub-match within a route (by port, path, headers, method)
tls?: IRouteTls; // Per-target TLS override
priority?: number; // Target priority
match?: ITargetMatch; // Sub-match within a route (by port, path, headers, method)
websocket?: IRouteWebSocket;
loadBalancing?: IRouteLoadBalancing;
sendProxyProtocol?: boolean;
headers?: IRouteHeaders;
advanced?: IRouteAdvanced;
backendTransport?: 'tcp' | 'udp'; // Backend transport (e.g., receive QUIC, forward as TCP)
}
```
@@ -629,6 +769,27 @@ interface IRouteLoadBalancing {
}
```
### UDP & QUIC Options
```typescript
interface IRouteUdp {
sessionTimeout?: number; // Idle timeout per UDP session (ms, default: 60000)
maxSessionsPerIP?: number; // Max concurrent sessions per IP (default: 1000)
maxDatagramSize?: number; // Max datagram size in bytes (default: 65535)
quic?: IRouteQuic;
}
interface IRouteQuic {
maxIdleTimeout?: number; // QUIC idle timeout (ms, default: 30000)
maxConcurrentBidiStreams?: number; // Max bidi streams (default: 100)
maxConcurrentUniStreams?: number; // Max uni streams (default: 100)
enableHttp3?: boolean; // Enable HTTP/3 (default: false)
altSvcPort?: number; // Port for Alt-Svc header
altSvcMaxAge?: number; // Alt-Svc max age in seconds (default: 86400)
initialCongestionWindow?: number; // Initial congestion window (bytes)
}
```
## 🛠️ Helper Functions Reference
All helpers are fully typed and return `IRouteConfig` or `IRouteConfig[]`:
@@ -652,7 +813,7 @@ import {
createWebSocketRoute, // WebSocket-enabled route
// Custom Protocols
createSocketHandlerRoute, // Custom socket handler
createSocketHandlerRoute, // Custom TCP socket handler
SocketHandlers, // Pre-built handlers (echo, proxy, block, etc.)
// NFTables (Linux, requires root)
@@ -681,6 +842,8 @@ import {
} from '@push.rocks/smartproxy';
```
> **Tip:** For UDP datagram handler routes or QUIC/HTTP3 routes, construct `IRouteConfig` objects directly — there are no helper functions for these yet. See the [UDP Datagram Handler](#-udp-datagram-handler) and [QUIC / HTTP3 Forwarding](#-quic--http3-forwarding) examples above.
## 📖 API Documentation
### SmartProxy Class
@@ -716,6 +879,8 @@ class SmartProxy extends EventEmitter {
// Events
on(event: 'error', handler: (err: Error) => void): this;
on(event: 'certificate-issued', handler: (ev: ICertificateIssuedEvent) => void): this;
on(event: 'certificate-failed', handler: (ev: ICertificateFailedEvent) => void): this;
}
```
@@ -738,6 +903,8 @@ interface ISmartProxyOptions {
// Custom certificate provisioning
certProvisionFunction?: (domain: string) => Promise<ICert | 'http01'>;
certProvisionFallbackToAcme?: boolean; // Fall back to ACME on failure (default: true)
certProvisionTimeout?: number; // Timeout per provision call (ms)
certProvisionConcurrency?: number; // Max concurrent provisions
// Consumer-managed certificate persistence (see "Consumer-Managed Certificate Storage")
certStore?: ISmartProxyCertStore;
@@ -745,6 +912,9 @@ interface ISmartProxyOptions {
// Self-signed fallback
disableDefaultCert?: boolean; // Disable '*' self-signed fallback (default: false)
// Rust binary path override
rustBinaryPath?: string; // Custom path to the Rust proxy binary
// Global defaults
defaults?: {
target?: { host: string; port: number };
@@ -831,11 +1001,22 @@ metrics.requests.perSecond(); // Requests per second
metrics.requests.perMinute(); // Requests per minute
metrics.requests.total(); // Total requests
// UDP metrics
metrics.udp.activeSessions(); // Current active UDP sessions
metrics.udp.totalSessions(); // Total UDP sessions since start
metrics.udp.datagramsIn(); // Datagrams received
metrics.udp.datagramsOut(); // Datagrams sent
// Cumulative totals
metrics.totals.bytesIn(); // Total bytes received
metrics.totals.bytesOut(); // Total bytes sent
metrics.totals.connections(); // Total connections
// Backend metrics
metrics.backends.byBackend(); // Map<backend, IBackendMetrics>
metrics.backends.protocols(); // Map<backend, protocol>
metrics.backends.topByErrors(10); // Top N error-prone backends
// Percentiles
metrics.percentiles.connectionDuration(); // { p50, p95, p99 }
metrics.percentiles.bytesTransferred(); // { in: { p50, p95, p99 }, out: { p50, p95, p99 } }
@@ -859,11 +1040,12 @@ metrics.percentiles.bytesTransferred(); // { in: { p50, p95, p99 }, out: { p5
### Rust Binary Not Found
SmartProxy searches for the Rust binary in this order:
1. `SMARTPROXY_RUST_BINARY` environment variable
2. Platform-specific npm package (`@push.rocks/smartproxy-linux-x64`, etc.)
3. `dist_rust/rustproxy` relative to the package root (built by `tsrust`)
4. Local dev build (`./rust/target/release/rustproxy`)
5. System PATH (`rustproxy`)
1. `rustBinaryPath` option in `ISmartProxyOptions`
2. `SMARTPROXY_RUST_BINARY` environment variable
3. Platform-specific npm package (`@push.rocks/smartproxy-linux-x64`, etc.)
4. `dist_rust/rustproxy` relative to the package root (built by `tsrust`)
5. Local dev build (`./rust/target/release/rustproxy`)
6. System PATH (`rustproxy`)
### Performance Tuning
- ✅ Use NFTables forwarding for high-traffic routes (Linux only)

484
rust/Cargo.lock generated
View File

@@ -157,12 +157,24 @@ dependencies = [
"shlex",
]
[[package]]
name = "cesu8"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c"
[[package]]
name = "cfg-if"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801"
[[package]]
name = "cfg_aliases"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"
[[package]]
name = "clap"
version = "4.5.57"
@@ -218,6 +230,16 @@ version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75"
[[package]]
name = "combine"
version = "4.6.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd"
dependencies = [
"bytes",
"memchr",
]
[[package]]
name = "core-foundation"
version = "0.10.1"
@@ -285,6 +307,24 @@ dependencies = [
"windows-sys 0.61.2",
]
[[package]]
name = "fastbloom"
version = "0.14.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4e7f34442dbe69c60fe8eaf58a8cafff81a1f278816d8ab4db255b3bef4ac3c4"
dependencies = [
"getrandom 0.3.4",
"libm",
"rand",
"siphasher",
]
[[package]]
name = "fastrand"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
[[package]]
name = "find-msvc-tools"
version = "0.1.9"
@@ -303,6 +343,21 @@ version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c"
[[package]]
name = "futures"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876"
dependencies = [
"futures-channel",
"futures-core",
"futures-executor",
"futures-io",
"futures-sink",
"futures-task",
"futures-util",
]
[[package]]
name = "futures-channel"
version = "0.3.31"
@@ -310,6 +365,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10"
dependencies = [
"futures-core",
"futures-sink",
]
[[package]]
@@ -318,6 +374,34 @@ version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e"
[[package]]
name = "futures-executor"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f"
dependencies = [
"futures-core",
"futures-task",
"futures-util",
]
[[package]]
name = "futures-io"
version = "0.3.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cecba35d7ad927e23624b22ad55235f2239cfa44fd10428eecbeba6d6a717718"
[[package]]
name = "futures-macro"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "futures-sink"
version = "0.3.31"
@@ -336,10 +420,16 @@ version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
dependencies = [
"futures-channel",
"futures-core",
"futures-io",
"futures-macro",
"futures-sink",
"futures-task",
"memchr",
"pin-project-lite",
"pin-utils",
"slab",
]
[[package]]
@@ -362,9 +452,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd"
dependencies = [
"cfg-if",
"js-sys",
"libc",
"r-efi",
"wasip2",
"wasm-bindgen",
]
[[package]]
@@ -392,6 +484,34 @@ dependencies = [
"tracing",
]
[[package]]
name = "h3"
version = "0.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10872b55cfb02a821b69dc7cf8dc6a71d6af25eb9a79662bec4a9d016056b3be"
dependencies = [
"bytes",
"fastrand",
"futures-util",
"http",
"pin-project-lite",
"tokio",
]
[[package]]
name = "h3-quinn"
version = "0.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b2e732c8d91a74731663ac8479ab505042fbf547b9a207213ab7fbcbfc4f8b4"
dependencies = [
"bytes",
"futures",
"h3",
"quinn",
"tokio",
"tokio-util",
]
[[package]]
name = "hashbrown"
version = "0.14.5"
@@ -509,7 +629,7 @@ dependencies = [
"hyper",
"libc",
"pin-project-lite",
"socket2",
"socket2 0.6.2",
"tokio",
"tower-service",
"tracing",
@@ -565,6 +685,28 @@ version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2"
[[package]]
name = "jni"
version = "0.21.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97"
dependencies = [
"cesu8",
"cfg-if",
"combine",
"jni-sys",
"log",
"thiserror 1.0.69",
"walkdir",
"windows-sys 0.45.0",
]
[[package]]
name = "jni-sys"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130"
[[package]]
name = "jobserver"
version = "0.1.34"
@@ -612,6 +754,22 @@ version = "0.2.180"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc"
[[package]]
name = "libm"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981"
[[package]]
name = "libmimalloc-sys"
version = "0.1.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "667f4fec20f29dfc6bc7357c582d91796c169ad7e2fce709468aefeb2c099870"
dependencies = [
"cc",
"libc",
]
[[package]]
name = "lock_api"
version = "0.4.14"
@@ -627,6 +785,12 @@ version = "0.4.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897"
[[package]]
name = "lru-slab"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154"
[[package]]
name = "matchers"
version = "0.2.0"
@@ -642,6 +806,15 @@ version = "2.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79"
[[package]]
name = "mimalloc"
version = "0.1.48"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1ee66a4b64c74f4ef288bcbb9192ad9c3feaad75193129ac8509af543894fd8"
dependencies = [
"libmimalloc-sys",
]
[[package]]
name = "mio"
version = "1.1.1"
@@ -765,6 +938,15 @@ version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391"
[[package]]
name = "ppv-lite86"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9"
dependencies = [
"zerocopy",
]
[[package]]
name = "proc-macro2"
version = "1.0.106"
@@ -774,6 +956,64 @@ dependencies = [
"unicode-ident",
]
[[package]]
name = "quinn"
version = "0.11.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20"
dependencies = [
"bytes",
"cfg_aliases",
"futures-io",
"pin-project-lite",
"quinn-proto",
"quinn-udp",
"rustc-hash",
"rustls",
"socket2 0.6.2",
"thiserror 2.0.18",
"tokio",
"tracing",
"web-time",
]
[[package]]
name = "quinn-proto"
version = "0.11.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "434b42fec591c96ef50e21e886936e66d3cc3f737104fdb9b737c40ffb94c098"
dependencies = [
"bytes",
"fastbloom",
"getrandom 0.3.4",
"lru-slab",
"rand",
"ring",
"rustc-hash",
"rustls",
"rustls-pki-types",
"rustls-platform-verifier",
"slab",
"thiserror 2.0.18",
"tinyvec",
"tracing",
"web-time",
]
[[package]]
name = "quinn-udp"
version = "0.5.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd"
dependencies = [
"cfg_aliases",
"libc",
"once_cell",
"socket2 0.6.2",
"tracing",
"windows-sys 0.60.2",
]
[[package]]
name = "quote"
version = "1.0.44"
@@ -789,6 +1029,35 @@ version = "5.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f"
[[package]]
name = "rand"
version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1"
dependencies = [
"rand_chacha",
"rand_core",
]
[[package]]
name = "rand_chacha"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb"
dependencies = [
"ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c"
dependencies = [
"getrandom 0.3.4",
]
[[package]]
name = "rcgen"
version = "0.13.2"
@@ -854,6 +1123,12 @@ dependencies = [
"windows-sys 0.52.0",
]
[[package]]
name = "rustc-hash"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d"
[[package]]
name = "rustls"
version = "0.23.36"
@@ -897,9 +1172,37 @@ version = "1.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd"
dependencies = [
"web-time",
"zeroize",
]
[[package]]
name = "rustls-platform-verifier"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d99feebc72bae7ab76ba994bb5e121b8d83d910ca40b36e0921f53becc41784"
dependencies = [
"core-foundation",
"core-foundation-sys",
"jni",
"log",
"once_cell",
"rustls",
"rustls-native-certs",
"rustls-platform-verifier-android",
"rustls-webpki",
"security-framework",
"security-framework-sys",
"webpki-root-certs",
"windows-sys 0.61.2",
]
[[package]]
name = "rustls-platform-verifier-android"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f"
[[package]]
name = "rustls-webpki"
version = "0.103.9"
@@ -924,8 +1227,10 @@ dependencies = [
"http-body-util",
"hyper",
"hyper-util",
"mimalloc",
"rcgen",
"rustls",
"rustls-pemfile",
"rustproxy-config",
"rustproxy-http",
"rustproxy-metrics",
@@ -961,17 +1266,23 @@ dependencies = [
"arc-swap",
"bytes",
"dashmap",
"h3",
"h3-quinn",
"http-body",
"http-body-util",
"hyper",
"hyper-util",
"quinn",
"regex",
"rustls",
"rustproxy-config",
"rustproxy-metrics",
"rustproxy-routing",
"rustproxy-security",
"socket2 0.5.10",
"thiserror 2.0.18",
"tokio",
"tokio-rustls",
"tokio-util",
"tracing",
]
@@ -1008,7 +1319,10 @@ version = "0.1.0"
dependencies = [
"anyhow",
"arc-swap",
"base64",
"dashmap",
"quinn",
"rcgen",
"rustls",
"rustls-pemfile",
"rustproxy-config",
@@ -1017,6 +1331,7 @@ dependencies = [
"rustproxy-routing",
"serde",
"serde_json",
"socket2 0.5.10",
"thiserror 2.0.18",
"tokio",
"tokio-rustls",
@@ -1072,6 +1387,15 @@ version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d"
[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = [
"winapi-util",
]
[[package]]
name = "schannel"
version = "0.1.28"
@@ -1190,6 +1514,12 @@ dependencies = [
"time",
]
[[package]]
name = "siphasher"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2aa850e253778c88a04c3d7323b043aeda9d3e30d5971937c1855769763678e"
[[package]]
name = "slab"
version = "0.4.12"
@@ -1202,6 +1532,16 @@ version = "1.15.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03"
[[package]]
name = "socket2"
version = "0.5.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678"
dependencies = [
"libc",
"windows-sys 0.52.0",
]
[[package]]
name = "socket2"
version = "0.6.2"
@@ -1315,6 +1655,21 @@ dependencies = [
"time-core",
]
[[package]]
name = "tinyvec"
version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3e61e67053d25a4e82c844e8424039d9745781b3fc4f32b8d55ed50f5f667ef3"
dependencies = [
"tinyvec_macros",
]
[[package]]
name = "tinyvec_macros"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
version = "1.49.0"
@@ -1327,7 +1682,7 @@ dependencies = [
"parking_lot",
"pin-project-lite",
"signal-hook-registry",
"socket2",
"socket2 0.6.2",
"tokio-macros",
"windows-sys 0.61.2",
]
@@ -1378,6 +1733,7 @@ version = "0.1.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100"
dependencies = [
"log",
"pin-project-lite",
"tracing-attributes",
"tracing-core",
@@ -1463,6 +1819,16 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65"
[[package]]
name = "walkdir"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
dependencies = [
"same-file",
"winapi-util",
]
[[package]]
name = "want"
version = "0.3.1"
@@ -1532,12 +1898,49 @@ dependencies = [
"unicode-ident",
]
[[package]]
name = "web-time"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb"
dependencies = [
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "webpki-root-certs"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "804f18a4ac2676ffb4e8b5b5fa9ae38af06df08162314f96a68d2a363e21a8ca"
dependencies = [
"rustls-pki-types",
]
[[package]]
name = "winapi-util"
version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22"
dependencies = [
"windows-sys 0.61.2",
]
[[package]]
name = "windows-link"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5"
[[package]]
name = "windows-sys"
version = "0.45.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0"
dependencies = [
"windows-targets 0.42.2",
]
[[package]]
name = "windows-sys"
version = "0.52.0"
@@ -1565,6 +1968,21 @@ dependencies = [
"windows-link",
]
[[package]]
name = "windows-targets"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071"
dependencies = [
"windows_aarch64_gnullvm 0.42.2",
"windows_aarch64_msvc 0.42.2",
"windows_i686_gnu 0.42.2",
"windows_i686_msvc 0.42.2",
"windows_x86_64_gnu 0.42.2",
"windows_x86_64_gnullvm 0.42.2",
"windows_x86_64_msvc 0.42.2",
]
[[package]]
name = "windows-targets"
version = "0.52.6"
@@ -1598,6 +2016,12 @@ dependencies = [
"windows_x86_64_msvc 0.53.1",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8"
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.6"
@@ -1610,6 +2034,12 @@ version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53"
[[package]]
name = "windows_aarch64_msvc"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.6"
@@ -1622,6 +2052,12 @@ version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006"
[[package]]
name = "windows_i686_gnu"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f"
[[package]]
name = "windows_i686_gnu"
version = "0.52.6"
@@ -1646,6 +2082,12 @@ version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c"
[[package]]
name = "windows_i686_msvc"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060"
[[package]]
name = "windows_i686_msvc"
version = "0.52.6"
@@ -1658,6 +2100,12 @@ version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2"
[[package]]
name = "windows_x86_64_gnu"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.6"
@@ -1670,6 +2118,12 @@ version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.6"
@@ -1682,6 +2136,12 @@ version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1"
[[package]]
name = "windows_x86_64_msvc"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.6"
@@ -1709,6 +2169,26 @@ dependencies = [
"time",
]
[[package]]
name = "zerocopy"
version = "0.8.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2578b716f8a7a858b7f02d5bd870c14bf4ddbbcf3a4c05414ba6503640505e3"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.8.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e6cc098ea4d3bd6246687de65af3f920c430e236bee1e3bf2e441463f08a02f"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "zeroize"
version = "1.8.2"

View File

@@ -88,6 +88,19 @@ async-trait = "0.1"
# libc for uid checks
libc = "0.2"
# Socket-level options (keepalive, etc.)
socket2 = { version = "0.5", features = ["all"] }
# QUIC transport
quinn = "0.11"
# HTTP/3 protocol
h3 = "0.0.8"
h3-quinn = "0.0.10"
# mimalloc allocator (prevents glibc fragmentation / slow RSS growth)
mimalloc = "0.1"
# Internal crates
rustproxy-config = { path = "crates/rustproxy-config" }
rustproxy-routing = { path = "crates/rustproxy-routing" }

View File

@@ -15,8 +15,10 @@ pub fn create_http_route(
domains: Some(domains.into()),
path: None,
client_ip: None,
transport: None,
tls_version: None,
headers: None,
protocol: None,
},
action: RouteAction {
action_type: RouteActionType::Forward,
@@ -30,6 +32,7 @@ pub fn create_http_route(
send_proxy_protocol: None,
headers: None,
advanced: None,
backend_transport: None,
priority: None,
}]),
tls: None,
@@ -40,6 +43,7 @@ pub fn create_http_route(
forwarding_engine: None,
nftables: None,
send_proxy_protocol: None,
udp: None,
},
headers: None,
security: None,
@@ -106,8 +110,10 @@ pub fn create_http_to_https_redirect(
domains: Some(domains),
path: None,
client_ip: None,
transport: None,
tls_version: None,
headers: None,
protocol: None,
},
action: RouteAction {
action_type: RouteActionType::Forward,
@@ -135,6 +141,7 @@ pub fn create_http_to_https_redirect(
forwarding_engine: None,
nftables: None,
send_proxy_protocol: None,
udp: None,
},
headers: None,
security: None,
@@ -185,6 +192,7 @@ pub fn create_load_balancer_route(
send_proxy_protocol: None,
headers: None,
advanced: None,
backend_transport: None,
priority: None,
})
.collect();
@@ -198,8 +206,10 @@ pub fn create_load_balancer_route(
domains: Some(domains.into()),
path: None,
client_ip: None,
transport: None,
tls_version: None,
headers: None,
protocol: None,
},
action: RouteAction {
action_type: RouteActionType::Forward,
@@ -215,6 +225,7 @@ pub fn create_load_balancer_route(
forwarding_engine: None,
nftables: None,
send_proxy_protocol: None,
udp: None,
},
headers: None,
security: None,

View File

@@ -208,6 +208,10 @@ pub struct RustProxyOptions {
#[serde(skip_serializing_if = "Option::is_none")]
pub connection_rate_limit_per_minute: Option<u64>,
/// Global maximum simultaneous connections (default: 100000)
#[serde(skip_serializing_if = "Option::is_none")]
pub max_connections: Option<u64>,
// ─── Keep-Alive Settings ─────────────────────────────────────────
/// How to treat keep-alive connections
@@ -272,6 +276,7 @@ impl Default for RustProxyOptions {
enable_randomized_timeouts: None,
max_connections_per_ip: None,
connection_rate_limit_per_minute: None,
max_connections: None,
keep_alive_treatment: None,
keep_alive_inactivity_multiplier: None,
extended_keep_alive_lifetime: None,
@@ -293,7 +298,7 @@ impl RustProxyOptions {
/// Get the effective connection timeout in milliseconds.
pub fn effective_connection_timeout(&self) -> u64 {
self.connection_timeout.unwrap_or(30_000)
self.connection_timeout.unwrap_or(60_000)
}
/// Get the effective initial data timeout in milliseconds.
@@ -303,12 +308,12 @@ impl RustProxyOptions {
/// Get the effective socket timeout in milliseconds.
pub fn effective_socket_timeout(&self) -> u64 {
self.socket_timeout.unwrap_or(3_600_000)
self.socket_timeout.unwrap_or(60_000)
}
/// Get the effective max connection lifetime in milliseconds.
pub fn effective_max_connection_lifetime(&self) -> u64 {
self.max_connection_lifetime.unwrap_or(86_400_000)
self.max_connection_lifetime.unwrap_or(3_600_000)
}
/// Get all unique ports that routes listen on.
@@ -372,10 +377,10 @@ mod tests {
#[test]
fn test_default_timeouts() {
let options = RustProxyOptions::default();
assert_eq!(options.effective_connection_timeout(), 30_000);
assert_eq!(options.effective_connection_timeout(), 60_000);
assert_eq!(options.effective_initial_data_timeout(), 60_000);
assert_eq!(options.effective_socket_timeout(), 3_600_000);
assert_eq!(options.effective_max_connection_lifetime(), 86_400_000);
assert_eq!(options.effective_socket_timeout(), 60_000);
assert_eq!(options.effective_max_connection_lifetime(), 3_600_000);
}
#[test]

View File

@@ -7,16 +7,24 @@ use crate::security_types::RouteSecurity;
// ─── Port Range ──────────────────────────────────────────────────────
/// Port range specification format.
/// Matches TypeScript: `type TPortRange = number | number[] | Array<{ from: number; to: number }>`
/// Matches TypeScript: `type TPortRange = number | Array<number | { from: number; to: number }>`
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum PortRange {
/// Single port number
Single(u16),
/// Array of port numbers
List(Vec<u16>),
/// Array of port ranges
Ranges(Vec<PortRangeSpec>),
/// Array of port numbers, ranges, or mixed
List(Vec<PortRangeItem>),
}
/// A single item in a port range array: either a number or a from-to range.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum PortRangeItem {
/// Single port number
Port(u16),
/// A from-to port range
Range(PortRangeSpec),
}
impl PortRange {
@@ -24,9 +32,11 @@ impl PortRange {
pub fn to_ports(&self) -> Vec<u16> {
match self {
PortRange::Single(p) => vec![*p],
PortRange::List(ports) => ports.clone(),
PortRange::Ranges(ranges) => {
ranges.iter().flat_map(|r| r.from..=r.to).collect()
PortRange::List(items) => {
items.iter().flat_map(|item| match item {
PortRangeItem::Port(p) => vec![*p],
PortRangeItem::Range(r) => (r.from..=r.to).collect(),
}).collect()
}
}
}
@@ -95,6 +105,10 @@ pub struct RouteMatch {
/// Listen on these ports (required)
pub ports: PortRange,
/// Transport protocol: tcp (default), udp, or all (both TCP and UDP)
#[serde(skip_serializing_if = "Option::is_none")]
pub transport: Option<TransportProtocol>,
/// Optional domain patterns to match (default: all domains)
#[serde(skip_serializing_if = "Option::is_none")]
pub domains: Option<DomainSpec>,
@@ -114,6 +128,10 @@ pub struct RouteMatch {
/// Match specific HTTP headers
#[serde(skip_serializing_if = "Option::is_none")]
pub headers: Option<HashMap<String, String>>,
/// Match specific protocol: "http", "tcp", "udp", "quic", "http3"
#[serde(skip_serializing_if = "Option::is_none")]
pub protocol: Option<String>,
}
// ─── Target Match ────────────────────────────────────────────────────
@@ -363,6 +381,17 @@ pub struct NfTablesOptions {
pub enum BackendProtocol {
Http1,
Http2,
Http3,
Auto,
}
/// Transport protocol for route matching.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum TransportProtocol {
Tcp,
Udp,
All,
}
/// Action options.
@@ -465,6 +494,10 @@ pub struct RouteTarget {
#[serde(skip_serializing_if = "Option::is_none")]
pub advanced: Option<RouteAdvanced>,
/// Override transport for backend connection (e.g., receive QUIC but forward as TCP)
#[serde(skip_serializing_if = "Option::is_none")]
pub backend_transport: Option<TransportProtocol>,
/// Priority for matching (higher values checked first, default: 0)
#[serde(skip_serializing_if = "Option::is_none")]
pub priority: Option<i32>,
@@ -519,6 +552,68 @@ pub struct RouteAction {
/// PROXY protocol support (default for all targets)
#[serde(skip_serializing_if = "Option::is_none")]
pub send_proxy_protocol: Option<bool>,
/// UDP-specific settings (session tracking, datagram limits, QUIC config)
#[serde(skip_serializing_if = "Option::is_none")]
pub udp: Option<RouteUdp>,
}
// ─── UDP & QUIC Config ──────────────────────────────────────────────
/// UDP-specific settings for route actions.
/// Matches TypeScript: `IRouteUdp`
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RouteUdp {
/// Idle timeout for a UDP session/flow in ms. Default: 60000
#[serde(skip_serializing_if = "Option::is_none")]
pub session_timeout: Option<u64>,
/// Max concurrent UDP sessions per source IP. Default: 1000
#[serde(skip_serializing_if = "Option::is_none")]
pub max_sessions_per_ip: Option<u32>,
/// Max accepted datagram size in bytes. Default: 65535
#[serde(skip_serializing_if = "Option::is_none")]
pub max_datagram_size: Option<u32>,
/// QUIC-specific configuration
#[serde(skip_serializing_if = "Option::is_none")]
pub quic: Option<RouteQuic>,
}
/// QUIC and HTTP/3 settings.
/// Matches TypeScript: `IRouteQuic`
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RouteQuic {
/// QUIC connection idle timeout in ms. Default: 30000
#[serde(skip_serializing_if = "Option::is_none")]
pub max_idle_timeout: Option<u64>,
/// Max concurrent bidirectional streams per QUIC connection. Default: 100
#[serde(skip_serializing_if = "Option::is_none")]
pub max_concurrent_bidi_streams: Option<u32>,
/// Max concurrent unidirectional streams per QUIC connection. Default: 100
#[serde(skip_serializing_if = "Option::is_none")]
pub max_concurrent_uni_streams: Option<u32>,
/// Enable HTTP/3 over this QUIC endpoint. Default: false
#[serde(skip_serializing_if = "Option::is_none")]
pub enable_http3: Option<bool>,
/// Port to advertise in Alt-Svc header on TCP HTTP responses
#[serde(skip_serializing_if = "Option::is_none")]
pub alt_svc_port: Option<u16>,
/// Max age for Alt-Svc advertisement in seconds. Default: 86400
#[serde(skip_serializing_if = "Option::is_none")]
pub alt_svc_max_age: Option<u64>,
/// Initial congestion window size in bytes
#[serde(skip_serializing_if = "Option::is_none")]
pub initial_congestion_window: Option<u32>,
}
// ─── Route Config ────────────────────────────────────────────────────

View File

@@ -18,9 +18,15 @@ http-body = { workspace = true }
http-body-util = { workspace = true }
bytes = { workspace = true }
tokio = { workspace = true }
rustls = { workspace = true }
tokio-rustls = { workspace = true }
tracing = { workspace = true }
thiserror = { workspace = true }
anyhow = { workspace = true }
arc-swap = { workspace = true }
dashmap = { workspace = true }
tokio-util = { workspace = true }
socket2 = { workspace = true }
quinn = { workspace = true }
h3 = { workspace = true }
h3-quinn = { workspace = true }

View File

@@ -0,0 +1,299 @@
//! Backend connection pool for HTTP/1.1, HTTP/2, and HTTP/3 (QUIC).
//!
//! Reuses idle keep-alive connections to avoid per-request TCP+TLS handshakes.
//! HTTP/2 and HTTP/3 connections are multiplexed (clone the sender / share the connection).
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{Duration, Instant};
use bytes::Bytes;
use dashmap::DashMap;
use http_body_util::combinators::BoxBody;
use hyper::client::conn::{http1, http2};
/// Maximum idle connections per backend key.
const MAX_IDLE_PER_KEY: usize = 16;
/// Default idle timeout — connections not used within this window are evicted.
const IDLE_TIMEOUT: Duration = Duration::from_secs(90);
/// Background eviction interval.
const EVICTION_INTERVAL: Duration = Duration::from_secs(30);
/// Maximum age for pooled HTTP/2 connections before proactive eviction.
const MAX_H2_AGE: Duration = Duration::from_secs(120);
/// Maximum age for pooled QUIC/HTTP/3 connections.
const MAX_H3_AGE: Duration = Duration::from_secs(120);
/// Protocol for pool key discrimination.
#[derive(Clone, Debug, Hash, Eq, PartialEq)]
pub enum PoolProtocol {
    /// HTTP/1.1 — one request in flight per connection; pooled as idle senders.
    H1,
    /// HTTP/2 — multiplexed; a single shared sender per backend key.
    H2,
    /// HTTP/3 over QUIC — multiplexed like H2; the quinn connection is shared.
    H3,
}
/// Identifies a unique backend endpoint.
/// Two backends differing in any field (including protocol) get separate pool slots.
#[derive(Clone, Debug, Hash, Eq, PartialEq)]
pub struct PoolKey {
    pub host: String,
    pub port: u16,
    pub use_tls: bool,
    pub protocol: PoolProtocol,
}
/// An idle HTTP/1.1 sender with a timestamp for eviction.
struct IdleH1 {
    // The parked hyper HTTP/1.1 request sender, ready for reuse.
    sender: http1::SendRequest<BoxBody<Bytes, hyper::Error>>,
    // When this sender was returned to the pool; compared against IDLE_TIMEOUT.
    idle_since: Instant,
}
/// A pooled HTTP/2 sender (multiplexed, Clone-able) with a generation tag.
struct PooledH2 {
    // Cloneable multiplexed sender shared by all checkouts.
    sender: http2::SendRequest<BoxBody<Bytes, hyper::Error>>,
    // Creation time; entries older than MAX_H2_AGE are proactively evicted.
    created_at: Instant,
    /// Unique generation ID. Connection drivers use this to only remove their OWN
    /// entry, preventing phantom eviction when multiple connections share the same key.
    generation: u64,
}
/// A pooled QUIC/HTTP/3 connection (multiplexed like H2).
pub struct PooledH3 {
    pub connection: quinn::Connection,
    pub created_at: Instant,
    pub generation: u64,
}
/// Backend connection pool.
pub struct ConnectionPool {
    /// HTTP/1.1 idle connections indexed by backend key.
    h1_pool: Arc<DashMap<PoolKey, Vec<IdleH1>>>,
    /// HTTP/2 multiplexed connections indexed by backend key.
    h2_pool: Arc<DashMap<PoolKey, PooledH2>>,
    /// HTTP/3 (QUIC) connections indexed by backend key.
    h3_pool: Arc<DashMap<PoolKey, PooledH3>>,
    /// Monotonic generation counter for H2/H3 pool entries.
    h2_generation: AtomicU64,
    /// Handle for the background eviction task.
    eviction_handle: Option<tokio::task::JoinHandle<()>>,
}
impl ConnectionPool {
    /// Create a new pool and start the background eviction task.
    ///
    /// NOTE: must be called from within a tokio runtime — `new` uses
    /// `tokio::spawn` to start the eviction loop.
    pub fn new() -> Self {
        let h1_pool: Arc<DashMap<PoolKey, Vec<IdleH1>>> = Arc::new(DashMap::new());
        let h2_pool: Arc<DashMap<PoolKey, PooledH2>> = Arc::new(DashMap::new());
        let h3_pool: Arc<DashMap<PoolKey, PooledH3>> = Arc::new(DashMap::new());
        let h1_clone = Arc::clone(&h1_pool);
        let h2_clone = Arc::clone(&h2_pool);
        let h3_clone = Arc::clone(&h3_pool);
        let eviction_handle = tokio::spawn(async move {
            Self::eviction_loop(h1_clone, h2_clone, h3_clone).await;
        });
        Self {
            h1_pool,
            h2_pool,
            h3_pool,
            h2_generation: AtomicU64::new(0),
            eviction_handle: Some(eviction_handle),
        }
    }

    /// Try to check out an idle HTTP/1.1 sender for the given key.
    /// Returns `None` if no usable idle connection exists.
    pub fn checkout_h1(&self, key: &PoolKey) -> Option<http1::SendRequest<BoxBody<Bytes, hyper::Error>>> {
        let mut entry = self.h1_pool.get_mut(key)?;
        let idles = entry.value_mut();
        while let Some(idle) = idles.pop() {
            // Check if the connection is still alive and ready.
            if idle.idle_since.elapsed() < IDLE_TIMEOUT && idle.sender.is_ready() && !idle.sender.is_closed() {
                // H1 pool hit — no logging on hot path.
                return Some(idle.sender);
            }
            // Stale or closed — drop it and try the next one.
        }
        // All candidates were stale. Release the shard guard BEFORE remove():
        // holding a DashMap ref while removing the same key would deadlock.
        if idles.is_empty() {
            drop(entry);
            self.h1_pool.remove(key);
        }
        None
    }

    /// Return an HTTP/1.1 sender to the pool after the response body has been prepared.
    /// The caller should NOT call this if the sender is closed or not ready.
    pub fn checkin_h1(&self, key: PoolKey, sender: http1::SendRequest<BoxBody<Bytes, hyper::Error>>) {
        if sender.is_closed() || !sender.is_ready() {
            return; // Don't pool broken connections
        }
        let mut entry = self.h1_pool.entry(key).or_insert_with(Vec::new);
        if entry.value().len() < MAX_IDLE_PER_KEY {
            entry.value_mut().push(IdleH1 {
                sender,
                idle_since: Instant::now(),
            });
        }
        // If at capacity, just drop the sender
    }

    /// Try to get a cloned HTTP/2 sender for the given key.
    /// HTTP/2 senders are Clone-able (multiplexed), so we clone rather than remove.
    /// Returns the sender together with the entry's age.
    pub fn checkout_h2(&self, key: &PoolKey) -> Option<(http2::SendRequest<BoxBody<Bytes, hyper::Error>>, Duration)> {
        let entry = self.h2_pool.get(key)?;
        let pooled = entry.value();
        let age = pooled.created_at.elapsed();
        if pooled.sender.is_closed() || age >= MAX_H2_AGE {
            // Evict, but only OUR generation: a concurrent register_h2 may have
            // already replaced this entry with a fresh connection, and an
            // unconditional remove would phantom-evict it (same hazard that
            // remove_h2_if_generation exists to prevent).
            let generation = pooled.generation;
            drop(entry);
            self.remove_h2_if_generation(key, generation);
            return None;
        }
        if pooled.sender.is_ready() {
            return Some((pooled.sender.clone(), age));
        }
        None
    }

    /// Remove a dead HTTP/2 sender from the pool (unconditional).
    /// Called when `send_request` fails to prevent subsequent requests from reusing the stale sender.
    pub fn remove_h2(&self, key: &PoolKey) {
        self.h2_pool.remove(key);
    }

    /// Remove an HTTP/2 sender ONLY if the current entry has the expected generation.
    /// This prevents phantom eviction: when multiple connections share the same key,
    /// an old connection's driver won't accidentally remove a newer connection's entry.
    pub fn remove_h2_if_generation(&self, key: &PoolKey, expected_gen: u64) {
        if let Some(entry) = self.h2_pool.get(key) {
            if entry.value().generation == expected_gen {
                drop(entry); // release DashMap ref before remove
                self.h2_pool.remove(key);
            }
            // else: a newer connection replaced ours — don't touch it
        }
    }

    /// Register an HTTP/2 sender in the pool. Returns the generation ID for this entry.
    /// The caller should pass this generation to the connection driver so it can use
    /// `remove_h2_if_generation` instead of `remove_h2` to avoid phantom eviction.
    pub fn register_h2(&self, key: PoolKey, sender: http2::SendRequest<BoxBody<Bytes, hyper::Error>>) -> u64 {
        // Named `generation` (not `gen`): `gen` is a reserved keyword in Rust 2024.
        let generation = self.h2_generation.fetch_add(1, Ordering::Relaxed);
        if sender.is_closed() {
            return generation;
        }
        self.h2_pool.insert(key, PooledH2 {
            sender,
            created_at: Instant::now(),
            generation,
        });
        generation
    }

    // ── HTTP/3 (QUIC) pool methods ──

    /// Try to get a pooled QUIC connection for the given key.
    /// QUIC connections are multiplexed — the connection is shared, not removed.
    pub fn checkout_h3(&self, key: &PoolKey) -> Option<(quinn::Connection, Duration)> {
        let entry = self.h3_pool.get(key)?;
        let pooled = entry.value();
        let age = pooled.created_at.elapsed();
        // Aged out, or the QUIC connection has already closed. Evict only our
        // own generation (see checkout_h2 for the phantom-eviction rationale).
        if age >= MAX_H3_AGE || pooled.connection.close_reason().is_some() {
            let generation = pooled.generation;
            drop(entry);
            self.remove_h3_if_generation(key, generation);
            return None;
        }
        Some((pooled.connection.clone(), age))
    }

    /// Register a QUIC connection in the pool. Returns the generation ID.
    /// Generations are drawn from the same counter as H2 entries, so they are
    /// unique across both pools.
    pub fn register_h3(&self, key: PoolKey, connection: quinn::Connection) -> u64 {
        let generation = self.h2_generation.fetch_add(1, Ordering::Relaxed);
        self.h3_pool.insert(key, PooledH3 {
            connection,
            created_at: Instant::now(),
            generation,
        });
        generation
    }

    /// Remove a QUIC connection only if generation matches.
    pub fn remove_h3_if_generation(&self, key: &PoolKey, expected_gen: u64) {
        if let Some(entry) = self.h3_pool.get(key) {
            if entry.value().generation == expected_gen {
                drop(entry);
                self.h3_pool.remove(key);
            }
        }
    }

    /// Background eviction loop — runs every EVICTION_INTERVAL to remove stale connections.
    async fn eviction_loop(
        h1_pool: Arc<DashMap<PoolKey, Vec<IdleH1>>>,
        h2_pool: Arc<DashMap<PoolKey, PooledH2>>,
        h3_pool: Arc<DashMap<PoolKey, PooledH3>>,
    ) {
        let mut interval = tokio::time::interval(EVICTION_INTERVAL);
        loop {
            interval.tick().await;
            // Evict stale H1 connections. Keys are collected first and removed
            // after iteration so no shard guard is held across remove().
            let mut empty_keys = Vec::new();
            for mut entry in h1_pool.iter_mut() {
                entry.value_mut().retain(|idle| {
                    idle.idle_since.elapsed() < IDLE_TIMEOUT && !idle.sender.is_closed()
                });
                if entry.value().is_empty() {
                    empty_keys.push(entry.key().clone());
                }
            }
            for key in empty_keys {
                h1_pool.remove(&key);
            }
            // Evict dead or aged-out H2 connections
            let mut dead_h2 = Vec::new();
            for entry in h2_pool.iter() {
                if entry.value().sender.is_closed() || entry.value().created_at.elapsed() >= MAX_H2_AGE {
                    dead_h2.push(entry.key().clone());
                }
            }
            for key in dead_h2 {
                h2_pool.remove(&key);
            }
            // Evict dead or aged-out H3 (QUIC) connections
            let mut dead_h3 = Vec::new();
            for entry in h3_pool.iter() {
                if entry.value().connection.close_reason().is_some()
                    || entry.value().created_at.elapsed() >= MAX_H3_AGE
                {
                    dead_h3.push(entry.key().clone());
                }
            }
            for key in dead_h3 {
                h3_pool.remove(&key);
            }
        }
    }
}
impl Drop for ConnectionPool {
    /// Abort the background eviction task so it does not outlive the pool.
    fn drop(&mut self) {
        if let Some(task) = std::mem::take(&mut self.eviction_handle) {
            task.abort();
        }
    }
}

View File

@@ -9,21 +9,37 @@ use bytes::Bytes;
use http_body::Frame;
use rustproxy_metrics::MetricsCollector;
/// Flush accumulated bytes to the metrics collector every 64 KB.
/// This reduces per-frame DashMap shard-locked reads from ~15 to ~1 per 4 frames
/// (assuming typical 16 KB upload frames). The 1 Hz throughput sampler still sees
/// data within one sampling period even at low transfer rates.
const BYTE_FLUSH_THRESHOLD: u64 = 65_536;
/// Wraps any `http_body::Body` and counts data bytes passing through.
///
/// When the body is fully consumed or dropped, accumulated byte counts
/// are reported to the `MetricsCollector`.
/// Bytes are accumulated and flushed to the `MetricsCollector` every
/// [`BYTE_FLUSH_THRESHOLD`] bytes (and on Drop) so the throughput tracker
/// (sampled at 1 Hz) reflects real-time data flow without per-frame overhead.
///
/// The inner body is pinned on the heap to support `!Unpin` types like `hyper::body::Incoming`.
pub struct CountingBody<B> {
inner: Pin<Box<B>>,
counted_bytes: AtomicU64,
metrics: Arc<MetricsCollector>,
route_id: Option<String>,
route_id: Option<Arc<str>>,
source_ip: Option<Arc<str>>,
/// Whether we count bytes as "in" (request body) or "out" (response body).
direction: Direction,
/// Whether we've already reported the bytes (to avoid double-reporting on drop).
reported: bool,
/// Accumulated bytes not yet flushed to the metrics collector.
pending_bytes: u64,
/// Optional connection-level activity tracker. When set, poll_frame updates this
/// to keep the idle watchdog alive during active body streaming (uploads/downloads).
connection_activity: Option<Arc<AtomicU64>>,
/// Start instant for computing elapsed ms for connection_activity.
activity_start: Option<std::time::Instant>,
/// Optional active-request counter. When set, CountingBody increments on creation
/// and decrements on Drop, keeping the HTTP idle watchdog aware that a response
/// body is still streaming (even after the request handler has returned).
active_requests: Option<Arc<AtomicU64>>,
}
/// Which direction the bytes flow.
@@ -40,45 +56,58 @@ impl<B> CountingBody<B> {
pub fn new(
inner: B,
metrics: Arc<MetricsCollector>,
route_id: Option<String>,
route_id: Option<Arc<str>>,
source_ip: Option<Arc<str>>,
direction: Direction,
) -> Self {
Self {
inner: Box::pin(inner),
counted_bytes: AtomicU64::new(0),
metrics,
route_id,
source_ip,
direction,
reported: false,
pending_bytes: 0,
connection_activity: None,
activity_start: None,
active_requests: None,
}
}
/// Report accumulated bytes to the metrics collector.
fn report(&mut self) {
if self.reported {
/// Set the connection-level activity tracker. When set, each data frame
/// updates this timestamp to prevent the idle watchdog from killing the
/// connection during active body streaming.
pub fn with_connection_activity(mut self, activity: Arc<AtomicU64>, start: std::time::Instant) -> Self {
self.connection_activity = Some(activity);
self.activity_start = Some(start);
self
}
/// Set the active-request counter for the HTTP idle watchdog.
/// CountingBody increments on creation and decrements on Drop, ensuring the
/// idle watchdog sees an "active request" while the response body streams.
pub fn with_active_requests(mut self, counter: Arc<AtomicU64>) -> Self {
counter.fetch_add(1, Ordering::Relaxed);
self.active_requests = Some(counter);
self
}
/// Flush accumulated bytes to the metrics collector.
#[inline]
fn flush_pending(&mut self) {
if self.pending_bytes == 0 {
return;
}
self.reported = true;
let bytes = self.counted_bytes.load(Ordering::Relaxed);
if bytes == 0 {
return;
}
let bytes = self.pending_bytes;
self.pending_bytes = 0;
let route_id = self.route_id.as_deref();
let source_ip = self.source_ip.as_deref();
match self.direction {
Direction::In => self.metrics.record_bytes(bytes, 0, route_id),
Direction::Out => self.metrics.record_bytes(0, bytes, route_id),
Direction::In => self.metrics.record_bytes(bytes, 0, route_id, source_ip),
Direction::Out => self.metrics.record_bytes(0, bytes, route_id, source_ip),
}
}
}
impl<B> Drop for CountingBody<B> {
fn drop(&mut self) {
self.report();
}
}
// CountingBody is Unpin because inner is Pin<Box<B>> (always Unpin).
impl<B> Unpin for CountingBody<B> {}
@@ -98,14 +127,23 @@ where
match this.inner.as_mut().poll_frame(cx) {
Poll::Ready(Some(Ok(frame))) => {
if let Some(data) = frame.data_ref() {
this.counted_bytes.fetch_add(data.len() as u64, Ordering::Relaxed);
let len = data.len() as u64;
this.pending_bytes += len;
if this.pending_bytes >= BYTE_FLUSH_THRESHOLD {
this.flush_pending();
}
// Keep the connection-level idle watchdog alive on every frame
// (this is just one atomic store — cheap enough per-frame)
if let (Some(activity), Some(start)) = (&this.connection_activity, &this.activity_start) {
activity.store(start.elapsed().as_millis() as u64, Ordering::Relaxed);
}
}
Poll::Ready(Some(Ok(frame)))
}
Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))),
Poll::Ready(None) => {
// Body is fully consumed — report now
this.report();
// End of stream — flush any remaining bytes
this.flush_pending();
Poll::Ready(None)
}
Poll::Pending => Poll::Pending,
@@ -120,3 +158,15 @@ where
self.inner.size_hint()
}
}
impl<B> Drop for CountingBody<B> {
fn drop(&mut self) {
// Flush any remaining accumulated bytes so totals stay accurate
self.flush_pending();
// Decrement the active-request counter so the HTTP idle watchdog
// knows this response body is no longer streaming.
if let Some(ref counter) = self.active_requests {
counter.fetch_sub(1, Ordering::Relaxed);
}
}
}

View File

@@ -0,0 +1,332 @@
//! HTTP/3 proxy service.
//!
//! Accepts QUIC connections via quinn, runs h3 server to handle HTTP/3 requests,
//! and forwards them to backends using the same routing and pool infrastructure
//! as the HTTP/1+2 proxy.
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::Duration;
use arc_swap::ArcSwap;
use bytes::{Buf, Bytes};
use http_body::Frame;
use tracing::{debug, warn};
use rustproxy_config::{RouteConfig, TransportProtocol};
use rustproxy_metrics::MetricsCollector;
use rustproxy_routing::{MatchContext, RouteManager};
use crate::connection_pool::ConnectionPool;
use crate::protocol_cache::ProtocolCache;
use crate::upstream_selector::UpstreamSelector;
/// HTTP/3 proxy service.
///
/// Handles QUIC connections with the h3 crate, parses HTTP/3 requests,
/// and forwards them to backends using per-request route matching and
/// shared connection pooling.
pub struct H3ProxyService {
    // Hot-swappable routing table shared with the TCP-side proxy.
    route_manager: Arc<ArcSwap<RouteManager>>,
    // Request and byte counters.
    metrics: Arc<MetricsCollector>,
    // Shared backend connection pool (H1/H2/H3).
    // NOTE(review): handle_h3_request currently dials a fresh TCP connection
    // per request and ignores this pool — confirm whether pooling is intended.
    connection_pool: Arc<ConnectionPool>,
    #[allow(dead_code)]
    protocol_cache: Arc<ProtocolCache>,
    #[allow(dead_code)]
    upstream_selector: UpstreamSelector,
    #[allow(dead_code)]
    backend_tls_config: Arc<rustls::ClientConfig>,
    // Timeout for establishing the TCP connection to the backend.
    connect_timeout: Duration,
}
impl H3ProxyService {
pub fn new(
route_manager: Arc<ArcSwap<RouteManager>>,
metrics: Arc<MetricsCollector>,
connection_pool: Arc<ConnectionPool>,
protocol_cache: Arc<ProtocolCache>,
backend_tls_config: Arc<rustls::ClientConfig>,
connect_timeout: Duration,
) -> Self {
Self {
route_manager: Arc::clone(&route_manager),
metrics: Arc::clone(&metrics),
connection_pool,
protocol_cache,
upstream_selector: UpstreamSelector::new(),
backend_tls_config,
connect_timeout,
}
}
/// Handle an accepted QUIC connection as HTTP/3.
pub async fn handle_connection(
&self,
connection: quinn::Connection,
_fallback_route: &RouteConfig,
port: u16,
) -> anyhow::Result<()> {
let remote_addr = connection.remote_address();
debug!("HTTP/3 connection from {} on port {}", remote_addr, port);
let mut h3_conn: h3::server::Connection<h3_quinn::Connection, Bytes> =
h3::server::Connection::new(h3_quinn::Connection::new(connection))
.await
.map_err(|e| anyhow::anyhow!("H3 connection setup failed: {}", e))?;
let client_ip = remote_addr.ip().to_string();
loop {
match h3_conn.accept().await {
Ok(Some(resolver)) => {
let (request, stream) = match resolver.resolve_request().await {
Ok(pair) => pair,
Err(e) => {
debug!("HTTP/3 request resolve error: {}", e);
continue;
}
};
self.metrics.record_http_request();
let rm = self.route_manager.load();
let pool = Arc::clone(&self.connection_pool);
let metrics = Arc::clone(&self.metrics);
let connect_timeout = self.connect_timeout;
let client_ip = client_ip.clone();
tokio::spawn(async move {
if let Err(e) = handle_h3_request(
request, stream, port, &client_ip, &rm, &pool, &metrics, connect_timeout,
).await {
debug!("HTTP/3 request error from {}: {}", client_ip, e);
}
});
}
Ok(None) => {
debug!("HTTP/3 connection from {} closed", remote_addr);
break;
}
Err(e) => {
debug!("HTTP/3 accept error from {}: {}", remote_addr, e);
break;
}
}
}
Ok(())
}
}
/// Handle a single HTTP/3 request with per-request route matching.
///
/// Flow: match a route for (port, host, path, client IP) → dial the backend
/// over TCP as HTTP/1.1 → stream the H3 request body to the backend through a
/// bounded channel → relay the backend response headers and body back on the
/// H3 stream (adding an `Alt-Svc` header) → record byte counters and finish.
async fn handle_h3_request(
    request: hyper::Request<()>,
    mut stream: h3::server::RequestStream<h3_quinn::BidiStream<Bytes>, Bytes>,
    port: u16,
    client_ip: &str,
    route_manager: &RouteManager,
    _connection_pool: &ConnectionPool,
    metrics: &MetricsCollector,
    connect_timeout: Duration,
) -> anyhow::Result<()> {
    let method = request.method().clone();
    let uri = request.uri().clone();
    let path = uri.path().to_string();
    // Extract host from :authority or Host header
    let host = request.uri().authority()
        .map(|a| a.as_str().to_string())
        .or_else(|| request.headers().get("host").and_then(|v| v.to_str().ok()).map(|s| s.to_string()))
        .unwrap_or_default();
    debug!("HTTP/3 {} {} (host: {}, client: {})", method, path, host, client_ip);
    // Per-request route matching. transport is Udp because HTTP/3 arrives over
    // QUIC; tls_version is fixed to "TLSv1.3" (QUIC's only TLS version).
    let ctx = MatchContext {
        port,
        domain: if host.is_empty() { None } else { Some(&host) },
        path: Some(&path),
        client_ip: Some(client_ip),
        tls_version: Some("TLSv1.3"),
        headers: None,
        is_tls: true,
        protocol: Some("http"),
        transport: Some(TransportProtocol::Udp),
    };
    let route_match = route_manager.find_route(&ctx)
        .ok_or_else(|| anyhow::anyhow!("No route matched for HTTP/3 request to {}{}", host, path))?;
    let route = route_match.route;
    // Resolve backend target (use matched target or first target)
    let target = route_match.target
        .or_else(|| route.action.targets.as_ref().and_then(|t| t.first()))
        .ok_or_else(|| anyhow::anyhow!("No target for HTTP/3 route"))?;
    let backend_host = target.host.first();
    let backend_port = target.port.resolve(port);
    let backend_addr = format!("{}:{}", backend_host, backend_port);
    // Connect to backend via TCP HTTP/1.1 with timeout.
    // NOTE(review): target.backend_transport is not consulted here — confirm
    // whether QUIC-to-QUIC backend forwarding should be handled on this path.
    let tcp_stream = tokio::time::timeout(
        connect_timeout,
        tokio::net::TcpStream::connect(&backend_addr),
    ).await
    .map_err(|_| anyhow::anyhow!("Backend connect timeout to {}", backend_addr))?
    .map_err(|e| anyhow::anyhow!("Backend connect to {} failed: {}", backend_addr, e))?;
    let _ = tcp_stream.set_nodelay(true);
    let io = hyper_util::rt::TokioIo::new(tcp_stream);
    let (mut sender, conn) = hyper::client::conn::http1::handshake(io).await
        .map_err(|e| anyhow::anyhow!("Backend handshake failed: {}", e))?;
    // The connection driver must be polled for the request to make progress;
    // run it on its own task for the lifetime of the backend connection.
    tokio::spawn(async move {
        if let Err(e) = conn.await {
            debug!("Backend connection closed: {}", e);
        }
    });
    // Stream request body from H3 client to backend via an mpsc channel.
    // This avoids buffering the entire request body in memory.
    // Capacity 4 bounds in-flight chunks, giving backpressure toward the client.
    let (body_tx, body_rx) = tokio::sync::mpsc::channel::<Bytes>(4);
    let total_bytes_in = Arc::new(std::sync::atomic::AtomicU64::new(0));
    let total_bytes_in_writer = Arc::clone(&total_bytes_in);
    // Spawn the H3 body reader task.
    // NOTE(review): a recv_data() error exits the `while let Ok(Some(..))` loop
    // exactly like a clean end-of-body — confirm that truncated uploads should
    // not instead abort the backend request.
    let body_reader = tokio::spawn(async move {
        while let Ok(Some(mut chunk)) = stream.recv_data().await {
            let data = Bytes::copy_from_slice(chunk.chunk());
            total_bytes_in_writer.fetch_add(data.len() as u64, std::sync::atomic::Ordering::Relaxed);
            chunk.advance(chunk.remaining());
            if body_tx.send(data).await.is_err() {
                break;
            }
        }
        // The RequestStream is returned so the response can be sent on it below.
        stream
    });
    // Create a body that polls from the mpsc receiver
    let body = H3RequestBody { receiver: body_rx };
    let backend_req = build_backend_request(&method, &backend_addr, &path, &host, &request, body)?;
    let response = sender.send_request(backend_req).await
        .map_err(|e| anyhow::anyhow!("Backend request failed: {}", e))?;
    // Await the body reader to get the stream back.
    // NOTE(review): if the backend responds without consuming the full body,
    // this presumably unblocks via hyper dropping H3RequestBody (closing the
    // receiver, so body_tx.send fails) — verify no path leaves the reader pending.
    let mut stream = body_reader.await
        .map_err(|e| anyhow::anyhow!("Body reader task failed: {}", e))?;
    let total_bytes_in = total_bytes_in.load(std::sync::atomic::Ordering::Relaxed);
    // Build H3 response
    let status = response.status();
    let mut h3_response = hyper::Response::builder().status(status);
    // Copy response headers (skip hop-by-hop — meaningless on an H3 stream)
    for (name, value) in response.headers() {
        let n = name.as_str().to_lowercase();
        if n == "transfer-encoding" || n == "connection" || n == "keep-alive" || n == "upgrade" {
            continue;
        }
        h3_response = h3_response.header(name, value);
    }
    // Add Alt-Svc for HTTP/3 advertisement; port/max-age come from the route's
    // QUIC config when present, otherwise the listening port with ma=86400.
    let alt_svc = route.action.udp.as_ref()
        .and_then(|u| u.quic.as_ref())
        .map(|q| {
            let p = q.alt_svc_port.unwrap_or(port);
            let ma = q.alt_svc_max_age.unwrap_or(86400);
            format!("h3=\":{}\"; ma={}", p, ma)
        })
        .unwrap_or_else(|| format!("h3=\":{}\"; ma=86400", port));
    h3_response = h3_response.header("alt-svc", alt_svc);
    let h3_response = h3_response.body(())
        .map_err(|e| anyhow::anyhow!("Failed to build H3 response: {}", e))?;
    // Send response headers
    stream.send_response(h3_response).await
        .map_err(|e| anyhow::anyhow!("Failed to send H3 response: {}", e))?;
    // Stream response body back, frame by frame (no buffering of the full body)
    use http_body_util::BodyExt;
    let mut body = response.into_body();
    let mut total_bytes_out: u64 = 0;
    while let Some(frame) = body.frame().await {
        match frame {
            Ok(frame) => {
                if let Some(data) = frame.data_ref() {
                    total_bytes_out += data.len() as u64;
                    stream.send_data(Bytes::copy_from_slice(data)).await
                        .map_err(|e| anyhow::anyhow!("Failed to send H3 data: {}", e))?;
                }
            }
            Err(e) => {
                // Best-effort: a backend body error truncates the response
                // rather than failing the whole request.
                warn!("Backend body read error: {}", e);
                break;
            }
        }
    }
    // Record metrics (route name preferred over id for the label)
    let route_id = route.name.as_deref().or(route.id.as_deref());
    metrics.record_bytes(total_bytes_in, total_bytes_out, route_id, Some(client_ip));
    // Finish the stream
    stream.finish().await
        .map_err(|e| anyhow::anyhow!("Failed to finish H3 stream: {}", e))?;
    Ok(())
}
/// Build an HTTP/1.1 backend request from the H3 frontend request.
///
/// The URI targets the backend address directly, the frontend host is carried
/// in the `host` header, and every non-pseudo header except `host` is copied
/// over unchanged.
fn build_backend_request<B>(
    method: &hyper::Method,
    backend_addr: &str,
    path: &str,
    host: &str,
    original_request: &hyper::Request<()>,
    body: B,
) -> anyhow::Result<hyper::Request<B>> {
    let mut builder = hyper::Request::builder()
        .method(method)
        .uri(format!("http://{}{}", backend_addr, path))
        .header("host", host);
    for (name, value) in original_request.headers() {
        let key = name.as_str();
        // HTTP/3 pseudo-headers (":authority", ":path", …) must not reach an
        // HTTP/1.1 backend, and "host" was already set above.
        let skip = key.starts_with(':') || key == "host";
        if !skip {
            builder = builder.header(name, value);
        }
    }
    builder
        .body(body)
        .map_err(|e| anyhow::anyhow!("Failed to build backend request: {}", e))
}
/// A streaming request body backed by an mpsc channel receiver.
///
/// Implements `http_body::Body` so hyper can poll chunks as they arrive
/// from the H3 client, avoiding buffering the entire request body in memory.
struct H3RequestBody {
    receiver: tokio::sync::mpsc::Receiver<Bytes>,
}

impl http_body::Body for H3RequestBody {
    type Data = Bytes;
    type Error = hyper::Error;

    fn poll_frame(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> {
        // Ready(Some(chunk)) -> emit one data frame;
        // Ready(None)        -> channel closed, i.e. end of body;
        // Pending            -> propagated unchanged (waker registered by poll_recv).
        self.receiver
            .poll_recv(cx)
            .map(|maybe_chunk| maybe_chunk.map(|chunk| Ok(Frame::data(chunk))))
    }
}

View File

@@ -3,13 +3,18 @@
//! Hyper-based HTTP proxy service for RustProxy.
//! Handles HTTP request parsing, route-based forwarding, and response filtering.
pub mod connection_pool;
pub mod counting_body;
pub mod protocol_cache;
pub mod proxy_service;
pub mod request_filter;
pub mod response_filter;
pub mod shutdown_on_drop;
pub mod template;
pub mod upstream_selector;
pub mod h3_service;
pub use connection_pool::*;
pub use counting_body::*;
pub use proxy_service::*;
pub use template::*;

View File

@@ -0,0 +1,141 @@
//! Bounded, TTL-based protocol detection cache for HTTP/2 auto-detection.
//!
//! Caches the ALPN-negotiated protocol (H1 or H2) per backend endpoint and requested
//! domain (host:port + requested_host). This prevents cache oscillation when multiple
//! frontend domains share the same backend but differ in HTTP/2 support.
use std::sync::Arc;
use std::time::{Duration, Instant};
use dashmap::DashMap;
use tracing::debug;
/// TTL for cached protocol detection results.
/// After this duration, the next request will re-probe the backend.
const PROTOCOL_CACHE_TTL: Duration = Duration::from_secs(300); // 5 minutes
/// Maximum number of entries in the protocol cache.
/// Prevents unbounded growth when backends come and go.
const PROTOCOL_CACHE_MAX_ENTRIES: usize = 4096;
/// Background cleanup interval for the protocol cache.
const PROTOCOL_CACHE_CLEANUP_INTERVAL: Duration = Duration::from_secs(60);
/// Detected backend protocol.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DetectedProtocol {
// HTTP/1.x
H1,
// HTTP/2
H2,
// HTTP/3 (QUIC)
H3,
}
/// Key for the protocol cache: (host, port, requested_host).
#[derive(Clone, Debug, Hash, Eq, PartialEq)]
pub struct ProtocolCacheKey {
pub host: String,
pub port: u16,
/// The incoming request's domain (Host header / :authority).
/// Distinguishes protocol detection when multiple domains share the same backend.
pub requested_host: Option<String>,
}
/// A cached protocol detection result with a timestamp.
struct CachedEntry {
// The protocol negotiated with the backend when this entry was cached.
protocol: DetectedProtocol,
// When the detection happened; compared against PROTOCOL_CACHE_TTL on lookup.
detected_at: Instant,
}
/// Bounded, TTL-based protocol detection cache.
///
/// Memory safety guarantees:
/// - Hard cap at `PROTOCOL_CACHE_MAX_ENTRIES` — cannot grow unboundedly.
/// - TTL expiry — stale entries naturally age out on lookup.
/// - Background cleanup task — proactively removes expired entries every 60s.
/// - `clear()` — called on route updates to discard stale detections.
/// - `Drop` — aborts the background task to prevent dangling tokio tasks.
pub struct ProtocolCache {
// Concurrent map, shared with the background cleanup task via Arc.
cache: Arc<DashMap<ProtocolCacheKey, CachedEntry>>,
// Handle of the cleanup task; Some until Drop takes it to abort.
cleanup_handle: Option<tokio::task::JoinHandle<()>>,
}
impl ProtocolCache {
/// Create a new protocol cache and start the background cleanup task.
// NOTE(review): uses tokio::spawn, so this must be called from within a
// tokio runtime context — confirm all construction sites satisfy that.
pub fn new() -> Self {
let cache: Arc<DashMap<ProtocolCacheKey, CachedEntry>> = Arc::new(DashMap::new());
let cache_clone = Arc::clone(&cache);
let cleanup_handle = tokio::spawn(async move {
Self::cleanup_loop(cache_clone).await;
});
Self {
cache,
cleanup_handle: Some(cleanup_handle),
}
}
/// Look up the cached protocol for a backend endpoint.
/// Returns `None` if not cached or expired (caller should probe via ALPN).
pub fn get(&self, key: &ProtocolCacheKey) -> Option<DetectedProtocol> {
let entry = self.cache.get(key)?;
if entry.detected_at.elapsed() < PROTOCOL_CACHE_TTL {
debug!("Protocol cache hit: {:?} for {}:{} (requested: {:?})", entry.protocol, key.host, key.port, key.requested_host);
Some(entry.protocol)
} else {
// Expired — remove and return None to trigger re-probe
// Holding the read guard across remove() on the same key can deadlock
// the DashMap shard lock, hence the explicit drop first.
drop(entry); // release DashMap ref before remove
self.cache.remove(key);
None
}
}
/// Insert a detected protocol into the cache.
/// If the cache is at capacity, evict the oldest entry first.
// Eviction is an O(n) scan over all entries; acceptable because it only
// happens once the 4096-entry cap is reached, and only for new keys.
pub fn insert(&self, key: ProtocolCacheKey, protocol: DetectedProtocol) {
if self.cache.len() >= PROTOCOL_CACHE_MAX_ENTRIES && !self.cache.contains_key(&key) {
// Evict the oldest entry to stay within bounds
let oldest = self.cache.iter()
.min_by_key(|entry| entry.value().detected_at)
.map(|entry| entry.key().clone());
if let Some(oldest_key) = oldest {
self.cache.remove(&oldest_key);
}
}
self.cache.insert(key, CachedEntry {
protocol,
detected_at: Instant::now(),
});
}
/// Clear all entries. Called on route updates to discard stale detections.
pub fn clear(&self) {
self.cache.clear();
}
/// Background cleanup loop — removes expired entries every `PROTOCOL_CACHE_CLEANUP_INTERVAL`.
// Collect expired keys first, then remove, so no shard ref is held while
// mutating the map.
async fn cleanup_loop(cache: Arc<DashMap<ProtocolCacheKey, CachedEntry>>) {
let mut interval = tokio::time::interval(PROTOCOL_CACHE_CLEANUP_INTERVAL);
loop {
interval.tick().await;
let expired: Vec<ProtocolCacheKey> = cache.iter()
.filter(|entry| entry.value().detected_at.elapsed() >= PROTOCOL_CACHE_TTL)
.map(|entry| entry.key().clone())
.collect();
if !expired.is_empty() {
debug!("Protocol cache cleanup: removing {} expired entries", expired.len());
for key in expired {
cache.remove(&key);
}
}
}
}
}
impl Drop for ProtocolCache {
fn drop(&mut self) {
// Abort the background cleanup task so it does not outlive the cache;
// otherwise the spawned loop would keep its Arc<DashMap> alive forever.
if let Some(handle) = self.cleanup_handle.take() {
handle.abort();
}
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -10,7 +10,23 @@ pub struct ResponseFilter;
impl ResponseFilter {
/// Apply response headers from route config and CORS settings.
/// If a `RequestContext` is provided, template variables in header values will be expanded.
/// Also injects Alt-Svc header for routes with HTTP/3 enabled.
pub fn apply_headers(route: &RouteConfig, headers: &mut HeaderMap, req_ctx: Option<&RequestContext>) {
// Inject Alt-Svc for HTTP/3 advertisement if QUIC/HTTP3 is enabled on this route
if let Some(ref udp) = route.action.udp {
if let Some(ref quic) = udp.quic {
if quic.enable_http3.unwrap_or(false) {
let port = quic.alt_svc_port
.or_else(|| req_ctx.map(|c| c.port))
.unwrap_or(443);
let max_age = quic.alt_svc_max_age.unwrap_or(86400);
let alt_svc = format!("h3=\":{}\"; ma={}", port, max_age);
if let Ok(val) = HeaderValue::from_str(&alt_svc) {
headers.insert("alt-svc", val);
}
}
}
}
// Apply custom response headers from route config
if let Some(ref route_headers) = route.headers {
if let Some(ref response_headers) = route_headers.response {

View File

@@ -0,0 +1,102 @@
//! Wrapper that ensures TLS close_notify is sent when the stream is dropped.
//!
//! When hyper drops an HTTP connection (backend error, timeout, normal H2 close),
//! the underlying TLS stream is dropped WITHOUT `shutdown()`. tokio-rustls cannot
//! send `close_notify` in Drop (requires async). This wrapper tracks whether
//! `poll_shutdown` was called and, if not, spawns a background task to send it.
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
/// Wraps an AsyncRead+AsyncWrite stream and ensures `shutdown()` is called when
/// dropped, even if the caller (e.g. hyper) doesn't explicitly shut down.
///
/// This guarantees TLS `close_notify` is sent for TLS-wrapped streams, preventing
/// "GnuTLS recv error (-110): The TLS connection was non-properly terminated" errors.
pub struct ShutdownOnDrop<S: AsyncRead + AsyncWrite + Unpin + Send + 'static> {
// The wrapped stream. Always Some while the wrapper is in use; taken
// (set to None) only inside Drop when handing the stream to a background task.
inner: Option<S>,
// Set once poll_shutdown completes, so Drop knows it need not act.
shutdown_called: bool,
}
impl<S: AsyncRead + AsyncWrite + Unpin + Send + 'static> ShutdownOnDrop<S> {
/// Create a new wrapper around the given stream.
pub fn new(stream: S) -> Self {
Self {
inner: Some(stream),
shutdown_called: false,
}
}
}
impl<S: AsyncRead + AsyncWrite + Unpin + Send + 'static> AsyncRead for ShutdownOnDrop<S> {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
// Delegate to the wrapped stream. `inner` is only taken in Drop, so it
// is always Some while the wrapper is still being polled.
Pin::new(self.get_mut().inner.as_mut().unwrap()).poll_read(cx, buf)
}
}
impl<S: AsyncRead + AsyncWrite + Unpin + Send + 'static> AsyncWrite for ShutdownOnDrop<S> {
// All write-side calls delegate straight to the wrapped stream. `inner` is
// only taken in Drop, so the unwraps cannot fire while callers still poll us.
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Pin::new(self.get_mut().inner.as_mut().unwrap()).poll_write(cx, buf)
}
fn poll_write_vectored(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[io::IoSlice<'_>],
) -> Poll<io::Result<usize>> {
Pin::new(self.get_mut().inner.as_mut().unwrap()).poll_write_vectored(cx, bufs)
}
fn is_write_vectored(&self) -> bool {
self.inner.as_ref().unwrap().is_write_vectored()
}
fn poll_flush(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<io::Result<()>> {
Pin::new(self.get_mut().inner.as_mut().unwrap()).poll_flush(cx)
}
fn poll_shutdown(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<io::Result<()>> {
let this = self.get_mut();
let result = Pin::new(this.inner.as_mut().unwrap()).poll_shutdown(cx);
// Record that shutdown completed (whether Ok or Err) so Drop does not
// spawn a redundant background shutdown for this stream.
if result.is_ready() {
this.shutdown_called = true;
}
result
}
}
impl<S: AsyncRead + AsyncWrite + Unpin + Send + 'static> Drop for ShutdownOnDrop<S> {
    fn drop(&mut self) {
        // The caller (hyper) already drove poll_shutdown to completion —
        // close_notify / FIN has gone out, nothing left to do.
        if self.shutdown_called {
            return;
        }
        // Connection was dropped without an explicit shutdown (H2 close,
        // backend error, timeout). Move the stream into a background task and
        // drive an async shutdown there, bounded by a 2-second timeout, so TLS
        // close_notify / TCP FIN is still sent.
        if let Some(mut stream) = self.inner.take() {
            tokio::spawn(async move {
                let _ = tokio::time::timeout(
                    std::time::Duration::from_secs(2),
                    tokio::io::AsyncWriteExt::shutdown(&mut stream),
                )
                .await;
                // stream drops here — all resources freed
            });
        }
    }
}

View File

@@ -115,11 +115,27 @@ impl UpstreamSelector {
/// Record that a connection to the given host has ended.
pub fn connection_ended(&self, host: &str) {
if let Some(counter) = self.active_connections.get(host) {
let prev = counter.value().fetch_sub(1, Ordering::Relaxed);
// Guard against underflow (shouldn't happen, but be safe)
let prev = counter.value().load(Ordering::Relaxed);
if prev == 0 {
counter.value().store(0, Ordering::Relaxed);
// Already at zero — just clean up the entry
drop(counter);
self.active_connections.remove(host);
return;
}
counter.value().fetch_sub(1, Ordering::Relaxed);
// Clean up zero-count entries to prevent memory growth
if prev <= 1 {
drop(counter);
self.active_connections.remove(host);
}
}
}
/// Clear stale round-robin counters on route update.
/// Resetting is harmless — counters just restart cycling from index 0.
pub fn reset_round_robin(&self) {
if let Ok(mut counters) = self.round_robin.lock() {
counters.clear();
}
}
@@ -168,6 +184,7 @@ mod tests {
send_proxy_protocol: None,
headers: None,
advanced: None,
backend_transport: None,
priority: None,
}
}
@@ -204,6 +221,31 @@ mod tests {
assert_eq!(r4.host, "a");
}
#[test]
fn test_connection_tracking_cleanup() {
let selector = UpstreamSelector::new();
selector.connection_started("backend:8080");
selector.connection_started("backend:8080");
assert_eq!(
selector.active_connections.get("backend:8080").unwrap().load(Ordering::Relaxed),
2
);
selector.connection_ended("backend:8080");
assert_eq!(
selector.active_connections.get("backend:8080").unwrap().load(Ordering::Relaxed),
1
);
// Last connection ends — entry should be removed entirely
selector.connection_ended("backend:8080");
assert!(selector.active_connections.get("backend:8080").is_none());
// Ending on a non-existent key should not panic
selector.connection_ended("nonexistent:9999");
}
#[test]
fn test_ip_hash_consistent() {
let selector = UpstreamSelector::new();

File diff suppressed because it is too large Load Diff

View File

@@ -1,8 +1,10 @@
use serde::{Deserialize, Serialize};
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{Instant, SystemTime, UNIX_EPOCH};
/// A single throughput sample.
#[derive(Debug, Clone, Copy)]
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ThroughputSample {
pub timestamp_ms: u64,
pub bytes_in: u64,
@@ -106,6 +108,27 @@ impl ThroughputTracker {
self.throughput(10)
}
/// Return the last N samples in chronological order (oldest first).
pub fn history(&self, window_seconds: usize) -> Vec<ThroughputSample> {
// Never return more samples than have actually been recorded.
let window = window_seconds.min(self.count);
if window == 0 {
return Vec::new();
}
let mut result = Vec::with_capacity(window);
// Walk backwards from the most recent write position through the ring
// buffer, wrapping past index 0 back to the end of the buffer.
for i in 0..window {
let idx = if self.write_index >= i + 1 {
self.write_index - i - 1
} else {
self.capacity - (i + 1 - self.write_index)
};
// Guard against a buffer that has not yet grown to full capacity.
if idx < self.samples.len() {
result.push(self.samples[idx]);
}
}
result.reverse(); // Return oldest-first (chronological)
result
}
/// How long this tracker has been alive.
pub fn uptime(&self) -> std::time::Duration {
self.created_at.elapsed()
@@ -170,4 +193,40 @@ mod tests {
std::thread::sleep(std::time::Duration::from_millis(10));
assert!(tracker.uptime().as_millis() >= 10);
}
#[test]
fn test_history_returns_chronological() {
let mut tracker = ThroughputTracker::new(60);
// Record 5 distinct samples: bytes_in = 100..500 in steps of 100.
for i in 1..=5 {
tracker.record_bytes(i * 100, i * 200);
tracker.sample();
}
let history = tracker.history(5);
assert_eq!(history.len(), 5);
// First sample should have 100 bytes_in, last should have 500
assert_eq!(history[0].bytes_in, 100);
assert_eq!(history[4].bytes_in, 500);
}
#[test]
fn test_history_wraps_around() {
// Capacity 3 forces the ring buffer to wrap; the two oldest samples
// (100, 200) must be overwritten by the newest ones.
let mut tracker = ThroughputTracker::new(3); // Small capacity
for i in 1..=5 {
tracker.record_bytes(i * 100, i * 200);
tracker.sample();
}
// Only last 3 should be retained
let history = tracker.history(10); // Ask for more than available
assert_eq!(history.len(), 3);
assert_eq!(history[0].bytes_in, 300);
assert_eq!(history[1].bytes_in, 400);
assert_eq!(history[2].bytes_in, 500);
}
#[test]
fn test_history_empty() {
// A tracker with no samples recorded must return an empty history.
let tracker = ThroughputTracker::new(60);
let history = tracker.history(10);
assert!(history.is_empty());
}
}

View File

@@ -9,34 +9,36 @@ pub fn build_dnat_rule(
target_port: u16,
options: &NfTablesOptions,
) -> Vec<String> {
let protocol = match options.protocol.as_ref().unwrap_or(&NfTablesProtocol::Tcp) {
NfTablesProtocol::Tcp => "tcp",
NfTablesProtocol::Udp => "udp",
NfTablesProtocol::All => "tcp", // TODO: handle "all"
let protocols: Vec<&str> = match options.protocol.as_ref().unwrap_or(&NfTablesProtocol::Tcp) {
NfTablesProtocol::Tcp => vec!["tcp"],
NfTablesProtocol::Udp => vec!["udp"],
NfTablesProtocol::All => vec!["tcp", "udp"],
};
let mut rules = Vec::new();
// DNAT rule
rules.push(format!(
"nft add rule ip {} {} {} dport {} dnat to {}:{}",
table_name, chain_name, protocol, source_port, target_host, target_port,
));
// SNAT rule if preserving source IP is not enabled
if !options.preserve_source_ip.unwrap_or(false) {
for protocol in &protocols {
// DNAT rule
rules.push(format!(
"nft add rule ip {} postrouting {} dport {} masquerade",
table_name, protocol, target_port,
"nft add rule ip {} {} {} dport {} dnat to {}:{}",
table_name, chain_name, protocol, source_port, target_host, target_port,
));
}
// Rate limiting
if let Some(max_rate) = &options.max_rate {
rules.push(format!(
"nft add rule ip {} {} {} dport {} limit rate {} accept",
table_name, chain_name, protocol, source_port, max_rate,
));
// SNAT rule if preserving source IP is not enabled
if !options.preserve_source_ip.unwrap_or(false) {
rules.push(format!(
"nft add rule ip {} postrouting {} dport {} masquerade",
table_name, protocol, target_port,
));
}
// Rate limiting
if let Some(max_rate) = &options.max_rate {
rules.push(format!(
"nft add rule ip {} {} {} dport {} limit rate {} accept",
table_name, chain_name, protocol, source_port, max_rate,
));
}
}
rules
@@ -120,4 +122,25 @@ mod tests {
assert_eq!(commands.len(), 1);
assert!(commands[0].contains("delete table ip rustproxy"));
}
#[test]
fn test_protocol_all_generates_tcp_and_udp_rules() {
// Protocol::All must expand into a full rule set per protocol.
let mut options = make_options();
options.protocol = Some(NfTablesProtocol::All);
let rules = build_dnat_rule("rustproxy", "prerouting", 53, "10.0.0.53", 53, &options);
// Should have TCP DNAT + masquerade + UDP DNAT + masquerade = 4 rules
assert_eq!(rules.len(), 4);
assert!(rules.iter().any(|r| r.contains("tcp dport 53 dnat")));
assert!(rules.iter().any(|r| r.contains("udp dport 53 dnat")));
assert!(rules.iter().filter(|r| r.contains("masquerade")).count() == 2);
}
#[test]
fn test_protocol_udp() {
// UDP-only config must not emit any tcp-matching rules.
let mut options = make_options();
options.protocol = Some(NfTablesProtocol::Udp);
let rules = build_dnat_rule("rustproxy", "prerouting", 53, "10.0.0.53", 53, &options);
assert!(rules.iter().all(|r| !r.contains("tcp")));
assert!(rules.iter().any(|r| r.contains("udp dport 53 dnat")));
}
}

View File

@@ -23,3 +23,7 @@ rustls-pemfile = { workspace = true }
tokio-util = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
socket2 = { workspace = true }
quinn = { workspace = true }
rcgen = { workspace = true }
base64 = { workspace = true }

View File

@@ -1,155 +0,0 @@
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::time::{Duration, Instant};
/// Per-connection tracking record with atomics for lock-free updates.
///
/// Each field uses atomics so that the forwarding tasks can update
/// bytes_received / bytes_sent / last_activity without holding any lock,
/// while the zombie scanner reads them concurrently.
pub struct ConnectionRecord {
    /// Unique connection ID assigned by the ConnectionTracker.
    pub id: u64,
    /// Wall-clock instant when this connection was created.
    pub created_at: Instant,
    /// Milliseconds since `created_at` when the last activity occurred.
    /// Updated atomically by the forwarding loops.
    pub last_activity: AtomicU64,
    /// Total bytes received from the client (inbound).
    pub bytes_received: AtomicU64,
    /// Total bytes sent to the client (outbound / from backend).
    pub bytes_sent: AtomicU64,
    /// True once the client side of the connection has closed.
    pub client_closed: AtomicBool,
    /// True once the backend side of the connection has closed.
    pub backend_closed: AtomicBool,
    /// Whether this connection uses TLS (affects zombie thresholds).
    pub is_tls: AtomicBool,
    /// Whether this connection has keep-alive semantics.
    pub has_keep_alive: AtomicBool,
}

impl ConnectionRecord {
    /// Create a new connection record with the given ID.
    /// All counters start at zero, all flags start as false.
    pub fn new(id: u64) -> Self {
        ConnectionRecord {
            id,
            created_at: Instant::now(),
            last_activity: AtomicU64::new(0),
            bytes_received: AtomicU64::new(0),
            bytes_sent: AtomicU64::new(0),
            client_closed: AtomicBool::new(false),
            backend_closed: AtomicBool::new(false),
            is_tls: AtomicBool::new(false),
            has_keep_alive: AtomicBool::new(false),
        }
    }

    /// Stamp `last_activity` with the current offset (ms) from `created_at`.
    pub fn touch(&self) {
        let now_ms = self.created_at.elapsed().as_millis() as u64;
        self.last_activity.store(now_ms, Ordering::Relaxed);
    }

    /// Record `n` bytes received from the client (inbound) and mark activity.
    pub fn record_bytes_in(&self, n: u64) {
        self.bytes_received.fetch_add(n, Ordering::Relaxed);
        self.touch();
    }

    /// Record `n` bytes sent to the client (outbound / from backend) and mark activity.
    pub fn record_bytes_out(&self, n: u64) {
        self.bytes_sent.fetch_add(n, Ordering::Relaxed);
        self.touch();
    }

    /// How long since the last activity on this connection.
    pub fn idle_duration(&self) -> Duration {
        let last_seen_ms = self.last_activity.load(Ordering::Relaxed);
        let age_ms = self.created_at.elapsed().as_millis() as u64;
        // saturating_sub guards against a concurrent touch() landing between
        // the two reads above, which could make last_seen_ms exceed age_ms.
        Duration::from_millis(age_ms.saturating_sub(last_seen_ms))
    }

    /// Total age of this connection (time since creation).
    pub fn age(&self) -> Duration {
        self.created_at.elapsed()
    }
}
#[cfg(test)]
mod tests {
use super::*;
use std::thread;
// Fresh records start fully zeroed: no bytes, no flags set.
#[test]
fn test_new_record() {
let record = ConnectionRecord::new(42);
assert_eq!(record.id, 42);
assert_eq!(record.bytes_received.load(Ordering::Relaxed), 0);
assert_eq!(record.bytes_sent.load(Ordering::Relaxed), 0);
assert!(!record.client_closed.load(Ordering::Relaxed));
assert!(!record.backend_closed.load(Ordering::Relaxed));
assert!(!record.is_tls.load(Ordering::Relaxed));
assert!(!record.has_keep_alive.load(Ordering::Relaxed));
}
// Byte counters accumulate across multiple calls.
#[test]
fn test_record_bytes() {
let record = ConnectionRecord::new(1);
record.record_bytes_in(100);
record.record_bytes_in(200);
assert_eq!(record.bytes_received.load(Ordering::Relaxed), 300);
record.record_bytes_out(50);
record.record_bytes_out(75);
assert_eq!(record.bytes_sent.load(Ordering::Relaxed), 125);
}
#[test]
fn test_touch_updates_activity() {
let record = ConnectionRecord::new(1);
assert_eq!(record.last_activity.load(Ordering::Relaxed), 0);
// Sleep briefly so elapsed time is nonzero
thread::sleep(Duration::from_millis(10));
record.touch();
let activity = record.last_activity.load(Ordering::Relaxed);
assert!(activity >= 10, "last_activity should be at least 10ms, got {}", activity);
}
#[test]
fn test_idle_duration() {
let record = ConnectionRecord::new(1);
// Initially idle_duration ~ age since last_activity is 0
thread::sleep(Duration::from_millis(20));
let idle = record.idle_duration();
assert!(idle >= Duration::from_millis(20));
// After touch, idle should be near zero
record.touch();
let idle = record.idle_duration();
assert!(idle < Duration::from_millis(10));
}
#[test]
fn test_age() {
let record = ConnectionRecord::new(1);
thread::sleep(Duration::from_millis(20));
let age = record.age();
assert!(age >= Duration::from_millis(20));
}
// Flags are independent: setting three of them leaves the fourth untouched.
#[test]
fn test_flags() {
let record = ConnectionRecord::new(1);
record.client_closed.store(true, Ordering::Relaxed);
record.is_tls.store(true, Ordering::Relaxed);
record.has_keep_alive.store(true, Ordering::Relaxed);
assert!(record.client_closed.load(Ordering::Relaxed));
assert!(!record.backend_closed.load(Ordering::Relaxed));
assert!(record.is_tls.load(Ordering::Relaxed));
assert!(record.has_keep_alive.load(Ordering::Relaxed));
}
}

View File

@@ -2,24 +2,9 @@ use dashmap::DashMap;
use std::collections::VecDeque;
use std::net::IpAddr;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio_util::sync::CancellationToken;
use tracing::{debug, warn};
use super::connection_record::ConnectionRecord;
/// Thresholds for zombie detection (non-TLS connections).
const HALF_ZOMBIE_TIMEOUT_PLAIN: Duration = Duration::from_secs(30);
/// Thresholds for zombie detection (TLS connections).
const HALF_ZOMBIE_TIMEOUT_TLS: Duration = Duration::from_secs(300);
/// Stuck connection timeout (non-TLS): received data but never sent any.
const STUCK_TIMEOUT_PLAIN: Duration = Duration::from_secs(60);
/// Stuck connection timeout (TLS): received data but never sent any.
const STUCK_TIMEOUT_TLS: Duration = Duration::from_secs(300);
/// Tracks active connections per IP and enforces per-IP limits and rate limiting.
/// Also maintains per-connection records for zombie detection.
pub struct ConnectionTracker {
/// Active connection counts per IP
active: DashMap<IpAddr, AtomicU64>,
@@ -29,10 +14,6 @@ pub struct ConnectionTracker {
max_per_ip: Option<u64>,
/// Maximum new connections per minute per IP (None = unlimited)
rate_limit_per_minute: Option<u64>,
/// Per-connection tracking records for zombie detection
connections: DashMap<u64, Arc<ConnectionRecord>>,
/// Monotonically increasing connection ID counter
next_id: AtomicU64,
}
impl ConnectionTracker {
@@ -42,8 +23,6 @@ impl ConnectionTracker {
timestamps: DashMap::new(),
max_per_ip,
rate_limit_per_minute,
connections: DashMap::new(),
next_id: AtomicU64::new(1),
}
}
@@ -95,10 +74,11 @@ impl ConnectionTracker {
pub fn connection_closed(&self, ip: &IpAddr) {
if let Some(counter) = self.active.get(ip) {
let prev = counter.value().fetch_sub(1, Ordering::Relaxed);
// Clean up zero entries
// Clean up zero entries to prevent memory growth
if prev <= 1 {
drop(counter);
self.active.remove(ip);
self.timestamps.remove(ip);
}
}
}
@@ -111,115 +91,27 @@ impl ConnectionTracker {
.unwrap_or(0)
}
/// Prune stale timestamp entries for IPs that have no active connections
/// and no recent timestamps. This cleans up entries left by rate-limited IPs
/// that never had connection_opened called.
pub fn cleanup_stale_timestamps(&self) {
if self.rate_limit_per_minute.is_none() {
return; // No rate limiting — timestamps map should be empty
}
let now = Instant::now();
let one_minute = Duration::from_secs(60);
self.timestamps.retain(|ip, timestamps| {
timestamps.retain(|t| now.duration_since(*t) < one_minute);
// Keep if there are active connections or recent timestamps
!timestamps.is_empty() || self.active.contains_key(ip)
});
}
/// Get the total number of tracked IPs.
pub fn tracked_ips(&self) -> usize {
self.active.len()
}
/// Register a new connection and return its tracking record.
///
/// The returned `Arc<ConnectionRecord>` should be passed to the forwarding
/// loop so it can update bytes / activity atomics in real time.
pub fn register_connection(&self, is_tls: bool) -> Arc<ConnectionRecord> {
let id = self.next_id.fetch_add(1, Ordering::Relaxed);
let record = Arc::new(ConnectionRecord::new(id));
record.is_tls.store(is_tls, Ordering::Relaxed);
self.connections.insert(id, Arc::clone(&record));
record
}
/// Remove a connection record when the connection is fully closed.
pub fn unregister_connection(&self, id: u64) {
self.connections.remove(&id);
}
/// Scan all tracked connections and return IDs of zombie connections.
///
/// A connection is considered a zombie in any of these cases:
/// - **Full zombie**: both `client_closed` and `backend_closed` are true.
/// - **Half zombie**: one side closed for longer than the threshold
/// (5 min for TLS, 30s for non-TLS).
/// - **Stuck**: `bytes_received > 0` but `bytes_sent == 0` for longer
/// than the stuck threshold (5 min for TLS, 60s for non-TLS).
pub fn scan_zombies(&self) -> Vec<u64> {
let mut zombies = Vec::new();
for entry in self.connections.iter() {
let record = entry.value();
let id = *entry.key();
let is_tls = record.is_tls.load(Ordering::Relaxed);
let client_closed = record.client_closed.load(Ordering::Relaxed);
let backend_closed = record.backend_closed.load(Ordering::Relaxed);
let idle = record.idle_duration();
let bytes_in = record.bytes_received.load(Ordering::Relaxed);
let bytes_out = record.bytes_sent.load(Ordering::Relaxed);
// Full zombie: both sides closed
if client_closed && backend_closed {
zombies.push(id);
continue;
}
// Half zombie: one side closed for too long
let half_timeout = if is_tls {
HALF_ZOMBIE_TIMEOUT_TLS
} else {
HALF_ZOMBIE_TIMEOUT_PLAIN
};
if (client_closed || backend_closed) && idle >= half_timeout {
zombies.push(id);
continue;
}
// Stuck: received data but never sent anything for too long
let stuck_timeout = if is_tls {
STUCK_TIMEOUT_TLS
} else {
STUCK_TIMEOUT_PLAIN
};
if bytes_in > 0 && bytes_out == 0 && idle >= stuck_timeout {
zombies.push(id);
}
}
zombies
}
/// Start a background task that periodically scans for zombie connections.
///
/// The scanner runs every 10 seconds and logs any zombies it finds.
/// It stops when the provided `CancellationToken` is cancelled.
pub fn start_zombie_scanner(self: &Arc<Self>, cancel: CancellationToken) {
let tracker = Arc::clone(self);
tokio::spawn(async move {
let interval = Duration::from_secs(10);
loop {
tokio::select! {
_ = cancel.cancelled() => {
debug!("Zombie scanner shutting down");
break;
}
_ = tokio::time::sleep(interval) => {
let zombies = tracker.scan_zombies();
if !zombies.is_empty() {
warn!(
"Detected {} zombie connection(s): {:?}",
zombies.len(),
zombies
);
}
}
}
}
});
}
/// Get the total number of tracked connections (with records).
pub fn total_connections(&self) -> usize {
self.connections.len()
}
}
#[cfg(test)]
@@ -305,98 +197,51 @@ mod tests {
}
#[test]
fn test_register_unregister_connection() {
let tracker = ConnectionTracker::new(None, None);
assert_eq!(tracker.total_connections(), 0);
fn test_timestamps_cleaned_on_last_close() {
let tracker = ConnectionTracker::new(None, Some(100));
let ip: IpAddr = "10.0.0.1".parse().unwrap();
let record1 = tracker.register_connection(false);
assert_eq!(tracker.total_connections(), 1);
assert!(!record1.is_tls.load(Ordering::Relaxed));
// try_accept populates the timestamps map (when rate limiting is enabled)
assert!(tracker.try_accept(&ip));
tracker.connection_opened(&ip);
assert!(tracker.try_accept(&ip));
tracker.connection_opened(&ip);
let record2 = tracker.register_connection(true);
assert_eq!(tracker.total_connections(), 2);
assert!(record2.is_tls.load(Ordering::Relaxed));
// Timestamps should exist
assert!(tracker.timestamps.get(&ip).is_some());
// IDs should be unique
assert_ne!(record1.id, record2.id);
// Close one connection — timestamps should still exist
tracker.connection_closed(&ip);
assert!(tracker.timestamps.get(&ip).is_some());
tracker.unregister_connection(record1.id);
assert_eq!(tracker.total_connections(), 1);
tracker.unregister_connection(record2.id);
assert_eq!(tracker.total_connections(), 0);
// Close last connection — timestamps should be cleaned up
tracker.connection_closed(&ip);
assert!(tracker.timestamps.get(&ip).is_none());
assert!(tracker.active.get(&ip).is_none());
}
#[test]
fn test_full_zombie_detection() {
let tracker = ConnectionTracker::new(None, None);
let record = tracker.register_connection(false);
fn test_cleanup_stale_timestamps() {
// Rate limit of 100/min so timestamps are tracked
let tracker = ConnectionTracker::new(None, Some(100));
let ip: IpAddr = "10.0.0.1".parse().unwrap();
// Not a zombie initially
assert!(tracker.scan_zombies().is_empty());
// try_accept adds a timestamp entry
assert!(tracker.try_accept(&ip));
// Set both sides closed -> full zombie
record.client_closed.store(true, Ordering::Relaxed);
record.backend_closed.store(true, Ordering::Relaxed);
// Simulate: connection was rate-limited and never accepted,
// so no connection_opened / connection_closed pair
assert!(tracker.timestamps.get(&ip).is_some());
assert!(tracker.active.get(&ip).is_none()); // never opened
let zombies = tracker.scan_zombies();
assert_eq!(zombies.len(), 1);
assert_eq!(zombies[0], record.id);
}
// Cleanup won't remove it yet because timestamp is recent
tracker.cleanup_stale_timestamps();
assert!(tracker.timestamps.get(&ip).is_some());
#[test]
fn test_half_zombie_not_triggered_immediately() {
let tracker = ConnectionTracker::new(None, None);
let record = tracker.register_connection(false);
record.touch(); // mark activity now
// Only one side closed, but just now -> not a zombie yet
record.client_closed.store(true, Ordering::Relaxed);
assert!(tracker.scan_zombies().is_empty());
}
#[test]
fn test_stuck_connection_not_triggered_immediately() {
let tracker = ConnectionTracker::new(None, None);
let record = tracker.register_connection(false);
record.touch(); // mark activity now
// Has received data but sent nothing -> but just started, not stuck yet
record.bytes_received.store(1000, Ordering::Relaxed);
assert!(tracker.scan_zombies().is_empty());
}
#[test]
fn test_unregister_removes_from_zombie_scan() {
let tracker = ConnectionTracker::new(None, None);
let record = tracker.register_connection(false);
let id = record.id;
// Make it a full zombie
record.client_closed.store(true, Ordering::Relaxed);
record.backend_closed.store(true, Ordering::Relaxed);
assert_eq!(tracker.scan_zombies().len(), 1);
// Unregister should remove it
tracker.unregister_connection(id);
assert!(tracker.scan_zombies().is_empty());
}
#[test]
fn test_total_connections() {
    // total_connections must mirror register/unregister pairs exactly,
    // independent of the boolean flag passed to register_connection.
    let tracker = ConnectionTracker::new(None, None);
    assert_eq!(tracker.total_connections(), 0);
    let r1 = tracker.register_connection(false);
    let r2 = tracker.register_connection(true);
    let r3 = tracker.register_connection(false);
    assert_eq!(tracker.total_connections(), 3);
    tracker.unregister_connection(r2.id);
    assert_eq!(tracker.total_connections(), 2);
    tracker.unregister_connection(r1.id);
    tracker.unregister_connection(r3.id);
    assert_eq!(tracker.total_connections(), 0);
    // After expiry (use 0-second window trick: create tracker with 0 rate)
    // Actually, we can't fast-forward time easily, so just verify the cleanup
    // doesn't panic and handles the no-rate-limit case
    let tracker2 = ConnectionTracker::new(None, None);
    tracker2.cleanup_stale_timestamps(); // should be a no-op
}
}

View File

@@ -7,6 +7,14 @@ use tracing::debug;
use rustproxy_metrics::MetricsCollector;
/// Context for forwarding metrics, replacing the growing tuple pattern.
#[derive(Clone)]
pub struct ForwardMetricsCtx {
    /// Shared metrics sink that per-direction byte counts are reported to.
    pub collector: Arc<MetricsCollector>,
    /// Route identifier to attribute traffic to, when known.
    pub route_id: Option<String>,
    /// Client source IP (string form) for per-source accounting, when known.
    pub source_ip: Option<String>,
}
/// Perform bidirectional TCP forwarding between client and backend.
///
/// This is the core data path for passthrough connections.
@@ -73,13 +81,13 @@ pub async fn forward_bidirectional_with_timeouts(
inactivity_timeout: std::time::Duration,
max_lifetime: std::time::Duration,
cancel: CancellationToken,
metrics: Option<(Arc<MetricsCollector>, Option<String>)>,
metrics: Option<ForwardMetricsCtx>,
) -> std::io::Result<(u64, u64)> {
// Send initial data (peeked bytes) to backend
if let Some(data) = initial_data {
backend.write_all(data).await?;
if let Some((ref m, ref rid)) = metrics {
m.record_bytes(data.len() as u64, 0, rid.as_deref());
if let Some(ref ctx) = metrics {
ctx.collector.record_bytes(data.len() as u64, 0, ctx.route_id.as_deref(), ctx.source_ip.as_deref());
}
}
@@ -89,57 +97,80 @@ pub async fn forward_bidirectional_with_timeouts(
let last_activity = Arc::new(AtomicU64::new(0));
let start = std::time::Instant::now();
// Per-connection cancellation token: the watchdog cancels this instead of
// aborting tasks, so the copy loops can shut down gracefully (TCP FIN instead
// of RST, TLS close_notify if the stream is TLS-wrapped).
let conn_cancel = CancellationToken::new();
let la1 = Arc::clone(&last_activity);
let initial_len = initial_data.map_or(0u64, |d| d.len() as u64);
let metrics_c2b = metrics.clone();
let cc1 = conn_cancel.clone();
let c2b = tokio::spawn(async move {
let mut buf = vec![0u8; 65536];
let mut total = initial_len;
loop {
let n = match client_read.read(&mut buf).await {
Ok(0) | Err(_) => break,
Ok(n) => n,
let n = tokio::select! {
result = client_read.read(&mut buf) => match result {
Ok(0) | Err(_) => break,
Ok(n) => n,
},
_ = cc1.cancelled() => break,
};
if backend_write.write_all(&buf[..n]).await.is_err() {
break;
}
total += n as u64;
la1.store(start.elapsed().as_millis() as u64, Ordering::Relaxed);
if let Some((ref m, ref rid)) = metrics_c2b {
m.record_bytes(n as u64, 0, rid.as_deref());
if let Some(ref ctx) = metrics_c2b {
ctx.collector.record_bytes(n as u64, 0, ctx.route_id.as_deref(), ctx.source_ip.as_deref());
}
}
let _ = backend_write.shutdown().await;
// Graceful shutdown with timeout (sends TCP FIN / TLS close_notify)
let _ = tokio::time::timeout(
std::time::Duration::from_secs(2),
backend_write.shutdown(),
).await;
total
});
let la2 = Arc::clone(&last_activity);
let metrics_b2c = metrics;
let cc2 = conn_cancel.clone();
let b2c = tokio::spawn(async move {
let mut buf = vec![0u8; 65536];
let mut total = 0u64;
loop {
let n = match backend_read.read(&mut buf).await {
Ok(0) | Err(_) => break,
Ok(n) => n,
let n = tokio::select! {
result = backend_read.read(&mut buf) => match result {
Ok(0) | Err(_) => break,
Ok(n) => n,
},
_ = cc2.cancelled() => break,
};
if client_write.write_all(&buf[..n]).await.is_err() {
break;
}
total += n as u64;
la2.store(start.elapsed().as_millis() as u64, Ordering::Relaxed);
if let Some((ref m, ref rid)) = metrics_b2c {
m.record_bytes(0, n as u64, rid.as_deref());
if let Some(ref ctx) = metrics_b2c {
ctx.collector.record_bytes(0, n as u64, ctx.route_id.as_deref(), ctx.source_ip.as_deref());
}
}
let _ = client_write.shutdown().await;
// Graceful shutdown with timeout (sends TCP FIN / TLS close_notify)
let _ = tokio::time::timeout(
std::time::Duration::from_secs(2),
client_write.shutdown(),
).await;
total
});
// Watchdog: inactivity, max lifetime, and cancellation
// Watchdog: inactivity, max lifetime, and cancellation.
// First cancels the per-connection token for graceful shutdown (FIN/close_notify),
// then falls back to abort if the tasks are stuck (e.g. on a blocked write_all).
let la_watch = Arc::clone(&last_activity);
let c2b_handle = c2b.abort_handle();
let b2c_handle = b2c.abort_handle();
let c2b_abort = c2b.abort_handle();
let b2c_abort = b2c.abort_handle();
let watchdog = tokio::spawn(async move {
let check_interval = std::time::Duration::from_secs(5);
let mut last_seen = 0u64;
@@ -147,16 +178,12 @@ pub async fn forward_bidirectional_with_timeouts(
tokio::select! {
_ = cancel.cancelled() => {
debug!("Connection cancelled by shutdown");
c2b_handle.abort();
b2c_handle.abort();
break;
}
_ = tokio::time::sleep(check_interval) => {
// Check max lifetime
if start.elapsed() >= max_lifetime {
debug!("Connection exceeded max lifetime, closing");
c2b_handle.abort();
b2c_handle.abort();
break;
}
@@ -166,8 +193,6 @@ pub async fn forward_bidirectional_with_timeouts(
let elapsed_since_activity = start.elapsed().as_millis() as u64 - current;
if elapsed_since_activity >= inactivity_timeout.as_millis() as u64 {
debug!("Connection inactive for {}ms, closing", elapsed_since_activity);
c2b_handle.abort();
b2c_handle.abort();
break;
}
}
@@ -175,6 +200,13 @@ pub async fn forward_bidirectional_with_timeouts(
}
}
}
// Phase 1: Signal copy loops to exit gracefully (allows FIN/close_notify)
conn_cancel.cancel();
// Phase 2: Wait for graceful shutdown (2s shutdown timeout + 2s margin)
tokio::time::sleep(std::time::Duration::from_secs(4)).await;
// Phase 3: Force-abort if still stuck (e.g. blocked on write_all)
c2b_abort.abort();
b2c_abort.abort();
});
let bytes_in = c2b.await.unwrap_or(0);
@@ -182,4 +214,3 @@ pub async fn forward_bidirectional_with_timeouts(
watchdog.abort();
Ok((bytes_in, bytes_out))
}

View File

@@ -1,22 +1,29 @@
//! # rustproxy-passthrough
//!
//! Raw TCP/SNI passthrough engine for RustProxy.
//! Handles TCP listening, TLS ClientHello SNI extraction, and bidirectional forwarding.
//! Raw TCP/SNI passthrough engine and UDP listener for RustProxy.
//! Handles TCP listening, TLS ClientHello SNI extraction, bidirectional forwarding,
//! and UDP datagram session tracking with forwarding.
pub mod tcp_listener;
pub mod sni_parser;
pub mod forwarder;
pub mod proxy_protocol;
pub mod tls_handler;
pub mod connection_record;
pub mod connection_tracker;
pub mod socket_relay;
pub mod socket_opts;
pub mod udp_session;
pub mod udp_listener;
pub mod quic_handler;
pub use tcp_listener::*;
pub use sni_parser::*;
pub use forwarder::*;
pub use proxy_protocol::*;
pub use tls_handler::*;
pub use connection_record::*;
pub use connection_tracker::*;
pub use socket_relay::*;
pub use socket_opts::*;
pub use udp_session::*;
pub use udp_listener::*;
pub use quic_handler::*;

View File

@@ -1,4 +1,4 @@
use std::net::SocketAddr;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
use thiserror::Error;
#[derive(Debug, Error)]
@@ -9,9 +9,11 @@ pub enum ProxyProtocolError {
UnsupportedVersion,
#[error("Parse error: {0}")]
Parse(String),
#[error("Incomplete header: need {0} bytes, got {1}")]
Incomplete(usize, usize),
}
/// Parsed PROXY protocol v1 header.
/// Parsed PROXY protocol header (v1 or v2).
#[derive(Debug, Clone)]
pub struct ProxyProtocolHeader {
pub source_addr: SocketAddr,
@@ -24,14 +26,29 @@ pub struct ProxyProtocolHeader {
pub enum ProxyProtocol {
Tcp4,
Tcp6,
Udp4,
Udp6,
Unknown,
}
/// Transport type for PROXY v2 header generation.
///
/// Encoded into the low nibble of byte 13 of the v2 header:
/// STREAM (0x1) for TCP, DGRAM (0x2) for UDP.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ProxyV2Transport {
    Stream, // TCP
    Datagram, // UDP
}
/// PROXY protocol v2 signature (12 bytes).
/// Every v2 binary header begins with this fixed byte sequence.
const PROXY_V2_SIGNATURE: [u8; 12] = [
    0x0D, 0x0A, 0x0D, 0x0A, 0x00, 0x0D, 0x0A, 0x51, 0x55, 0x49, 0x54, 0x0A,
];
// ===== v1 (text format) =====
/// Parse a PROXY protocol v1 header from data.
///
/// Format: `PROXY TCP4 <src_ip> <dst_ip> <src_port> <dst_port>\r\n`
pub fn parse_v1(data: &[u8]) -> Result<(ProxyProtocolHeader, usize), ProxyProtocolError> {
// Find the end of the header line
let line_end = data
.windows(2)
.position(|w| w == b"\r\n")
@@ -56,10 +73,10 @@ pub fn parse_v1(data: &[u8]) -> Result<(ProxyProtocolHeader, usize), ProxyProtoc
_ => return Err(ProxyProtocolError::UnsupportedVersion),
};
let src_ip: std::net::IpAddr = parts[2]
let src_ip: IpAddr = parts[2]
.parse()
.map_err(|_| ProxyProtocolError::Parse("Invalid source IP".to_string()))?;
let dst_ip: std::net::IpAddr = parts[3]
let dst_ip: IpAddr = parts[3]
.parse()
.map_err(|_| ProxyProtocolError::Parse("Invalid destination IP".to_string()))?;
let src_port: u16 = parts[4]
@@ -75,7 +92,6 @@ pub fn parse_v1(data: &[u8]) -> Result<(ProxyProtocolHeader, usize), ProxyProtoc
protocol,
};
// Consumed bytes = line + \r\n
Ok((header, line_end + 2))
}
@@ -97,10 +113,219 @@ pub fn is_proxy_protocol_v1(data: &[u8]) -> bool {
data.starts_with(b"PROXY ")
}
// ===== v2 (binary format) =====
/// Returns true when `data` begins with the 12-byte PROXY protocol v2 signature.
pub fn is_proxy_protocol_v2(data: &[u8]) -> bool {
    data.starts_with(&PROXY_V2_SIGNATURE)
}
/// Parse a PROXY protocol v2 binary header.
///
/// Binary format:
/// - [0..12] signature (12 bytes)
/// - [12] version (high nibble) + command (low nibble)
/// - [13] address family (high nibble) + transport (low nibble)
/// - [14..16] address block length (big-endian u16)
/// - [16..] address block (variable, depends on family)
pub fn parse_v2(data: &[u8]) -> Result<(ProxyProtocolHeader, usize), ProxyProtocolError> {
if data.len() < 16 {
return Err(ProxyProtocolError::Incomplete(16, data.len()));
}
// Validate signature
if data[..12] != PROXY_V2_SIGNATURE {
return Err(ProxyProtocolError::InvalidHeader);
}
// Version (high nibble of byte 12) must be 0x2
let version = (data[12] >> 4) & 0x0F;
if version != 2 {
return Err(ProxyProtocolError::UnsupportedVersion);
}
// Command (low nibble of byte 12)
let command = data[12] & 0x0F;
// 0x0 = LOCAL, 0x1 = PROXY
if command > 1 {
return Err(ProxyProtocolError::Parse(format!("Unknown command: {}", command)));
}
// Address family (high nibble) + transport (low nibble) of byte 13
let family = (data[13] >> 4) & 0x0F;
let transport = data[13] & 0x0F;
// Address block length
let addr_len = u16::from_be_bytes([data[14], data[15]]) as usize;
let total_len = 16 + addr_len;
if data.len() < total_len {
return Err(ProxyProtocolError::Incomplete(total_len, data.len()));
}
// LOCAL command: no real addresses, return unspecified
if command == 0 {
return Ok((
ProxyProtocolHeader {
source_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0),
dest_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0),
protocol: ProxyProtocol::Unknown,
},
total_len,
));
}
// PROXY command: parse addresses based on family + transport
let addr_block = &data[16..16 + addr_len];
match (family, transport) {
// AF_INET (0x1) + STREAM (0x1) = TCP4
(0x1, 0x1) => {
if addr_len < 12 {
return Err(ProxyProtocolError::Parse("IPv4 address block too short".to_string()));
}
let src_ip = Ipv4Addr::new(addr_block[0], addr_block[1], addr_block[2], addr_block[3]);
let dst_ip = Ipv4Addr::new(addr_block[4], addr_block[5], addr_block[6], addr_block[7]);
let src_port = u16::from_be_bytes([addr_block[8], addr_block[9]]);
let dst_port = u16::from_be_bytes([addr_block[10], addr_block[11]]);
Ok((
ProxyProtocolHeader {
source_addr: SocketAddr::new(IpAddr::V4(src_ip), src_port),
dest_addr: SocketAddr::new(IpAddr::V4(dst_ip), dst_port),
protocol: ProxyProtocol::Tcp4,
},
total_len,
))
}
// AF_INET (0x1) + DGRAM (0x2) = UDP4
(0x1, 0x2) => {
if addr_len < 12 {
return Err(ProxyProtocolError::Parse("IPv4 address block too short".to_string()));
}
let src_ip = Ipv4Addr::new(addr_block[0], addr_block[1], addr_block[2], addr_block[3]);
let dst_ip = Ipv4Addr::new(addr_block[4], addr_block[5], addr_block[6], addr_block[7]);
let src_port = u16::from_be_bytes([addr_block[8], addr_block[9]]);
let dst_port = u16::from_be_bytes([addr_block[10], addr_block[11]]);
Ok((
ProxyProtocolHeader {
source_addr: SocketAddr::new(IpAddr::V4(src_ip), src_port),
dest_addr: SocketAddr::new(IpAddr::V4(dst_ip), dst_port),
protocol: ProxyProtocol::Udp4,
},
total_len,
))
}
// AF_INET6 (0x2) + STREAM (0x1) = TCP6
(0x2, 0x1) => {
if addr_len < 36 {
return Err(ProxyProtocolError::Parse("IPv6 address block too short".to_string()));
}
let src_ip = Ipv6Addr::from(<[u8; 16]>::try_from(&addr_block[0..16]).unwrap());
let dst_ip = Ipv6Addr::from(<[u8; 16]>::try_from(&addr_block[16..32]).unwrap());
let src_port = u16::from_be_bytes([addr_block[32], addr_block[33]]);
let dst_port = u16::from_be_bytes([addr_block[34], addr_block[35]]);
Ok((
ProxyProtocolHeader {
source_addr: SocketAddr::new(IpAddr::V6(src_ip), src_port),
dest_addr: SocketAddr::new(IpAddr::V6(dst_ip), dst_port),
protocol: ProxyProtocol::Tcp6,
},
total_len,
))
}
// AF_INET6 (0x2) + DGRAM (0x2) = UDP6
(0x2, 0x2) => {
if addr_len < 36 {
return Err(ProxyProtocolError::Parse("IPv6 address block too short".to_string()));
}
let src_ip = Ipv6Addr::from(<[u8; 16]>::try_from(&addr_block[0..16]).unwrap());
let dst_ip = Ipv6Addr::from(<[u8; 16]>::try_from(&addr_block[16..32]).unwrap());
let src_port = u16::from_be_bytes([addr_block[32], addr_block[33]]);
let dst_port = u16::from_be_bytes([addr_block[34], addr_block[35]]);
Ok((
ProxyProtocolHeader {
source_addr: SocketAddr::new(IpAddr::V6(src_ip), src_port),
dest_addr: SocketAddr::new(IpAddr::V6(dst_ip), dst_port),
protocol: ProxyProtocol::Udp6,
},
total_len,
))
}
// AF_UNSPEC or unknown
(0x0, _) => Ok((
ProxyProtocolHeader {
source_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0),
dest_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0),
protocol: ProxyProtocol::Unknown,
},
total_len,
)),
_ => Err(ProxyProtocolError::Parse(format!(
"Unsupported family/transport: 0x{:X}{:X}",
family, transport
))),
}
}
/// Generate a PROXY protocol v2 binary header.
///
/// Emits an AF_INET header when both endpoints are IPv4, an AF_INET6 header
/// when both are IPv6, and promotes mixed pairs to IPv6-mapped addresses.
pub fn generate_v2(
    source: &SocketAddr,
    dest: &SocketAddr,
    transport: ProxyV2Transport,
) -> Vec<u8> {
    // Low nibble of byte 13: STREAM = 0x1 (TCP), DGRAM = 0x2 (UDP).
    let transport_nibble: u8 = match transport {
        ProxyV2Transport::Stream => 0x1,
        ProxyV2Transport::Datagram => 0x2,
    };
    // Shared assembly once both addresses are in the same family.
    let assemble = |family_nibble: u8, addr_len: u16, src_octets: &[u8], dst_octets: &[u8]| {
        let mut out = Vec::with_capacity(16 + addr_len as usize);
        out.extend_from_slice(&PROXY_V2_SIGNATURE);
        out.push(0x21); // version 2, PROXY command
        out.push((family_nibble << 4) | transport_nibble); // family + transport
        out.extend_from_slice(&addr_len.to_be_bytes()); // addr block length
        out.extend_from_slice(src_octets);
        out.extend_from_slice(dst_octets);
        out.extend_from_slice(&source.port().to_be_bytes());
        out.extend_from_slice(&dest.port().to_be_bytes());
        out
    };
    match (source.ip(), dest.ip()) {
        (IpAddr::V4(src), IpAddr::V4(dst)) => assemble(0x1, 12, &src.octets(), &dst.octets()),
        (IpAddr::V6(src), IpAddr::V6(dst)) => assemble(0x2, 36, &src.octets(), &dst.octets()),
        // Mixed IPv4/IPv6: map IPv4 to IPv6-mapped address and retry as pure IPv6.
        (src, dst) => {
            let widen = |ip: IpAddr| match ip {
                IpAddr::V4(v4) => v4.to_ipv6_mapped(),
                IpAddr::V6(v6) => v6,
            };
            generate_v2(
                &SocketAddr::new(IpAddr::V6(widen(src)), source.port()),
                &SocketAddr::new(IpAddr::V6(widen(dst)), dest.port()),
                transport,
            )
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
// ===== v1 tests =====
#[test]
fn test_parse_v1_tcp4() {
let header = b"PROXY TCP4 192.168.1.100 10.0.0.1 12345 443\r\n";
@@ -126,4 +351,130 @@ mod tests {
assert!(is_proxy_protocol_v1(b"PROXY TCP4 ..."));
assert!(!is_proxy_protocol_v1(b"GET / HTTP/1.1"));
}
// ===== v2 tests =====
// Signature detection: only data beginning with the 12-byte v2 magic qualifies.
#[test]
fn test_is_proxy_protocol_v2() {
    assert!(is_proxy_protocol_v2(&PROXY_V2_SIGNATURE));
    assert!(!is_proxy_protocol_v2(b"PROXY TCP4 ..."));
    assert!(!is_proxy_protocol_v2(b"short"));
}
// Round-trip: generate_v2 output must parse back to the same addresses/protocol.
#[test]
fn test_parse_v2_tcp4() {
    let source: SocketAddr = "198.51.100.10:54321".parse().unwrap();
    let dest: SocketAddr = "203.0.113.25:8443".parse().unwrap();
    let header = generate_v2(&source, &dest, ProxyV2Transport::Stream);
    assert_eq!(header.len(), 28); // 16 fixed + 12-byte IPv4 address block
    let (parsed, consumed) = parse_v2(&header).unwrap();
    assert_eq!(consumed, 28);
    assert_eq!(parsed.protocol, ProxyProtocol::Tcp4);
    assert_eq!(parsed.source_addr, source);
    assert_eq!(parsed.dest_addr, dest);
}
// Same round-trip for the datagram transport -> Udp4 protocol.
#[test]
fn test_parse_v2_udp4() {
    let source: SocketAddr = "10.0.0.1:12345".parse().unwrap();
    let dest: SocketAddr = "10.0.0.2:53".parse().unwrap();
    let header = generate_v2(&source, &dest, ProxyV2Transport::Datagram);
    assert_eq!(header.len(), 28);
    assert_eq!(header[13], 0x12); // AF_INET + DGRAM
    let (parsed, consumed) = parse_v2(&header).unwrap();
    assert_eq!(consumed, 28);
    assert_eq!(parsed.protocol, ProxyProtocol::Udp4);
    assert_eq!(parsed.source_addr, source);
    assert_eq!(parsed.dest_addr, dest);
}
// IPv6 round-trip: longer address block, distinct family nibble.
#[test]
fn test_parse_v2_tcp6() {
    let source: SocketAddr = "[2001:db8::1]:54321".parse().unwrap();
    let dest: SocketAddr = "[2001:db8::2]:443".parse().unwrap();
    let header = generate_v2(&source, &dest, ProxyV2Transport::Stream);
    assert_eq!(header.len(), 52); // 16 fixed + 36-byte IPv6 address block
    assert_eq!(header[13], 0x21); // AF_INET6 + STREAM
    let (parsed, consumed) = parse_v2(&header).unwrap();
    assert_eq!(consumed, 52);
    assert_eq!(parsed.protocol, ProxyProtocol::Tcp6);
    assert_eq!(parsed.source_addr, source);
    assert_eq!(parsed.dest_addr, dest);
}
// Byte-level layout: signature, version/command, family/transport,
// address length, then src/dst addresses and ports in network order.
#[test]
fn test_generate_v2_tcp4_byte_layout() {
    let source: SocketAddr = "1.2.3.4:1000".parse().unwrap();
    let dest: SocketAddr = "5.6.7.8:443".parse().unwrap();
    let header = generate_v2(&source, &dest, ProxyV2Transport::Stream);
    assert_eq!(&header[0..12], &PROXY_V2_SIGNATURE);
    assert_eq!(header[12], 0x21); // v2, PROXY
    assert_eq!(header[13], 0x11); // AF_INET, STREAM
    assert_eq!(u16::from_be_bytes([header[14], header[15]]), 12); // addr len
    assert_eq!(&header[16..20], &[1, 2, 3, 4]); // src ip
    assert_eq!(&header[20..24], &[5, 6, 7, 8]); // dst ip
    assert_eq!(u16::from_be_bytes([header[24], header[25]]), 1000); // src port
    assert_eq!(u16::from_be_bytes([header[26], header[27]]), 443); // dst port
}
// Only byte 13's transport nibble differs between TCP and UDP generation.
#[test]
fn test_generate_v2_udp4_byte_layout() {
    let source: SocketAddr = "10.0.0.1:5000".parse().unwrap();
    let dest: SocketAddr = "10.0.0.2:53".parse().unwrap();
    let header = generate_v2(&source, &dest, ProxyV2Transport::Datagram);
    assert_eq!(header[12], 0x21); // v2, PROXY
    assert_eq!(header[13], 0x12); // AF_INET, DGRAM (UDP)
}
// LOCAL-command headers carry no addresses; the parser must consume exactly
// the 16 fixed bytes and report unspecified endpoints.
#[test]
fn test_parse_v2_local_command() {
    // Build a LOCAL command header (no addresses)
    let mut header = Vec::new();
    header.extend_from_slice(&PROXY_V2_SIGNATURE);
    header.push(0x20); // v2, LOCAL
    header.push(0x00); // AF_UNSPEC
    header.extend_from_slice(&0u16.to_be_bytes()); // 0-length address block
    let (parsed, consumed) = parse_v2(&header).unwrap();
    assert_eq!(consumed, 16);
    assert_eq!(parsed.protocol, ProxyProtocol::Unknown);
    assert_eq!(parsed.source_addr.port(), 0);
}
// Input shorter than the 16-byte fixed part must error, not panic or index OOB.
#[test]
fn test_parse_v2_incomplete() {
    let data = &PROXY_V2_SIGNATURE[..8]; // only 8 bytes
    assert!(parse_v2(data).is_err());
}
// A valid signature with the wrong version nibble is rejected explicitly.
#[test]
fn test_parse_v2_wrong_version() {
    let mut header = Vec::new();
    header.extend_from_slice(&PROXY_V2_SIGNATURE);
    header.push(0x11); // version 1, not 2
    header.push(0x11);
    header.extend_from_slice(&12u16.to_be_bytes());
    header.extend_from_slice(&[0u8; 12]);
    assert!(matches!(parse_v2(&header), Err(ProxyProtocolError::UnsupportedVersion)));
}
// The consumed-byte count lets the caller locate where application data begins.
#[test]
fn test_v2_roundtrip_with_trailing_data() {
    let source: SocketAddr = "192.168.1.1:8080".parse().unwrap();
    let dest: SocketAddr = "10.0.0.1:443".parse().unwrap();
    let mut data = generate_v2(&source, &dest, ProxyV2Transport::Stream);
    data.extend_from_slice(b"GET / HTTP/1.1\r\n"); // trailing app data
    let (parsed, consumed) = parse_v2(&data).unwrap();
    assert_eq!(consumed, 28);
    assert_eq!(parsed.source_addr, source);
    assert_eq!(&data[consumed..], b"GET / HTTP/1.1\r\n");
}
}

View File

@@ -0,0 +1,311 @@
//! QUIC connection handling.
//!
//! Manages QUIC endpoints (via quinn), accepts connections, and either:
//! - Forwards streams bidirectionally to TCP backends (QUIC termination)
//! - Dispatches to H3ProxyService for HTTP/3 handling (Phase 5)
use std::net::SocketAddr;
use std::sync::Arc;
use tokio::io::AsyncWriteExt;
use arc_swap::ArcSwap;
use quinn::{Endpoint, ServerConfig as QuinnServerConfig};
use rustls::ServerConfig as RustlsServerConfig;
use tokio_util::sync::CancellationToken;
use tracing::{debug, info, warn};
use rustproxy_config::{RouteConfig, TransportProtocol};
use rustproxy_metrics::MetricsCollector;
use rustproxy_routing::{MatchContext, RouteManager};
use crate::connection_tracker::ConnectionTracker;
/// Create a QUIC server endpoint on the given port with the provided TLS config.
///
/// The TLS config must have ALPN protocols set (e.g., `h3` for HTTP/3).
///
/// NOTE(review): binds the IPv4 wildcard 0.0.0.0 only — IPv6 clients cannot
/// reach this endpoint unless another socket is bound elsewhere; confirm intended.
pub fn create_quic_endpoint(
    port: u16,
    tls_config: Arc<RustlsServerConfig>,
) -> anyhow::Result<Endpoint> {
    // Adapt the rustls config for QUIC; errors if the config is not usable by quinn.
    let quic_crypto = quinn::crypto::rustls::QuicServerConfig::try_from(tls_config)
        .map_err(|e| anyhow::anyhow!("Failed to create QUIC crypto config: {}", e))?;
    let server_config = QuinnServerConfig::with_crypto(Arc::new(quic_crypto));
    // A std UDP socket is handed to quinn, which drives it via the async runtime below.
    let socket = std::net::UdpSocket::bind(SocketAddr::from(([0, 0, 0, 0], port)))?;
    let endpoint = Endpoint::new(
        quinn::EndpointConfig::default(),
        Some(server_config),
        socket,
        quinn::default_runtime()
            .ok_or_else(|| anyhow::anyhow!("No async runtime for quinn"))?,
    )?;
    info!("QUIC endpoint listening on port {}", port);
    Ok(endpoint)
}
/// Run the QUIC accept loop for a single endpoint.
///
/// Accepts incoming QUIC connections and spawns a task per connection.
/// The loop exits when the shutdown token fires or the endpoint closes,
/// then drains in-flight connections before returning.
pub async fn quic_accept_loop(
    endpoint: Endpoint,
    port: u16,
    route_manager: Arc<ArcSwap<RouteManager>>,
    metrics: Arc<MetricsCollector>,
    conn_tracker: Arc<ConnectionTracker>,
    cancel: CancellationToken,
) {
    loop {
        // Race shutdown against the next incoming connection attempt.
        let incoming = tokio::select! {
            _ = cancel.cancelled() => {
                debug!("QUIC accept loop on port {} cancelled", port);
                break;
            }
            incoming = endpoint.accept() => {
                match incoming {
                    Some(conn) => conn,
                    None => {
                        debug!("QUIC endpoint on port {} closed", port);
                        break;
                    }
                }
            }
        };
        let remote_addr = incoming.remote_address();
        let ip = remote_addr.ip();
        // Per-IP rate limiting
        if !conn_tracker.try_accept(&ip) {
            debug!("QUIC connection rejected from {} (rate limit)", remote_addr);
            // Drop `incoming` to refuse the connection
            continue;
        }
        // Route matching (port + client IP, no domain yet — QUIC Initial is encrypted)
        // Routes are behind ArcSwap so config reloads take effect per-connection.
        let rm = route_manager.load();
        let ip_str = ip.to_string();
        let ctx = MatchContext {
            port,
            domain: None,
            path: None,
            client_ip: Some(&ip_str),
            tls_version: None,
            headers: None,
            is_tls: true,
            protocol: Some("quic"),
            transport: Some(TransportProtocol::Udp),
        };
        let route = match rm.find_route(&ctx) {
            Some(m) => m.route.clone(),
            None => {
                // NOTE(review): try_accept already ran for this attempt; confirm
                // the tracker does not need a matching release on route miss.
                debug!("No QUIC route matched for port {} from {}", port, remote_addr);
                continue;
            }
        };
        // Open bookkeeping happens before the spawn so counts include the handshake.
        conn_tracker.connection_opened(&ip);
        let route_id = route.name.clone().or(route.id.clone());
        metrics.connection_opened(route_id.as_deref(), Some(&ip_str));
        let metrics = Arc::clone(&metrics);
        let conn_tracker = Arc::clone(&conn_tracker);
        // Child token: per-connection cancellation chained to the loop's token.
        let cancel = cancel.child_token();
        tokio::spawn(async move {
            match handle_quic_connection(incoming, route, port, Arc::clone(&metrics), &cancel).await {
                Ok(()) => debug!("QUIC connection from {} completed", remote_addr),
                Err(e) => debug!("QUIC connection from {} error: {}", remote_addr, e),
            }
            // Cleanup
            conn_tracker.connection_closed(&ip);
            metrics.connection_closed(route_id.as_deref(), Some(&ip_str));
        });
    }
    // Graceful shutdown: close endpoint and wait for in-flight connections
    endpoint.close(quinn::VarInt::from_u32(0), b"server shutting down");
    endpoint.wait_idle().await;
    info!("QUIC endpoint on port {} shut down", port);
}
/// Handle a single accepted QUIC connection.
///
/// Completes the handshake, then dispatches on the route's HTTP/3 flag:
/// HTTP/3 handling when enabled, raw QUIC-stream forwarding otherwise.
async fn handle_quic_connection(
    incoming: quinn::Incoming,
    route: RouteConfig,
    port: u16,
    metrics: Arc<MetricsCollector>,
    cancel: &CancellationToken,
) -> anyhow::Result<()> {
    // Drive the QUIC handshake to completion.
    let conn = incoming.await?;
    debug!("QUIC connection established from {}", conn.remote_address());
    // HTTP/3 is opted into via action.udp.quic.enable_http3 (defaults to off).
    let http3_enabled = matches!(
        route
            .action
            .udp
            .as_ref()
            .and_then(|u| u.quic.as_ref())
            .and_then(|q| q.enable_http3),
        Some(true)
    );
    if http3_enabled {
        // Phase 5: dispatch to H3ProxyService
        // For now, log and accept streams for basic handling
        debug!("HTTP/3 enabled for route {:?}, dispatching to H3 handler", route.name);
        handle_h3_connection(conn, route, port, &metrics, cancel).await
    } else {
        // Non-HTTP3 QUIC: bidirectional stream forwarding to TCP backend
        handle_quic_stream_forwarding(conn, route, port, metrics, cancel).await
    }
}
/// Forward QUIC streams bidirectionally to a TCP backend.
///
/// For each accepted bidirectional QUIC stream, connects to the backend
/// via TCP and forwards data in both directions. Quinn's RecvStream/SendStream
/// implement AsyncRead/AsyncWrite, enabling reuse of existing forwarder patterns.
async fn handle_quic_stream_forwarding(
    connection: quinn::Connection,
    route: RouteConfig,
    port: u16,
    metrics: Arc<MetricsCollector>,
    cancel: &CancellationToken,
) -> anyhow::Result<()> {
    let remote_addr = connection.remote_address();
    let route_id = route.name.as_deref().or(route.id.as_deref());
    let metrics_arc = metrics;
    // Resolve backend target — only the first configured target is used here.
    let target = route.action.targets.as_ref()
        .and_then(|t| t.first())
        .ok_or_else(|| anyhow::anyhow!("No target for QUIC route"))?;
    // NOTE(review): `host.first()` is formatted straight into the address string;
    // if it yields an Option-like value rather than a displayable host, the
    // resulting address would be invalid — confirm the host type's Display impl.
    let backend_host = target.host.first();
    let backend_port = target.port.resolve(port);
    let backend_addr = format!("{}:{}", backend_host, backend_port);
    loop {
        // Accept the next bidirectional stream, or stop on cancel/connection close.
        let (send_stream, recv_stream) = tokio::select! {
            _ = cancel.cancelled() => break,
            result = connection.accept_bi() => {
                match result {
                    Ok(streams) => streams,
                    // Normal termination paths: peer or local side closed the connection.
                    Err(quinn::ConnectionError::ApplicationClosed(_)) => break,
                    Err(quinn::ConnectionError::LocallyClosed) => break,
                    Err(e) => {
                        debug!("QUIC stream accept error from {}: {}", remote_addr, e);
                        break;
                    }
                }
            }
        };
        let backend_addr = backend_addr.clone();
        let ip_str = remote_addr.ip().to_string();
        let stream_metrics = Arc::clone(&metrics_arc);
        let stream_route_id = route_id.map(|s| s.to_string());
        // Spawn a task for each QUIC stream → TCP bidirectional forwarding
        tokio::spawn(async move {
            match forward_quic_stream_to_tcp(
                send_stream,
                recv_stream,
                &backend_addr,
            ).await {
                Ok((bytes_in, bytes_out)) => {
                    // Byte counts only reported on success; failed streams are logged.
                    stream_metrics.record_bytes(
                        bytes_in, bytes_out,
                        stream_route_id.as_deref(),
                        Some(&ip_str),
                    );
                    debug!("QUIC stream forwarded: {}B in, {}B out", bytes_in, bytes_out);
                }
                Err(e) => {
                    debug!("QUIC stream forwarding error: {}", e);
                }
            }
        });
    }
    Ok(())
}
/// Forward a single QUIC bidirectional stream to a TCP backend connection.
///
/// Returns `(bytes client→backend, bytes backend→client)`.
///
/// Each direction half-closes its write side as soon as its own copy
/// finishes: the backend sees a TCP FIN when the client's QUIC stream ends,
/// and the client sees a QUIC stream FIN when the backend closes. The
/// previous ordering (shutdown only after BOTH directions completed) could
/// stall a peer that waits for EOF before replying.
async fn forward_quic_stream_to_tcp(
    mut quic_send: quinn::SendStream,
    mut quic_recv: quinn::RecvStream,
    backend_addr: &str,
) -> anyhow::Result<(u64, u64)> {
    // Connect to backend TCP
    let tcp_stream = tokio::net::TcpStream::connect(backend_addr).await?;
    let (mut tcp_read, mut tcp_write) = tcp_stream.into_split();
    // client → backend: copy, then shut down the TCP write half (sends FIN)
    let client_to_backend = async {
        let copied = tokio::io::copy(&mut quic_recv, &mut tcp_write).await;
        let _ = tcp_write.shutdown().await;
        copied
    };
    // backend → client: copy, then finish the QUIC send stream (stream FIN)
    let backend_to_client = async {
        let copied = tokio::io::copy(&mut tcp_read, &mut quic_send).await;
        let _ = quic_send.finish();
        copied
    };
    // Run both directions concurrently; copy errors degrade to a 0 byte count.
    let (c2b, b2c) = tokio::join!(client_to_backend, backend_to_client);
    Ok((c2b.unwrap_or(0), b2c.unwrap_or(0)))
}
/// Placeholder for HTTP/3 connection handling (Phase 5).
///
/// Once h3_service is implemented, this will delegate to it. Until then the
/// connection is held open and no requests are processed; the function
/// returns when either the connection closes or shutdown is requested.
async fn handle_h3_connection(
    connection: quinn::Connection,
    _route: RouteConfig,
    _port: u16,
    _metrics: &MetricsCollector,
    cancel: &CancellationToken,
) -> anyhow::Result<()> {
    warn!("HTTP/3 handling not yet fully implemented — accepting connection but no request processing");
    // Park until the connection ends or we are cancelled, whichever comes first.
    tokio::select! {
        close_reason = connection.closed() => {
            debug!("HTTP/3 connection closed: {}", close_reason);
        }
        _ = cancel.cancelled() => {}
    }
    Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
// Smoke test: a QUIC endpoint can be created from a minimal rustls config
// carrying a self-signed cert and the h3 ALPN protocol.
#[tokio::test]
async fn test_quic_endpoint_requires_tls_config() {
    // Install the ring crypto provider for tests
    let _ = rustls::crypto::ring::default_provider().install_default();
    // Generate a single self-signed cert and use its key pair
    let self_signed = rcgen::generate_simple_self_signed(vec!["localhost".to_string()])
        .unwrap();
    let cert_der = self_signed.cert.der().clone();
    let key_der = self_signed.key_pair.serialize_der();
    let mut tls_config = RustlsServerConfig::builder()
        .with_no_client_auth()
        .with_single_cert(
            vec![cert_der.into()],
            rustls::pki_types::PrivateKeyDer::try_from(key_der).unwrap(),
        )
        .unwrap();
    tls_config.alpn_protocols = vec![b"h3".to_vec()];
    // Port 0 = OS assigns a free port
    let result = create_quic_endpoint(0, Arc::new(tls_config));
    assert!(result.is_ok(), "QUIC endpoint creation failed: {:?}", result.err());
}
}

View File

@@ -196,6 +196,7 @@ pub fn is_http(data: &[u8]) -> bool {
b"PATC",
b"OPTI",
b"CONN",
b"PRI ", // HTTP/2 connection preface
];
starts.iter().any(|s| data.starts_with(s))
}

View File

@@ -0,0 +1,19 @@
//! Socket-level options for TCP streams (keepalive, etc.).
//!
//! Uses `socket2::SockRef::from()` to borrow the raw fd without ownership transfer.
use std::io;
use std::time::Duration;
use tokio::net::TcpStream;
/// Apply TCP keepalive to a connected socket.
///
/// Enables SO_KEEPALIVE with `delay` as the idle time before the first probe.
/// On Linux, the interval between subsequent probes is set to the same value.
/// Borrows the raw fd via `socket2::SockRef::from()` — no ownership transfer.
pub fn apply_keepalive(stream: &TcpStream, delay: Duration) -> io::Result<()> {
    #[allow(unused_mut)]
    let mut keepalive = socket2::TcpKeepalive::new().with_time(delay);
    #[cfg(target_os = "linux")]
    {
        keepalive = keepalive.with_interval(delay);
    }
    socket2::SockRef::from(stream).set_tcp_keepalive(&keepalive)
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,22 +1,121 @@
use std::collections::HashMap;
use std::io::BufReader;
use std::sync::Arc;
use std::sync::{Arc, OnceLock};
use rustls::pki_types::{CertificateDer, PrivateKeyDer};
use rustls::server::ResolvesServerCert;
use rustls::sign::CertifiedKey;
use rustls::ServerConfig;
use tokio::net::TcpStream;
use tokio_rustls::{TlsAcceptor, TlsConnector, server::TlsStream as ServerTlsStream};
use tracing::debug;
use tracing::{debug, info};
use crate::tcp_listener::TlsCertConfig;
/// Ensure the default crypto provider is installed.
/// The result is deliberately ignored: install_default errors when a
/// process-wide provider was already installed, which is fine here.
fn ensure_crypto_provider() {
    let _ = rustls::crypto::ring::default_provider().install_default();
}
/// SNI-based certificate resolver with pre-parsed CertifiedKeys.
/// Enables shared ServerConfig across connections — avoids per-connection PEM parsing
/// and enables TLS session resumption.
#[derive(Debug)]
pub struct CertResolver {
    // Domain key (exact hostname or "*.example.com" wildcard) -> parsed key material.
    certs: HashMap<String, Arc<CertifiedKey>>,
    // Served when nothing matches or no SNI is sent; the "*" entry wins,
    // otherwise an arbitrary cert from the map is chosen.
    fallback: Option<Arc<CertifiedKey>>,
}
impl CertResolver {
    /// Build a resolver from PEM-encoded cert/key configs.
    /// Parses all PEM data upfront so connections only do a cheap HashMap lookup.
    ///
    /// Errors if any certificate chain or private key fails to parse.
    pub fn new(configs: &HashMap<String, TlsCertConfig>) -> Result<Self, Box<dyn std::error::Error + Send + Sync>> {
        ensure_crypto_provider();
        let provider = rustls::crypto::ring::default_provider();
        let mut certs = HashMap::new();
        let mut fallback = None;
        for (domain, cfg) in configs {
            let cert_chain = load_certs(&cfg.cert_pem)?;
            let key = load_private_key(&cfg.key_pem)?;
            let ck = Arc::new(CertifiedKey::from_der(cert_chain, key, &provider)
                .map_err(|e| format!("CertifiedKey for {}: {}", domain, e))?);
            // "*" acts as the explicit catch-all entry.
            if domain == "*" {
                fallback = Some(Arc::clone(&ck));
            }
            certs.insert(domain.clone(), ck);
        }
        // If no explicit "*" fallback, use the first available cert
        // (HashMap iteration order — effectively arbitrary).
        if fallback.is_none() {
            fallback = certs.values().next().map(Arc::clone);
        }
        Ok(Self { certs, fallback })
    }
}
impl ResolvesServerCert for CertResolver {
    /// Pick a certificate for this handshake: exact SNI match first, then a
    /// single-level wildcard (`sub.example.com` → `*.example.com`), then the
    /// configured fallback (also used when the client sent no SNI at all).
    fn resolve(&self, client_hello: rustls::server::ClientHello<'_>) -> Option<Arc<CertifiedKey>> {
        let Some(sni) = client_hello.server_name() else {
            return self.fallback.clone();
        };
        let wildcard_match = || {
            sni.split_once('.')
                .and_then(|(_, parent)| self.certs.get(&format!("*.{}", parent)))
        };
        self.certs
            .get(sni)
            .or_else(wildcard_match)
            .cloned()
            .or_else(|| self.fallback.clone())
    }
}
/// Build a shared TLS acceptor with SNI resolution, session cache, and session tickets.
/// The returned acceptor can be reused across all connections (cheap Arc clone).
///
/// # Errors
/// Fails only if the rustls ticketer cannot be constructed.
pub fn build_shared_tls_acceptor(resolver: CertResolver) -> Result<TlsAcceptor, Box<dyn std::error::Error + Send + Sync>> {
    ensure_crypto_provider();
    let mut config = ServerConfig::builder()
        .with_no_client_auth()
        .with_cert_resolver(Arc::new(resolver));
    // ALPN: advertise h2 and http/1.1 for client-facing HTTP/2 support
    config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
    // Shared session cache — enables session ID resumption across connections
    config.session_storage = rustls::server::ServerSessionMemoryCache::new(4096);
    // Session ticket resumption (12-hour lifetime, Chacha20Poly1305 encrypted)
    // NOTE(review): lifetime/cipher above are rustls `Ticketer` defaults — confirm
    // against the rustls version actually pinned in Cargo.lock.
    config.ticketer = rustls::crypto::ring::Ticketer::new()
        .map_err(|e| format!("Ticketer: {}", e))?;
    info!("Built shared TLS config with session cache (4096), ticket support, and ALPN h2+http/1.1");
    Ok(TlsAcceptor::from(Arc::new(config)))
}
/// Build a TLS acceptor from PEM-encoded cert and key data.
/// Advertises both h2 and http/1.1 via ALPN (for client-facing connections).
///
/// Thin wrapper over `build_tls_acceptor_with_config` with no route-level
/// TLS tuning applied.
pub fn build_tls_acceptor(cert_pem: &str, key_pem: &str) -> Result<TlsAcceptor, Box<dyn std::error::Error + Send + Sync>> {
    build_tls_acceptor_with_config(cert_pem, key_pem, None)
}
/// Build a TLS acceptor for backend servers that only speak HTTP/1.1.
/// Does NOT advertise h2 in ALPN, preventing false h2 auto-detection.
pub fn build_tls_acceptor_h1_only(cert_pem: &str, key_pem: &str) -> Result<TlsAcceptor, Box<dyn std::error::Error + Send + Sync>> {
    ensure_crypto_provider();
    let chain = load_certs(cert_pem)?;
    let private_key = load_private_key(key_pem)?;
    let mut server_config = ServerConfig::builder()
        .with_no_client_auth()
        .with_single_cert(chain, private_key)?;
    // Advertise only http/1.1 so clients never negotiate h2 against an h1 backend.
    server_config.alpn_protocols = vec![b"http/1.1".to_vec()];
    Ok(TlsAcceptor::from(Arc::new(server_config)))
}
/// Build a TLS acceptor with optional RouteTls configuration for version/cipher tuning.
pub fn build_tls_acceptor_with_config(
cert_pem: &str,
@@ -40,6 +139,9 @@ pub fn build_tls_acceptor_with_config(
.with_single_cert(certs, key)?
};
// ALPN: advertise h2 and http/1.1 for client-facing HTTP/2 support
config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
// Apply session timeout if configured
if let Some(route_tls) = tls_config {
if let Some(timeout_secs) = route_tls.session_timeout {
@@ -97,21 +199,59 @@ pub async fn accept_tls(
Ok(tls_stream)
}
/// Get or create a shared backend TLS `ClientConfig`.
///
/// Uses `OnceLock` to ensure only one config is created across the entire process.
/// The built-in rustls `Resumption` (session tickets + session IDs) is enabled
/// by default, so all outbound backend connections share the same session cache.
///
/// SECURITY NOTE(review): `InsecureVerifier` disables backend certificate
/// validation entirely. That may be intentional for operator-configured
/// backends, but this config must never be reused for untrusted peers —
/// confirm and consider an explicit opt-in flag.
static SHARED_CLIENT_CONFIG: OnceLock<Arc<rustls::ClientConfig>> = OnceLock::new();

pub fn shared_backend_tls_config() -> Arc<rustls::ClientConfig> {
    SHARED_CLIENT_CONFIG.get_or_init(|| {
        ensure_crypto_provider();
        let config = rustls::ClientConfig::builder()
            .dangerous()
            .with_custom_certificate_verifier(Arc::new(InsecureVerifier))
            .with_no_client_auth();
        info!("Built shared backend TLS client config with session resumption");
        Arc::new(config)
    }).clone()
}
/// Get or create a shared backend TLS `ClientConfig` with ALPN `h2` + `http/1.1`.
///
/// Used for auto-detection mode: the backend server picks its preferred protocol
/// via ALPN, and the proxy reads the negotiated result to decide h1 vs h2 forwarding.
///
/// SECURITY NOTE(review): like `shared_backend_tls_config`, this uses
/// `InsecureVerifier` and performs no backend certificate validation.
static SHARED_CLIENT_CONFIG_ALPN: OnceLock<Arc<rustls::ClientConfig>> = OnceLock::new();

pub fn shared_backend_tls_config_alpn() -> Arc<rustls::ClientConfig> {
    SHARED_CLIENT_CONFIG_ALPN.get_or_init(|| {
        ensure_crypto_provider();
        let mut config = rustls::ClientConfig::builder()
            .dangerous()
            .with_custom_certificate_verifier(Arc::new(InsecureVerifier))
            .with_no_client_auth();
        config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
        info!("Built shared backend TLS client config with ALPN h2+http/1.1 for auto-detection");
        Arc::new(config)
    }).clone()
}
/// Connect to a backend with TLS (for terminate-and-reencrypt mode).
/// Uses the shared backend TLS config for session resumption.
pub async fn connect_tls(
host: &str,
port: u16,
) -> Result<tokio_rustls::client::TlsStream<TcpStream>, Box<dyn std::error::Error + Send + Sync>> {
ensure_crypto_provider();
let config = rustls::ClientConfig::builder()
.dangerous()
.with_custom_certificate_verifier(Arc::new(InsecureVerifier))
.with_no_client_auth();
let connector = TlsConnector::from(Arc::new(config));
let config = shared_backend_tls_config();
let connector = TlsConnector::from(config);
let stream = TcpStream::connect(format!("{}:{}", host, port)).await?;
stream.set_nodelay(true)?;
// Apply keepalive with 60s default (tls_handler doesn't have ConnectionConfig access)
if let Err(e) = crate::socket_opts::apply_keepalive(&stream, std::time::Duration::from_secs(60)) {
debug!("Failed to set keepalive on backend TLS socket: {}", e);
}
let server_name = rustls::pki_types::ServerName::try_from(host.to_string())?;
let tls_stream = connector.connect(server_name, stream).await?;

View File

@@ -0,0 +1,619 @@
//! UDP listener manager.
//!
//! Binds UDP sockets on configured ports, receives datagrams, matches routes,
//! tracks sessions (flows), and forwards datagrams to backend UDP sockets.
use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use arc_swap::ArcSwap;
use tokio::net::UdpSocket;
use tokio::task::JoinHandle;
use tokio::sync::{Mutex, RwLock};
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, warn};
use rustproxy_config::{RouteActionType, TransportProtocol};
use rustproxy_metrics::MetricsCollector;
use rustproxy_routing::{MatchContext, RouteManager};
use crate::connection_tracker::ConnectionTracker;
use crate::udp_session::{SessionKey, UdpSession, UdpSessionConfig, UdpSessionTable};
/// Manages UDP listeners across all configured ports.
///
/// One `recv_loop` task is spawned per raw UDP port (or a QUIC accept loop for
/// ports with QUIC routes); all ports share a single session table and one
/// optional Unix-socket relay to the TypeScript datagram handler.
pub struct UdpListenerManager {
    /// Port → recv loop task handle
    listeners: HashMap<u16, JoinHandle<()>>,
    /// Hot-reloadable route table
    route_manager: Arc<ArcSwap<RouteManager>>,
    /// Shared metrics collector
    metrics: Arc<MetricsCollector>,
    /// Per-IP session/rate limiting (shared with TCP)
    conn_tracker: Arc<ConnectionTracker>,
    /// Shared session table across all ports
    session_table: Arc<UdpSessionTable>,
    /// Cancellation for graceful shutdown
    cancel_token: CancellationToken,
    /// Unix socket path for datagram handler relay
    datagram_handler_relay: Arc<RwLock<Option<String>>>,
    /// Persistent write half of the relay connection
    relay_writer: Arc<Mutex<Option<tokio::net::unix::OwnedWriteHalf>>>,
    /// Cancel token for the current relay reply reader task
    relay_reader_cancel: Option<CancellationToken>,
}
impl Drop for UdpListenerManager {
    // Best-effort shutdown if `stop()` was never awaited: signal cooperative
    // cancellation first, then hard-abort every listener task.
    fn drop(&mut self) {
        self.cancel_token.cancel();
        for (_, handle) in self.listeners.drain() {
            handle.abort();
        }
    }
}
impl UdpListenerManager {
    /// Create a manager with no active listeners.
    ///
    /// The route table is wrapped in `ArcSwap` so `update_routes` can hot-swap
    /// it without locking the per-datagram fast path.
    pub fn new(
        route_manager: Arc<RouteManager>,
        metrics: Arc<MetricsCollector>,
        conn_tracker: Arc<ConnectionTracker>,
        cancel_token: CancellationToken,
    ) -> Self {
        Self {
            listeners: HashMap::new(),
            route_manager: Arc::new(ArcSwap::from(route_manager)),
            metrics,
            conn_tracker,
            session_table: Arc::new(UdpSessionTable::new()),
            cancel_token,
            datagram_handler_relay: Arc::new(RwLock::new(None)),
            relay_writer: Arc::new(Mutex::new(None)),
            relay_reader_cancel: None,
        }
    }

    /// Update the route manager (for hot-reload). Lock-free swap; in-flight
    /// datagrams keep the table they already loaded.
    pub fn update_routes(&self, route_manager: Arc<RouteManager>) {
        self.route_manager.store(route_manager);
    }

    /// Start listening on a UDP port.
    ///
    /// If any route on this port has QUIC config (`action.udp.quic`), a quinn
    /// endpoint is created instead of a raw UDP socket.
    pub async fn add_port(&mut self, port: u16) -> anyhow::Result<()> {
        self.add_port_with_tls(port, None).await
    }

    /// Start listening on a UDP port with optional TLS config for QUIC.
    ///
    /// Idempotent: a port already listed in `listeners` is a no-op.
    pub async fn add_port_with_tls(
        &mut self,
        port: u16,
        tls_config: Option<std::sync::Arc<rustls::ServerConfig>>,
    ) -> anyhow::Result<()> {
        if self.listeners.contains_key(&port) {
            debug!("UDP port {} already listening", port);
            return Ok(());
        }
        // Check if any route on this port uses QUIC
        let rm = self.route_manager.load();
        let has_quic = rm.routes_for_port(port).iter().any(|r| {
            r.action.udp.as_ref()
                .and_then(|u| u.quic.as_ref())
                .is_some()
        });
        if has_quic {
            if let Some(tls) = tls_config {
                // Create QUIC endpoint
                let endpoint = crate::quic_handler::create_quic_endpoint(port, tls)?;
                let handle = tokio::spawn(crate::quic_handler::quic_accept_loop(
                    endpoint,
                    port,
                    Arc::clone(&self.route_manager),
                    Arc::clone(&self.metrics),
                    Arc::clone(&self.conn_tracker),
                    self.cancel_token.child_token(),
                ));
                self.listeners.insert(port, handle);
                info!("QUIC endpoint started on port {}", port);
                // NOTE(review): this early return skips the `len() == 1`
                // cleanup bootstrap below. If the FIRST port added is QUIC,
                // a later raw-UDP port sees len() == 2 and the idle-session
                // cleanup task is never started — confirm, and consider a
                // dedicated `cleanup_started` flag instead of the len() check.
                return Ok(());
            } else {
                warn!("QUIC routes on port {} but no TLS config provided, falling back to raw UDP", port);
            }
        }
        // Raw UDP listener (wildcard bind on all interfaces)
        let addr: SocketAddr = ([0, 0, 0, 0], port).into();
        let socket = UdpSocket::bind(addr).await?;
        let socket = Arc::new(socket);
        info!("UDP listener bound on port {}", port);
        let handle = tokio::spawn(Self::recv_loop(
            socket,
            port,
            Arc::clone(&self.route_manager),
            Arc::clone(&self.metrics),
            Arc::clone(&self.conn_tracker),
            Arc::clone(&self.session_table),
            Arc::clone(&self.datagram_handler_relay),
            Arc::clone(&self.relay_writer),
            self.cancel_token.child_token(),
        ));
        self.listeners.insert(port, handle);
        // Start the session cleanup task if this is the first port.
        // NOTE(review): `len() == 1` also re-triggers after all ports are
        // removed and one is re-added, spawning a second cleanup task (both
        // cancel on the shared token, so the cost is duplicate work, not
        // corruption) — verify this is intended.
        if self.listeners.len() == 1 {
            self.start_cleanup_task();
        }
        Ok(())
    }

    /// Stop listening on a UDP port. Aborts the recv loop; existing sessions
    /// for that port stay in the table until the idle cleanup expires them.
    pub fn remove_port(&mut self, port: u16) {
        if let Some(handle) = self.listeners.remove(&port) {
            handle.abort();
            info!("UDP listener removed from port {}", port);
        }
    }

    /// Get all listening UDP ports (sorted ascending).
    pub fn listening_ports(&self) -> Vec<u16> {
        let mut ports: Vec<u16> = self.listeners.keys().copied().collect();
        ports.sort();
        ports
    }

    /// Stop all listeners and clean up.
    pub async fn stop(&mut self) {
        self.cancel_token.cancel();
        for (port, handle) in self.listeners.drain() {
            handle.abort();
            debug!("UDP listener stopped on port {}", port);
        }
        info!("All UDP listeners stopped, {} sessions remaining",
            self.session_table.session_count());
    }

    /// Set the datagram handler relay socket path and establish connection.
    ///
    /// Replaces any previous relay: the old reply-reader task is cancelled and
    /// the write half is swapped. NOTE(review): write errors inside
    /// `relay_datagram_via_writer` do not clear `relay_writer`, so a broken
    /// relay keeps failing until this method is called again — confirm the TS
    /// side re-invokes it on reconnect.
    pub async fn set_datagram_handler_relay(&mut self, path: String) {
        // Cancel previous relay reader task if any
        if let Some(old_cancel) = self.relay_reader_cancel.take() {
            old_cancel.cancel();
        }
        // Store the path
        {
            let mut relay = self.datagram_handler_relay.write().await;
            *relay = Some(path.clone());
        }
        // Connect to the Unix socket
        match tokio::net::UnixStream::connect(&path).await {
            Ok(stream) => {
                let (read_half, write_half) = stream.into_split();
                // Store write half for sending datagrams
                {
                    let mut writer = self.relay_writer.lock().await;
                    *writer = Some(write_half);
                }
                // Spawn reply reader — reads length-prefixed JSON replies from TS
                // and sends them back to clients via the listener sockets
                let cancel = self.cancel_token.child_token();
                self.relay_reader_cancel = Some(cancel.clone());
                tokio::spawn(Self::relay_reply_reader(read_half, cancel));
                info!("Datagram handler relay connected to {}", path);
            }
            Err(e) => {
                error!("Failed to connect datagram handler relay to {}: {}", path, e);
            }
        }
    }

    /// Start periodic session cleanup task (every 10 s, until cancellation).
    fn start_cleanup_task(&self) {
        let session_table = Arc::clone(&self.session_table);
        let metrics = Arc::clone(&self.metrics);
        let conn_tracker = Arc::clone(&self.conn_tracker);
        let cancel = self.cancel_token.child_token();
        let route_manager = Arc::clone(&self.route_manager);
        tokio::spawn(async move {
            let mut interval = tokio::time::interval(std::time::Duration::from_secs(10));
            loop {
                tokio::select! {
                    _ = cancel.cancelled() => break,
                    _ = interval.tick() => {
                        // Determine the timeout from routes (use the minimum configured timeout,
                        // or default 60s if none configured)
                        let rm = route_manager.load();
                        let timeout_ms = Self::get_min_session_timeout(&rm);
                        let removed = session_table.cleanup_idle(timeout_ms, &metrics, &conn_tracker);
                        if removed > 0 {
                            debug!("UDP session cleanup: removed {} idle sessions, {} remaining",
                                removed, session_table.session_count());
                        }
                    }
                }
            }
        });
    }

    /// Get the minimum session timeout across all UDP routes.
    ///
    /// NOTE(review): currently a hard-coded 60 s regardless of route config —
    /// per-route `session_timeout` values are NOT consulted here despite the
    /// name; the doc comment in `start_cleanup_task` overstates what this does.
    fn get_min_session_timeout(_rm: &RouteManager) -> u64 {
        // Default to 60 seconds; actual per-route timeouts checked during cleanup
        60_000
    }

    /// Main receive loop for a UDP port.
    ///
    /// For each datagram: match a route, relay to the TS socket handler if the
    /// route is `SocketHandler`, otherwise find-or-create a session (with
    /// per-IP rate and session limits) and forward to the backend socket.
    async fn recv_loop(
        socket: Arc<UdpSocket>,
        port: u16,
        route_manager: Arc<ArcSwap<RouteManager>>,
        metrics: Arc<MetricsCollector>,
        conn_tracker: Arc<ConnectionTracker>,
        session_table: Arc<UdpSessionTable>,
        _datagram_handler_relay: Arc<RwLock<Option<String>>>,
        relay_writer: Arc<Mutex<Option<tokio::net::unix::OwnedWriteHalf>>>,
        cancel: CancellationToken,
    ) {
        // Use a reasonably large buffer; actual max is per-route but we need a single buffer
        let mut buf = vec![0u8; 65535];
        loop {
            let (len, client_addr) = tokio::select! {
                _ = cancel.cancelled() => {
                    debug!("UDP recv loop on port {} cancelled", port);
                    break;
                }
                result = socket.recv_from(&mut buf) => {
                    match result {
                        Ok(r) => r,
                        Err(e) => {
                            warn!("UDP recv error on port {}: {}", port, e);
                            continue;
                        }
                    }
                }
            };
            let datagram = &buf[..len];
            // Route matching — fresh load so hot-reloads take effect per datagram
            let rm = route_manager.load();
            let ip_str = client_addr.ip().to_string();
            let ctx = MatchContext {
                port,
                domain: None,
                path: None,
                client_ip: Some(&ip_str),
                tls_version: None,
                headers: None,
                is_tls: false,
                protocol: Some("udp"),
                transport: Some(TransportProtocol::Udp),
            };
            let route_match = match rm.find_route(&ctx) {
                Some(m) => m,
                None => {
                    debug!("No UDP route matched for port {} from {}", port, client_addr);
                    continue;
                }
            };
            let route = route_match.route;
            // NOTE(review): metrics key prefers `name` over `id` — confirm this
            // precedence matches the TCP path's labeling.
            let route_id = route.name.as_deref().or(route.id.as_deref());
            // Socket handler routes → relay datagram to TS via persistent Unix socket
            if route.action.action_type == RouteActionType::SocketHandler {
                if let Err(e) = Self::relay_datagram_via_writer(
                    &relay_writer,
                    route_id.unwrap_or("unknown"),
                    &client_addr,
                    port,
                    datagram,
                ).await {
                    debug!("Failed to relay UDP datagram to TS: {}", e);
                }
                continue;
            }
            // Get UDP config from route
            let udp_config = UdpSessionConfig::from_route_udp(route.action.udp.as_ref());
            // Check datagram size
            if len as u32 > udp_config.max_datagram_size {
                debug!("UDP datagram too large ({} > {}) from {}, dropping",
                    len, udp_config.max_datagram_size, client_addr);
                continue;
            }
            // Session lookup or create
            let session_key: SessionKey = (client_addr, port);
            let session = match session_table.get(&session_key) {
                Some(s) => s,
                None => {
                    // New session — check per-IP limits
                    if !conn_tracker.try_accept(&client_addr.ip()) {
                        debug!("UDP session rejected for {} (rate limit)", client_addr);
                        continue;
                    }
                    if !session_table.can_create_session(
                        &client_addr.ip(),
                        udp_config.max_sessions_per_ip,
                    ) {
                        debug!("UDP session rejected for {} (per-IP session limit)", client_addr);
                        continue;
                    }
                    // Resolve target: matched target first, else first configured target
                    let target = match route_match.target.or_else(|| {
                        route.action.targets.as_ref().and_then(|t| t.first())
                    }) {
                        Some(t) => t,
                        None => {
                            warn!("No target for UDP route {:?}", route_id);
                            continue;
                        }
                    };
                    let backend_host = target.host.first();
                    let backend_port = target.port.resolve(port);
                    let backend_addr = format!("{}:{}", backend_host, backend_port);
                    // Create backend socket (ephemeral local port, connect()ed so
                    // recv() only yields datagrams from this backend)
                    let backend_socket = match UdpSocket::bind("0.0.0.0:0").await {
                        Ok(s) => s,
                        Err(e) => {
                            error!("Failed to bind backend UDP socket: {}", e);
                            continue;
                        }
                    };
                    if let Err(e) = backend_socket.connect(&backend_addr).await {
                        error!("Failed to connect backend UDP socket to {}: {}", backend_addr, e);
                        continue;
                    }
                    let backend_socket = Arc::new(backend_socket);
                    debug!("New UDP session: {} -> {} (via port {})",
                        client_addr, backend_addr, port);
                    // Spawn return-path relay task
                    let session_cancel = CancellationToken::new();
                    let return_task = tokio::spawn(Self::return_relay(
                        Arc::clone(&backend_socket),
                        Arc::clone(&socket),
                        client_addr,
                        Arc::clone(&session_table),
                        session_key,
                        Arc::clone(&metrics),
                        route_id.map(|s| s.to_string()),
                        session_cancel.child_token(),
                    ));
                    let session = Arc::new(UdpSession {
                        backend_socket,
                        last_activity: std::sync::atomic::AtomicU64::new(session_table.elapsed_ms()),
                        created_at: std::time::Instant::now(),
                        route_id: route_id.map(|s| s.to_string()),
                        source_ip: client_addr.ip(),
                        client_addr,
                        return_task,
                        cancel: session_cancel,
                    });
                    // NOTE(review): on insert failure the session Arc drops (its
                    // Drop cancels the relay task), but the `try_accept` call
                    // above is not compensated — verify whether try_accept
                    // consumes a slot that needs explicit release here.
                    if !session_table.insert(session_key, Arc::clone(&session), udp_config.max_sessions_per_ip) {
                        warn!("Failed to insert UDP session (race condition)");
                        continue;
                    }
                    // Track in metrics
                    conn_tracker.connection_opened(&client_addr.ip());
                    metrics.connection_opened(route_id, Some(&ip_str));
                    metrics.udp_session_opened();
                    session
                }
            };
            // Forward datagram to backend
            match session.backend_socket.send(datagram).await {
                Ok(_) => {
                    session.last_activity.store(session_table.elapsed_ms(), Ordering::Relaxed);
                    metrics.record_bytes(len as u64, 0, route_id, Some(&ip_str));
                    metrics.record_datagram_in();
                }
                Err(e) => {
                    debug!("Failed to send UDP datagram to backend: {}", e);
                }
            }
        }
    }

    /// Return-path relay: backend → client.
    ///
    /// Runs until cancelled or a socket error; on exit the session stays in
    /// the table and is reaped by the idle cleanup task.
    async fn return_relay(
        backend_socket: Arc<UdpSocket>,
        listener_socket: Arc<UdpSocket>,
        client_addr: SocketAddr,
        session_table: Arc<UdpSessionTable>,
        session_key: SessionKey,
        metrics: Arc<MetricsCollector>,
        route_id: Option<String>,
        cancel: CancellationToken,
    ) {
        let mut buf = vec![0u8; 65535];
        let ip_str = client_addr.ip().to_string();
        loop {
            let len = tokio::select! {
                _ = cancel.cancelled() => break,
                result = backend_socket.recv(&mut buf) => {
                    match result {
                        Ok(len) => len,
                        Err(e) => {
                            debug!("UDP backend recv error for {}: {}", client_addr, e);
                            break;
                        }
                    }
                }
            };
            // Send reply back to client via the original listener socket, so the
            // source port the client sees matches the port it sent to
            match listener_socket.send_to(&buf[..len], client_addr).await {
                Ok(_) => {
                    // Update session activity
                    if let Some(session) = session_table.get(&session_key) {
                        session.last_activity.store(session_table.elapsed_ms(), Ordering::Relaxed);
                    }
                    metrics.record_bytes(0, len as u64, route_id.as_deref(), Some(&ip_str));
                    metrics.record_datagram_out();
                }
                Err(e) => {
                    debug!("Failed to send UDP reply to {}: {}", client_addr, e);
                    break;
                }
            }
        }
    }

    /// Send a datagram to TS via the persistent relay writer.
    ///
    /// Frame format: 4-byte big-endian length prefix + JSON body with the
    /// payload base64-encoded. Errors if no relay connection is established.
    async fn relay_datagram_via_writer(
        writer: &Mutex<Option<tokio::net::unix::OwnedWriteHalf>>,
        route_key: &str,
        client_addr: &SocketAddr,
        dest_port: u16,
        datagram: &[u8],
    ) -> anyhow::Result<()> {
        use base64::Engine;
        let payload_b64 = base64::engine::general_purpose::STANDARD.encode(datagram);
        let msg = serde_json::json!({
            "type": "datagram",
            "routeKey": route_key,
            "sourceIp": client_addr.ip().to_string(),
            "sourcePort": client_addr.port(),
            "destPort": dest_port,
            "payloadBase64": payload_b64,
        });
        let json = serde_json::to_vec(&msg)?;
        // Lock held across both writes so frames from concurrent ports never interleave
        let mut guard = writer.lock().await;
        let stream = guard.as_mut()
            .ok_or_else(|| anyhow::anyhow!("Datagram relay not connected"))?;
        // Length-prefixed frame
        let len_bytes = (json.len() as u32).to_be_bytes();
        stream.write_all(&len_bytes).await?;
        stream.write_all(&json).await?;
        stream.flush().await?;
        Ok(())
    }

    /// Background task reading reply frames from the TS datagram handler.
    /// Parses replies and sends them back to the original client via UDP.
    async fn relay_reply_reader(
        mut reader: tokio::net::unix::OwnedReadHalf,
        cancel: CancellationToken,
    ) {
        use base64::Engine;
        let mut len_buf = [0u8; 4];
        loop {
            // Read length prefix
            let read_result = tokio::select! {
                _ = cancel.cancelled() => break,
                result = reader.read_exact(&mut len_buf) => result,
            };
            match read_result {
                Ok(_) => {}
                Err(e) => {
                    debug!("Datagram relay reader closed: {}", e);
                    break;
                }
            }
            let frame_len = u32::from_be_bytes(len_buf) as usize;
            // 10 MiB sanity cap — a larger prefix means a corrupt/hostile stream
            if frame_len > 10 * 1024 * 1024 {
                error!("Datagram relay frame too large: {} bytes", frame_len);
                break;
            }
            let mut frame_buf = vec![0u8; frame_len];
            match reader.read_exact(&mut frame_buf).await {
                Ok(_) => {}
                Err(e) => {
                    debug!("Datagram relay reader frame error: {}", e);
                    break;
                }
            }
            // Parse the reply JSON
            let reply: serde_json::Value = match serde_json::from_slice(&frame_buf) {
                Ok(v) => v,
                Err(e) => {
                    debug!("Datagram relay reply parse error: {}", e);
                    continue;
                }
            };
            if reply.get("type").and_then(|v| v.as_str()) != Some("reply") {
                continue;
            }
            let source_ip = reply.get("sourceIp").and_then(|v| v.as_str()).unwrap_or("");
            let source_port = reply.get("sourcePort").and_then(|v| v.as_u64()).unwrap_or(0) as u16;
            let dest_port = reply.get("destPort").and_then(|v| v.as_u64()).unwrap_or(0) as u16;
            let payload_b64 = reply.get("payloadBase64").and_then(|v| v.as_str()).unwrap_or("");
            let payload = match base64::engine::general_purpose::STANDARD.decode(payload_b64) {
                Ok(p) => p,
                Err(e) => {
                    debug!("Datagram relay reply base64 decode error: {}", e);
                    continue;
                }
            };
            let client_addr: SocketAddr = match format!("{}:{}", source_ip, source_port).parse() {
                Ok(a) => a,
                Err(e) => {
                    debug!("Datagram relay reply address parse error: {}", e);
                    continue;
                }
            };
            // Send the reply back to the client via a temporary UDP socket bound to the dest_port
            // We need the listener socket for this port. For simplicity, use a fresh socket.
            // NOTE(review): binding dest_port will normally fail (the listener
            // holds it), so replies usually leave from an EPHEMERAL source port.
            // Clients and NATs that expect the reply from the port they sent to
            // may drop it — consider routing replies through the listener socket.
            let reply_socket = match UdpSocket::bind(format!("0.0.0.0:{}", dest_port)).await {
                Ok(s) => s,
                Err(_) => {
                    // Port already bound by the listener — use unbound socket
                    match UdpSocket::bind("0.0.0.0:0").await {
                        Ok(s) => s,
                        Err(e) => {
                            debug!("Failed to create reply socket: {}", e);
                            continue;
                        }
                    }
                }
            };
            if let Err(e) = reply_socket.send_to(&payload, client_addr).await {
                debug!("Failed to send datagram reply to {}: {}", client_addr, e);
            }
        }
        debug!("Datagram relay reply reader stopped");
    }
}

View File

@@ -0,0 +1,324 @@
//! UDP session (flow) tracking.
//!
//! A UDP "session" is a flow identified by (client_addr, listening_port).
//! Each session maintains a backend socket bound to an ephemeral port and
//! connected to the backend target, plus a background task that relays
//! return datagrams from the backend back to the client.
use std::net::{IpAddr, SocketAddr};
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::time::Instant;
use dashmap::DashMap;
use tokio::net::UdpSocket;
use tokio::task::JoinHandle;
use tokio_util::sync::CancellationToken;
use tracing::debug;
use rustproxy_metrics::MetricsCollector;
use crate::connection_tracker::ConnectionTracker;
/// A single UDP session (flow).
///
/// Dropping the last `Arc<UdpSession>` cancels and aborts the return-path
/// relay task (see the `Drop` impl).
pub struct UdpSession {
    /// Socket bound to ephemeral port, connected to backend
    pub backend_socket: Arc<UdpSocket>,
    /// Milliseconds since the session table's epoch (see `UdpSessionTable::elapsed_ms`)
    pub last_activity: AtomicU64,
    /// When the session was created
    pub created_at: Instant,
    /// Route ID for metrics
    pub route_id: Option<String>,
    /// Source IP for metrics/tracking
    pub source_ip: IpAddr,
    /// Client address (for return path)
    pub client_addr: SocketAddr,
    /// Handle for the return-path relay task
    pub return_task: JoinHandle<()>,
    /// Per-session cancellation
    pub cancel: CancellationToken,
}
impl Drop for UdpSession {
    // Tear down the return-path relay when the last Arc drops: cancel is the
    // cooperative signal; abort guarantees the task stops even if it is parked
    // in recv().
    fn drop(&mut self) {
        self.cancel.cancel();
        self.return_task.abort();
    }
}
/// Configuration for UDP session behavior.
///
/// Defaults here must stay in sync with the `Default` impl and the
/// `unwrap_or` fallbacks in `from_route_udp`.
#[derive(Debug, Clone)]
pub struct UdpSessionConfig {
    /// Idle timeout in milliseconds. Default: 60000.
    pub session_timeout_ms: u64,
    /// Max concurrent sessions per source IP. Default: 1000.
    pub max_sessions_per_ip: u32,
    /// Max accepted datagram size in bytes. Default: 65535.
    pub max_datagram_size: u32,
}
impl Default for UdpSessionConfig {
    // 60 s idle timeout, 1 000 sessions per IP, 64 KiB max datagram — the same
    // values `from_route_udp` falls back to per missing field.
    fn default() -> Self {
        Self {
            session_timeout_ms: 60_000,
            max_sessions_per_ip: 1_000,
            max_datagram_size: 65_535,
        }
    }
}
impl UdpSessionConfig {
    /// Build from a route's optional UDP config.
    ///
    /// Any field the route leaves unset falls back to the corresponding
    /// `Default` value, so a route with no `udp` section behaves exactly
    /// like `UdpSessionConfig::default()`.
    pub fn from_route_udp(udp: Option<&rustproxy_config::RouteUdp>) -> Self {
        let base = Self::default();
        match udp {
            None => base,
            Some(u) => Self {
                session_timeout_ms: u.session_timeout.unwrap_or(base.session_timeout_ms),
                max_sessions_per_ip: u.max_sessions_per_ip.unwrap_or(base.max_sessions_per_ip),
                max_datagram_size: u.max_datagram_size.unwrap_or(base.max_datagram_size),
            },
        }
    }
}
/// Session key: (client address, listening port).
///
/// Including the listen port means the same client can have independent
/// flows on different proxy ports.
pub type SessionKey = (SocketAddr, u16);

/// Tracks all active UDP sessions across all ports.
pub struct UdpSessionTable {
    /// Active sessions keyed by (client_addr, listen_port)
    sessions: DashMap<SessionKey, Arc<UdpSession>>,
    /// Per-IP session counts for enforcing limits
    ip_session_counts: DashMap<IpAddr, u32>,
    /// Time reference for last_activity (monotonic; avoids wall-clock skew)
    epoch: Instant,
}
impl UdpSessionTable {
    pub fn new() -> Self {
        Self {
            sessions: DashMap::new(),
            ip_session_counts: DashMap::new(),
            epoch: Instant::now(),
        }
    }

    /// Get elapsed milliseconds since epoch (for last_activity tracking).
    pub fn elapsed_ms(&self) -> u64 {
        self.epoch.elapsed().as_millis() as u64
    }

    /// Look up an existing session.
    pub fn get(&self, key: &SessionKey) -> Option<Arc<UdpSession>> {
        self.sessions.get(key).map(|entry| Arc::clone(entry.value()))
    }

    /// Check if we can create a new session for this IP (under the per-IP limit).
    ///
    /// Advisory pre-filter only: `insert` re-checks the limit atomically, so
    /// callers must still honor `insert`'s return value.
    pub fn can_create_session(&self, ip: &IpAddr, max_per_ip: u32) -> bool {
        let count = self.ip_session_counts
            .get(ip)
            .map(|c| *c.value())
            .unwrap_or(0);
        count < max_per_ip
    }

    /// Insert a new session. Returns false if the per-IP limit is exceeded.
    ///
    /// If a session already exists under `key` it is replaced and the displaced
    /// session's per-IP slot is released (previously the count leaked on
    /// overwrite). The displaced session's relay task is cancelled by its
    /// `Drop` once the last Arc goes away.
    pub fn insert(
        &self,
        key: SessionKey,
        session: Arc<UdpSession>,
        max_per_ip: u32,
    ) -> bool {
        let ip = session.source_ip;
        // Atomically check and increment the per-IP count while holding the
        // DashMap shard lock for this entry.
        {
            let mut count_entry = self.ip_session_counts.entry(ip).or_insert(0);
            if *count_entry.value() >= max_per_ip {
                return false;
            }
            *count_entry.value_mut() += 1;
        }
        // Release the slot of any session we displace under the same key.
        if let Some(previous) = self.sessions.insert(key, session) {
            self.decrement_ip(&previous.source_ip);
        }
        true
    }

    /// Remove a session and decrement its per-IP count.
    pub fn remove(&self, key: &SessionKey) -> Option<Arc<UdpSession>> {
        let (_, session) = self.sessions.remove(key)?;
        self.decrement_ip(&session.source_ip);
        Some(session)
    }

    /// Decrement the count for `ip`, dropping the entry once it reaches zero.
    fn decrement_ip(&self, ip: &IpAddr) {
        let mut now_zero = false;
        if let Some(mut count) = self.ip_session_counts.get_mut(ip) {
            *count.value_mut() = count.value().saturating_sub(1);
            now_zero = *count.value() == 0;
        }
        // Guard must be dropped before touching the same shard again.
        // `remove_if` re-checks the value under the shard lock, so a concurrent
        // increment between the check above and this removal cannot be lost
        // (the previous unguarded check-then-remove could drop a live count).
        if now_zero {
            self.ip_session_counts.remove_if(ip, |_, c| *c == 0);
        }
    }

    /// Clean up idle sessions past the given timeout.
    /// Returns the number of sessions removed.
    pub fn cleanup_idle(
        &self,
        timeout_ms: u64,
        metrics: &MetricsCollector,
        conn_tracker: &ConnectionTracker,
    ) -> usize {
        let now_ms = self.elapsed_ms();
        let mut removed = 0;
        // Collect keys first so no DashMap iterator guard is held while removing
        let stale_keys: Vec<SessionKey> = self.sessions.iter()
            .filter(|entry| {
                let last = entry.value().last_activity.load(Ordering::Relaxed);
                now_ms.saturating_sub(last) >= timeout_ms
            })
            .map(|entry| *entry.key())
            .collect();
        for key in stale_keys {
            if let Some(session) = self.remove(&key) {
                debug!(
                    "UDP session expired: {} -> port {} (idle {}ms)",
                    session.client_addr, key.1,
                    now_ms.saturating_sub(session.last_activity.load(Ordering::Relaxed))
                );
                conn_tracker.connection_closed(&session.source_ip);
                metrics.connection_closed(
                    session.route_id.as_deref(),
                    Some(&session.source_ip.to_string()),
                );
                metrics.udp_session_closed();
                removed += 1;
            }
        }
        removed
    }

    /// Total number of active sessions.
    pub fn session_count(&self) -> usize {
        self.sessions.len()
    }

    /// Number of tracked IPs with active sessions.
    pub fn tracked_ips(&self) -> usize {
        self.ip_session_counts.len()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::net::{Ipv4Addr, SocketAddrV4};

    // Fixed test client address: 10.0.0.1:<port>.
    fn make_addr(port: u16) -> SocketAddr {
        SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(10, 0, 0, 1), port))
    }

    // Build a throwaway session for table tests.
    //
    // NOTE(review): the current-thread runtime created here is dropped when
    // this function returns, which kills the spawned `return_task`. That is
    // harmless for these synchronous table tests (the handle is only aborted
    // in Drop), but this is NOT a faithful fixture for async tests — confirm
    // before reusing.
    fn make_session(client_addr: SocketAddr, cancel: CancellationToken) -> Arc<UdpSession> {
        // Create a dummy backend socket for testing
        let rt = tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .unwrap();
        let backend_socket = rt.block_on(async {
            Arc::new(UdpSocket::bind("127.0.0.1:0").await.unwrap())
        });
        let child_cancel = cancel.child_token();
        let return_task = rt.spawn(async move {
            child_cancel.cancelled().await;
        });
        Arc::new(UdpSession {
            backend_socket,
            last_activity: AtomicU64::new(0),
            created_at: Instant::now(),
            route_id: None,
            source_ip: client_addr.ip(),
            client_addr,
            return_task,
            cancel,
        })
    }

    // A freshly inserted session is retrievable and counted.
    #[test]
    fn test_session_table_insert_and_get() {
        let table = UdpSessionTable::new();
        let cancel = CancellationToken::new();
        let addr = make_addr(12345);
        let key: SessionKey = (addr, 53);
        let session = make_session(addr, cancel);
        assert!(table.insert(key, session, 1000));
        assert!(table.get(&key).is_some());
        assert_eq!(table.session_count(), 1);
    }

    // Inserting past max_per_ip for the same source IP must be rejected.
    #[test]
    fn test_session_table_per_ip_limit() {
        let table = UdpSessionTable::new();
        let ip = Ipv4Addr::new(10, 0, 0, 1);
        // Insert 2 sessions from same IP, limit is 2
        for port in [12345u16, 12346] {
            let addr = SocketAddr::V4(SocketAddrV4::new(ip, port));
            let cancel = CancellationToken::new();
            let session = make_session(addr, cancel);
            assert!(table.insert((addr, 53), session, 2));
        }
        // Third should be rejected
        let addr3 = SocketAddr::V4(SocketAddrV4::new(ip, 12347));
        let cancel3 = CancellationToken::new();
        let session3 = make_session(addr3, cancel3);
        assert!(!table.insert((addr3, 53), session3, 2));
        assert_eq!(table.session_count(), 2);
    }

    // Removing the last session for an IP also drops its per-IP count entry.
    #[test]
    fn test_session_table_remove() {
        let table = UdpSessionTable::new();
        let cancel = CancellationToken::new();
        let addr = make_addr(12345);
        let key: SessionKey = (addr, 53);
        let session = make_session(addr, cancel);
        table.insert(key, session, 1000);
        assert_eq!(table.session_count(), 1);
        assert_eq!(table.tracked_ips(), 1);
        table.remove(&key);
        assert_eq!(table.session_count(), 0);
        assert_eq!(table.tracked_ips(), 0);
    }

    // Defaults must match the documented values on UdpSessionConfig.
    #[test]
    fn test_session_config_defaults() {
        let config = UdpSessionConfig::default();
        assert_eq!(config.session_timeout_ms, 60_000);
        assert_eq!(config.max_sessions_per_ip, 1_000);
        assert_eq!(config.max_datagram_size, 65_535);
    }

    // Explicit route values override every default.
    #[test]
    fn test_session_config_from_route() {
        let route_udp = rustproxy_config::RouteUdp {
            session_timeout: Some(10_000),
            max_sessions_per_ip: Some(500),
            max_datagram_size: Some(1400),
            quic: None,
        };
        let config = UdpSessionConfig::from_route_udp(Some(&route_udp));
        assert_eq!(config.session_timeout_ms, 10_000);
        assert_eq!(config.max_sessions_per_ip, 500);
        assert_eq!(config.max_datagram_size, 1400);
    }
}

View File

@@ -6,25 +6,28 @@
/// - `example.com` exact match
/// - `**.example.com` matches any depth of subdomain
// NOTE(review): this span is a rendered diff hunk — removed (old) and added
// (new) lines are interleaved with the +/- markers stripped, and an `@@`
// header splits the function body below. It is not compilable as-is; do not
// edit it as code. Comments below flag issues in the *new* (added) lines.
pub fn domain_matches(pattern: &str, domain: &str) -> bool {
let pattern = pattern.trim().to_lowercase();
let domain = domain.trim().to_lowercase();
let pattern = pattern.trim();
let domain = domain.trim();
if pattern == "*" {
return true;
}
if pattern == domain {
if pattern.eq_ignore_ascii_case(domain) {
return true;
}
// Wildcard patterns
if pattern.starts_with("*.") {
// NOTE(review): in the added line below the two clauses are byte-identical —
// `|| pattern.starts_with("*.")` is redundant. Presumably one clause was
// meant to differ, or this is a page-rendering artifact — confirm upstream.
if pattern.starts_with("*.") || pattern.starts_with("*.") {
let suffix = &pattern[2..]; // e.g., "example.com"
// Match exact parent or any single-level subdomain
if domain == suffix {
if domain.eq_ignore_ascii_case(suffix) {
return true;
}
if domain.ends_with(&format!(".{}", suffix)) {
if domain.len() > suffix.len() + 1
&& domain.as_bytes()[domain.len() - suffix.len() - 1] == b'.'
&& domain[domain.len() - suffix.len()..].eq_ignore_ascii_case(suffix)
{
// Check it's a single level subdomain for `*.`
let prefix = &domain[..domain.len() - suffix.len() - 1];
return !prefix.contains('.');
@@ -35,11 +38,22 @@ pub fn domain_matches(pattern: &str, domain: &str) -> bool {
if pattern.starts_with("**.") {
let suffix = &pattern[3..];
// Match exact parent or any depth of subdomain
return domain == suffix || domain.ends_with(&format!(".{}", suffix));
if domain.eq_ignore_ascii_case(suffix) {
return true;
}
if domain.len() > suffix.len() + 1
&& domain.as_bytes()[domain.len() - suffix.len() - 1] == b'.'
&& domain[domain.len() - suffix.len()..].eq_ignore_ascii_case(suffix)
{
return true;
}
return false;
}
// Use glob-match for more complex patterns
glob_match::glob_match(&pattern, &domain)
// Use glob-match for more complex patterns (case-insensitive via lowercasing)
let pattern_lower = pattern.to_lowercase();
let domain_lower = domain.to_lowercase();
glob_match::glob_match(&pattern_lower, &domain_lower)
}
/// Check if a domain matches any of the given patterns.

View File

@@ -1,6 +1,6 @@
use std::collections::HashMap;
use rustproxy_config::{RouteConfig, RouteTarget, TlsMode};
use rustproxy_config::{RouteConfig, RouteTarget, TransportProtocol, TlsMode};
use crate::matchers;
/// Context for route matching (subset of connection info).
@@ -12,6 +12,10 @@ pub struct MatchContext<'a> {
pub tls_version: Option<&'a str>,
pub headers: Option<&'a HashMap<String, String>>,
pub is_tls: bool,
/// Detected protocol: "http", "tcp", "udp", "quic". None when unknown.
pub protocol: Option<&'a str>,
/// Transport protocol of the listener: None = TCP (backward compat), Some(Udp), Some(All).
pub transport: Option<TransportProtocol>,
}
/// Result of a route match.
@@ -58,6 +62,16 @@ impl RouteManager {
manager
}
/// Check if any route on the given port uses header matching.
/// Used to skip expensive header HashMap construction when no route needs it.
pub fn any_route_has_headers(&self, port: u16) -> bool {
    // Absent port entry means no routes on that port, hence no header matchers.
    self.port_index.get(&port).map_or(false, |indices| {
        indices
            .iter()
            .any(|&i| self.routes[i].route_match.headers.is_some())
    })
}
/// Find the best matching route for the given context.
pub fn find_route<'a>(&'a self, ctx: &MatchContext<'_>) -> Option<RouteMatchResult<'a>> {
// Get routes for this port
@@ -80,6 +94,22 @@ impl RouteManager {
fn matches_route(&self, route: &RouteConfig, ctx: &MatchContext<'_>) -> bool {
let rm = &route.route_match;
// Transport filtering: ensure route transport matches context transport
let route_transport = rm.transport.as_ref();
let ctx_transport = ctx.transport.as_ref();
match (route_transport, ctx_transport) {
// Route requires UDP only — reject non-UDP contexts
(Some(TransportProtocol::Udp), None) |
(Some(TransportProtocol::Udp), Some(TransportProtocol::Tcp)) => return false,
// Route requires TCP only — reject UDP contexts
(Some(TransportProtocol::Tcp), Some(TransportProtocol::Udp)) => return false,
// Route has no transport (default = TCP) — reject UDP contexts
(None, Some(TransportProtocol::Udp)) => return false,
// All other combinations match: All matches everything, same transport matches,
// None + None/Tcp matches (backward compat)
_ => {}
}
// Domain matching
if let Some(ref domains) = rm.domains {
if let Some(domain) = ctx.domain {
@@ -87,9 +117,17 @@ impl RouteManager {
if !matchers::domain_matches_any(&patterns, domain) {
return false;
}
} else if ctx.is_tls {
// TLS connection without SNI cannot match a domain-restricted route.
// This prevents session-ticket resumption from misrouting when clients
// omit SNI (RFC 8446 recommends but doesn't mandate SNI on resumption).
// Wildcard-only routes (domains: ["*"]) still match since they accept all.
let patterns = domains.to_vec();
let is_wildcard_only = patterns.iter().all(|d| *d == "*");
if !is_wildcard_only {
return false;
}
}
// If no domain provided but route requires domain, it depends on context
// For TLS passthrough, we need SNI; for other cases we may still match
}
// Path matching
@@ -137,6 +175,17 @@ impl RouteManager {
}
}
// Protocol matching
if let Some(ref required_protocol) = rm.protocol {
if let Some(protocol) = ctx.protocol {
if required_protocol != protocol {
return false;
}
}
// If protocol not yet known (None), allow match — protocol will be
// validated after detection (post-TLS-termination peek)
}
true
}
@@ -272,11 +321,13 @@ mod tests {
id: None,
route_match: RouteMatch {
ports: PortRange::Single(port),
transport: None,
domains: domain.map(|d| DomainSpec::Single(d.to_string())),
path: None,
client_ip: None,
tls_version: None,
headers: None,
protocol: None,
},
action: RouteAction {
action_type: RouteActionType::Forward,
@@ -290,6 +341,7 @@ mod tests {
send_proxy_protocol: None,
headers: None,
advanced: None,
backend_transport: None,
priority: None,
}]),
tls: None,
@@ -300,6 +352,7 @@ mod tests {
forwarding_engine: None,
nftables: None,
send_proxy_protocol: None,
udp: None,
},
headers: None,
security: None,
@@ -327,6 +380,8 @@ mod tests {
tls_version: None,
headers: None,
is_tls: false,
protocol: None,
transport: None,
};
let result = manager.find_route(&ctx);
@@ -349,6 +404,8 @@ mod tests {
tls_version: None,
headers: None,
is_tls: false,
protocol: None,
transport: None,
};
let result = manager.find_route(&ctx).unwrap();
@@ -372,6 +429,8 @@ mod tests {
tls_version: None,
headers: None,
is_tls: false,
protocol: None,
transport: None,
};
assert!(manager.find_route(&ctx).is_none());
@@ -457,6 +516,122 @@ mod tests {
tls_version: None,
headers: None,
is_tls: false,
protocol: None,
transport: None,
};
assert!(manager.find_route(&ctx).is_some());
}
#[test]
fn test_tls_no_sni_rejects_domain_restricted_route() {
    // A TLS handshake that omits SNI must not be routed to a route
    // restricted to a specific domain.
    let router = RouteManager::new(vec![make_route(443, Some("example.com"), 0)]);
    let mc = MatchContext {
        port: 443,
        is_tls: true,
        domain: None,
        path: None,
        client_ip: None,
        tls_version: None,
        headers: None,
        protocol: None,
        transport: None,
    };
    assert!(router.find_route(&mc).is_none());
}
#[test]
fn test_tls_no_sni_rejects_wildcard_subdomain_route() {
    // *.example.com is still domain-restricted: a TLS connection without
    // SNI must not match it.
    let router = RouteManager::new(vec![make_route(443, Some("*.example.com"), 0)]);
    let mc = MatchContext {
        port: 443,
        is_tls: true,
        domain: None,
        path: None,
        client_ip: None,
        tls_version: None,
        headers: None,
        protocol: None,
        transport: None,
    };
    assert!(router.find_route(&mc).is_none());
}
#[test]
fn test_tls_no_sni_matches_wildcard_only_route() {
    // A pure wildcard route ("*") accepts everything, so a TLS
    // connection without SNI SHOULD still match it.
    let router = RouteManager::new(vec![make_route(443, Some("*"), 0)]);
    let mc = MatchContext {
        port: 443,
        is_tls: true,
        domain: None,
        path: None,
        client_ip: None,
        tls_version: None,
        headers: None,
        protocol: None,
        transport: None,
    };
    assert!(router.find_route(&mc).is_some());
}
#[test]
fn test_tls_no_sni_skips_domain_restricted_matches_fallback() {
    // With a high-priority domain-restricted route plus a wildcard
    // catch-all, TLS-without-SNI must skip the former and land on the latter.
    let router = RouteManager::new(vec![
        make_route(443, Some("specific.com"), 10),
        make_route(443, Some("*"), 0),
    ]);
    let mc = MatchContext {
        port: 443,
        is_tls: true,
        domain: None,
        path: None,
        client_ip: None,
        tls_version: None,
        headers: None,
        protocol: None,
        transport: None,
    };
    let hit = router.find_route(&mc);
    assert!(hit.is_some());
    // The matched route must be the wildcard one, not specific.com.
    let domains = hit
        .unwrap()
        .route
        .route_match
        .domains
        .as_ref()
        .map(|d| d.to_vec())
        .unwrap();
    assert!(domains.contains(&"*"));
}
#[test]
fn test_non_tls_no_domain_still_matches_domain_restricted() {
// Non-TLS (plain HTTP) without domain should still match domain-restricted routes
// (the HTTP proxy layer handles Host-based routing)
let routes = vec![make_route(80, Some("example.com"), 0)];
let manager = RouteManager::new(routes);
let ctx = MatchContext {
port: 80,
domain: None,
path: None,
client_ip: None,
tls_version: None,
headers: None,
is_tls: false,
protocol: None,
transport: None,
};
assert!(manager.find_route(&ctx).is_some());
@@ -475,6 +650,8 @@ mod tests {
tls_version: None,
headers: None,
is_tls: false,
protocol: None,
transport: None,
};
assert!(manager.find_route(&ctx).is_some());
@@ -499,6 +676,7 @@ mod tests {
send_proxy_protocol: None,
headers: None,
advanced: None,
backend_transport: None,
priority: Some(10),
},
RouteTarget {
@@ -511,6 +689,7 @@ mod tests {
send_proxy_protocol: None,
headers: None,
advanced: None,
backend_transport: None,
priority: None,
},
]);
@@ -525,6 +704,8 @@ mod tests {
tls_version: None,
headers: None,
is_tls: false,
protocol: None,
transport: None,
};
let result = manager.find_route(&ctx).unwrap();
assert_eq!(result.target.unwrap().host.first(), "api-backend");
@@ -538,8 +719,282 @@ mod tests {
tls_version: None,
headers: None,
is_tls: false,
protocol: None,
transport: None,
};
let result = manager.find_route(&ctx).unwrap();
assert_eq!(result.target.unwrap().host.first(), "default-backend");
}
/// Build a test route and stamp an optional `protocol` matcher onto it.
fn make_route_with_protocol(port: u16, domain: Option<&str>, protocol: Option<&str>) -> RouteConfig {
    let mut cfg = make_route(port, domain, 0);
    cfg.route_match.protocol = protocol.map(str::to_string);
    cfg
}
#[test]
fn test_protocol_http_matches_http() {
    // A route restricted to "http" matches a context detected as HTTP.
    let router = RouteManager::new(vec![make_route_with_protocol(80, None, Some("http"))]);
    let mc = MatchContext {
        port: 80,
        protocol: Some("http"),
        domain: None,
        path: None,
        client_ip: None,
        tls_version: None,
        headers: None,
        is_tls: false,
        transport: None,
    };
    assert!(router.find_route(&mc).is_some());
}
#[test]
fn test_protocol_http_rejects_tcp() {
    // A route restricted to "http" must reject a context detected as raw TCP.
    let router = RouteManager::new(vec![make_route_with_protocol(80, None, Some("http"))]);
    let mc = MatchContext {
        port: 80,
        protocol: Some("tcp"),
        domain: None,
        path: None,
        client_ip: None,
        tls_version: None,
        headers: None,
        is_tls: false,
        transport: None,
    };
    assert!(router.find_route(&mc).is_none());
}
#[test]
fn test_protocol_none_matches_any() {
    // A route without a protocol restriction accepts any detected protocol.
    let router = RouteManager::new(vec![make_route_with_protocol(80, None, None)]);
    let matches_protocol = |proto: &'static str| {
        let mc = MatchContext {
            port: 80,
            protocol: Some(proto),
            domain: None,
            path: None,
            client_ip: None,
            tls_version: None,
            headers: None,
            is_tls: false,
            transport: None,
        };
        router.find_route(&mc).is_some()
    };
    assert!(matches_protocol("http"));
    assert!(matches_protocol("tcp"));
}
#[test]
fn test_protocol_http_matches_when_unknown() {
    // Before TLS termination the protocol is unknown (None); a
    // protocol-restricted route must still match, with validation
    // deferred until after detection.
    let router = RouteManager::new(vec![make_route_with_protocol(443, None, Some("http"))]);
    let mc = MatchContext {
        port: 443,
        is_tls: true,
        protocol: None,
        domain: None,
        path: None,
        client_ip: None,
        tls_version: None,
        headers: None,
        transport: None,
    };
    assert!(router.find_route(&mc).is_some());
}
// ===== Transport filtering tests =====
/// Build a test route with an explicit transport matcher.
fn make_route_with_transport(port: u16, transport: Option<TransportProtocol>) -> RouteConfig {
    let mut cfg = make_route(port, None, 0);
    cfg.route_match.transport = transport;
    cfg
}
#[test]
fn test_transport_udp_route_matches_udp_context() {
    // A UDP-only route matches a context arriving over UDP.
    let router = RouteManager::new(vec![make_route_with_transport(53, Some(TransportProtocol::Udp))]);
    let mc = MatchContext {
        port: 53,
        protocol: Some("udp"),
        transport: Some(TransportProtocol::Udp),
        domain: None,
        path: None,
        client_ip: None,
        tls_version: None,
        headers: None,
        is_tls: false,
    };
    assert!(router.find_route(&mc).is_some());
}
#[test]
fn test_transport_udp_route_rejects_tcp_context() {
    // A UDP-only route must reject a TCP context
    // (transport: None denotes TCP for backward compatibility).
    let router = RouteManager::new(vec![make_route_with_transport(53, Some(TransportProtocol::Udp))]);
    let mc = MatchContext {
        port: 53,
        protocol: None,
        transport: None,
        domain: None,
        path: None,
        client_ip: None,
        tls_version: None,
        headers: None,
        is_tls: false,
    };
    assert!(router.find_route(&mc).is_none());
}
#[test]
fn test_transport_tcp_route_rejects_udp_context() {
    // A TCP-only route must reject a context arriving over UDP.
    let router = RouteManager::new(vec![make_route_with_transport(80, Some(TransportProtocol::Tcp))]);
    let mc = MatchContext {
        port: 80,
        protocol: Some("udp"),
        transport: Some(TransportProtocol::Udp),
        domain: None,
        path: None,
        client_ip: None,
        tls_version: None,
        headers: None,
        is_tls: false,
    };
    assert!(router.find_route(&mc).is_none());
}
#[test]
fn test_transport_all_matches_both() {
    // TransportProtocol::All accepts TCP contexts (transport: None)
    // and UDP contexts alike.
    let router = RouteManager::new(vec![make_route_with_transport(443, Some(TransportProtocol::All))]);
    let probe = |proto: Option<&'static str>, tp: Option<TransportProtocol>| {
        let mc = MatchContext {
            port: 443,
            protocol: proto,
            transport: tp,
            domain: None,
            path: None,
            client_ip: None,
            tls_version: None,
            headers: None,
            is_tls: false,
        };
        router.find_route(&mc).is_some()
    };
    assert!(probe(None, None));
    assert!(probe(Some("udp"), Some(TransportProtocol::Udp)));
}
#[test]
fn test_transport_none_default_matches_tcp_only() {
    // A route without a transport field defaults to TCP-only
    // (backward compatibility): TCP matches, UDP does not.
    let router = RouteManager::new(vec![make_route_with_transport(80, None)]);
    let probe = |proto: Option<&'static str>, tp: Option<TransportProtocol>| {
        let mc = MatchContext {
            port: 80,
            protocol: proto,
            transport: tp,
            domain: None,
            path: None,
            client_ip: None,
            tls_version: None,
            headers: None,
            is_tls: false,
        };
        router.find_route(&mc).is_some()
    };
    assert!(probe(None, None));
    assert!(!probe(Some("udp"), Some(TransportProtocol::Udp)));
}
#[test]
fn test_transport_mixed_routes_same_port() {
    // TCP and UDP routes may share one port; each context must select
    // only the route for its own transport.
    let mut tcp_route = make_route_with_transport(443, Some(TransportProtocol::Tcp));
    tcp_route.name = Some("tcp-route".to_string());
    let mut udp_route = make_route_with_transport(443, Some(TransportProtocol::Udp));
    udp_route.name = Some("udp-route".to_string());
    let router = RouteManager::new(vec![tcp_route, udp_route]);
    let routed_name = |proto: Option<&'static str>, tp: Option<TransportProtocol>| {
        let mc = MatchContext {
            port: 443,
            protocol: proto,
            transport: tp,
            domain: None,
            path: None,
            client_ip: None,
            tls_version: None,
            headers: None,
            is_tls: false,
        };
        router.find_route(&mc).unwrap().route.name.clone()
    };
    assert_eq!(routed_name(None, None).as_deref(), Some("tcp-route"));
    assert_eq!(
        routed_name(Some("udp"), Some(TransportProtocol::Udp)).as_deref(),
        Some("udp-route")
    );
}
}

View File

@@ -32,6 +32,7 @@ arc-swap = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
rustls = { workspace = true }
rustls-pemfile = { workspace = true }
tokio-rustls = { workspace = true }
tokio-util = { workspace = true }
dashmap = { workspace = true }
@@ -39,6 +40,7 @@ hyper = { workspace = true }
hyper-util = { workspace = true }
http-body-util = { workspace = true }
bytes = { workspace = true }
mimalloc = { workspace = true }
[dev-dependencies]
rcgen = { workspace = true }

View File

@@ -27,7 +27,7 @@
pub mod challenge_server;
pub mod management;
use std::collections::HashMap;
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use std::time::Instant;
@@ -47,10 +47,11 @@ pub use rustproxy_security;
use rustproxy_config::{RouteConfig, RustProxyOptions, TlsMode, CertificateSpec, ForwardingEngine};
use rustproxy_routing::RouteManager;
use rustproxy_passthrough::{TcpListenerManager, TlsCertConfig, ConnectionConfig};
use rustproxy_passthrough::{TcpListenerManager, UdpListenerManager, TlsCertConfig, ConnectionConfig};
use rustproxy_metrics::{MetricsCollector, Metrics, Statistics};
use rustproxy_tls::{CertManager, CertStore, CertBundle, CertMetadata, CertSource};
use rustproxy_nftables::{NftManager, rule_builder};
use tokio_util::sync::CancellationToken;
/// Certificate status.
#[derive(Debug, Clone)]
@@ -67,6 +68,7 @@ pub struct RustProxy {
options: RustProxyOptions,
route_table: ArcSwap<RouteManager>,
listener_manager: Option<TcpListenerManager>,
udp_listener_manager: Option<UdpListenerManager>,
metrics: Arc<MetricsCollector>,
cert_manager: Option<Arc<tokio::sync::Mutex<CertManager>>>,
challenge_server: Option<challenge_server::ChallengeServer>,
@@ -77,6 +79,10 @@ pub struct RustProxy {
started_at: Option<Instant>,
/// Shared path to a Unix domain socket for relaying socket-handler connections back to TypeScript.
socket_handler_relay: Arc<std::sync::RwLock<Option<String>>>,
/// Dynamically loaded certificates (via loadCertificate IPC), independent of CertManager.
loaded_certs: HashMap<String, TlsCertConfig>,
/// Cancellation token for cooperative shutdown of background tasks.
cancel_token: CancellationToken,
}
impl RustProxy {
@@ -109,6 +115,7 @@ impl RustProxy {
options,
route_table: ArcSwap::from(Arc::new(route_manager)),
listener_manager: None,
udp_listener_manager: None,
metrics: Arc::new(MetricsCollector::with_retention(retention)),
cert_manager,
challenge_server: None,
@@ -118,6 +125,8 @@ impl RustProxy {
started: false,
started_at: None,
socket_handler_relay: Arc::new(std::sync::RwLock::new(None)),
loaded_certs: HashMap::new(),
cancel_token: CancellationToken::new(),
})
}
@@ -146,6 +155,7 @@ impl RustProxy {
send_proxy_protocol: None,
headers: None,
advanced: None,
backend_transport: None,
priority: None,
}
]);
@@ -214,6 +224,13 @@ impl RustProxy {
extended_keep_alive_lifetime_ms: options.extended_keep_alive_lifetime,
accept_proxy_protocol: options.accept_proxy_protocol.unwrap_or(false),
send_proxy_protocol: options.send_proxy_protocol.unwrap_or(false),
proxy_ips: options.proxy_ips.as_deref().unwrap_or(&[])
.iter()
.filter_map(|s| s.parse::<std::net::IpAddr>().ok())
.collect(),
keep_alive: options.keep_alive.unwrap_or(true),
keep_alive_initial_delay_ms: options.keep_alive_initial_delay.unwrap_or(60_000),
max_connections: options.max_connections.unwrap_or(100_000),
}
}
@@ -268,32 +285,92 @@ impl RustProxy {
}
}
// Merge dynamically loaded certs (from loadCertificate IPC)
for (d, c) in &self.loaded_certs {
if !tls_configs.contains_key(d) {
tls_configs.insert(d.clone(), c.clone());
}
}
// Build QUIC TLS config before set_tls_configs consumes the map
let quic_tls_config = Self::build_quic_tls_config(&tls_configs);
if !tls_configs.is_empty() {
debug!("Loaded TLS certificates for {} domains", tls_configs.len());
listener.set_tls_configs(tls_configs);
}
// Bind all ports
for port in &ports {
// Determine which ports need TCP vs UDP based on route transport config
let mut tcp_ports = std::collections::HashSet::new();
let mut udp_ports = std::collections::HashSet::new();
for route in &self.options.routes {
if !route.is_enabled() { continue; }
let transport = route.route_match.transport.as_ref();
let route_ports = route.route_match.ports.to_ports();
for port in route_ports {
match transport {
Some(rustproxy_config::TransportProtocol::Udp) => {
udp_ports.insert(port);
}
Some(rustproxy_config::TransportProtocol::All) => {
tcp_ports.insert(port);
udp_ports.insert(port);
}
Some(rustproxy_config::TransportProtocol::Tcp) | None => {
tcp_ports.insert(port);
}
}
}
}
// Bind TCP ports
for port in &tcp_ports {
listener.add_port(*port).await?;
}
self.listener_manager = Some(listener);
// Bind UDP ports (if any)
if !udp_ports.is_empty() {
let conn_tracker = self.listener_manager.as_ref().unwrap().conn_tracker().clone();
let mut udp_mgr = UdpListenerManager::new(
Arc::clone(&*self.route_table.load()),
Arc::clone(&self.metrics),
conn_tracker,
self.cancel_token.clone(),
);
for port in &udp_ports {
udp_mgr.add_port_with_tls(*port, quic_tls_config.clone()).await?;
}
info!("UDP listeners started on {} ports: {:?}",
udp_ports.len(), udp_mgr.listening_ports());
self.udp_listener_manager = Some(udp_mgr);
}
self.started = true;
self.started_at = Some(Instant::now());
// Start the throughput sampling task
// Start the throughput sampling task with cooperative cancellation
let metrics = Arc::clone(&self.metrics);
let conn_tracker = self.listener_manager.as_ref().unwrap().conn_tracker().clone();
let interval_ms = self.options.metrics.as_ref()
.and_then(|m| m.sample_interval_ms)
.unwrap_or(1000);
let sampling_cancel = self.cancel_token.clone();
self.sampling_handle = Some(tokio::spawn(async move {
let mut interval = tokio::time::interval(
std::time::Duration::from_millis(interval_ms)
);
loop {
interval.tick().await;
metrics.sample_all();
tokio::select! {
_ = sampling_cancel.cancelled() => break,
_ = interval.tick() => {
metrics.sample_all();
// Periodically clean up stale rate-limit timestamp entries
conn_tracker.cleanup_stale_timestamps();
}
}
}
}));
@@ -440,51 +517,59 @@ impl RustProxy {
.unwrap_or(80);
let interval = std::time::Duration::from_secs(check_interval_hours as u64 * 3600);
let renewal_cancel = self.cancel_token.clone();
let handle = tokio::spawn(async move {
loop {
tokio::time::sleep(interval).await;
debug!("Certificate renewal check triggered (interval: {}h)", check_interval_hours);
tokio::select! {
_ = renewal_cancel.cancelled() => {
debug!("Renewal timer shutting down");
break;
}
_ = tokio::time::sleep(interval) => {
debug!("Certificate renewal check triggered (interval: {}h)", check_interval_hours);
// Check which domains need renewal
let domains = {
let cm = cm_arc.lock().await;
cm.check_renewals()
};
// Check which domains need renewal
let domains = {
let cm = cm_arc.lock().await;
cm.check_renewals()
};
if domains.is_empty() {
debug!("No certificates need renewal");
continue;
}
info!("Renewing {} certificate(s)", domains.len());
// Start challenge server for renewals
let mut cs = challenge_server::ChallengeServer::new();
if let Err(e) = cs.start(acme_port).await {
error!("Failed to start challenge server for renewal: {}", e);
continue;
}
for domain in &domains {
let cs_ref = &cs;
let mut cm = cm_arc.lock().await;
let result = cm.renew_domain(domain, |token, key_auth| {
cs_ref.set_challenge(token, key_auth);
async {}
}).await;
match result {
Ok(_bundle) => {
info!("Successfully renewed certificate for {}", domain);
if domains.is_empty() {
debug!("No certificates need renewal");
continue;
}
Err(e) => {
error!("Failed to renew certificate for {}: {}", domain, e);
info!("Renewing {} certificate(s)", domains.len());
// Start challenge server for renewals
let mut cs = challenge_server::ChallengeServer::new();
if let Err(e) = cs.start(acme_port).await {
error!("Failed to start challenge server for renewal: {}", e);
continue;
}
for domain in &domains {
let cs_ref = &cs;
let mut cm = cm_arc.lock().await;
let result = cm.renew_domain(domain, |token, key_auth| {
cs_ref.set_challenge(token, key_auth);
async {}
}).await;
match result {
Ok(_bundle) => {
info!("Successfully renewed certificate for {}", domain);
}
Err(e) => {
error!("Failed to renew certificate for {}: {}", domain, e);
}
}
}
cs.stop().await;
}
}
cs.stop().await;
}
});
@@ -499,14 +584,17 @@ impl RustProxy {
info!("Stopping RustProxy...");
// Stop sampling task
// Signal all background tasks to stop cooperatively
self.cancel_token.cancel();
// Await sampling task (cooperative shutdown)
if let Some(handle) = self.sampling_handle.take() {
handle.abort();
let _ = handle.await;
}
// Stop renewal timer
// Await renewal timer (cooperative shutdown)
if let Some(handle) = self.renewal_handle.take() {
handle.abort();
let _ = handle.await;
}
// Stop challenge server if running
@@ -527,7 +615,16 @@ impl RustProxy {
listener.graceful_stop().await;
}
self.listener_manager = None;
// Stop UDP listeners
if let Some(ref mut udp_mgr) = self.udp_listener_manager {
udp_mgr.stop().await;
}
self.udp_listener_manager = None;
self.started = false;
// Reset cancel token so proxy can be restarted
self.cancel_token = CancellationToken::new();
info!("RustProxy stopped");
Ok(())
@@ -555,15 +652,48 @@ impl RustProxy {
vec![]
};
// Prune per-route metrics for route IDs that no longer exist
let active_route_ids: HashSet<String> = routes.iter()
.filter_map(|r| r.id.clone())
.collect();
self.metrics.retain_routes(&active_route_ids);
// Prune per-backend metrics for backends no longer in any route target.
// For PortSpec::Preserve routes, expand across all listening ports since
// the actual runtime port depends on the incoming connection.
let listening_ports = self.get_listening_ports();
let active_backends: HashSet<String> = routes.iter()
.filter_map(|r| r.action.targets.as_ref())
.flat_map(|targets| targets.iter())
.flat_map(|target| {
let hosts: Vec<String> = target.host.to_vec().into_iter().map(|s| s.to_string()).collect();
match &target.port {
rustproxy_config::PortSpec::Fixed(p) => {
hosts.into_iter().map(|h| format!("{}:{}", h, p)).collect::<Vec<_>>()
}
_ => {
// Preserve/special: expand across all listening ports
let lp = &listening_ports;
hosts.into_iter()
.flat_map(|h| lp.iter().map(move |p| format!("{}:{}", h, *p)))
.collect::<Vec<_>>()
}
}
})
.collect();
self.metrics.retain_backends(&active_backends);
// Atomically swap the route table
let new_manager = Arc::new(new_manager);
self.route_table.store(Arc::clone(&new_manager));
// Update listener manager
// Update listener manager.
// IMPORTANT: TLS configs must be swapped BEFORE the route manager so that
// new routes only become visible after their certs are loaded. The reverse
// order (routes first) creates a window where connections match new routes
// but get the old TLS acceptor, causing cert mismatches.
if let Some(ref mut listener) = self.listener_manager {
listener.update_route_manager(Arc::clone(&new_manager));
// Update TLS configs
// 1. Update TLS configs first (so new certs are available before new routes)
let mut tls_configs = Self::extract_tls_configs(&routes);
if let Some(ref cm_arc) = self.cert_manager {
let cm = cm_arc.lock().await;
@@ -576,8 +706,21 @@ impl RustProxy {
}
}
}
// Merge dynamically loaded certs (from loadCertificate IPC)
for (d, c) in &self.loaded_certs {
if !tls_configs.contains_key(d) {
tls_configs.insert(d.clone(), c.clone());
}
}
listener.set_tls_configs(tls_configs);
// 2. Now swap the route manager (new routes become visible with certs already loaded)
listener.update_route_manager(Arc::clone(&new_manager));
// Cancel connections on routes that were removed or disabled
listener.invalidate_removed_routes(&active_route_ids);
// Prune HTTP proxy caches (rate limiters, regex cache, round-robin counters)
listener.prune_http_proxy_caches(&active_route_ids);
// Add new ports
for port in &new_ports {
if !old_ports.contains(port) {
@@ -593,6 +736,67 @@ impl RustProxy {
}
}
// Reconcile UDP ports
{
let mut new_udp_ports = HashSet::new();
for route in &routes {
if !route.is_enabled() { continue; }
let transport = route.route_match.transport.as_ref();
match transport {
Some(rustproxy_config::TransportProtocol::Udp) |
Some(rustproxy_config::TransportProtocol::All) => {
for port in route.route_match.ports.to_ports() {
new_udp_ports.insert(port);
}
}
_ => {}
}
}
let old_udp_ports: HashSet<u16> = self.udp_listener_manager
.as_ref()
.map(|u| u.listening_ports().into_iter().collect())
.unwrap_or_default();
if !new_udp_ports.is_empty() {
// Ensure UDP manager exists
if self.udp_listener_manager.is_none() {
if let Some(ref listener) = self.listener_manager {
let conn_tracker = listener.conn_tracker().clone();
self.udp_listener_manager = Some(UdpListenerManager::new(
Arc::clone(&new_manager),
Arc::clone(&self.metrics),
conn_tracker,
self.cancel_token.clone(),
));
}
}
if let Some(ref mut udp_mgr) = self.udp_listener_manager {
udp_mgr.update_routes(Arc::clone(&new_manager));
// Add new UDP ports
for port in &new_udp_ports {
if !old_udp_ports.contains(port) {
udp_mgr.add_port(*port).await?;
}
}
// Remove old UDP ports
for port in &old_udp_ports {
if !new_udp_ports.contains(port) {
udp_mgr.remove_port(*port);
}
}
}
} else if self.udp_listener_manager.is_some() {
// All UDP routes removed — shut down UDP manager
if let Some(ref mut udp_mgr) = self.udp_listener_manager {
udp_mgr.stop().await;
}
self.udp_listener_manager = None;
}
}
// Update NFTables rules: remove old, apply new
self.update_nftables_rules(&routes).await;
@@ -752,6 +956,65 @@ impl RustProxy {
self.socket_handler_relay.read().unwrap().clone()
}
/// Build a rustls ServerConfig suitable for QUIC (TLS 1.3 only, h3 ALPN).
/// Uses the first available cert from tls_configs, or returns None if no certs available.
fn build_quic_tls_config(
    tls_configs: &HashMap<String, TlsCertConfig>,
) -> Option<Arc<rustls::ServerConfig>> {
    // Prefer the wildcard cert; otherwise fall back to any loaded cert.
    let chosen = tls_configs.get("*").or_else(|| tls_configs.values().next())?;
    // Decode the PEM chain; unparseable entries are silently skipped.
    let chain: Vec<rustls::pki_types::CertificateDer<'static>> =
        rustls_pemfile::certs(&mut std::io::BufReader::new(chosen.cert_pem.as_bytes()))
            .filter_map(Result::ok)
            .collect();
    if chain.is_empty() {
        return None;
    }
    // Decode the private key; any parse failure yields None.
    let key = rustls_pemfile::private_key(&mut std::io::BufReader::new(chosen.key_pem.as_bytes()))
        .ok()
        .flatten()?;
    let built = rustls::ServerConfig::builder()
        .with_no_client_auth()
        .with_single_cert(chain, key);
    let mut server_cfg = match built {
        Ok(c) => c,
        Err(e) => {
            warn!("Failed to build QUIC TLS config: {}", e);
            return None;
        }
    };
    // QUIC requires h3 ALPN
    server_cfg.alpn_protocols = vec![b"h3".to_vec()];
    Some(Arc::new(server_cfg))
}
/// Set the Unix domain socket path for relaying UDP datagrams to TypeScript datagramHandler callbacks.
pub async fn set_datagram_handler_relay_path(&mut self, path: Option<String>) {
    info!("Datagram handler relay path set to: {:?}", path);
    // Forward only when a UDP manager exists AND a concrete path was given;
    // a None path is logged above but otherwise ignored (matches prior behavior).
    if let (Some(mgr), Some(p)) = (self.udp_listener_manager.as_mut(), path.as_ref()) {
        mgr.set_datagram_handler_relay(p.clone()).await;
    }
}
/// Load a certificate for a domain and hot-swap the TLS configuration.
pub async fn load_certificate(
&mut self,
@@ -786,6 +1049,12 @@ impl RustProxy {
cm.load_static(domain.to_string(), bundle);
}
// Persist in loaded_certs so future rebuild calls include this cert
self.loaded_certs.insert(domain.to_string(), TlsCertConfig {
cert_pem: cert_pem.clone(),
key_pem: key_pem.clone(),
});
// Hot-swap TLS config on the listener
if let Some(ref mut listener) = self.listener_manager {
let mut tls_configs = Self::extract_tls_configs(&self.options.routes);
@@ -809,6 +1078,13 @@ impl RustProxy {
}
}
// Merge dynamically loaded certs from previous loadCertificate calls
for (d, c) in &self.loaded_certs {
if !tls_configs.contains_key(d) {
tls_configs.insert(d.clone(), c.clone());
}
}
listener.set_tls_configs(tls_configs);
}
@@ -941,3 +1217,21 @@ impl RustProxy {
configs
}
}
/// Safety net: abort background tasks if RustProxy is dropped without calling stop().
/// Normal shutdown should still use stop() for graceful behavior.
impl Drop for RustProxy {
fn drop(&mut self) {
self.cancel_token.cancel();
if let Some(handle) = self.sampling_handle.take() {
handle.abort();
}
if let Some(handle) = self.renewal_handle.take() {
handle.abort();
}
// Cancel the listener manager's token and abort accept loops
if let Some(ref mut listener) = self.listener_manager {
listener.stop_all();
}
}
}

View File

@@ -1,3 +1,6 @@
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
use clap::Parser;
use tracing_subscriber::EnvFilter;
use anyhow::Result;

View File

@@ -149,6 +149,7 @@ async fn handle_request(
"getListeningPorts" => handle_get_listening_ports(&id, proxy),
"getNftablesStatus" => handle_get_nftables_status(&id, proxy).await,
"setSocketHandlerRelay" => handle_set_socket_handler_relay(&id, &request.params, proxy).await,
"setDatagramHandlerRelay" => handle_set_datagram_handler_relay(&id, &request.params, proxy).await,
"addListeningPort" => handle_add_listening_port(&id, &request.params, proxy).await,
"removeListeningPort" => handle_remove_listening_port(&id, &request.params, proxy).await,
"loadCertificate" => handle_load_certificate(&id, &request.params, proxy).await,
@@ -391,6 +392,26 @@ async fn handle_set_socket_handler_relay(
ManagementResponse::ok(id.to_string(), serde_json::json!({}))
}
/// Management RPC handler: point the proxy's UDP datagram-handler relay at a
/// Unix socket path, or clear it when `socketPath` is absent or not a string.
///
/// Returns an error response when the proxy is not running, otherwise an empty
/// OK response after the relay path has been applied.
async fn handle_set_datagram_handler_relay(
    id: &str,
    params: &serde_json::Value,
    proxy: &mut Option<RustProxy>,
) -> ManagementResponse {
    // Reject the call outright when there is no running proxy instance.
    let instance = if let Some(instance) = proxy.as_mut() {
        instance
    } else {
        return ManagementResponse::err(id.to_string(), "Proxy is not running".to_string());
    };
    // `socketPath` is optional; anything non-string is treated as "unset".
    let relay_path: Option<String> = params
        .get("socketPath")
        .and_then(serde_json::Value::as_str)
        .map(str::to_string);
    info!("setDatagramHandlerRelay: socket_path={:?}", relay_path);
    instance.set_datagram_handler_relay_path(relay_path).await;
    ManagementResponse::ok(id.to_string(), serde_json::json!({}))
}
async fn handle_add_listening_port(
id: &str,
params: &serde_json::Value,

View File

@@ -185,6 +185,79 @@ pub async fn wait_for_port(port: u16, timeout_ms: u64) -> bool {
false
}
/// Start a TLS HTTP echo backend: accepts TLS, then responds with HTTP JSON
/// containing request details. Combines TLS acceptance with HTTP echo behavior.
///
/// Each accepted connection is served on its own task: one read (up to 16 KiB,
/// enough for these small test requests), parse the request line and Host
/// header, write a single JSON response, then shut the stream down.
pub async fn start_tls_http_backend(
    port: u16,
    backend_name: &str,
    cert_pem: &str,
    key_pem: &str,
) -> JoinHandle<()> {
    use std::sync::Arc;
    // Use h1-only acceptor: test backends speak raw HTTP/1.1 text,
    // so they must NOT advertise h2 via ALPN (which would cause
    // auto-detect to attempt h2 binary framing and fail).
    let tls = Arc::new(
        rustproxy_passthrough::build_tls_acceptor_h1_only(cert_pem, key_pem)
            .expect("Failed to build TLS acceptor"),
    );
    let label = backend_name.to_string();
    let server = TcpListener::bind(format!("127.0.0.1:{}", port))
        .await
        .unwrap_or_else(|_| panic!("Failed to bind TLS HTTP backend on port {}", port));
    tokio::spawn(async move {
        loop {
            // Stop the accept loop on listener failure; per-connection
            // errors below only terminate that connection's task.
            let (tcp, _) = match server.accept().await {
                Ok(pair) => pair,
                Err(_) => break,
            };
            let tls = tls.clone();
            let label = label.clone();
            tokio::spawn(async move {
                let mut conn = match tls.accept(tcp).await {
                    Ok(s) => s,
                    Err(_) => return,
                };
                let mut request_bytes = vec![0u8; 16384];
                let len = match conn.read(&mut request_bytes).await {
                    Ok(0) | Err(_) => return,
                    Ok(len) => len,
                };
                let request_text = String::from_utf8_lossy(&request_bytes[..len]);
                // Request line: METHOD PATH HTTP/x.x
                let mut request_line = request_text
                    .lines()
                    .next()
                    .unwrap_or("")
                    .split_whitespace();
                let method = request_line.next().unwrap_or("UNKNOWN");
                let path = request_line.next().unwrap_or("/");
                // Host header value (header name matched case-insensitively).
                let host = request_text
                    .lines()
                    .find(|line| line.to_lowercase().starts_with("host:"))
                    .map(|line| line[5..].trim())
                    .unwrap_or("unknown");
                let body = format!(
                    r#"{{"method":"{}","path":"{}","host":"{}","backend":"{}"}}"#,
                    method, path, host, label
                );
                let reply = format!(
                    "HTTP/1.1 200 OK\r\nContent-Type: application/json\r\nContent-Length: {}\r\nConnection: close\r\n\r\n{}",
                    body.len(),
                    body,
                );
                let _ = conn.write_all(reply.as_bytes()).await;
                let _ = conn.shutdown().await;
            });
        }
    })
}
/// Helper to create a minimal route config for testing.
pub fn make_test_route(
port: u16,
@@ -196,11 +269,13 @@ pub fn make_test_route(
id: None,
route_match: rustproxy_config::RouteMatch {
ports: rustproxy_config::PortRange::Single(port),
transport: None,
domains: domain.map(|d| rustproxy_config::DomainSpec::Single(d.to_string())),
path: None,
client_ip: None,
tls_version: None,
headers: None,
protocol: None,
},
action: rustproxy_config::RouteAction {
action_type: rustproxy_config::RouteActionType::Forward,
@@ -214,6 +289,7 @@ pub fn make_test_route(
send_proxy_protocol: None,
headers: None,
advanced: None,
backend_transport: None,
priority: None,
}]),
tls: None,
@@ -224,6 +300,7 @@ pub fn make_test_route(
forwarding_engine: None,
nftables: None,
send_proxy_protocol: None,
udp: None,
},
headers: None,
security: None,
@@ -381,6 +458,86 @@ pub fn make_tls_terminate_route(
route
}
/// Start a TLS WebSocket echo backend: accepts TLS, performs WS handshake, then echoes data.
/// Combines TLS acceptance (like `start_tls_http_backend`) with WebSocket echo (like `start_ws_echo_backend`).
///
/// NOTE(review): the handshake below sends the client's Sec-WebSocket-Key back
/// verbatim as Sec-WebSocket-Accept instead of the RFC 6455 base64(SHA-1(key+GUID))
/// value. The test clients in this suite do not validate the accept value, so this
/// is fine here — but confirm before reusing against a real WebSocket client.
pub async fn start_tls_ws_echo_backend(
port: u16,
cert_pem: &str,
key_pem: &str,
) -> JoinHandle<()> {
use std::sync::Arc;
let acceptor = rustproxy_passthrough::build_tls_acceptor(cert_pem, key_pem)
.expect("Failed to build TLS acceptor");
let acceptor = Arc::new(acceptor);
let listener = TcpListener::bind(format!("127.0.0.1:{}", port))
.await
.unwrap_or_else(|_| panic!("Failed to bind TLS WS echo backend on port {}", port));
tokio::spawn(async move {
loop {
// Accept-loop ends on listener error; each connection runs in its own task.
let (stream, _) = match listener.accept().await {
Ok(conn) => conn,
Err(_) => break,
};
let acc = acceptor.clone();
tokio::spawn(async move {
let mut tls_stream = match acc.accept(stream).await {
Ok(s) => s,
Err(_) => return,
};
// Read the HTTP upgrade request
// (single read of up to 4 KiB — sufficient for these test requests)
let mut buf = vec![0u8; 4096];
let n = match tls_stream.read(&mut buf).await {
Ok(0) | Err(_) => return,
Ok(n) => n,
};
let req_str = String::from_utf8_lossy(&buf[..n]);
// Extract Sec-WebSocket-Key for handshake
let ws_key = req_str
.lines()
.find(|l| l.to_lowercase().starts_with("sec-websocket-key:"))
.map(|l| l.split(':').nth(1).unwrap_or("").trim().to_string())
.unwrap_or_default();
// Send 101 Switching Protocols
let accept_response = format!(
"HTTP/1.1 101 Switching Protocols\r\n\
Upgrade: websocket\r\n\
Connection: Upgrade\r\n\
Sec-WebSocket-Accept: {}\r\n\
\r\n",
ws_key
);
if tls_stream
.write_all(accept_response.as_bytes())
.await
.is_err()
{
return;
}
// Echo all data back (raw TCP after upgrade)
// No WebSocket framing is performed: bytes in are bytes out.
let mut echo_buf = vec![0u8; 65536];
loop {
let n = match tls_stream.read(&mut echo_buf).await {
Ok(0) | Err(_) => break,
Ok(n) => n,
};
if tls_stream.write_all(&echo_buf[..n]).await.is_err() {
break;
}
}
});
}
})
}
/// Helper to create a TLS passthrough route for testing.
pub fn make_tls_passthrough_route(
port: u16,

View File

@@ -407,6 +407,305 @@ async fn test_websocket_through_proxy() {
proxy.stop().await.unwrap();
}
/// Test that terminate-and-reencrypt mode routes HTTP traffic through the
/// full HTTP proxy with per-request Host-based routing.
///
/// This verifies the new behavior: after TLS termination, HTTP data is detected
/// and routed through HttpProxyService (like nginx) instead of being blindly tunneled.
///
/// Setup: two TLS HTTP echo backends ("alpha"/"beta") behind two
/// terminate-and-reencrypt routes on the same proxy port, distinguished by
/// domain; one HTTPS request per domain asserts the response came from the
/// matching backend with the original method, path, and Host header intact.
#[tokio::test]
async fn test_terminate_and_reencrypt_http_routing() {
let backend1_port = next_port();
let backend2_port = next_port();
let proxy_port = next_port();
// Frontend certs: what the proxy presents to clients for each domain.
let (cert1, key1) = generate_self_signed_cert("alpha.example.com");
let (cert2, key2) = generate_self_signed_cert("beta.example.com");
// Generate separate backend certs (backends are independent TLS servers)
let (backend_cert1, backend_key1) = generate_self_signed_cert("localhost");
let (backend_cert2, backend_key2) = generate_self_signed_cert("localhost");
// Start TLS HTTP echo backends (proxy re-encrypts to these)
let _b1 = start_tls_http_backend(backend1_port, "alpha", &backend_cert1, &backend_key1).await;
let _b2 = start_tls_http_backend(backend2_port, "beta", &backend_cert2, &backend_key2).await;
// Create terminate-and-reencrypt routes
let mut route1 = make_tls_terminate_route(
proxy_port, "alpha.example.com", "127.0.0.1", backend1_port, &cert1, &key1,
);
route1.action.tls.as_mut().unwrap().mode = rustproxy_config::TlsMode::TerminateAndReencrypt;
let mut route2 = make_tls_terminate_route(
proxy_port, "beta.example.com", "127.0.0.1", backend2_port, &cert2, &key2,
);
route2.action.tls.as_mut().unwrap().mode = rustproxy_config::TlsMode::TerminateAndReencrypt;
let options = RustProxyOptions {
routes: vec![route1, route2],
..Default::default()
};
let mut proxy = RustProxy::new(options).unwrap();
proxy.start().await.unwrap();
assert!(wait_for_port(proxy_port, 2000).await);
// Test alpha domain - HTTP request through TLS terminate-and-reencrypt
let alpha_result = with_timeout(async {
// Result deliberately ignored: a crypto provider may already be installed
// by another test in this process.
let _ = rustls::crypto::ring::default_provider().install_default();
// InsecureVerifier skips certificate validation (self-signed test certs).
let tls_config = rustls::ClientConfig::builder()
.dangerous()
.with_custom_certificate_verifier(std::sync::Arc::new(InsecureVerifier))
.with_no_client_auth();
let connector = tokio_rustls::TlsConnector::from(std::sync::Arc::new(tls_config));
let stream = tokio::net::TcpStream::connect(format!("127.0.0.1:{}", proxy_port))
.await
.unwrap();
// SNI selects the alpha route at the proxy.
let server_name = rustls::pki_types::ServerName::try_from("alpha.example.com".to_string()).unwrap();
let mut tls_stream = connector.connect(server_name, stream).await.unwrap();
let request = "GET /api/data HTTP/1.1\r\nHost: alpha.example.com\r\nConnection: close\r\n\r\n";
tls_stream.write_all(request.as_bytes()).await.unwrap();
let mut response = Vec::new();
tls_stream.read_to_end(&mut response).await.unwrap();
String::from_utf8_lossy(&response).to_string()
}, 10)
.await
.unwrap();
let alpha_body = extract_body(&alpha_result);
assert!(
alpha_body.contains(r#""backend":"alpha"#),
"Expected alpha backend, got: {}",
alpha_body
);
assert!(
alpha_body.contains(r#""method":"GET"#),
"Expected GET method, got: {}",
alpha_body
);
assert!(
alpha_body.contains(r#""path":"/api/data"#),
"Expected /api/data path, got: {}",
alpha_body
);
// Verify original Host header is preserved (not replaced with backend IP:port)
assert!(
alpha_body.contains(r#""host":"alpha.example.com"#),
"Expected original Host header alpha.example.com, got: {}",
alpha_body
);
// Test beta domain - different host goes to different backend
let beta_result = with_timeout(async {
let _ = rustls::crypto::ring::default_provider().install_default();
let tls_config = rustls::ClientConfig::builder()
.dangerous()
.with_custom_certificate_verifier(std::sync::Arc::new(InsecureVerifier))
.with_no_client_auth();
let connector = tokio_rustls::TlsConnector::from(std::sync::Arc::new(tls_config));
let stream = tokio::net::TcpStream::connect(format!("127.0.0.1:{}", proxy_port))
.await
.unwrap();
let server_name = rustls::pki_types::ServerName::try_from("beta.example.com".to_string()).unwrap();
let mut tls_stream = connector.connect(server_name, stream).await.unwrap();
let request = "GET /other HTTP/1.1\r\nHost: beta.example.com\r\nConnection: close\r\n\r\n";
tls_stream.write_all(request.as_bytes()).await.unwrap();
let mut response = Vec::new();
tls_stream.read_to_end(&mut response).await.unwrap();
String::from_utf8_lossy(&response).to_string()
}, 10)
.await
.unwrap();
let beta_body = extract_body(&beta_result);
assert!(
beta_body.contains(r#""backend":"beta"#),
"Expected beta backend, got: {}",
beta_body
);
assert!(
beta_body.contains(r#""path":"/other"#),
"Expected /other path, got: {}",
beta_body
);
// Verify original Host header is preserved for beta too
assert!(
beta_body.contains(r#""host":"beta.example.com"#),
"Expected original Host header beta.example.com, got: {}",
beta_body
);
proxy.stop().await.unwrap();
}
/// Test that WebSocket upgrade works through terminate-and-reencrypt mode.
///
/// Verifies the full chain: client→TLS→proxy terminates→re-encrypts→TLS→backend WebSocket.
/// The proxy's `handle_websocket_upgrade` checks `upstream.use_tls` and calls
/// `connect_tls_backend()` when true. This test covers that path.
#[tokio::test]
async fn test_terminate_and_reencrypt_websocket() {
let backend_port = next_port();
let proxy_port = next_port();
let domain = "ws.example.com";
// Frontend cert (client→proxy TLS)
let (frontend_cert, frontend_key) = generate_self_signed_cert(domain);
// Backend cert (proxy→backend TLS)
let (backend_cert, backend_key) = generate_self_signed_cert("localhost");
// Start TLS WebSocket echo backend
let _backend = start_tls_ws_echo_backend(backend_port, &backend_cert, &backend_key).await;
// Create terminate-and-reencrypt route
let mut route = make_tls_terminate_route(
proxy_port,
domain,
"127.0.0.1",
backend_port,
&frontend_cert,
&frontend_key,
);
route.action.tls.as_mut().unwrap().mode = rustproxy_config::TlsMode::TerminateAndReencrypt;
let options = RustProxyOptions {
routes: vec![route],
..Default::default()
};
let mut proxy = RustProxy::new(options).unwrap();
proxy.start().await.unwrap();
assert!(wait_for_port(proxy_port, 2000).await);
let result = with_timeout(
async {
// Result deliberately ignored: a crypto provider may already be installed.
let _ = rustls::crypto::ring::default_provider().install_default();
// InsecureVerifier skips certificate validation (self-signed test certs).
let tls_config = rustls::ClientConfig::builder()
.dangerous()
.with_custom_certificate_verifier(std::sync::Arc::new(InsecureVerifier))
.with_no_client_auth();
let connector =
tokio_rustls::TlsConnector::from(std::sync::Arc::new(tls_config));
let stream = tokio::net::TcpStream::connect(format!("127.0.0.1:{}", proxy_port))
.await
.unwrap();
let server_name =
rustls::pki_types::ServerName::try_from(domain.to_string()).unwrap();
let mut tls_stream = connector.connect(server_name, stream).await.unwrap();
// Send WebSocket upgrade request through TLS
let request = format!(
"GET /ws HTTP/1.1\r\n\
Host: {}\r\n\
Upgrade: websocket\r\n\
Connection: Upgrade\r\n\
Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\r\n\
Sec-WebSocket-Version: 13\r\n\
\r\n",
domain
);
tls_stream.write_all(request.as_bytes()).await.unwrap();
// Read the 101 response (byte-by-byte until \r\n\r\n)
// Reading one byte at a time avoids consuming post-upgrade payload
// bytes along with the handshake headers.
let mut response_buf = Vec::with_capacity(4096);
let mut temp = [0u8; 1];
loop {
let n = tls_stream.read(&mut temp).await.unwrap();
if n == 0 {
break;
}
response_buf.push(temp[0]);
if response_buf.len() >= 4 {
let len = response_buf.len();
if response_buf[len - 4..] == *b"\r\n\r\n" {
break;
}
}
}
let response_str = String::from_utf8_lossy(&response_buf).to_string();
assert!(
response_str.contains("101"),
"Expected 101 Switching Protocols, got: {}",
response_str
);
assert!(
response_str.to_lowercase().contains("upgrade: websocket"),
"Expected Upgrade header, got: {}",
response_str
);
// After upgrade, send data and verify echo
// (the backend echoes raw bytes — no WebSocket framing involved)
let test_data = b"Hello TLS WebSocket!";
tls_stream.write_all(test_data).await.unwrap();
// Read echoed data
let mut echo_buf = vec![0u8; 256];
let n = tls_stream.read(&mut echo_buf).await.unwrap();
let echoed = &echo_buf[..n];
assert_eq!(echoed, test_data, "Expected echo of sent data");
"ok".to_string()
},
10,
)
.await
.unwrap();
assert_eq!(result, "ok");
proxy.stop().await.unwrap();
}
/// Test that the protocol field on route config is accepted and processed.
///
/// A route restricted to protocol "http" must still match and forward plain
/// HTTP traffic to its backend.
#[tokio::test]
async fn test_protocol_field_in_route_config() {
    let upstream_port = next_port();
    let listen_port = next_port();
    let _backend = start_http_echo_backend(upstream_port, "main").await;
    // Build a route with protocol: "http" - should only match HTTP traffic.
    let mut http_only_route = make_test_route(listen_port, None, "127.0.0.1", upstream_port);
    http_only_route.route_match.protocol = Some("http".to_string());
    let mut proxy = RustProxy::new(RustProxyOptions {
        routes: vec![http_only_route],
        ..Default::default()
    })
    .unwrap();
    proxy.start().await.unwrap();
    assert!(wait_for_port(listen_port, 2000).await);
    // An HTTP request should match the route and get proxied to the backend.
    let body = with_timeout(
        async {
            let raw = send_http_request(listen_port, "example.com", "GET", "/test").await;
            extract_body(&raw).to_string()
        },
        10,
    )
    .await
    .unwrap();
    assert!(
        body.contains(r#""backend":"main"#),
        "Expected main backend, got: {}",
        body
    );
    assert!(
        body.contains(r#""path":"/test"#),
        "Expected /test path, got: {}",
        body
    );
    proxy.stop().await.unwrap();
}
/// InsecureVerifier for test TLS client connections.
/// Marker type passed to `with_custom_certificate_verifier` in the tests above;
/// its `ServerCertVerifier` impl (not visible in this chunk) presumably accepts
/// any certificate so clients can reach self-signed test backends — confirm
/// against the impl before relying on it elsewhere.
#[derive(Debug)]
struct InsecureVerifier;

View File

@@ -0,0 +1,70 @@
import * as net from 'net';
/**
 * Finds `count` free ports by binding to port 0 and reading the OS-assigned port.
 * All servers are opened simultaneously to guarantee uniqueness.
 * Returns an array of guaranteed-free ports.
 *
 * Note: the ports are only guaranteed free at the moment of return; another
 * process could grab one before the caller binds it (inherent TOCTOU of this
 * technique).
 *
 * @param count - number of distinct free ports to allocate
 * @returns the allocated port numbers (order may differ from listen order)
 * @throws if any temporary server fails to listen
 */
export async function findFreePorts(count: number): Promise<number[]> {
  const servers: net.Server[] = [];
  const ports: number[] = [];
  try {
    // Open all servers simultaneously on port 0 so the OS hands out
    // `count` distinct ports.
    await Promise.all(
      Array.from(
        { length: count },
        () =>
          new Promise<void>((resolve, reject) => {
            const server = net.createServer();
            server.on('error', reject);
            server.listen(0, '127.0.0.1', () => {
              const addr = server.address() as net.AddressInfo;
              ports.push(addr.port);
              servers.push(server);
              resolve();
            });
          })
      )
    );
  } finally {
    // Close every server that opened successfully — including on the error
    // path, where the previous implementation leaked them (Promise.all
    // rejected before the close step ran).
    await Promise.all(
      servers.map(
        (server) => new Promise<void>((resolve) => server.close(() => resolve()))
      )
    );
  }
  return ports;
}
/**
 * Verifies that all given ports are free (not listening).
 * Useful as a cleanup assertion at the end of tests.
 * Throws if any port is still in use.
 *
 * A port counts as occupied only when a TCP connect succeeds; a connection
 * error or a 1-second timeout is treated as free.
 */
export async function assertPortsFree(ports: number[]): Promise<void> {
  // Probe one port with a short-lived TCP client.
  const probe = (port: number): Promise<{ port: number; free: boolean }> =>
    new Promise((resolve) => {
      const socket = net.connect({ port, host: '127.0.0.1' });
      socket.on('connect', () => {
        socket.destroy();
        resolve({ port, free: false });
      });
      socket.on('error', () => {
        resolve({ port, free: true });
      });
      socket.setTimeout(1000, () => {
        socket.destroy();
        resolve({ port, free: true });
      });
    });
  const checks = await Promise.all(ports.map(probe));
  const stillListening = checks.filter((check) => !check.free);
  if (stillListening.length > 0) {
    throw new Error(
      `Ports still in use after cleanup: ${stillListening.map((r) => r.port).join(', ')}`
    );
  }
}

View File

@@ -1,9 +1,12 @@
import { tap, expect } from '@git.zone/tstest/tapbundle';
import { SmartProxy, SocketHandlers } from '../ts/index.js';
import * as net from 'net';
import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
// Test that HTTP-01 challenges are properly processed when the initial data arrives
tap.test('should correctly handle HTTP-01 challenge requests with initial data chunk', async (tapTest) => {
const [PORT] = await findFreePorts(1);
// Prepare test data
const challengeToken = 'test-acme-http01-challenge-token';
const challengeResponse = 'mock-response-for-challenge';
@@ -37,7 +40,7 @@ tap.test('should correctly handle HTTP-01 challenge requests with initial data c
routes: [{
name: 'acme-challenge-route',
match: {
ports: 8080,
ports: PORT,
path: '/.well-known/acme-challenge/*'
},
action: {
@@ -60,7 +63,7 @@ tap.test('should correctly handle HTTP-01 challenge requests with initial data c
// Connect to the proxy and send the HTTP-01 challenge request
await new Promise<void>((resolve, reject) => {
testClient.connect(8080, 'localhost', () => {
testClient.connect(PORT, 'localhost', () => {
// Send HTTP request for the challenge token
testClient.write(
`GET ${challengePath} HTTP/1.1\r\n` +
@@ -86,10 +89,13 @@ tap.test('should correctly handle HTTP-01 challenge requests with initial data c
// Cleanup
testClient.destroy();
await proxy.stop();
await assertPortsFree([PORT]);
});
// Test that non-existent challenge tokens return 404
tap.test('should return 404 for non-existent challenge tokens', async (tapTest) => {
const [PORT] = await findFreePorts(1);
// Create a socket handler that behaves like a real ACME handler
const acmeHandler = SocketHandlers.httpServer((req, res) => {
if (req.url?.startsWith('/.well-known/acme-challenge/')) {
@@ -113,7 +119,7 @@ tap.test('should return 404 for non-existent challenge tokens', async (tapTest)
routes: [{
name: 'acme-challenge-route',
match: {
ports: 8081,
ports: PORT,
path: '/.well-known/acme-challenge/*'
},
action: {
@@ -135,7 +141,7 @@ tap.test('should return 404 for non-existent challenge tokens', async (tapTest)
// Connect and send a request for a non-existent token
await new Promise<void>((resolve, reject) => {
testClient.connect(8081, 'localhost', () => {
testClient.connect(PORT, 'localhost', () => {
testClient.write(
'GET /.well-known/acme-challenge/invalid-token HTTP/1.1\r\n' +
'Host: test.example.com\r\n' +
@@ -157,6 +163,7 @@ tap.test('should return 404 for non-existent challenge tokens', async (tapTest)
// Cleanup
testClient.destroy();
await proxy.stop();
await assertPortsFree([PORT]);
});
export default tap.start();

View File

@@ -5,6 +5,7 @@ import * as fs from 'fs';
import * as path from 'path';
import { SmartProxy } from '../ts/proxies/smart-proxy/smart-proxy.js';
import type { IRouteConfig } from '../ts/proxies/smart-proxy/models/route-types.js';
import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
// Setup test infrastructure
const testCertPath = path.join(process.cwd(), 'test', 'helpers', 'test-cert.pem');
@@ -13,8 +14,14 @@ const testKeyPath = path.join(process.cwd(), 'test', 'helpers', 'test-key.pem');
let testServer: net.Server;
let tlsTestServer: tls.Server;
let smartProxy: SmartProxy;
let PROXY_TCP_PORT: number;
let PROXY_TLS_PORT: number;
let TCP_SERVER_PORT: number;
let TLS_SERVER_PORT: number;
tap.test('setup test servers', async () => {
[PROXY_TCP_PORT, PROXY_TLS_PORT, TCP_SERVER_PORT, TLS_SERVER_PORT] = await findFreePorts(4);
// Create TCP test server
testServer = net.createServer((socket) => {
socket.write('Connected to TCP test server\n');
@@ -24,8 +31,8 @@ tap.test('setup test servers', async () => {
});
await new Promise<void>((resolve) => {
testServer.listen(7001, '127.0.0.1', () => {
console.log('TCP test server listening on port 7001');
testServer.listen(TCP_SERVER_PORT, '127.0.0.1', () => {
console.log(`TCP test server listening on port ${TCP_SERVER_PORT}`);
resolve();
});
});
@@ -45,8 +52,8 @@ tap.test('setup test servers', async () => {
);
await new Promise<void>((resolve) => {
tlsTestServer.listen(7002, '127.0.0.1', () => {
console.log('TLS test server listening on port 7002');
tlsTestServer.listen(TLS_SERVER_PORT, '127.0.0.1', () => {
console.log(`TLS test server listening on port ${TLS_SERVER_PORT}`);
resolve();
});
});
@@ -60,13 +67,13 @@ tap.test('should forward TCP connections correctly', async () => {
{
name: 'tcp-forward',
match: {
ports: 8080,
ports: PROXY_TCP_PORT,
},
action: {
type: 'forward',
targets: [{
host: '127.0.0.1',
port: 7001,
port: TCP_SERVER_PORT,
}],
},
},
@@ -77,7 +84,7 @@ tap.test('should forward TCP connections correctly', async () => {
// Test TCP forwarding
const client = await new Promise<net.Socket>((resolve, reject) => {
const socket = net.connect(8080, '127.0.0.1', () => {
const socket = net.connect(PROXY_TCP_PORT, '127.0.0.1', () => {
console.log('Connected to proxy');
resolve(socket);
});
@@ -106,7 +113,7 @@ tap.test('should handle TLS passthrough correctly', async () => {
{
name: 'tls-passthrough',
match: {
ports: 8443,
ports: PROXY_TLS_PORT,
domains: 'test.example.com',
},
action: {
@@ -116,7 +123,7 @@ tap.test('should handle TLS passthrough correctly', async () => {
},
targets: [{
host: '127.0.0.1',
port: 7002,
port: TLS_SERVER_PORT,
}],
},
},
@@ -129,7 +136,7 @@ tap.test('should handle TLS passthrough correctly', async () => {
const client = await new Promise<tls.TLSSocket>((resolve, reject) => {
const socket = tls.connect(
{
port: 8443,
port: PROXY_TLS_PORT,
host: '127.0.0.1',
servername: 'test.example.com',
rejectUnauthorized: false,
@@ -164,7 +171,7 @@ tap.test('should handle SNI-based forwarding', async () => {
{
name: 'domain-a',
match: {
ports: 8443,
ports: PROXY_TLS_PORT,
domains: 'a.example.com',
},
action: {
@@ -174,14 +181,14 @@ tap.test('should handle SNI-based forwarding', async () => {
},
targets: [{
host: '127.0.0.1',
port: 7002,
port: TLS_SERVER_PORT,
}],
},
},
{
name: 'domain-b',
match: {
ports: 8443,
ports: PROXY_TLS_PORT,
domains: 'b.example.com',
},
action: {
@@ -191,7 +198,7 @@ tap.test('should handle SNI-based forwarding', async () => {
},
targets: [{
host: '127.0.0.1',
port: 7002,
port: TLS_SERVER_PORT,
}],
},
},
@@ -204,7 +211,7 @@ tap.test('should handle SNI-based forwarding', async () => {
const clientA = await new Promise<tls.TLSSocket>((resolve, reject) => {
const socket = tls.connect(
{
port: 8443,
port: PROXY_TLS_PORT,
host: '127.0.0.1',
servername: 'a.example.com',
rejectUnauthorized: false,
@@ -231,7 +238,7 @@ tap.test('should handle SNI-based forwarding', async () => {
const clientB = await new Promise<tls.TLSSocket>((resolve, reject) => {
const socket = tls.connect(
{
port: 8443,
port: PROXY_TLS_PORT,
host: '127.0.0.1',
servername: 'b.example.com',
rejectUnauthorized: false,
@@ -261,6 +268,7 @@ tap.test('should handle SNI-based forwarding', async () => {
tap.test('cleanup', async () => {
testServer.close();
tlsTestServer.close();
await assertPortsFree([PROXY_TCP_PORT, PROXY_TLS_PORT, TCP_SERVER_PORT, TLS_SERVER_PORT]);
});
export default tap.start();

View File

@@ -0,0 +1,125 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as dgram from 'dgram';
import { SmartProxy } from '../ts/index.js';
import type { TDatagramHandler, IDatagramInfo } from '../ts/index.js';
import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
let smartProxy: SmartProxy;
let PROXY_PORT: number;
// Helper: send a single UDP datagram and wait for a response.
// Resolves with the response payload as a string, rejects on socket error or
// when no response arrives within `timeoutMs`.
function sendDatagram(port: number, msg: string, timeoutMs = 5000): Promise<string> {
  return new Promise((resolve, reject) => {
    const client = dgram.createSocket('udp4');
    // Settle exactly once: without this guard, a 'message' or 'error' event
    // firing after the timeout path (or after each other) would call
    // client.close() twice, which throws ERR_SOCKET_DGRAM_NOT_RUNNING and
    // crashes the test process.
    let settled = false;
    const finish = (action: () => void) => {
      if (settled) return;
      settled = true;
      clearTimeout(timeout);
      client.close();
      action();
    };
    const timeout = setTimeout(() => {
      finish(() => reject(new Error(`UDP response timeout after ${timeoutMs}ms`)));
    }, timeoutMs);
    client.on('message', (data) => {
      finish(() => resolve(data.toString()));
    });
    client.on('error', (err) => {
      finish(() => reject(err));
    });
    client.send(Buffer.from(msg), port, '127.0.0.1');
  });
}
// Registers a UDP socket-handler route and starts the proxy. PROXY_PORT is
// allocated dynamically so parallel test files cannot collide on a fixed port.
tap.test('setup: start SmartProxy with datagramHandler', async () => {
[PROXY_PORT] = await findFreePorts(1);
// Synchronous handler: echoes each datagram back with a "Handled: " prefix.
const handler: TDatagramHandler = (datagram, info, reply) => {
reply(Buffer.from(`Handled: ${datagram.toString()}`));
};
smartProxy = new SmartProxy({
routes: [
{
name: 'dgram-handler-test',
match: {
ports: PROXY_PORT,
transport: 'udp' as const,
},
action: {
type: 'socket-handler',
datagramHandler: handler,
},
},
],
defaults: {
security: {
// Allow loopback in IPv4, IPv6, and IPv4-mapped-IPv6 forms.
ipAllowList: ['127.0.0.1', '::1', '::ffff:127.0.0.1'],
},
},
});
await smartProxy.start();
});
// Round-trip through the sync handler registered in setup.
tap.test('datagram handler: receives and replies to datagram', async () => {
const response = await sendDatagram(PROXY_PORT, 'Hello Handler');
expect(response).toEqual('Handled: Hello Handler');
});
// Restarts the proxy with an async handler on a freshly allocated port.
// Note: the async-handler proxy is left running afterwards, so later tests in
// this file exercise it (hence their "Async:" expectations).
tap.test('datagram handler: async handler works', async () => {
// Stop and restart with async handler
await smartProxy.stop();
[PROXY_PORT] = await findFreePorts(1);
const asyncHandler: TDatagramHandler = async (datagram, info, reply) => {
// Simulate async work
await new Promise<void>((resolve) => setTimeout(resolve, 10));
reply(Buffer.from(`Async: ${datagram.toString()}`));
};
smartProxy = new SmartProxy({
routes: [
{
name: 'dgram-async-handler',
match: {
ports: PROXY_PORT,
transport: 'udp' as const,
},
action: {
type: 'socket-handler',
datagramHandler: asyncHandler,
},
},
],
defaults: {
security: {
ipAllowList: ['127.0.0.1', '::1', '::ffff:127.0.0.1'],
},
},
});
await smartProxy.start();
const response = await sendDatagram(PROXY_PORT, 'Test Async');
expect(response).toEqual('Async: Test Async');
});
// Fires five concurrent datagrams at the async-handler proxy started by the
// previous test; UDP gives no ordering guarantee, so only membership is checked.
tap.test('datagram handler: multiple rapid datagrams', async () => {
const promises: Promise<string>[] = [];
for (let i = 0; i < 5; i++) {
promises.push(sendDatagram(PROXY_PORT, `msg-${i}`));
}
const responses = await Promise.all(promises);
for (let i = 0; i < 5; i++) {
expect(responses).toContain(`Async: msg-${i}`);
}
});
// Stop the proxy and verify the port was released.
// NOTE(review): assertPortsFree probes via TCP connect, so for a UDP-only
// listener this check is effectively always satisfied — confirm whether a
// UDP-aware check is needed here.
tap.test('cleanup: stop SmartProxy', async () => {
await smartProxy.stop();
await assertPortsFree([PROXY_PORT]);
});
export default tap.start();

View File

@@ -1,9 +1,12 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as net from 'net';
import { SmartProxy } from '../ts/proxies/smart-proxy/smart-proxy.js';
import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
// Test to verify port forwarding works correctly
tap.test('forward connections should not be immediately closed', async (t) => {
const [PROXY_PORT, SERVER_PORT] = await findFreePorts(2);
// Create a backend server that accepts connections
const testServer = net.createServer((socket) => {
console.log('Client connected to test server');
@@ -21,8 +24,8 @@ tap.test('forward connections should not be immediately closed', async (t) => {
// Listen on a non-privileged port
await new Promise<void>((resolve) => {
testServer.listen(9090, '127.0.0.1', () => {
console.log('Test server listening on port 9090');
testServer.listen(SERVER_PORT, '127.0.0.1', () => {
console.log(`Test server listening on port ${SERVER_PORT}`);
resolve();
});
});
@@ -34,13 +37,13 @@ tap.test('forward connections should not be immediately closed', async (t) => {
{
name: 'forward-test',
match: {
ports: 8080,
ports: PROXY_PORT,
},
action: {
type: 'forward',
targets: [{
host: '127.0.0.1',
port: 9090,
port: SERVER_PORT,
}],
},
},
@@ -51,7 +54,7 @@ tap.test('forward connections should not be immediately closed', async (t) => {
// Create a client connection through the proxy
const client = net.createConnection({
port: 8080,
port: PROXY_PORT,
host: '127.0.0.1',
});
@@ -105,6 +108,7 @@ tap.test('forward connections should not be immediately closed', async (t) => {
client.end();
await smartProxy.stop();
testServer.close();
await assertPortsFree([PROXY_PORT, SERVER_PORT]);
});
export default tap.start();

View File

@@ -1,10 +1,13 @@
import { tap, expect } from '@git.zone/tstest/tapbundle';
import { SmartProxy } from '../ts/index.js';
import * as http from 'http';
import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
tap.test('should forward HTTP connections on port 8080', async (tapTest) => {
const [PROXY_PORT, TARGET_PORT] = await findFreePorts(2);
// Create a mock HTTP server to act as our target
const targetPort = 8181;
const targetPort = TARGET_PORT;
let receivedRequest = false;
let receivedPath = '';
@@ -36,7 +39,7 @@ tap.test('should forward HTTP connections on port 8080', async (tapTest) => {
routes: [{
name: 'test-route',
match: {
ports: 8080
ports: PROXY_PORT
// Remove domain restriction for HTTP connections
// Domain matching happens after HTTP headers are received
},
@@ -46,16 +49,16 @@ tap.test('should forward HTTP connections on port 8080', async (tapTest) => {
}
}]
});
await proxy.start();
// Give the proxy a moment to fully initialize
await new Promise(resolve => setTimeout(resolve, 500));
// Make an HTTP request to port 8080
const options = {
hostname: 'localhost',
port: 8080,
port: PROXY_PORT,
path: '/.well-known/acme-challenge/test-token',
method: 'GET',
headers: {
@@ -97,14 +100,17 @@ tap.test('should forward HTTP connections on port 8080', async (tapTest) => {
await new Promise<void>((resolve) => {
targetServer.close(() => resolve());
});
// Wait a bit to ensure port is fully released
await new Promise(resolve => setTimeout(resolve, 500));
await assertPortsFree([PROXY_PORT, TARGET_PORT]);
});
tap.test('should handle basic HTTP request forwarding', async (tapTest) => {
const [PROXY_PORT, TARGET_PORT] = await findFreePorts(2);
// Create a simple target server
const targetPort = 8182;
const targetPort = TARGET_PORT;
let receivedRequest = false;
const targetServer = http.createServer((req, res) => {
@@ -126,7 +132,7 @@ tap.test('should handle basic HTTP request forwarding', async (tapTest) => {
routes: [{
name: 'simple-forward',
match: {
ports: 8081
ports: PROXY_PORT
// Remove domain restriction for HTTP connections
},
action: {
@@ -142,7 +148,7 @@ tap.test('should handle basic HTTP request forwarding', async (tapTest) => {
// Make request
const options = {
hostname: 'localhost',
port: 8081,
port: PROXY_PORT,
path: '/test',
method: 'GET',
headers: {
@@ -184,9 +190,10 @@ tap.test('should handle basic HTTP request forwarding', async (tapTest) => {
await new Promise<void>((resolve) => {
targetServer.close(() => resolve());
});
// Wait a bit to ensure port is fully released
await new Promise(resolve => setTimeout(resolve, 500));
await assertPortsFree([PROXY_PORT, TARGET_PORT]);
});
export default tap.start();

View File

@@ -2,15 +2,17 @@ import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as net from 'net';
import * as tls from 'tls';
import { SmartProxy } from '../ts/index.js';
import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
let testProxy: SmartProxy;
let targetServer: net.Server;
const ECHO_PORT = 47200;
const PROXY_PORT = 47201;
let ECHO_PORT: number;
let PROXY_PORT: number;
// Create a simple echo server as target
tap.test('setup test environment', async () => {
[ECHO_PORT, PROXY_PORT] = await findFreePorts(2);
// Create target server that echoes data back
targetServer = net.createServer((socket) => {
console.log('Target server: client connected');
@@ -148,6 +150,8 @@ tap.test('cleanup', async () => {
resolve();
});
});
await assertPortsFree([ECHO_PORT, PROXY_PORT]);
});
export default tap.start();

View File

@@ -2,14 +2,16 @@ import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as plugins from '../ts/plugins.js';
import { SmartProxy } from '../ts/index.js';
import * as net from 'net';
import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
let smartProxyInstance: SmartProxy;
let echoServer: net.Server;
const echoServerPort = 47300;
const proxyPort = 47301;
let echoServerPort: number;
let proxyPort: number;
// Create an echo server for testing
tap.test('should create echo server for testing', async () => {
[echoServerPort, proxyPort] = await findFreePorts(2);
echoServer = net.createServer((socket) => {
socket.on('data', (data) => {
socket.write(data); // Echo back the data
@@ -267,6 +269,8 @@ tap.test('should clean up resources', async () => {
resolve();
});
});
await assertPortsFree([echoServerPort, proxyPort]);
});
export default tap.start();

View File

@@ -0,0 +1,477 @@
import { tap, expect } from '@git.zone/tstest/tapbundle';
import { SmartProxy } from '../ts/index.js';
import * as http from 'http';
import * as https from 'https';
import * as http2 from 'http2';
import * as net from 'net';
import * as tls from 'tls';
import * as fs from 'fs';
import * as path from 'path';
import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
// ---------------------------------------------------------------------------
// Port assignments (dynamically allocated to avoid conflicts)
// ---------------------------------------------------------------------------
let HTTP_ECHO_PORT: number;
let PROXY_HTTP_PORT: number;
let PROXY_HTTPS_PORT: number;
let TCP_ECHO_PORT: number;
let PROXY_TCP_PORT: number;
// ---------------------------------------------------------------------------
// Shared state
// ---------------------------------------------------------------------------
let httpEchoServer: http.Server;
let tcpEchoServer: net.Server;
let proxy: SmartProxy;
const certPem = fs.readFileSync(path.join(import.meta.dirname, '..', 'assets', 'certs', 'cert.pem'), 'utf8');
const keyPem = fs.readFileSync(path.join(import.meta.dirname, '..', 'assets', 'certs', 'key.pem'), 'utf8');
// ---------------------------------------------------------------------------
// Helper: make an HTTP request and return { status, body }
// ---------------------------------------------------------------------------
/**
 * Issue one HTTP request and collect the entire response.
 *
 * Resolves with the numeric status code and the concatenated body text;
 * rejects on a socket error or after 5 s of socket inactivity.
 */
function httpRequest(
  options: http.RequestOptions,
  body?: string,
): Promise<{ status: number; body: string }> {
  return new Promise((resolve, reject) => {
    const request = http.request(options, (response) => {
      const chunks: string[] = [];
      response.on('data', (chunk: string) => {
        chunks.push(chunk);
      });
      response.on('end', () => {
        resolve({ status: response.statusCode!, body: chunks.join('') });
      });
    });
    // Guard against a hung proxy/backend: destroy (and thereby reject) after 5 s idle.
    request.setTimeout(5000, () => {
      request.destroy(new Error('timeout'));
    });
    request.on('error', reject);
    if (body) {
      request.end(body);
    } else {
      request.end();
    }
  });
}
/**
 * HTTPS twin of httpRequest: one request, full response collected.
 *
 * Resolves with { status, body }; rejects on a socket/TLS error or after
 * 5 s of socket inactivity.
 */
function httpsRequest(
  options: https.RequestOptions,
  body?: string,
): Promise<{ status: number; body: string }> {
  return new Promise((resolve, reject) => {
    const request = https.request(options, (response) => {
      const chunks: string[] = [];
      response.on('data', (chunk: string) => {
        chunks.push(chunk);
      });
      response.on('end', () => {
        resolve({ status: response.statusCode!, body: chunks.join('') });
      });
    });
    // Same 5 s inactivity guard as the plain-HTTP helper.
    request.setTimeout(5000, () => {
      request.destroy(new Error('timeout'));
    });
    request.on('error', reject);
    if (body) {
      request.end(body);
    } else {
      request.end();
    }
  });
}
/**
 * Poll the proxy's metrics until `condition` returns true or `maxWaitMs`
 * elapses.
 *
 * NOTE(review): on timeout this returns silently rather than throwing, so
 * callers must re-assert the condition afterwards (all call sites do).
 * NOTE(review): the `metrics` parameter is never read here — each condition
 * closure captures its own metrics view, and polling goes through the
 * module-level `proxy`; the parameter could be dropped. Kept for signature
 * stability.
 */
async function waitForMetrics(
  metrics: ReturnType<SmartProxy['getMetrics']>,
  condition: () => boolean,
  maxWaitMs = 3000,
): Promise<void> {
  const start = Date.now();
  while (Date.now() - start < maxWaitMs) {
    // Force a fresh poll of the (private) metrics adapter so the condition
    // sees current counters instead of the last sampled values.
    await (proxy as any).metricsAdapter.poll();
    if (condition()) return;
    // Back off briefly between polls.
    await new Promise((r) => setTimeout(r, 100));
  }
}
// ===========================================================================
// 1. Setup backend servers
// ===========================================================================
tap.test('setup - backend servers', async () => {
  // Allocate all five ports up front so every later test and the final
  // assertPortsFree() check agree on the same set.
  [HTTP_ECHO_PORT, PROXY_HTTP_PORT, PROXY_HTTPS_PORT, TCP_ECHO_PORT, PROXY_TCP_PORT] = await findFreePorts(5);
  // HTTP echo server: POST → echo:<body>, GET → ok
  httpEchoServer = http.createServer((req, res) => {
    if (req.method === 'POST') {
      let body = '';
      req.on('data', (chunk: string) => (body += chunk));
      req.on('end', () => {
        res.writeHead(200, { 'Content-Type': 'text/plain' });
        res.end(`echo:${body}`);
      });
    } else {
      res.writeHead(200, { 'Content-Type': 'text/plain' });
      res.end('ok');
    }
  });
  // Fail the setup test (reject) if the port cannot be bound.
  await new Promise<void>((resolve, reject) => {
    httpEchoServer.on('error', reject);
    httpEchoServer.listen(HTTP_ECHO_PORT, () => {
      console.log(`HTTP echo server on port ${HTTP_ECHO_PORT}`);
      resolve();
    });
  });
  // TCP echo server: writes every received chunk straight back.
  tcpEchoServer = net.createServer((socket) => {
    socket.on('data', (data) => socket.write(data));
  });
  await new Promise<void>((resolve, reject) => {
    tcpEchoServer.on('error', reject);
    tcpEchoServer.listen(TCP_ECHO_PORT, () => {
      console.log(`TCP echo server on port ${TCP_ECHO_PORT}`);
      resolve();
    });
  });
});
// ===========================================================================
// 2. Setup SmartProxy
// ===========================================================================
tap.test('setup - SmartProxy with 3 routes', async () => {
  proxy = new SmartProxy({
    routes: [
      // Plain HTTP forward: PROXY_HTTP_PORT → HTTP_ECHO_PORT
      // (comment previously referenced hard-coded ports 47601 → 47600;
      // ports are now allocated dynamically in the setup test above)
      {
        name: 'http-forward',
        match: { ports: PROXY_HTTP_PORT },
        action: {
          type: 'forward',
          targets: [{ host: 'localhost', port: HTTP_ECHO_PORT }],
        },
      },
      // TLS-terminate HTTPS: PROXY_HTTPS_PORT → HTTP_ECHO_PORT,
      // terminating with the self-signed test certificate loaded above.
      {
        name: 'https-terminate',
        match: { ports: PROXY_HTTPS_PORT, domains: 'localhost' },
        action: {
          type: 'forward',
          targets: [{ host: 'localhost', port: HTTP_ECHO_PORT }],
          tls: {
            mode: 'terminate',
            certificate: {
              key: keyPem,
              cert: certPem,
            },
          },
        },
      },
      // Plain TCP forward: PROXY_TCP_PORT → TCP_ECHO_PORT
      {
        name: 'tcp-forward',
        match: { ports: PROXY_TCP_PORT },
        action: {
          type: 'forward',
          targets: [{ host: 'localhost', port: TCP_ECHO_PORT }],
        },
      },
    ],
    // Fast sampling so the waitForMetrics polls below see fresh counters.
    metrics: {
      enabled: true,
      sampleIntervalMs: 100,
    },
    enableDetailedLogging: false,
  });
  await proxy.start();
  // Give the proxy a moment to fully bind
  await new Promise((r) => setTimeout(r, 500));
});
// ===========================================================================
// 3. HTTP/1.1 connection pooling: sequential requests reuse connections
// ===========================================================================
tap.test('HTTP/1.1 connection pooling: sequential requests reuse connections', async (tools) => {
  tools.timeout(30000);
  const metrics = proxy.getMetrics();
  const REQUEST_COUNT = 20;
  // Use a non-keepalive agent so each request closes the client→proxy socket
  // (Rust's backend connection pool still reuses proxy→backend connections)
  const agent = new http.Agent({ keepAlive: false });
  // Sequential POSTs; each must round-trip through proxy and echo backend.
  for (let i = 0; i < REQUEST_COUNT; i++) {
    const result = await httpRequest(
      {
        hostname: 'localhost',
        port: PROXY_HTTP_PORT,
        path: '/echo',
        method: 'POST',
        headers: { 'Content-Type': 'text/plain' },
        agent,
      },
      `msg-${i}`,
    );
    expect(result.status).toEqual(200);
    expect(result.body).toEqual(`echo:msg-${i}`);
  }
  agent.destroy();
  // Wait for all connections to settle and metrics to update
  await waitForMetrics(metrics, () => metrics.connections.active() === 0, 5000);
  expect(metrics.connections.active()).toEqual(0);
  // Bytes should have been transferred
  await waitForMetrics(metrics, () => metrics.totals.bytesIn() > 0);
  expect(metrics.totals.bytesIn()).toBeGreaterThan(0);
  expect(metrics.totals.bytesOut()).toBeGreaterThan(0);
  console.log(`HTTP pooling test: ${REQUEST_COUNT} requests completed. bytesIn=${metrics.totals.bytesIn()}, bytesOut=${metrics.totals.bytesOut()}`);
});
// ===========================================================================
// 4. HTTPS with TLS termination: multiple requests through TLS
// ===========================================================================
tap.test('HTTPS with TLS termination: multiple requests through TLS', async (tools) => {
  tools.timeout(30000);
  const REQUEST_COUNT = 10;
  // Self-signed test cert → rejectUnauthorized: false on both agent and request.
  const agent = new https.Agent({ keepAlive: false, rejectUnauthorized: false });
  for (let i = 0; i < REQUEST_COUNT; i++) {
    const result = await httpsRequest(
      {
        hostname: 'localhost',
        port: PROXY_HTTPS_PORT,
        path: '/echo',
        method: 'POST',
        headers: { 'Content-Type': 'text/plain' },
        rejectUnauthorized: false,
        // SNI must match the route's `domains: 'localhost'` matcher.
        servername: 'localhost',
        agent,
      },
      `tls-${i}`,
    );
    expect(result.status).toEqual(200);
    expect(result.body).toEqual(`echo:tls-${i}`);
  }
  agent.destroy();
  console.log(`HTTPS termination test: ${REQUEST_COUNT} requests completed successfully`);
});
// ===========================================================================
// 5. TLS ALPN negotiation verification
// ===========================================================================
tap.test('HTTP/2 end-to-end: ALPN h2 with multiplexed requests', async (tools) => {
  tools.timeout(15000);
  // Connect an HTTP/2 session over TLS (self-signed test cert → no verification).
  const session = http2.connect(`https://localhost:${PROXY_HTTPS_PORT}`, {
    rejectUnauthorized: false,
  });
  // Wait for the session to connect. The guard timer is cleared on both
  // outcomes; previously it was never cleared, which kept the event loop
  // alive for the full 5 s and fired a stale reject after success.
  await new Promise<void>((resolve, reject) => {
    const connectTimer = setTimeout(() => reject(new Error('h2 connect timeout')), 5000);
    session.on('connect', () => {
      clearTimeout(connectTimer);
      resolve();
    });
    session.on('error', (err) => {
      clearTimeout(connectTimer);
      reject(err);
    });
  });
  // Verify ALPN negotiated h2
  const alpnProtocol = (session.socket as tls.TLSSocket).alpnProtocol;
  console.log(`TLS ALPN negotiated protocol: ${alpnProtocol}`);
  expect(alpnProtocol).toEqual('h2');
  // Send 5 multiplexed POST requests on the same h2 session
  const REQUEST_COUNT = 5;
  const promises: Promise<{ status: number; body: string }>[] = [];
  for (let i = 0; i < REQUEST_COUNT; i++) {
    promises.push(
      new Promise<{ status: number; body: string }>((resolve, reject) => {
        const reqStream = session.request({
          ':method': 'POST',
          ':path': '/echo',
          'content-type': 'text/plain',
        });
        let data = '';
        let status = 0;
        reqStream.on('response', (headers) => {
          status = headers[':status'] as number;
        });
        reqStream.on('data', (chunk: Buffer) => {
          data += chunk.toString();
        });
        reqStream.on('end', () => resolve({ status, body: data }));
        reqStream.on('error', reject);
        reqStream.end(`h2-msg-${i}`);
      }),
    );
  }
  const results = await Promise.all(promises);
  // Every stream must have echoed its own payload back.
  for (let i = 0; i < REQUEST_COUNT; i++) {
    expect(results[i].status).toEqual(200);
    expect(results[i].body).toEqual(`echo:h2-msg-${i}`);
  }
  await new Promise<void>((resolve) => session.close(() => resolve()));
  console.log(`HTTP/2 end-to-end: ${REQUEST_COUNT} multiplexed requests completed successfully`);
});
// ===========================================================================
// 6. Connection stability: no leaked connections after repeated open/close
// ===========================================================================
tap.test('connection stability: no leaked connections after repeated open/close', async (tools) => {
  tools.timeout(60000);
  const metrics = proxy.getMetrics();
  const BATCH_SIZE = 50;
  // Ensure we start clean
  await waitForMetrics(metrics, () => metrics.connections.active() === 0);
  // Record total connections before
  await (proxy as any).metricsAdapter.poll();
  const totalBefore = metrics.connections.total();
  // --- Batch 1: 50 sequential TCP connections ---
  // Each iteration: connect, write, verify echo, close, and resolve only
  // once the socket's 'close' event fires.
  for (let i = 0; i < BATCH_SIZE; i++) {
    await new Promise<void>((resolve, reject) => {
      const client = new net.Socket();
      client.connect(PROXY_TCP_PORT, 'localhost', () => {
        const msg = `batch1-${i}`;
        client.write(msg);
        client.once('data', (data) => {
          expect(data.toString()).toEqual(msg);
          client.end();
        });
      });
      client.on('close', () => resolve());
      client.on('error', reject);
      client.setTimeout(5000, () => {
        client.destroy(new Error('timeout'));
      });
    });
  }
  // Wait for all connections to drain
  await waitForMetrics(metrics, () => metrics.connections.active() === 0, 5000);
  expect(metrics.connections.active()).toEqual(0);
  console.log(`Batch 1 done: active=${metrics.connections.active()}, total=${metrics.connections.total()}`);
  // --- Batch 2: another 50 --- (same shape as batch 1; proves no cumulative leak)
  for (let i = 0; i < BATCH_SIZE; i++) {
    await new Promise<void>((resolve, reject) => {
      const client = new net.Socket();
      client.connect(PROXY_TCP_PORT, 'localhost', () => {
        const msg = `batch2-${i}`;
        client.write(msg);
        client.once('data', (data) => {
          expect(data.toString()).toEqual(msg);
          client.end();
        });
      });
      client.on('close', () => resolve());
      client.on('error', reject);
      client.setTimeout(5000, () => {
        client.destroy(new Error('timeout'));
      });
    });
  }
  // Wait for all connections to drain again
  await waitForMetrics(metrics, () => metrics.connections.active() === 0, 5000);
  expect(metrics.connections.active()).toEqual(0);
  // Total should reflect ~100 new connections (>= because other traffic may
  // also have been counted).
  await (proxy as any).metricsAdapter.poll();
  const totalAfter = metrics.connections.total();
  const newConnections = totalAfter - totalBefore;
  console.log(`Batch 2 done: active=${metrics.connections.active()}, total=${totalAfter}, new=${newConnections}`);
  expect(newConnections).toBeGreaterThanOrEqual(BATCH_SIZE * 2);
});
// ===========================================================================
// 7. Concurrent connections: burst and drain
// ===========================================================================
tap.test('concurrent connections: burst and drain', async (tools) => {
  tools.timeout(30000);
  const metrics = proxy.getMetrics();
  const CONCURRENT = 20;
  // Ensure we start clean
  await waitForMetrics(metrics, () => metrics.connections.active() === 0, 5000);
  // Open 20 TCP connections simultaneously and keep them all open.
  const clients: net.Socket[] = [];
  const connectPromises: Promise<void>[] = [];
  for (let i = 0; i < CONCURRENT; i++) {
    const client = new net.Socket();
    clients.push(client);
    connectPromises.push(
      new Promise<void>((resolve, reject) => {
        client.connect(PROXY_TCP_PORT, 'localhost', () => resolve());
        client.on('error', reject);
        client.setTimeout(5000, () => {
          client.destroy(new Error('timeout'));
        });
      }),
    );
  }
  await Promise.all(connectPromises);
  // Send data on all connections and wait for echo
  const echoPromises = clients.map((client, i) => {
    return new Promise<void>((resolve, reject) => {
      const msg = `concurrent-${i}`;
      client.once('data', (data) => {
        expect(data.toString()).toEqual(msg);
        resolve();
      });
      client.write(msg);
      client.on('error', reject);
    });
  });
  await Promise.all(echoPromises);
  // Poll metrics — active connections should be at least CONCURRENT while all
  // sockets are still open.
  await waitForMetrics(metrics, () => metrics.connections.active() >= CONCURRENT, 3000);
  const activeWhileOpen = metrics.connections.active();
  console.log(`Burst: active connections while open = ${activeWhileOpen}`);
  expect(activeWhileOpen).toBeGreaterThanOrEqual(CONCURRENT);
  // Close all connections
  for (const client of clients) {
    client.end();
  }
  // Wait for drain
  await waitForMetrics(metrics, () => metrics.connections.active() === 0, 5000);
  expect(metrics.connections.active()).toEqual(0);
  console.log('Drain: all connections closed, active=0');
});
// ===========================================================================
// 8. Cleanup
// ===========================================================================
tap.test('cleanup', async () => {
  // Stop the proxy first so no new connections reach the backends while
  // they shut down.
  await proxy.stop();
  await new Promise<void>((resolve) => {
    httpEchoServer.close(() => {
      console.log('HTTP echo server closed');
      resolve();
    });
  });
  await new Promise<void>((resolve) => {
    tcpEchoServer.close(() => {
      console.log('TCP echo server closed');
      resolve();
    });
  });
  // Verify every allocated port is actually released (catches lingering listeners).
  await assertPortsFree([HTTP_ECHO_PORT, PROXY_HTTP_PORT, PROXY_HTTPS_PORT, TCP_ECHO_PORT, PROXY_TCP_PORT]);
});
export default tap.start();

View File

@@ -1,17 +1,19 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as net from 'net';
import { SmartProxy } from '../ts/proxies/smart-proxy/smart-proxy.js';
import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
let echoServer: net.Server;
let proxy: SmartProxy;
const ECHO_PORT = 47400;
const PROXY_PORT_1 = 47401;
const PROXY_PORT_2 = 47402;
let ECHO_PORT: number;
let PROXY_PORT_1: number;
let PROXY_PORT_2: number;
tap.test('port forwarding should not immediately close connections', async (tools) => {
// Set a timeout for this test
tools.timeout(10000); // 10 seconds
[ECHO_PORT, PROXY_PORT_1, PROXY_PORT_2] = await findFreePorts(3);
// Create an echo server
echoServer = await new Promise<net.Server>((resolve, reject) => {
const server = net.createServer((socket) => {
@@ -96,6 +98,7 @@ tap.test('cleanup', async () => {
});
});
}
await assertPortsFree([ECHO_PORT, PROXY_PORT_1, PROXY_PORT_2]);
});
export default tap.start();

View File

@@ -9,13 +9,14 @@ import {
createPortOffset
} from '../ts/proxies/smart-proxy/utils/route-helpers.js';
import type { IRouteConfig, IRouteContext } from '../ts/proxies/smart-proxy/models/route-types.js';
import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
// Test server and client utilities
let testServers: Array<{ server: net.Server; port: number }> = [];
let smartProxy: SmartProxy;
const TEST_PORT_START = 4000;
const PROXY_PORT_START = 5000;
let TEST_PORTS: number[]; // 3 test server ports
let PROXY_PORTS: number[]; // 6 proxy ports
const TEST_DATA = 'Hello through dynamic port mapper!';
// Cleanup function to close all servers and proxies
@@ -101,53 +102,60 @@ function createTestClient(port: number, data: string): Promise<string> {
// Set up test environment
tap.test('setup port mapping test environment', async () => {
const allPorts = await findFreePorts(9);
TEST_PORTS = allPorts.slice(0, 3);
PROXY_PORTS = allPorts.slice(3, 9);
// Create multiple test servers on different ports
await Promise.all([
createTestServer(TEST_PORT_START), // Server on port 4000
createTestServer(TEST_PORT_START + 1), // Server on port 4001
createTestServer(TEST_PORT_START + 2), // Server on port 4002
createTestServer(TEST_PORTS[0]),
createTestServer(TEST_PORTS[1]),
createTestServer(TEST_PORTS[2]),
]);
// Compute dynamic offset between proxy and test ports
const portOffset = TEST_PORTS[1] - PROXY_PORTS[1];
// Create a SmartProxy with dynamic port mapping routes
smartProxy = new SmartProxy({
routes: [
// Simple function that returns the same port (identity mapping)
createPortMappingRoute({
sourcePortRange: PROXY_PORT_START,
sourcePortRange: PROXY_PORTS[0],
targetHost: 'localhost',
portMapper: (context) => TEST_PORT_START,
portMapper: (context) => TEST_PORTS[0],
name: 'Identity Port Mapping'
}),
// Offset port mapping from 5001 to 4001 (offset -1000)
// Offset port mapping using dynamic offset
createOffsetPortMappingRoute({
ports: PROXY_PORT_START + 1,
ports: PROXY_PORTS[1],
targetHost: 'localhost',
offset: -1000,
name: 'Offset Port Mapping (-1000)'
offset: portOffset,
name: `Offset Port Mapping (${portOffset})`
}),
// Dynamic route with conditional port mapping
createDynamicRoute({
ports: [PROXY_PORT_START + 2, PROXY_PORT_START + 3],
ports: [PROXY_PORTS[2], PROXY_PORTS[3]],
targetHost: (context) => {
// Dynamic host selection based on port
return context.port === PROXY_PORT_START + 2 ? 'localhost' : '127.0.0.1';
return context.port === PROXY_PORTS[2] ? 'localhost' : '127.0.0.1';
},
portMapper: (context) => {
// Port mapping logic based on incoming port
if (context.port === PROXY_PORT_START + 2) {
return TEST_PORT_START;
if (context.port === PROXY_PORTS[2]) {
return TEST_PORTS[0];
} else {
return TEST_PORT_START + 2;
return TEST_PORTS[2];
}
},
name: 'Dynamic Host and Port Mapping'
}),
// Smart load balancer for domain-based routing
createSmartLoadBalancer({
ports: PROXY_PORT_START + 4,
ports: PROXY_PORTS[4],
domainTargets: {
'test1.example.com': 'localhost',
'test2.example.com': '127.0.0.1'
@@ -155,9 +163,9 @@ tap.test('setup port mapping test environment', async () => {
portMapper: (context) => {
// Use different backend ports based on domain
if (context.domain === 'test1.example.com') {
return TEST_PORT_START;
return TEST_PORTS[0];
} else {
return TEST_PORT_START + 1;
return TEST_PORTS[1];
}
},
defaultTarget: 'localhost',
@@ -165,44 +173,45 @@ tap.test('setup port mapping test environment', async () => {
})
]
});
// Start the SmartProxy
await smartProxy.start();
});
// Test 1: Simple identity port mapping (5000 -> 4000)
// Test 1: Simple identity port mapping
tap.test('should map port using identity function', async () => {
const response = await createTestClient(PROXY_PORT_START, TEST_DATA);
expect(response).toEqual(`Server ${TEST_PORT_START} says: ${TEST_DATA}`);
const response = await createTestClient(PROXY_PORTS[0], TEST_DATA);
expect(response).toEqual(`Server ${TEST_PORTS[0]} says: ${TEST_DATA}`);
});
// Test 2: Offset port mapping (5001 -> 4001)
// Test 2: Offset port mapping
tap.test('should map port using offset function', async () => {
const response = await createTestClient(PROXY_PORT_START + 1, TEST_DATA);
expect(response).toEqual(`Server ${TEST_PORT_START + 1} says: ${TEST_DATA}`);
const response = await createTestClient(PROXY_PORTS[1], TEST_DATA);
expect(response).toEqual(`Server ${TEST_PORTS[1]} says: ${TEST_DATA}`);
});
// Test 3: Dynamic port and host mapping (conditional logic)
tap.test('should map port using dynamic logic', async () => {
const response = await createTestClient(PROXY_PORT_START + 2, TEST_DATA);
expect(response).toEqual(`Server ${TEST_PORT_START} says: ${TEST_DATA}`);
const response = await createTestClient(PROXY_PORTS[2], TEST_DATA);
expect(response).toEqual(`Server ${TEST_PORTS[0]} says: ${TEST_DATA}`);
});
// Test 4: Test reuse of createPortOffset helper
tap.test('should use createPortOffset helper for port mapping', async () => {
// Test the createPortOffset helper
const offsetFn = createPortOffset(-1000);
// Test the createPortOffset helper with dynamic offset
const portOffset = TEST_PORTS[1] - PROXY_PORTS[1];
const offsetFn = createPortOffset(portOffset);
const context = {
port: PROXY_PORT_START + 1,
port: PROXY_PORTS[1],
clientIp: '127.0.0.1',
serverIp: '127.0.0.1',
isTls: false,
timestamp: Date.now(),
connectionId: 'test-connection'
} as IRouteContext;
const mappedPort = offsetFn(context);
expect(mappedPort).toEqual(TEST_PORT_START + 1);
expect(mappedPort).toEqual(TEST_PORTS[1]);
});
// Test 5: Test error handling for invalid port mapping functions
@@ -210,7 +219,7 @@ tap.test('should handle errors in port mapping functions', async () => {
// Create a route with a function that throws an error
const errorRoute: IRouteConfig = {
match: {
ports: PROXY_PORT_START + 5
ports: PROXY_PORTS[5]
},
action: {
type: 'forward',
@@ -229,7 +238,7 @@ tap.test('should handle errors in port mapping functions', async () => {
// The connection should fail or timeout
try {
await createTestClient(PROXY_PORT_START + 5, TEST_DATA);
await createTestClient(PROXY_PORTS[5], TEST_DATA);
// Connection should not succeed
expect(false).toBeTrue();
} catch (error) {
@@ -254,6 +263,8 @@ tap.test('cleanup port mapping test environment', async () => {
testServers = [];
smartProxy = null as any;
}
await assertPortsFree([...TEST_PORTS, ...PROXY_PORTS]);
});
export default tap.start();

View File

@@ -1,133 +0,0 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as smartproxy from '../ts/index.js';
import { ProxyProtocolParser } from '../ts/core/utils/proxy-protocol.js';
tap.test('PROXY protocol v1 parser - valid headers', async () => {
// Test TCP4 format
const tcp4Header = Buffer.from('PROXY TCP4 192.168.1.1 10.0.0.1 56324 443\r\n', 'ascii');
const tcp4Result = ProxyProtocolParser.parse(tcp4Header);
expect(tcp4Result.proxyInfo).property('protocol').toEqual('TCP4');
expect(tcp4Result.proxyInfo).property('sourceIP').toEqual('192.168.1.1');
expect(tcp4Result.proxyInfo).property('sourcePort').toEqual(56324);
expect(tcp4Result.proxyInfo).property('destinationIP').toEqual('10.0.0.1');
expect(tcp4Result.proxyInfo).property('destinationPort').toEqual(443);
expect(tcp4Result.remainingData.length).toEqual(0);
// Test TCP6 format
const tcp6Header = Buffer.from('PROXY TCP6 2001:db8::1 2001:db8::2 56324 443\r\n', 'ascii');
const tcp6Result = ProxyProtocolParser.parse(tcp6Header);
expect(tcp6Result.proxyInfo).property('protocol').toEqual('TCP6');
expect(tcp6Result.proxyInfo).property('sourceIP').toEqual('2001:db8::1');
expect(tcp6Result.proxyInfo).property('sourcePort').toEqual(56324);
expect(tcp6Result.proxyInfo).property('destinationIP').toEqual('2001:db8::2');
expect(tcp6Result.proxyInfo).property('destinationPort').toEqual(443);
// Test UNKNOWN protocol
const unknownHeader = Buffer.from('PROXY UNKNOWN\r\n', 'ascii');
const unknownResult = ProxyProtocolParser.parse(unknownHeader);
expect(unknownResult.proxyInfo).property('protocol').toEqual('UNKNOWN');
expect(unknownResult.proxyInfo).property('sourceIP').toEqual('');
expect(unknownResult.proxyInfo).property('sourcePort').toEqual(0);
});
tap.test('PROXY protocol v1 parser - with remaining data', async () => {
const headerWithData = Buffer.concat([
Buffer.from('PROXY TCP4 192.168.1.1 10.0.0.1 56324 443\r\n', 'ascii'),
Buffer.from('GET / HTTP/1.1\r\n', 'ascii')
]);
const result = ProxyProtocolParser.parse(headerWithData);
expect(result.proxyInfo).property('protocol').toEqual('TCP4');
expect(result.proxyInfo).property('sourceIP').toEqual('192.168.1.1');
expect(result.remainingData.toString()).toEqual('GET / HTTP/1.1\r\n');
});
tap.test('PROXY protocol v1 parser - invalid headers', async () => {
// Not a PROXY protocol header
const notProxy = Buffer.from('GET / HTTP/1.1\r\n', 'ascii');
const notProxyResult = ProxyProtocolParser.parse(notProxy);
expect(notProxyResult.proxyInfo).toBeNull();
expect(notProxyResult.remainingData).toEqual(notProxy);
// Invalid protocol
expect(() => {
ProxyProtocolParser.parse(Buffer.from('PROXY INVALID 1.1.1.1 2.2.2.2 80 443\r\n', 'ascii'));
}).toThrow();
// Wrong number of fields
expect(() => {
ProxyProtocolParser.parse(Buffer.from('PROXY TCP4 192.168.1.1 10.0.0.1 56324\r\n', 'ascii'));
}).toThrow();
// Invalid port
expect(() => {
ProxyProtocolParser.parse(Buffer.from('PROXY TCP4 192.168.1.1 10.0.0.1 99999 443\r\n', 'ascii'));
}).toThrow();
// Invalid IP for protocol
expect(() => {
ProxyProtocolParser.parse(Buffer.from('PROXY TCP4 2001:db8::1 10.0.0.1 56324 443\r\n', 'ascii'));
}).toThrow();
});
tap.test('PROXY protocol v1 parser - incomplete headers', async () => {
// Header without terminator
const incomplete = Buffer.from('PROXY TCP4 192.168.1.1 10.0.0.1 56324 443', 'ascii');
const result = ProxyProtocolParser.parse(incomplete);
expect(result.proxyInfo).toBeNull();
expect(result.remainingData).toEqual(incomplete);
// Header exceeding max length - create a buffer that actually starts with PROXY
const longHeader = Buffer.from('PROXY TCP4 ' + '1'.repeat(100), 'ascii');
expect(() => {
ProxyProtocolParser.parse(longHeader);
}).toThrow();
});
tap.test('PROXY protocol v1 generator', async () => {
// Generate TCP4 header
const tcp4Info = {
protocol: 'TCP4' as const,
sourceIP: '192.168.1.1',
sourcePort: 56324,
destinationIP: '10.0.0.1',
destinationPort: 443
};
const tcp4Header = ProxyProtocolParser.generate(tcp4Info);
expect(tcp4Header.toString('ascii')).toEqual('PROXY TCP4 192.168.1.1 10.0.0.1 56324 443\r\n');
// Generate TCP6 header
const tcp6Info = {
protocol: 'TCP6' as const,
sourceIP: '2001:db8::1',
sourcePort: 56324,
destinationIP: '2001:db8::2',
destinationPort: 443
};
const tcp6Header = ProxyProtocolParser.generate(tcp6Info);
expect(tcp6Header.toString('ascii')).toEqual('PROXY TCP6 2001:db8::1 2001:db8::2 56324 443\r\n');
// Generate UNKNOWN header
const unknownInfo = {
protocol: 'UNKNOWN' as const,
sourceIP: '',
sourcePort: 0,
destinationIP: '',
destinationPort: 0
};
const unknownHeader = ProxyProtocolParser.generate(unknownInfo);
expect(unknownHeader.toString('ascii')).toEqual('PROXY UNKNOWN\r\n');
});
// Skipping integration tests for now - focus on unit tests
// Integration tests would require more complex setup and teardown
export default tap.start();

View File

@@ -562,4 +562,168 @@ tap.test('Route Integration - Combining Multiple Route Types', async () => {
}
});
// --------------------------------- Protocol Match Field Tests ---------------------------------
// These tests verify that the optional `protocol` field on a route match is
// accepted by validation and survives the route-helper transformations
// (cloning, merging). Per the test below, protocol does not participate in
// domain/port matching on the TypeScript side.
tap.test('Routes: Should accept protocol field on route match', async () => {
  // Create a route with protocol: 'http'
  const httpOnlyRoute: IRouteConfig = {
    match: {
      ports: 443,
      domains: 'api.example.com',
      protocol: 'http',
    },
    action: {
      type: 'forward',
      targets: [{ host: 'backend', port: 8080 }],
      tls: {
        mode: 'terminate',
        certificate: 'auto',
      },
    },
    name: 'HTTP-only Route',
  };
  // Validate the route - protocol field should not cause errors
  const validation = validateRouteConfig(httpOnlyRoute);
  expect(validation.valid).toBeTrue();
  // Verify the protocol field is preserved
  expect(httpOnlyRoute.match.protocol).toEqual('http');
});
tap.test('Routes: Should accept protocol tcp on route match', async () => {
  // Create a route with protocol: 'tcp'
  const tcpOnlyRoute: IRouteConfig = {
    match: {
      ports: 443,
      domains: 'db.example.com',
      protocol: 'tcp',
    },
    action: {
      type: 'forward',
      targets: [{ host: 'db-server', port: 5432 }],
      tls: {
        mode: 'passthrough',
      },
    },
    name: 'TCP-only Route',
  };
  const validation = validateRouteConfig(tcpOnlyRoute);
  expect(validation.valid).toBeTrue();
  expect(tcpOnlyRoute.match.protocol).toEqual('tcp');
});
tap.test('Routes: Protocol field should work with terminate-and-reencrypt', async () => {
  // Create a terminate-and-reencrypt route that only accepts HTTP
  const reencryptRoute = createHttpsTerminateRoute(
    'secure.example.com',
    { host: 'backend', port: 443 },
    { reencrypt: true, certificate: 'auto', name: 'Reencrypt HTTP Route' }
  );
  // Set protocol restriction to http (mutating the helper's result is fine here)
  reencryptRoute.match.protocol = 'http';
  // Validate the route
  const validation = validateRouteConfig(reencryptRoute);
  expect(validation.valid).toBeTrue();
  // Verify TLS mode
  expect(reencryptRoute.action.tls?.mode).toEqual('terminate-and-reencrypt');
  // Verify protocol field is preserved
  expect(reencryptRoute.match.protocol).toEqual('http');
});
tap.test('Routes: Protocol field should not affect domain/port matching', async () => {
  // Routes with and without protocol field should both match the same domain/port
  const routeWithProtocol: IRouteConfig = {
    match: {
      ports: 443,
      domains: 'example.com',
      protocol: 'http',
    },
    action: {
      type: 'forward',
      targets: [{ host: 'backend', port: 8080 }],
      tls: { mode: 'terminate', certificate: 'auto' },
    },
    name: 'With Protocol',
    priority: 10,
  };
  const routeWithoutProtocol: IRouteConfig = {
    match: {
      ports: 443,
      domains: 'example.com',
    },
    action: {
      type: 'forward',
      targets: [{ host: 'fallback', port: 8081 }],
      tls: { mode: 'terminate', certificate: 'auto' },
    },
    name: 'Without Protocol',
    priority: 5,
  };
  const routes = [routeWithProtocol, routeWithoutProtocol];
  // Both routes should match the domain/port (protocol is a hint for Rust-side matching)
  const matches = findMatchingRoutes(routes, { domain: 'example.com', port: 443 });
  expect(matches.length).toEqual(2);
  // The one with higher priority should be first
  const best = findBestMatchingRoute(routes, { domain: 'example.com', port: 443 });
  expect(best).not.toBeUndefined();
  expect(best!.name).toEqual('With Protocol');
});
tap.test('Routes: Protocol field preserved through route cloning', async () => {
  const original: IRouteConfig = {
    match: {
      ports: 8443,
      domains: 'clone-test.example.com',
      protocol: 'http',
    },
    action: {
      type: 'forward',
      targets: [{ host: 'backend', port: 3000 }],
      tls: { mode: 'terminate-and-reencrypt', certificate: 'auto' },
    },
    name: 'Clone Test',
  };
  const cloned = cloneRoute(original);
  // Verify protocol is preserved in clone
  expect(cloned.match.protocol).toEqual('http');
  expect(cloned.action.tls?.mode).toEqual('terminate-and-reencrypt');
  // Modify clone should not affect original (i.e. cloneRoute is a deep copy
  // at least down to the match object)
  cloned.match.protocol = 'tcp';
  expect(original.match.protocol).toEqual('http');
});
tap.test('Routes: Protocol field preserved through route merging', async () => {
  const base: IRouteConfig = {
    match: {
      ports: 443,
      domains: 'merge-test.example.com',
      protocol: 'http',
    },
    action: {
      type: 'forward',
      targets: [{ host: 'backend', port: 3000 }],
      tls: { mode: 'terminate-and-reencrypt', certificate: 'auto' },
    },
    name: 'Merge Base',
  };
  // Merge with override that changes name but not protocol
  const merged = mergeRouteConfigs(base, { name: 'Merged Route' });
  expect(merged.match.protocol).toEqual('http');
  expect(merged.name).toEqual('Merged Route');
});
export default tap.start();

View File

@@ -174,7 +174,7 @@ tap.test('Route Validation - validateRouteAction', async () => {
const invalidSocketResult = validateRouteAction(invalidSocketAction);
expect(invalidSocketResult.valid).toBeFalse();
expect(invalidSocketResult.errors.length).toBeGreaterThan(0);
expect(invalidSocketResult.errors[0]).toInclude('Socket handler function is required');
expect(invalidSocketResult.errors[0]).toInclude('handler function is required');
});
tap.test('Route Validation - validateRouteConfig', async () => {

View File

@@ -1,11 +1,19 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as net from 'net';
import { SmartProxy } from '../ts/proxies/smart-proxy/index.js';
import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
let testServer: net.Server;
let smartProxy: SmartProxy;
const TEST_SERVER_PORT = 4000;
const PROXY_PORT = 4001;
let TEST_SERVER_PORT: number;
let PROXY_PORT: number;
let CUSTOM_HOST_PORT: number;
let CUSTOM_IP_PROXY_PORT: number;
let CUSTOM_IP_TARGET_PORT: number;
let CHAIN_DEFAULT_1_PORT: number;
let CHAIN_DEFAULT_2_PORT: number;
let CHAIN_PRESERVED_1_PORT: number;
let CHAIN_PRESERVED_2_PORT: number;
const TEST_DATA = 'Hello through port proxy!';
// Track all created servers and proxies for proper cleanup
@@ -64,6 +72,7 @@ function createTestClient(port: number, data: string): Promise<string> {
// SETUP: Create a test server and a PortProxy instance.
tap.test('setup port proxy test environment', async () => {
[TEST_SERVER_PORT, PROXY_PORT, CUSTOM_HOST_PORT, CUSTOM_IP_PROXY_PORT, CUSTOM_IP_TARGET_PORT, CHAIN_DEFAULT_1_PORT, CHAIN_DEFAULT_2_PORT, CHAIN_PRESERVED_1_PORT, CHAIN_PRESERVED_2_PORT] = await findFreePorts(9);
testServer = await createTestServer(TEST_SERVER_PORT);
smartProxy = new SmartProxy({
routes: [
@@ -110,7 +119,7 @@ tap.test('should forward TCP connections to custom host', async () => {
{
name: 'custom-host-route',
match: {
ports: PROXY_PORT + 1
ports: CUSTOM_HOST_PORT
},
action: {
type: 'forward',
@@ -128,9 +137,9 @@ tap.test('should forward TCP connections to custom host', async () => {
}
});
allProxies.push(customHostProxy); // Track this proxy
await customHostProxy.start();
const response = await createTestClient(PROXY_PORT + 1, TEST_DATA);
const response = await createTestClient(CUSTOM_HOST_PORT, TEST_DATA);
expect(response).toEqual(`Echo: ${TEST_DATA}`);
await customHostProxy.stop();
@@ -143,8 +152,8 @@ tap.test('should forward TCP connections to custom host', async () => {
// Modified to work in Docker/CI environments without needing 127.0.0.2
tap.test('should forward connections to custom IP', async () => {
// Set up ports that are FAR apart to avoid any possible confusion
const forcedProxyPort = PROXY_PORT + 2; // 4003 - The port that our proxy listens on
const targetServerPort = TEST_SERVER_PORT + 200; // 4200 - Target test server on different port
const forcedProxyPort = CUSTOM_IP_PROXY_PORT;
const targetServerPort = CUSTOM_IP_TARGET_PORT;
// Create a test server listening on a unique port on 127.0.0.1 (works in all environments)
const testServer2 = await createTestServer(targetServerPort, '127.0.0.1');
@@ -252,13 +261,13 @@ tap.test('should support optional source IP preservation in chained proxies', as
{
name: 'first-proxy-default-route',
match: {
ports: PROXY_PORT + 4
ports: CHAIN_DEFAULT_1_PORT
},
action: {
type: 'forward',
targets: [{
host: 'localhost',
port: PROXY_PORT + 5
port: CHAIN_DEFAULT_2_PORT
}]
}
}
@@ -274,7 +283,7 @@ tap.test('should support optional source IP preservation in chained proxies', as
{
name: 'second-proxy-default-route',
match: {
ports: PROXY_PORT + 5
ports: CHAIN_DEFAULT_2_PORT
},
action: {
type: 'forward',
@@ -296,7 +305,7 @@ tap.test('should support optional source IP preservation in chained proxies', as
await secondProxyDefault.start();
await firstProxyDefault.start();
const response1 = await createTestClient(PROXY_PORT + 4, TEST_DATA);
const response1 = await createTestClient(CHAIN_DEFAULT_1_PORT, TEST_DATA);
expect(response1).toEqual(`Echo: ${TEST_DATA}`);
await firstProxyDefault.stop();
await secondProxyDefault.stop();
@@ -313,13 +322,13 @@ tap.test('should support optional source IP preservation in chained proxies', as
{
name: 'first-proxy-preserved-route',
match: {
ports: PROXY_PORT + 6
ports: CHAIN_PRESERVED_1_PORT
},
action: {
type: 'forward',
targets: [{
host: 'localhost',
port: PROXY_PORT + 7
port: CHAIN_PRESERVED_2_PORT
}]
}
}
@@ -337,7 +346,7 @@ tap.test('should support optional source IP preservation in chained proxies', as
{
name: 'second-proxy-preserved-route',
match: {
ports: PROXY_PORT + 7
ports: CHAIN_PRESERVED_2_PORT
},
action: {
type: 'forward',
@@ -361,7 +370,7 @@ tap.test('should support optional source IP preservation in chained proxies', as
await secondProxyPreserved.start();
await firstProxyPreserved.start();
const response2 = await createTestClient(PROXY_PORT + 6, TEST_DATA);
const response2 = await createTestClient(CHAIN_PRESERVED_1_PORT, TEST_DATA);
expect(response2).toEqual(`Echo: ${TEST_DATA}`);
await firstProxyPreserved.stop();
await secondProxyPreserved.stop();
@@ -446,6 +455,8 @@ tap.test('cleanup port proxy test environment', async () => {
// Verify all resources are cleaned up
expect(allProxies.length).toEqual(0);
expect(allServers.length).toEqual(0);
await assertPortsFree([TEST_SERVER_PORT, PROXY_PORT, CUSTOM_HOST_PORT, CUSTOM_IP_PROXY_PORT, CUSTOM_IP_TARGET_PORT, CHAIN_DEFAULT_1_PORT, CHAIN_DEFAULT_2_PORT, CHAIN_PRESERVED_1_PORT, CHAIN_PRESERVED_2_PORT]);
});
export default tap.start();

View File

@@ -7,10 +7,15 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import { SmartProxy } from '../ts/proxies/smart-proxy/index.js';
import type { IRouteConfig } from '../ts/proxies/smart-proxy/models/route-types.js';
import { findFreePorts } from './helpers/port-allocator.js';
// Use unique high ports for each test to avoid conflicts
let testPort = 20000;
const getNextPort = () => testPort++;
let testPorts: number[];
let portIndex = 0;
const getNextPort = () => testPorts[portIndex++];
tap.test('setup - allocate ports', async () => {
testPorts = await findFreePorts(16);
});
// --------------------------------- Single Route, No Domain Restriction ---------------------------------

View File

@@ -1,12 +1,15 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as net from 'net';
import { SmartProxy } from '../ts/index.js';
import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
tap.test('should handle async handler that sets up listeners after delay', async () => {
const [PORT] = await findFreePorts(1);
const proxy = new SmartProxy({
routes: [{
name: 'delayed-setup-handler',
match: { ports: 7777 },
match: { ports: PORT },
action: {
type: 'socket-handler',
socketHandler: async (socket, context) => {
@@ -41,7 +44,7 @@ tap.test('should handle async handler that sets up listeners after delay', async
});
await new Promise<void>((resolve, reject) => {
client.connect(7777, 'localhost', () => {
client.connect(PORT, 'localhost', () => {
// Send initial data immediately - this tests the race condition
client.write('initial-message\n');
resolve();
@@ -78,6 +81,7 @@ tap.test('should handle async handler that sets up listeners after delay', async
expect(response).toContain('RECEIVED: test-message');
await proxy.stop();
await assertPortsFree([PORT]);
});
export default tap.start();

View File

@@ -2,15 +2,19 @@ import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as net from 'net';
import { SmartProxy } from '../ts/index.js';
import type { IRouteConfig } from '../ts/index.js';
import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
let proxy: SmartProxy;
let PORT: number;
tap.test('setup socket handler test', async () => {
[PORT] = await findFreePorts(1);
// Create a simple socket handler route
const routes: IRouteConfig[] = [{
name: 'echo-handler',
match: {
ports: 9999
match: {
ports: PORT
// No domains restriction - matches all connections
},
action: {
@@ -43,11 +47,11 @@ tap.test('should handle socket with custom function', async () => {
let response = '';
await new Promise<void>((resolve, reject) => {
client.connect(9999, 'localhost', () => {
client.connect(PORT, 'localhost', () => {
console.log('Client connected to proxy');
resolve();
});
client.on('error', reject);
});
@@ -78,7 +82,7 @@ tap.test('should handle async socket handler', async () => {
// Update route with async handler
await proxy.updateRoutes([{
name: 'async-handler',
match: { ports: 9999 },
match: { ports: PORT },
action: {
type: 'socket-handler',
socketHandler: async (socket, context) => {
@@ -108,12 +112,12 @@ tap.test('should handle async socket handler', async () => {
});
await new Promise<void>((resolve, reject) => {
client.connect(9999, 'localhost', () => {
client.connect(PORT, 'localhost', () => {
// Send initial data to trigger the handler
client.write('test data\n');
resolve();
});
client.on('error', reject);
});
@@ -131,7 +135,7 @@ tap.test('should handle errors in socket handler', async () => {
// Update route with error-throwing handler
await proxy.updateRoutes([{
name: 'error-handler',
match: { ports: 9999 },
match: { ports: PORT },
action: {
type: 'socket-handler',
socketHandler: (socket, context) => {
@@ -148,12 +152,12 @@ tap.test('should handle errors in socket handler', async () => {
});
await new Promise<void>((resolve, reject) => {
client.connect(9999, 'localhost', () => {
client.connect(PORT, 'localhost', () => {
// Connection established - send data to trigger handler
client.write('trigger\n');
resolve();
});
client.on('error', () => {
// Ignore client errors - we expect the connection to be closed
});
@@ -168,6 +172,7 @@ tap.test('should handle errors in socket handler', async () => {
tap.test('cleanup', async () => {
await proxy.stop();
await assertPortsFree([PORT]);
});
export default tap.start();

View File

@@ -8,24 +8,25 @@ import * as https from 'https';
import * as fs from 'fs';
import * as path from 'path';
import { fileURLToPath } from 'url';
import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// ────────────────────────────────────────────────────────────────────────────
// Port assignments (unique to avoid conflicts with other tests)
// Port assignments (dynamically allocated to avoid conflicts)
// ────────────────────────────────────────────────────────────────────────────
const TCP_ECHO_PORT = 47500;
const HTTP_ECHO_PORT = 47501;
const TLS_ECHO_PORT = 47502;
const PROXY_TCP_PORT = 47510;
const PROXY_HTTP_PORT = 47511;
const PROXY_TLS_PASS_PORT = 47512;
const PROXY_TLS_TERM_PORT = 47513;
const PROXY_SOCKET_PORT = 47514;
const PROXY_MULTI_A_PORT = 47515;
const PROXY_MULTI_B_PORT = 47516;
const PROXY_TP_HTTP_PORT = 47517;
let TCP_ECHO_PORT: number;
let HTTP_ECHO_PORT: number;
let TLS_ECHO_PORT: number;
let PROXY_TCP_PORT: number;
let PROXY_HTTP_PORT: number;
let PROXY_TLS_PASS_PORT: number;
let PROXY_TLS_TERM_PORT: number;
let PROXY_SOCKET_PORT: number;
let PROXY_MULTI_A_PORT: number;
let PROXY_MULTI_B_PORT: number;
let PROXY_TP_HTTP_PORT: number;
// ────────────────────────────────────────────────────────────────────────────
// Test certificates
@@ -49,6 +50,8 @@ async function pollMetrics(proxy: SmartProxy): Promise<void> {
// Setup: backend servers
// ════════════════════════════════════════════════════════════════════════════
tap.test('setup - TCP echo server', async () => {
[TCP_ECHO_PORT, HTTP_ECHO_PORT, TLS_ECHO_PORT, PROXY_TCP_PORT, PROXY_HTTP_PORT, PROXY_TLS_PASS_PORT, PROXY_TLS_TERM_PORT, PROXY_SOCKET_PORT, PROXY_MULTI_A_PORT, PROXY_MULTI_B_PORT, PROXY_TP_HTTP_PORT] = await findFreePorts(11);
tcpEchoServer = net.createServer((socket) => {
socket.on('data', (data) => socket.write(data));
socket.on('error', () => {});
@@ -151,11 +154,28 @@ tap.test('TCP forward - real-time byte tracking', async (tools) => {
console.log(`TCP forward (during) — recent throughput: in=${tpDuring.in}, out=${tpDuring.out}`);
expect(tpDuring.in + tpDuring.out).toBeGreaterThan(0);
// ── v25.2.0: Per-IP tracking (TCP connections) ──
// Must check WHILE connection is active — per-IP data is evicted on last close
const byIP = mDuring.connections.byIP();
console.log('TCP forward — connections byIP:', Array.from(byIP.entries()));
expect(byIP.size).toBeGreaterThan(0);
const topIPs = mDuring.connections.topIPs(10);
console.log('TCP forward — topIPs:', topIPs);
expect(topIPs.length).toBeGreaterThan(0);
expect(topIPs[0].ip).toBeTruthy();
// ── v25.2.0: Throughput history ──
const history = mDuring.throughput.history(10);
console.log('TCP forward — throughput history length:', history.length);
expect(history.length).toBeGreaterThan(0);
expect(history[0].timestamp).toBeGreaterThan(0);
// Close connection
client.destroy();
await tools.delayFor(500);
// Final check
// Final check — totals persist even after connection close
await pollMetrics(proxy);
const m = proxy.getMetrics();
const bytesIn = m.totals.bytesIn();
@@ -168,6 +188,11 @@ tap.test('TCP forward - real-time byte tracking', async (tools) => {
const byRoute = m.throughput.byRoute();
console.log('TCP forward — throughput byRoute:', Array.from(byRoute.entries()));
// After close, per-IP data should be evicted (memory leak fix)
const byIPAfter = m.connections.byIP();
console.log('TCP forward — connections byIP after close:', Array.from(byIPAfter.entries()));
expect(byIPAfter.size).toEqual(0);
await proxy.stop();
await tools.delayFor(200);
});
@@ -233,6 +258,22 @@ tap.test('HTTP forward - byte totals tracking', async (tools) => {
expect(bytesIn).toBeGreaterThan(0);
expect(bytesOut).toBeGreaterThan(0);
// ── v25.2.0: Per-IP tracking (HTTP connections) ──
const byIP = m.connections.byIP();
console.log('HTTP forward — connections byIP:', Array.from(byIP.entries()));
expect(byIP.size).toBeGreaterThan(0);
const topIPs = m.connections.topIPs(10);
console.log('HTTP forward — topIPs:', topIPs);
expect(topIPs.length).toBeGreaterThan(0);
expect(topIPs[0].ip).toBeTruthy();
// ── v25.2.0: HTTP request counting ──
const totalReqs = m.requests.total();
const rps = m.requests.perSecond();
console.log(`HTTP forward — requests total: ${totalReqs}, perSecond: ${rps}`);
expect(totalReqs).toBeGreaterThan(0);
await proxy.stop();
await tools.delayFor(200);
});
@@ -607,6 +648,37 @@ tap.test('Throughput sampling - values appear during active HTTP traffic', async
console.log(`Sampling test — recent throughput: in=${tpRecent.in}, out=${tpRecent.out}`);
expect(tpRecent.in + tpRecent.out).toBeGreaterThan(0);
// ── v25.2.0: Per-IP tracking ──
const byIP = m.connections.byIP();
console.log('Sampling test — connections byIP:', Array.from(byIP.entries()));
expect(byIP.size).toBeGreaterThan(0);
const topIPs = m.connections.topIPs(10);
console.log('Sampling test — topIPs:', topIPs);
expect(topIPs.length).toBeGreaterThan(0);
expect(topIPs[0].ip).toBeTruthy();
expect(topIPs[0].count).toBeGreaterThanOrEqual(0);
// ── v25.2.0: Throughput history ──
const history = m.throughput.history(10);
console.log(`Sampling test — throughput history: ${history.length} points`);
if (history.length > 0) {
console.log(' first:', history[0], 'last:', history[history.length - 1]);
}
expect(history.length).toBeGreaterThan(0);
expect(history[0].timestamp).toBeGreaterThan(0);
// ── v25.2.0: Per-IP throughput ──
const tpByIP = m.throughput.byIP();
console.log('Sampling test — throughput byIP:', Array.from(tpByIP.entries()));
// ── v25.2.0: HTTP request counting ──
const totalReqs = m.requests.total();
const rps = m.requests.perSecond();
const rpm = m.requests.perMinute();
console.log(`Sampling test — HTTP requests: total=${totalReqs}, perSecond=${rps}, perMinute=${rpm}`);
expect(totalReqs).toBeGreaterThan(0);
// Stop sending
sending = false;
await sendLoop;
@@ -631,6 +703,7 @@ tap.test('cleanup - close backend servers', async () => {
await new Promise<void>((resolve) => httpEchoServer.close(() => resolve()));
await new Promise<void>((resolve) => tlsEchoServer.close(() => resolve()));
console.log('All backend servers closed');
await assertPortsFree([TCP_ECHO_PORT, HTTP_ECHO_PORT, TLS_ECHO_PORT, PROXY_TCP_PORT, PROXY_HTTP_PORT, PROXY_TLS_PASS_PORT, PROXY_TLS_TERM_PORT, PROXY_SOCKET_PORT, PROXY_MULTI_A_PORT, PROXY_MULTI_B_PORT, PROXY_TP_HTTP_PORT]);
});
export default tap.start();

142
test/test.udp-forwarding.ts Normal file
View File

@@ -0,0 +1,142 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as dgram from 'dgram';
import { SmartProxy } from '../ts/index.js';
import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
let smartProxy: SmartProxy;
let backendServer: dgram.Socket;
let PROXY_PORT: number;
let BACKEND_PORT: number;
// Helper: send a single UDP datagram and wait for a response.
// Resolves with the response payload (UTF-8 string), rejects on socket error
// or after timeoutMs. The socket is closed exactly once no matter which event
// (message, error, timeout) wins the race — the original code could call
// client.close() twice (e.g. timeout fires, then a late 'error' handler
// closes again), which throws ERR_SOCKET_DGRAM_NOT_RUNNING.
function sendDatagram(port: number, msg: string, timeoutMs = 5000): Promise<string> {
  return new Promise((resolve, reject) => {
    const client = dgram.createSocket('udp4');
    let settled = false;
    // Single exit point: cancel the timer, close the socket, settle once.
    const finish = (err: Error | null, data?: string) => {
      if (settled) return;
      settled = true;
      clearTimeout(timer);
      client.close();
      if (err) {
        reject(err);
      } else {
        resolve(data!);
      }
    };
    const timer = setTimeout(
      () => finish(new Error(`UDP response timeout after ${timeoutMs}ms`)),
      timeoutMs,
    );
    client.on('message', (data) => finish(null, data.toString()));
    client.on('error', (err) => finish(err));
    client.send(Buffer.from(msg), port, '127.0.0.1');
  });
}
// Helper: create a UDP echo server bound to 127.0.0.1 on the given port.
// Every datagram is answered with `Echo: <payload>` to its sender.
// Rejects if the socket errors before binding completes — the original
// resolve-only promise would hang forever on e.g. EADDRINUSE.
function createUdpEchoServer(port: number): Promise<dgram.Socket> {
  return new Promise((resolve, reject) => {
    const server = dgram.createSocket('udp4');
    // Pre-bind failures must reject rather than leave the caller hanging.
    server.once('error', reject);
    server.on('message', (msg, rinfo) => {
      server.send(Buffer.from(`Echo: ${msg.toString()}`), rinfo.port, rinfo.address);
    });
    server.bind(port, '127.0.0.1', () => {
      // Bound successfully: detach the rejection handler so post-bind errors
      // behave exactly as before (default dgram 'error' semantics).
      server.removeListener('error', reject);
      resolve(server);
    });
  });
}
// SETUP: allocate two free ports, start the backend UDP echo server, and
// bring up a SmartProxy instance with a single UDP forwarding route.
tap.test('setup: start UDP echo server and SmartProxy', async () => {
  [PROXY_PORT, BACKEND_PORT] = await findFreePorts(2);
  // Start backend UDP echo server
  backendServer = await createUdpEchoServer(BACKEND_PORT);
  // Start SmartProxy with a UDP forwarding route
  smartProxy = new SmartProxy({
    routes: [
      {
        name: 'udp-forward-test',
        match: {
          ports: PROXY_PORT,
          // Route matches UDP transport (datagram path, not TCP)
          transport: 'udp' as const,
        },
        action: {
          type: 'forward',
          targets: [{ host: '127.0.0.1', port: BACKEND_PORT }],
          udp: {
            // 5s idle timeout for UDP sessions
            sessionTimeout: 5000,
          },
        },
      },
    ],
    defaults: {
      security: {
        // Loopback-only access: IPv4, IPv6, and IPv4-mapped IPv6 forms
        ipAllowList: ['127.0.0.1', '::1', '::ffff:127.0.0.1'],
      },
    },
  });
  await smartProxy.start();
});
tap.test('UDP forwarding: basic datagram round-trip', async () => {
  // One datagram in, one echoed datagram back through the proxy.
  const reply = await sendDatagram(PROXY_PORT, 'Hello UDP');
  expect(reply).toEqual('Echo: Hello UDP');
});
tap.test('UDP forwarding: multiple datagrams same session', async () => {
  // Reuse a single client socket so all three datagrams share one proxy session.
  const client = dgram.createSocket('udp4');
  const received: string[] = [];
  const allReceived = new Promise<void>((resolve, reject) => {
    const timer = setTimeout(() => {
      client.close();
      reject(new Error('Timeout waiting for 3 responses'));
    }, 5000);
    client.on('message', (data) => {
      received.push(data.toString());
      if (received.length === 3) {
        clearTimeout(timer);
        client.close();
        resolve();
      }
    });
    client.on('error', (err) => {
      clearTimeout(timer);
      client.close();
      reject(err);
    });
  });
  // Fire all three datagrams; UDP gives no ordering guarantee, so the
  // assertions below only check membership, not order.
  for (const payload of ['msg1', 'msg2', 'msg3']) {
    client.send(Buffer.from(payload), PROXY_PORT, '127.0.0.1');
  }
  await allReceived;
  expect(received).toContain('Echo: msg1');
  expect(received).toContain('Echo: msg2');
  expect(received).toContain('Echo: msg3');
});
tap.test('UDP forwarding: multiple clients', async () => {
  // Two independent client sockets hitting the proxy concurrently;
  // each must get back its own echo.
  const replies = await Promise.all([
    sendDatagram(PROXY_PORT, 'client1'),
    sendDatagram(PROXY_PORT, 'client2'),
  ]);
  expect(replies[0]).toEqual('Echo: client1');
  expect(replies[1]).toEqual('Echo: client2');
});
tap.test('UDP forwarding: large datagram (1400 bytes)', async () => {
  // A near-MTU payload must survive the round-trip intact.
  const body = 'X'.repeat(1400);
  const reply = await sendDatagram(PROXY_PORT, body);
  expect(reply).toEqual(`Echo: ${body}`);
});
// CLEANUP: stop the proxy first, then the backend echo server, then verify
// both allocated ports have actually been released.
tap.test('cleanup: stop SmartProxy and backend', async () => {
  await smartProxy.stop();
  await new Promise<void>((resolve) => backendServer.close(() => resolve()));
  await assertPortsFree([PROXY_PORT, BACKEND_PORT]);
});
export default tap.start();

114
test/test.udp-metrics.ts Normal file
View File

@@ -0,0 +1,114 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import * as dgram from 'dgram';
import { SmartProxy } from '../ts/index.js';
import { findFreePorts, assertPortsFree } from './helpers/port-allocator.js';
let smartProxy: SmartProxy;
let backendServer: dgram.Socket;
let PROXY_PORT: number;
let BACKEND_PORT: number;
// Helper: send a single UDP datagram and wait for a response.
// Resolves with the response payload (UTF-8 string), rejects on socket error
// or after timeoutMs. The socket is closed exactly once no matter which event
// (message, error, timeout) wins the race — the original code could call
// client.close() twice (e.g. timeout fires, then a late 'error' handler
// closes again), which throws ERR_SOCKET_DGRAM_NOT_RUNNING.
function sendDatagram(port: number, msg: string, timeoutMs = 5000): Promise<string> {
  return new Promise((resolve, reject) => {
    const client = dgram.createSocket('udp4');
    let settled = false;
    // Single exit point: cancel the timer, close the socket, settle once.
    const finish = (err: Error | null, data?: string) => {
      if (settled) return;
      settled = true;
      clearTimeout(timer);
      client.close();
      if (err) {
        reject(err);
      } else {
        resolve(data!);
      }
    };
    const timer = setTimeout(
      () => finish(new Error(`UDP response timeout after ${timeoutMs}ms`)),
      timeoutMs,
    );
    client.on('message', (data) => finish(null, data.toString()));
    client.on('error', (err) => finish(err));
    client.send(Buffer.from(msg), port, '127.0.0.1');
  });
}
// Helper: create a UDP echo server bound to 127.0.0.1 on the given port.
// Every datagram is answered with `Echo: <payload>` to its sender.
// Rejects if the socket errors before binding completes — the original
// resolve-only promise would hang forever on e.g. EADDRINUSE.
function createUdpEchoServer(port: number): Promise<dgram.Socket> {
  return new Promise((resolve, reject) => {
    const server = dgram.createSocket('udp4');
    // Pre-bind failures must reject rather than leave the caller hanging.
    server.once('error', reject);
    server.on('message', (msg, rinfo) => {
      server.send(Buffer.from(`Echo: ${msg.toString()}`), rinfo.port, rinfo.address);
    });
    server.bind(port, '127.0.0.1', () => {
      // Bound successfully: detach the rejection handler so post-bind errors
      // behave exactly as before (default dgram 'error' semantics).
      server.removeListener('error', reject);
      resolve(server);
    });
  });
}
// SETUP: allocate ports, start the backend echo server, and bring up
// SmartProxy with a UDP forwarding route plus metrics collection enabled.
tap.test('setup: start UDP echo server and SmartProxy with metrics', async () => {
  [PROXY_PORT, BACKEND_PORT] = await findFreePorts(2);
  backendServer = await createUdpEchoServer(BACKEND_PORT);
  smartProxy = new SmartProxy({
    routes: [
      {
        name: 'udp-metrics-test',
        match: {
          ports: PROXY_PORT,
          // Route matches UDP transport (datagram path, not TCP)
          transport: 'udp' as const,
        },
        action: {
          type: 'forward',
          targets: [{ host: '127.0.0.1', port: BACKEND_PORT }],
          udp: {
            // 10s idle timeout for UDP sessions
            sessionTimeout: 10000,
          },
        },
      },
    ],
    defaults: {
      security: {
        // Loopback-only access: IPv4, IPv6, and IPv4-mapped IPv6 forms
        ipAllowList: ['127.0.0.1', '::1', '::ffff:127.0.0.1'],
      },
    },
    metrics: {
      // Sample once per second, retain one minute of history
      enabled: true,
      sampleIntervalMs: 1000,
      retentionSeconds: 60,
    },
  });
  await smartProxy.start();
});
tap.test('UDP metrics: counters increase after traffic', async () => {
  // Drive a couple of datagrams through the proxy so the counters move.
  const first = await sendDatagram(PROXY_PORT, 'metrics-test-1');
  expect(first).toEqual('Echo: metrics-test-1');
  const second = await sendDatagram(PROXY_PORT, 'metrics-test-2');
  expect(second).toEqual('Echo: metrics-test-2');
  // Wait for metrics to propagate and cache to refresh
  await new Promise<void>((resolve) => setTimeout(resolve, 2000));
  // Get metrics (returns the adapter, need to ensure cache is fresh)
  const metrics = smartProxy.getMetrics();
  // The udp property reads from the Rust JSON snapshot
  expect(metrics.udp).toBeDefined();
  const totalSessions = metrics.udp.totalSessions();
  const datagramsIn = metrics.udp.datagramsIn();
  const datagramsOut = metrics.udp.datagramsOut();
  console.log(`UDP metrics: sessions=${totalSessions}, in=${datagramsIn}, out=${datagramsOut}`);
  // All three counters must have registered the traffic above.
  expect(totalSessions).toBeGreaterThan(0);
  expect(datagramsIn).toBeGreaterThan(0);
  expect(datagramsOut).toBeGreaterThan(0);
});
// CLEANUP: stop the proxy first, then the backend echo server, then verify
// both allocated ports have actually been released.
tap.test('cleanup: stop SmartProxy and backend', async () => {
  await smartProxy.stop();
  await new Promise<void>((resolve) => backendServer.close(() => resolve()));
  await assertPortsFree([PROXY_PORT, BACKEND_PORT]);
});
export default tap.start();

View File

@@ -3,6 +3,6 @@
*/
export const commitinfo = {
name: '@push.rocks/smartproxy',
version: '25.1.0',
version: '25.15.0',
description: 'A powerful proxy package with unified route-based configuration for high traffic management. Features include SSL/TLS support, flexible routing patterns, WebSocket handling, advanced security options, and automatic ACME certificate management.'
}

View File

@@ -15,4 +15,3 @@ export * from './lifecycle-component.js';
export * from './binary-heap.js';
export * from './enhanced-connection-pool.js';
export * from './socket-utils.js';
export * from './proxy-protocol.js';

View File

@@ -354,17 +354,17 @@ export class LogDeduplicator {
// Global instance for connection-related log deduplication
export const connectionLogDeduplicator = new LogDeduplicator(5000); // 5 second batches
// Ensure logs are flushed on process exit
// Ensure logs are flushed on process exit.
// Only use beforeExit — do NOT call process.exit() from SIGINT/SIGTERM handlers
// as that kills the host process's graceful shutdown (e.g., dcrouter connection draining).
process.on('beforeExit', () => {
connectionLogDeduplicator.flushAll();
});
process.on('SIGINT', () => {
connectionLogDeduplicator.cleanup();
process.exit(0);
});
process.on('SIGTERM', () => {
connectionLogDeduplicator.cleanup();
process.exit(0);
});

View File

@@ -1,129 +0,0 @@
import * as plugins from '../../plugins.js';
import { logger } from './logger.js';
import { ProxyProtocolParser as ProtocolParser, type IProxyInfo, type IProxyParseResult } from '../../protocols/proxy/index.js';
// Re-export types from protocols for backward compatibility
export type { IProxyInfo, IProxyParseResult } from '../../protocols/proxy/index.js';
/**
 * Parser for PROXY protocol v1 (text format)
 * Spec: https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt
 *
 * This class now delegates to the protocol parser but adds
 * smartproxy-specific features like socket reading and logging.
 */
export class ProxyProtocolParser {
  // Constants re-exported from the shared parser so existing callers of this
  // class keep working without importing the protocols module directly.
  static readonly PROXY_V1_SIGNATURE = ProtocolParser.PROXY_V1_SIGNATURE;
  static readonly MAX_HEADER_LENGTH = ProtocolParser.MAX_HEADER_LENGTH;
  static readonly HEADER_TERMINATOR = ProtocolParser.HEADER_TERMINATOR;

  /**
   * Parse PROXY protocol v1 header from buffer.
   * Returns proxy info (null if not PROXY protocol) and remaining data after
   * the header.
   */
  static parse(data: Buffer): IProxyParseResult {
    // Delegate to protocol parser
    return ProtocolParser.parse(data);
  }

  /**
   * Generate PROXY protocol v1 header for the given connection info.
   */
  static generate(info: IProxyInfo): Buffer {
    // Delegate to protocol parser
    return ProtocolParser.generate(info);
  }

  /**
   * Validate IP address format for the given protocol family.
   */
  private static isValidIP(ip: string, protocol: 'TCP4' | 'TCP6' | 'UNKNOWN'): boolean {
    return ProtocolParser.isValidIP(ip, protocol);
  }

  /**
   * Attempt to read a complete PROXY protocol header from a socket.
   *
   * Accumulates 'data' events until one of:
   *  - the signature prefix check fails (treated as not PROXY protocol);
   *  - a full header parses successfully;
   *  - the buffer exceeds MAX_HEADER_LENGTH, a parse error occurs, a socket
   *    error fires, or the timeout elapses.
   *
   * Never rejects: every failure mode resolves with { proxyInfo: null, ... }
   * so callers can fall through to normal connection handling. All buffered
   * bytes are returned as remainingData so no client data is lost.
   */
  static async readFromSocket(socket: plugins.net.Socket, timeout: number = 5000): Promise<IProxyParseResult | null> {
    return new Promise((resolve) => {
      let buffer = Buffer.alloc(0);
      let resolved = false;
      // Detach both listeners and cancel the timer exactly once per outcome.
      const cleanup = () => {
        socket.removeListener('data', onData);
        socket.removeListener('error', onError);
        clearTimeout(timer);
      };
      const timer = setTimeout(() => {
        if (!resolved) {
          resolved = true;
          cleanup();
          resolve({
            proxyInfo: null,
            remainingData: buffer
          });
        }
      }, timeout);
      const onData = (chunk: Buffer) => {
        buffer = Buffer.concat([buffer, chunk]);
        // Check if we have enough data.
        // NOTE(review): with fewer than 6 buffered bytes, startsWith() on the
        // partial prefix is false, so a fragmented "PROXY " signature split
        // across TCP segments is misclassified as non-PROXY — confirm whether
        // first-chunk fragmentation is a real concern for callers.
        if (!buffer.toString('ascii', 0, Math.min(6, buffer.length)).startsWith(this.PROXY_V1_SIGNATURE)) {
          // Not PROXY protocol
          resolved = true;
          cleanup();
          resolve({
            proxyInfo: null,
            remainingData: buffer
          });
          return;
        }
        // Try to parse
        try {
          const result = this.parse(buffer);
          if (result.proxyInfo) {
            // Successfully parsed
            resolved = true;
            cleanup();
            resolve(result);
          } else if (buffer.length > this.MAX_HEADER_LENGTH) {
            // Header too long — give up rather than buffer unboundedly
            resolved = true;
            cleanup();
            resolve({
              proxyInfo: null,
              remainingData: buffer
            });
          }
          // Otherwise continue reading
        } catch (error) {
          // Parse error
          logger.log('error', `PROXY protocol parse error: ${error.message}`);
          resolved = true;
          cleanup();
          resolve({
            proxyInfo: null,
            remainingData: buffer
          });
        }
      };
      const onError = (error: Error) => {
        logger.log('error', `Socket error while reading PROXY protocol: ${error.message}`);
        resolved = true;
        cleanup();
        resolve({
          proxyInfo: null,
          remainingData: buffer
        });
      };
      socket.on('data', onData);
      socket.on('error', onError);
    });
  }
}

View File

@@ -18,8 +18,8 @@ export class ProtocolDetector {
private fragmentManager: DetectionFragmentManager;
private tlsDetector: TlsDetector;
private httpDetector: HttpDetector;
private connectionProtocols: Map<string, 'tls' | 'http'> = new Map();
private connectionProtocols: Map<string, { protocol: 'tls' | 'http'; createdAt: number }> = new Map();
constructor() {
this.fragmentManager = new DetectionFragmentManager();
this.tlsDetector = new TlsDetector();
@@ -124,8 +124,9 @@ export class ProtocolDetector {
const connectionId = DetectionFragmentManager.createConnectionId(context);
// Check if we already know the protocol for this connection
const knownProtocol = this.connectionProtocols.get(connectionId);
const knownEntry = this.connectionProtocols.get(connectionId);
const knownProtocol = knownEntry?.protocol;
if (knownProtocol === 'http') {
const result = this.httpDetector.detectWithContext(buffer, context, options);
if (result) {
@@ -163,7 +164,7 @@ export class ProtocolDetector {
if (!knownProtocol) {
// First peek to determine protocol type
if (this.tlsDetector.canHandle(buffer)) {
this.connectionProtocols.set(connectionId, 'tls');
this.connectionProtocols.set(connectionId, { protocol: 'tls', createdAt: Date.now() });
// Handle TLS with fragment accumulation
const handler = this.fragmentManager.getHandler('tls');
const fragmentResult = handler.addFragment(connectionId, buffer);
@@ -189,7 +190,7 @@ export class ProtocolDetector {
}
if (this.httpDetector.canHandle(buffer)) {
this.connectionProtocols.set(connectionId, 'http');
this.connectionProtocols.set(connectionId, { protocol: 'http', createdAt: Date.now() });
const result = this.httpDetector.detectWithContext(buffer, context, options);
if (result) {
if (result.isComplete) {
@@ -221,6 +222,14 @@ export class ProtocolDetector {
private cleanupInstance(): void {
this.fragmentManager.cleanup();
// Remove stale connectionProtocols entries (abandoned handshakes, port scanners)
const maxAge = 30_000; // 30 seconds
const now = Date.now();
for (const [id, entry] of this.connectionProtocols) {
if (now - entry.createdAt > maxAge) {
this.connectionProtocols.delete(id);
}
}
}
/**
@@ -242,8 +251,7 @@ export class ProtocolDetector {
* @param _maxAge Maximum age in milliseconds (default: 30 seconds)
*/
static cleanupConnections(_maxAge: number = 30000): void {
// Cleanup is now handled internally by the fragment manager
this.getInstance().fragmentManager.cleanup();
this.getInstance().cleanupInstance();
}
/**

View File

@@ -2,14 +2,13 @@
import { EventEmitter } from 'node:events';
import * as fs from 'node:fs';
import * as http from 'node:http';
import * as https from 'node:https';
import * as net from 'node:net';
import * as path from 'node:path';
import * as tls from 'node:tls';
import * as url from 'node:url';
import * as http2 from 'node:http2';
export { EventEmitter, fs, http, https, net, path, tls, url, http2 };
export { EventEmitter, fs, http, net, path, tls, url, http2 };
// tsclass scope
import * as tsclass from '@tsclass/tsclass';
@@ -17,44 +16,19 @@ import * as tsclass from '@tsclass/tsclass';
export { tsclass };
// pushrocks scope
import * as lik from '@push.rocks/lik';
import * as smartdelay from '@push.rocks/smartdelay';
import * as smartpromise from '@push.rocks/smartpromise';
import * as smartrequest from '@push.rocks/smartrequest';
import * as smartstring from '@push.rocks/smartstring';
import * as smartfile from '@push.rocks/smartfile';
import * as smartcrypto from '@push.rocks/smartcrypto';
import * as smartacme from '@push.rocks/smartacme';
import * as smartacmePlugins from '@push.rocks/smartacme/dist_ts/smartacme.plugins.js';
import * as smartacmeHandlers from '@push.rocks/smartacme/dist_ts/handlers/index.js';
import * as smartlog from '@push.rocks/smartlog';
import * as smartlogDestinationLocal from '@push.rocks/smartlog/destination-local';
import * as taskbuffer from '@push.rocks/taskbuffer';
import * as smartrx from '@push.rocks/smartrx';
import * as smartrust from '@push.rocks/smartrust';
export {
lik,
smartdelay,
smartrequest,
smartpromise,
smartstring,
smartfile,
smartcrypto,
smartacme,
smartacmePlugins,
smartacmeHandlers,
smartlog,
smartlogDestinationLocal,
taskbuffer,
smartrx,
smartrust,
};
// third party scope
import prettyMs from 'pretty-ms';
import * as ws from 'ws';
import wsDefault from 'ws';
import { minimatch } from 'minimatch';
export { prettyMs, ws, wsDefault, minimatch };
export { minimatch };

View File

@@ -1,7 +1,6 @@
/**
* PROXY Protocol Module
* HAProxy PROXY protocol implementation
* Type definitions for HAProxy PROXY protocol v1/v2
*/
export * from './types.js';
export * from './parser.js';
export * from './types.js';

View File

@@ -1,183 +0,0 @@
/**
* PROXY Protocol Parser
* Implementation of HAProxy PROXY protocol v1 (text format)
* Spec: https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt
*/
import type { IProxyInfo, IProxyParseResult, TProxyProtocol } from './types.js';
/**
* PROXY protocol parser
*/
export class ProxyProtocolParser {
  static readonly PROXY_V1_SIGNATURE = 'PROXY ';
  // Maximum total v1 header length in bytes, INCLUDING the trailing CRLF
  // (per the HAProxy PROXY protocol spec).
  static readonly MAX_HEADER_LENGTH = 107;
  static readonly HEADER_TERMINATOR = '\r\n';

  /**
   * Parse PROXY protocol v1 header from buffer.
   *
   * @param data Buffer that may begin with a PROXY v1 text header.
   * @returns Parsed proxy info and the remaining data after the header.
   *          `proxyInfo` is `null` when the buffer does not start with a
   *          complete PROXY header (not proxied, or more data needed).
   * @throws Error on malformed headers or headers exceeding MAX_HEADER_LENGTH.
   */
  static parse(data: Buffer): IProxyParseResult {
    // Check if buffer starts with PROXY signature. A short buffer yields a
    // short string here, which simply fails the startsWith check.
    if (!data.toString('ascii', 0, 6).startsWith(this.PROXY_V1_SIGNATURE)) {
      return {
        proxyInfo: null,
        remainingData: data
      };
    }

    // Find header terminator
    const headerEndIndex = data.indexOf(this.HEADER_TERMINATOR);
    if (headerEndIndex === -1) {
      // No terminator yet. Once we hold MAX_HEADER_LENGTH bytes or more, a
      // valid terminator can no longer appear within the allowed length, so
      // the header is invalid — don't wait for data that can't help.
      if (data.length >= this.MAX_HEADER_LENGTH) {
        throw new Error('PROXY protocol header exceeds maximum length');
      }
      // Header incomplete, need more data
      return {
        proxyInfo: null,
        remainingData: data
      };
    }

    // Terminator found, but it must still fall within the spec's 107-byte
    // limit (header text + CRLF). Previously an over-long header that
    // happened to contain a CRLF further out was accepted.
    if (headerEndIndex + this.HEADER_TERMINATOR.length > this.MAX_HEADER_LENGTH) {
      throw new Error('PROXY protocol header exceeds maximum length');
    }

    // Extract header line and the payload that follows the CRLF.
    const headerLine = data.toString('ascii', 0, headerEndIndex);
    const remainingData = data.subarray(headerEndIndex + this.HEADER_TERMINATOR.length);

    // Parse header: "PROXY <proto> [<srcIP> <dstIP> <srcPort> <dstPort>]"
    const parts = headerLine.split(' ');
    if (parts.length < 2) {
      throw new Error(`Invalid PROXY protocol header format: ${headerLine}`);
    }

    const protocol = parts[1];

    // Validate protocol. The v1 text format only defines TCP4/TCP6/UNKNOWN
    // (UDP variants exist only in the v2 binary format).
    if (!['TCP4', 'TCP6', 'UNKNOWN'].includes(protocol)) {
      throw new Error(`Invalid PROXY protocol: ${protocol}`);
    }

    // For UNKNOWN protocol, ignore addresses
    if (protocol === 'UNKNOWN') {
      return {
        proxyInfo: {
          protocol: 'UNKNOWN',
          sourceIP: '',
          sourcePort: 0,
          destinationIP: '',
          destinationPort: 0
        },
        remainingData
      };
    }

    // For TCP4/TCP6, we need all 6 parts
    if (parts.length !== 6) {
      throw new Error(`Invalid PROXY protocol header format: ${headerLine}`);
    }

    const [, , srcIP, dstIP, srcPort, dstPort] = parts;

    // Validate and parse ports
    const sourcePort = parseInt(srcPort, 10);
    const destinationPort = parseInt(dstPort, 10);

    if (isNaN(sourcePort) || sourcePort < 0 || sourcePort > 65535) {
      throw new Error(`Invalid source port: ${srcPort}`);
    }
    if (isNaN(destinationPort) || destinationPort < 0 || destinationPort > 65535) {
      throw new Error(`Invalid destination port: ${dstPort}`);
    }

    // Validate IP addresses against the declared address family
    const protocolType = protocol as TProxyProtocol;
    if (!this.isValidIP(srcIP, protocolType)) {
      throw new Error(`Invalid source IP for ${protocol}: ${srcIP}`);
    }
    if (!this.isValidIP(dstIP, protocolType)) {
      throw new Error(`Invalid destination IP for ${protocol}: ${dstIP}`);
    }

    return {
      proxyInfo: {
        protocol: protocolType,
        sourceIP: srcIP,
        sourcePort,
        destinationIP: dstIP,
        destinationPort
      },
      remainingData
    };
  }

  /**
   * Generate PROXY protocol v1 header.
   *
   * @throws Error if the rendered header would exceed MAX_HEADER_LENGTH.
   */
  static generate(info: IProxyInfo): Buffer {
    if (info.protocol === 'UNKNOWN') {
      return Buffer.from(`PROXY UNKNOWN\r\n`, 'ascii');
    }

    const header = `PROXY ${info.protocol} ${info.sourceIP} ${info.destinationIP} ${info.sourcePort} ${info.destinationPort}\r\n`;

    if (header.length > this.MAX_HEADER_LENGTH) {
      throw new Error('Generated PROXY protocol header exceeds maximum length');
    }

    return Buffer.from(header, 'ascii');
  }

  /**
   * Validate IP address format against the protocol's address family.
   * Handles the UDP4/UDP6 members of TProxyProtocol as well (same address
   * families as their TCP counterparts); UNKNOWN always fails.
   */
  static isValidIP(ip: string, protocol: TProxyProtocol): boolean {
    if (protocol === 'TCP4' || protocol === 'UDP4') {
      return this.isIPv4(ip);
    } else if (protocol === 'TCP6' || protocol === 'UDP6') {
      return this.isIPv6(ip);
    }
    return false;
  }

  /**
   * Check if string is valid dotted-quad IPv4.
   * Rejects leading zeros and out-of-range octets via the round-trip check.
   */
  static isIPv4(ip: string): boolean {
    const parts = ip.split('.');
    if (parts.length !== 4) return false;

    for (const part of parts) {
      const num = parseInt(part, 10);
      if (isNaN(num) || num < 0 || num > 255 || part !== num.toString()) {
        return false;
      }
    }
    return true;
  }

  /**
   * Check if string is valid IPv6 (basic regex validation covering full,
   * compressed, link-local-with-zone, and IPv4-mapped forms).
   */
  static isIPv6(ip: string): boolean {
    const ipv6Regex = /^(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))$/;
    return ipv6Regex.test(ip);
  }

  /**
   * Create a connection ID string for tracking.
   * Format: "srcIp:srcPort-dstIp:dstPort".
   */
  static createConnectionId(connectionInfo: {
    sourceIp?: string;
    sourcePort?: number;
    destIp?: string;
    destPort?: number;
  }): string {
    const { sourceIp, sourcePort, destIp, destPort } = connectionInfo;
    return `${sourceIp}:${sourcePort}-${destIp}:${destPort}`;
  }
}

View File

@@ -11,7 +11,7 @@ export type TProxyProtocolVersion = 'v1' | 'v2';
/**
* Connection protocol type
*/
export type TProxyProtocol = 'TCP4' | 'TCP6' | 'UNKNOWN';
export type TProxyProtocol = 'TCP4' | 'TCP6' | 'UDP4' | 'UDP6' | 'UNKNOWN';
/**
* Interface representing parsed PROXY protocol information

View File

@@ -0,0 +1,239 @@
import * as plugins from '../../plugins.js';
import { logger } from '../../core/utils/logger.js';
import type { IRouteContext } from '../../core/models/route-context.js';
import type { RoutePreprocessor } from './route-preprocessor.js';
import type { TDatagramHandler, IDatagramInfo } from './models/route-types.js';
/**
 * Framed message for datagram relay IPC.
 * Each message is length-prefixed: [4 bytes big-endian u32 length][JSON payload]
 *
 * The same shape is used in both directions: 'datagram' frames flow
 * Rust→TS, 'reply' frames flow TS→Rust (with routeKey echoed back so
 * Rust can associate the reply with the originating route).
 */
interface IDatagramRelayMessage {
  /** Frame direction: 'datagram' = inbound from Rust, 'reply' = outbound to Rust */
  type: 'datagram' | 'reply';
  /** Key identifying the route whose datagramHandler should process this frame */
  routeKey: string;
  /** Client source IP of the original UDP datagram */
  sourceIp: string;
  /** Client source port of the original UDP datagram */
  sourcePort: number;
  /** Local (listening) port the datagram arrived on */
  destPort: number;
  /** Datagram payload, base64-encoded for JSON transport */
  payloadBase64: string;
}
/**
* Server that receives UDP datagrams from Rust via Unix stream socket
* and dispatches them to TypeScript datagramHandler callbacks.
*
* Protocol: length-prefixed JSON frames over a persistent Unix stream socket.
* - Rust→TS: { type: "datagram", routeKey, sourceIp, sourcePort, destPort, payloadBase64 }
 * - TS→Rust: { type: "reply", routeKey, sourceIp, sourcePort, destPort, payloadBase64 }
*/
export class DatagramHandlerServer {
  // Unix socket server that accepts the persistent connection from Rust.
  private server: plugins.net.Server | null = null;
  // The single active connection from the Rust relay (a new connection replaces it).
  private connection: plugins.net.Socket | null = null;
  private socketPath: string;
  // Used to look up the original (unprocessed) route so its datagramHandler can be invoked.
  private preprocessor: RoutePreprocessor;
  // Accumulates raw stream bytes until at least one complete frame is available.
  private readBuffer: Buffer = Buffer.alloc(0);

  constructor(socketPath: string, preprocessor: RoutePreprocessor) {
    this.socketPath = socketPath;
    this.preprocessor = preprocessor;
  }

  /**
   * Start listening on the Unix socket.
   * Removes any stale socket file first (left over from a crashed process).
   */
  public async start(): Promise<void> {
    // Clean up stale socket file
    try {
      await plugins.fs.promises.unlink(this.socketPath);
    } catch {
      // Ignore if doesn't exist
    }

    return new Promise((resolve, reject) => {
      this.server = plugins.net.createServer((socket) => {
        this.handleConnection(socket);
      });

      this.server.on('error', (err) => {
        logger.log('error', `DatagramHandlerServer error: ${err.message}`);
        // Rejects the startup promise on listen failure; once the promise is
        // settled this reject is a harmless no-op and the error is only logged.
        reject(err);
      });

      this.server.listen(this.socketPath, () => {
        logger.log('info', `DatagramHandlerServer listening on ${this.socketPath}`);
        resolve();
      });
    });
  }

  /**
   * Stop the server, drop the relay connection, and remove the socket file.
   */
  public async stop(): Promise<void> {
    if (this.connection) {
      this.connection.destroy();
      this.connection = null;
    }

    if (this.server) {
      await new Promise<void>((resolve) => {
        this.server!.close(() => resolve());
      });
      this.server = null;
    }

    try {
      await plugins.fs.promises.unlink(this.socketPath);
    } catch {
      // Ignore
    }
  }

  /**
   * Handle a new connection from Rust.
   * Only one connection at a time (Rust maintains a persistent connection);
   * a newer connection replaces the old one and resets frame parsing state.
   */
  private handleConnection(socket: plugins.net.Socket): void {
    if (this.connection) {
      logger.log('warn', 'DatagramHandlerServer: replacing existing connection');
      this.connection.destroy();
    }
    this.connection = socket;
    this.readBuffer = Buffer.alloc(0);

    socket.on('data', (chunk: Buffer) => {
      this.readBuffer = Buffer.concat([this.readBuffer, chunk]);
      this.processFrames();
    });

    socket.on('error', (err) => {
      logger.log('error', `DatagramHandlerServer connection error: ${err.message}`);
    });

    socket.on('close', () => {
      if (this.connection === socket) {
        this.connection = null;
      }
    });

    logger.log('info', 'DatagramHandlerServer: Rust relay connected');
  }

  /**
   * Process length-prefixed frames from the read buffer.
   * Loops until the buffer holds less than one complete frame.
   */
  private processFrames(): void {
    while (this.readBuffer.length >= 4) {
      const frameLen = this.readBuffer.readUInt32BE(0);

      // Safety: reject absurdly large frames. An implausible length prefix
      // means the framing is desynchronized; merely clearing the buffer would
      // leave subsequent bytes being misparsed as new frames, so drop the
      // connection and let Rust reconnect with a clean stream.
      if (frameLen > 10 * 1024 * 1024) {
        logger.log('error', `DatagramHandlerServer: frame too large (${frameLen} bytes), closing connection`);
        this.readBuffer = Buffer.alloc(0);
        if (this.connection) {
          this.connection.destroy();
          this.connection = null;
        }
        return;
      }

      if (this.readBuffer.length < 4 + frameLen) {
        // Incomplete frame, wait for more data
        return;
      }

      const frameData = this.readBuffer.subarray(4, 4 + frameLen);
      this.readBuffer = this.readBuffer.subarray(4 + frameLen);

      try {
        const msg: IDatagramRelayMessage = JSON.parse(frameData.toString('utf8'));
        this.handleMessage(msg);
      } catch (err) {
        // A bad frame is logged and skipped; framing itself is still intact.
        logger.log('error', `DatagramHandlerServer: failed to parse frame: ${err}`);
      }
    }
  }

  /**
   * Handle a received datagram message from Rust: look up the route's
   * datagramHandler and invoke it with the decoded payload, metadata, and
   * a reply callback that frames a response back to Rust.
   */
  private handleMessage(msg: IDatagramRelayMessage): void {
    if (msg.type !== 'datagram') {
      return;
    }

    const originalRoute = this.preprocessor.getOriginalRoute(msg.routeKey);
    if (!originalRoute) {
      logger.log('warn', `DatagramHandlerServer: no handler for route '${msg.routeKey}'`);
      return;
    }

    const handler: TDatagramHandler | undefined = originalRoute.action.datagramHandler;
    if (!handler) {
      logger.log('warn', `DatagramHandlerServer: route '${msg.routeKey}' has no datagramHandler`);
      return;
    }

    const datagram = Buffer.from(msg.payloadBase64, 'base64');

    const context: IRouteContext = {
      port: msg.destPort,
      domain: undefined,
      clientIp: msg.sourceIp,
      serverIp: '0.0.0.0',
      path: undefined,
      isTls: false,
      tlsVersion: undefined,
      routeName: originalRoute.name,
      routeId: originalRoute.id,
      timestamp: Date.now(),
      connectionId: `udp-${msg.sourceIp}:${msg.sourcePort}-${Date.now()}`,
    };

    const info: IDatagramInfo = {
      sourceIp: msg.sourceIp,
      sourcePort: msg.sourcePort,
      destPort: msg.destPort,
      context,
    };

    // Reply callback: echoes the source address so Rust can route the
    // response datagram back to the correct client.
    const reply = (data: Buffer): void => {
      this.sendReply({
        type: 'reply',
        routeKey: msg.routeKey,
        sourceIp: msg.sourceIp,
        sourcePort: msg.sourcePort,
        destPort: msg.destPort,
        payloadBase64: data.toString('base64'),
      });
    };

    try {
      const result = handler(datagram, info, reply);
      // Handlers may be sync or async; attach a rejection handler so an
      // async failure is logged rather than becoming an unhandled rejection.
      if (result && typeof (result as any).catch === 'function') {
        (result as Promise<void>).catch((err) => {
          logger.log('error', `DatagramHandler error for route '${msg.routeKey}': ${err}`);
        });
      }
    } catch (err) {
      logger.log('error', `DatagramHandler threw for route '${msg.routeKey}': ${err}`);
    }
  }

  /**
   * Send a reply frame back to Rust (length-prefixed JSON).
   * Silently dropped (with a warning) if the relay connection is gone.
   */
  private sendReply(msg: IDatagramRelayMessage): void {
    if (!this.connection || this.connection.destroyed) {
      logger.log('warn', 'DatagramHandlerServer: cannot send reply, no connection');
      return;
    }

    const json = JSON.stringify(msg);
    const payload = Buffer.from(json, 'utf8');
    const header = Buffer.alloc(4);
    header.writeUInt32BE(payload.length, 0);
    this.connection.write(Buffer.concat([header, payload]));
  }

  /**
   * Get the socket path for passing to Rust via IPC.
   */
  public getSocketPath(): string {
    return this.socketPath;
  }
}

View File

@@ -112,12 +112,12 @@ export interface ISmartProxyOptions {
maxVersion?: string;
// Timeout settings
connectionTimeout?: number; // Timeout for establishing connection to backend (ms), default: 30000 (30s)
connectionTimeout?: number; // Timeout for establishing connection to backend (ms), default: 60000 (60s)
initialDataTimeout?: number; // Timeout for initial data/SNI (ms), default: 60000 (60s)
socketTimeout?: number; // Socket inactivity timeout (ms), default: 3600000 (1h)
socketTimeout?: number; // Socket inactivity timeout (ms), default: 60000 (60s)
inactivityCheckInterval?: number; // How often to check for inactive connections (ms), default: 60000 (60s)
maxConnectionLifetime?: number; // Default max connection lifetime (ms), default: 86400000 (24h)
inactivityTimeout?: number; // Inactivity timeout (ms), default: 14400000 (4h)
maxConnectionLifetime?: number; // Max connection lifetime (ms), default: 3600000 (1h)
inactivityTimeout?: number; // Inactivity timeout (ms), default: 75000 (75s)
gracefulShutdownTimeout?: number; // (ms) maximum time to wait for connections to close during shutdown
@@ -180,6 +180,21 @@ export interface ISmartProxyOptions {
*/
certProvisionFallbackToAcme?: boolean;
/**
* Per-domain timeout in ms for certProvisionFunction calls.
* If a single domain's provisioning takes longer than this, it's aborted
* and a certificate-failed event is emitted.
* Default: 300000 (5 minutes)
*/
certProvisionTimeout?: number;
/**
* Maximum number of domains to provision certificates for concurrently.
* Prevents overwhelming ACME providers when many domains provision at once.
* Default: 4
*/
certProvisionConcurrency?: number;
/**
* Disable the default self-signed fallback certificate.
* When false (default), a self-signed cert is generated at startup and loaded

View File

@@ -67,6 +67,21 @@ export interface IMetrics {
connections(): number;
};
// Backend metrics
backends: {
byBackend(): Map<string, IBackendMetrics>;
protocols(): Map<string, string>;
topByErrors(limit?: number): Array<{ backend: string; errors: number }>;
};
// UDP metrics
udp: {
activeSessions(): number;
totalSessions(): number;
datagramsIn(): number;
datagramsOut(): number;
};
// Performance metrics
percentiles: {
connectionDuration(): { p50: number; p95: number; p99: number };
@@ -98,6 +113,21 @@ export interface IMetricsConfig {
prometheusPrefix: string; // Default: smartproxy_
}
/**
* Per-backend metrics
*/
export interface IBackendMetrics {
protocol: string;
activeConnections: number;
totalConnections: number;
connectErrors: number;
handshakeErrors: number;
requestErrors: number;
avgConnectTimeMs: number;
poolHitRate: number;
h2Failures: number;
}
/**
* Internal interface for connection byte tracking
*/

View File

@@ -20,9 +20,15 @@ export type TSocketHandler = (socket: plugins.net.Socket, context: IRouteContext
export type TTlsMode = 'passthrough' | 'terminate' | 'terminate-and-reencrypt';
/**
* Port range specification format
* Transport protocol for route matching
*/
export type TPortRange = number | number[] | Array<{ from: number; to: number }>;
export type TTransportProtocol = 'tcp' | 'udp' | 'all';
/**
* Port range specification format.
* Supports: single number, array of numbers, array of ranges, or mixed arrays.
*/
export type TPortRange = number | Array<number | { from: number; to: number }>;
/**
* Route match criteria for incoming requests
@@ -31,6 +37,9 @@ export interface IRouteMatch {
// Listen on these ports (required)
ports: TPortRange;
// Transport protocol: 'tcp' (default), 'udp', or 'all' (both TCP and UDP)
transport?: TTransportProtocol;
// Optional domain patterns to match (default: all domains)
domains?: string | string[];
@@ -39,6 +48,7 @@ export interface IRouteMatch {
clientIp?: string[]; // Match specific client IPs
tlsVersion?: string[]; // Match specific TLS versions
headers?: Record<string, string | RegExp>; // Match specific HTTP headers
protocol?: 'http' | 'tcp' | 'udp' | 'quic' | 'http3'; // Match specific protocol
}
@@ -71,6 +81,9 @@ export interface IRouteTarget {
headers?: IRouteHeaders; // Override route-level headers
advanced?: IRouteAdvanced; // Override route-level advanced settings
// Override transport for backend connection (e.g., receive QUIC but forward as HTTP/1.1 via TCP)
backendTransport?: 'tcp' | 'udp';
// Priority for matching (higher values are checked first, default: 0)
priority?: number;
}
@@ -261,7 +274,7 @@ export interface IRouteAction {
// Additional options for backend-specific settings
options?: {
backendProtocol?: 'http1' | 'http2';
backendProtocol?: 'http1' | 'http2' | 'http3' | 'auto';
[key: string]: any;
};
@@ -273,9 +286,15 @@ export interface IRouteAction {
// Socket handler function (when type is 'socket-handler')
socketHandler?: TSocketHandler;
// Datagram handler function for UDP (when type is 'socket-handler' and transport is 'udp')
datagramHandler?: TDatagramHandler;
// PROXY protocol support (default for all targets, can be overridden per target)
sendProxyProtocol?: boolean;
// UDP-specific settings (session tracking, datagram limits, QUIC config)
udp?: IRouteUdp;
}
/**
@@ -355,4 +374,64 @@ export interface IRouteConfig {
enabled?: boolean; // Whether the route is active (default: true)
}
// ─── UDP & QUIC Types ─────────────────────────────────────────────────

/**
 * Handler for individual UDP datagrams.
 * Called for each incoming datagram on a socket-handler route with UDP transport.
 *
 * @param datagram The raw datagram payload.
 * @param info     Source/destination metadata plus the route context.
 * @param reply    Sends a datagram back to the source address. May be invoked
 *                 zero or more times, synchronously or later (e.g. after an
 *                 async lookup).
 */
export type TDatagramHandler = (
  datagram: Buffer,
  info: IDatagramInfo,
  reply: (data: Buffer) => void
) => void | Promise<void>;

/**
 * Metadata for a received UDP datagram
 */
export interface IDatagramInfo {
  /** Source IP address */
  sourceIp: string;
  /** Source port */
  sourcePort: number;
  /** Destination (local) port the datagram arrived on */
  destPort: number;
  /** Route context */
  context: IRouteContext;
}

/**
 * UDP-specific settings for route actions
 */
export interface IRouteUdp {
  /** Idle timeout for a UDP session/flow (keyed by src IP:port), in ms. Default: 60000 */
  sessionTimeout?: number;
  /** Max concurrent UDP sessions per source IP. Default: 1000 */
  maxSessionsPerIP?: number;
  /** Max accepted datagram size in bytes. Oversized datagrams are dropped. Default: 65535 */
  maxDatagramSize?: number;
  /** QUIC-specific configuration. When present, traffic is treated as QUIC. */
  quic?: IRouteQuic;
}

/**
 * QUIC and HTTP/3 settings
 */
export interface IRouteQuic {
  /** QUIC connection idle timeout in ms. Default: 30000 */
  maxIdleTimeout?: number;
  /** Max concurrent bidirectional streams per QUIC connection. Default: 100 */
  maxConcurrentBidiStreams?: number;
  /** Max concurrent unidirectional streams per QUIC connection. Default: 100 */
  maxConcurrentUniStreams?: number;
  /** Enable HTTP/3 over this QUIC endpoint. Default: false */
  enableHttp3?: boolean;
  /** Port to advertise in Alt-Svc header on TCP HTTP responses. Default: listening port */
  altSvcPort?: number;
  /** Max age for Alt-Svc advertisement in seconds. Default: 86400 */
  altSvcMaxAge?: number;
  /** Initial congestion window size in bytes. Default: implementation-defined */
  initialCongestionWindow?: number;
}
// Configuration moved to models/interfaces.ts as ISmartProxyOptions

View File

@@ -74,6 +74,11 @@ export class RoutePreprocessor {
return true;
}
// Datagram handler routes always need TS
if (route.action.type === 'socket-handler' && route.action.datagramHandler) {
return true;
}
// Routes with dynamic host/port functions need TS
if (route.action.targets) {
for (const target of route.action.targets) {
@@ -92,8 +97,9 @@ export class RoutePreprocessor {
if (needsTsHandling) {
// Convert to socket-handler type for Rust (Rust will relay back to TS)
cleanAction.type = 'socket-handler';
// Remove the JS handler (not serializable)
// Remove the JS handlers (not serializable)
delete (cleanAction as any).socketHandler;
delete (cleanAction as any).datagramHandler;
}
// Clean targets - replace functions with static values

View File

@@ -1,4 +1,4 @@
import type { IMetrics, IThroughputData, IThroughputHistoryPoint } from './models/metrics-types.js';
import type { IMetrics, IBackendMetrics, IThroughputData, IThroughputHistoryPoint } from './models/metrics-types.js';
import type { RustProxyBridge } from './rust-proxy-bridge.js';
/**
@@ -72,12 +72,23 @@ export class RustMetricsAdapter implements IMetrics {
return result;
},
byIP: (): Map<string, number> => {
// Per-IP tracking not yet available from Rust
return new Map();
const result = new Map<string, number>();
if (this.cache?.ips) {
for (const [ip, im] of Object.entries(this.cache.ips)) {
result.set(ip, (im as any).activeConnections ?? 0);
}
}
return result;
},
topIPs: (_limit?: number): Array<{ ip: string; count: number }> => {
// Per-IP tracking not yet available from Rust
return [];
topIPs: (limit: number = 10): Array<{ ip: string; count: number }> => {
const result: Array<{ ip: string; count: number }> = [];
if (this.cache?.ips) {
for (const [ip, im] of Object.entries(this.cache.ips)) {
result.push({ ip, count: (im as any).activeConnections ?? 0 });
}
}
result.sort((a, b) => b.count - a.count);
return result.slice(0, limit);
},
};
@@ -100,9 +111,13 @@ export class RustMetricsAdapter implements IMetrics {
custom: (_seconds: number): IThroughputData => {
return this.throughput.instant();
},
history: (_seconds: number): Array<IThroughputHistoryPoint> => {
// Throughput history not yet available from Rust
return [];
history: (seconds: number): Array<IThroughputHistoryPoint> => {
if (!this.cache?.throughputHistory) return [];
return this.cache.throughputHistory.slice(-seconds).map((p: any) => ({
timestamp: p.timestampMs,
in: p.bytesIn,
out: p.bytesOut,
}));
},
byRoute: (_windowSeconds?: number): Map<string, IThroughputData> => {
const result = new Map<string, IThroughputData>();
@@ -117,21 +132,28 @@ export class RustMetricsAdapter implements IMetrics {
return result;
},
byIP: (_windowSeconds?: number): Map<string, IThroughputData> => {
return new Map();
const result = new Map<string, IThroughputData>();
if (this.cache?.ips) {
for (const [ip, im] of Object.entries(this.cache.ips)) {
result.set(ip, {
in: (im as any).throughputInBytesPerSec ?? 0,
out: (im as any).throughputOutBytesPerSec ?? 0,
});
}
}
return result;
},
};
public requests = {
perSecond: (): number => {
// Rust tracks connections, not HTTP requests (TCP-level proxy)
return 0;
return this.cache?.httpRequestsPerSec ?? 0;
},
perMinute: (): number => {
return 0;
return (this.cache?.httpRequestsPerSecRecent ?? 0) * 60;
},
total: (): number => {
// Use total connections as a proxy for total requests
return this.cache?.totalConnections ?? 0;
return this.cache?.totalHttpRequests ?? this.cache?.totalConnections ?? 0;
},
};
@@ -147,6 +169,62 @@ export class RustMetricsAdapter implements IMetrics {
},
};
public backends = {
byBackend: (): Map<string, IBackendMetrics> => {
const result = new Map<string, IBackendMetrics>();
if (this.cache?.backends) {
for (const [key, bm] of Object.entries(this.cache.backends)) {
const m = bm as any;
const totalTimeUs = m.totalConnectTimeUs ?? 0;
const count = m.connectCount ?? 0;
const poolHits = m.poolHits ?? 0;
const poolMisses = m.poolMisses ?? 0;
const poolTotal = poolHits + poolMisses;
result.set(key, {
protocol: m.protocol ?? 'unknown',
activeConnections: m.activeConnections ?? 0,
totalConnections: m.totalConnections ?? 0,
connectErrors: m.connectErrors ?? 0,
handshakeErrors: m.handshakeErrors ?? 0,
requestErrors: m.requestErrors ?? 0,
avgConnectTimeMs: count > 0 ? (totalTimeUs / count) / 1000 : 0,
poolHitRate: poolTotal > 0 ? poolHits / poolTotal : 0,
h2Failures: m.h2Failures ?? 0,
});
}
}
return result;
},
protocols: (): Map<string, string> => {
const result = new Map<string, string>();
if (this.cache?.backends) {
for (const [key, bm] of Object.entries(this.cache.backends)) {
result.set(key, (bm as any).protocol ?? 'unknown');
}
}
return result;
},
topByErrors: (limit: number = 10): Array<{ backend: string; errors: number }> => {
const result: Array<{ backend: string; errors: number }> = [];
if (this.cache?.backends) {
for (const [key, bm] of Object.entries(this.cache.backends)) {
const m = bm as any;
const errors = (m.connectErrors ?? 0) + (m.handshakeErrors ?? 0) + (m.requestErrors ?? 0);
if (errors > 0) result.push({ backend: key, errors });
}
}
result.sort((a, b) => b.errors - a.errors);
return result.slice(0, limit);
},
};
public udp = {
activeSessions: (): number => this.cache?.activeUdpSessions ?? 0,
totalSessions: (): number => this.cache?.totalUdpSessions ?? 0,
datagramsIn: (): number => this.cache?.totalDatagramsIn ?? 0,
datagramsOut: (): number => this.cache?.totalDatagramsOut ?? 0,
};
public percentiles = {
connectionDuration: (): { p50: number; p95: number; p99: number } => {
return { p50: 0, p95: 0, p99: 0 };

View File

@@ -20,6 +20,7 @@ type TSmartProxyCommands = {
addListeningPort: { params: { port: number }; result: void };
removeListeningPort: { params: { port: number }; result: void };
loadCertificate: { params: { domain: string; cert: string; key: string; ca?: string }; result: void };
setDatagramHandlerRelay: { params: { socketPath: string }; result: void };
};
/**
@@ -177,4 +178,8 @@ export class RustProxyBridge extends plugins.EventEmitter {
public async loadCertificate(domain: string, cert: string, key: string, ca?: string): Promise<void> {
await this.bridge.sendCommand('loadCertificate', { domain, cert, key, ca });
}
public async setDatagramHandlerRelay(socketPath: string): Promise<void> {
await this.bridge.sendCommand('setDatagramHandlerRelay', { socketPath });
}
}

View File

@@ -5,6 +5,7 @@ import { logger } from '../../core/utils/logger.js';
import { RustProxyBridge } from './rust-proxy-bridge.js';
import { RoutePreprocessor } from './route-preprocessor.js';
import { SocketHandlerServer } from './socket-handler-server.js';
import { DatagramHandlerServer } from './datagram-handler-server.js';
import { RustMetricsAdapter } from './rust-metrics-adapter.js';
// Route management
@@ -12,6 +13,7 @@ import { SharedRouteManager as RouteManager } from '../../core/routing/route-man
import { RouteValidator } from './utils/route-validator.js';
import { generateDefaultCertificate } from './utils/default-cert-generator.js';
import { Mutex } from './utils/mutex.js';
import { ConcurrencySemaphore } from './utils/concurrency-semaphore.js';
// Types
import type { ISmartProxyOptions, TSmartProxyCertProvisionObject, IAcmeOptions, ICertProvisionEventComms, ICertificateIssuedEvent, ICertificateFailedEvent } from './models/interfaces.js';
@@ -35,9 +37,11 @@ export class SmartProxy extends plugins.EventEmitter {
private bridge: RustProxyBridge;
private preprocessor: RoutePreprocessor;
private socketHandlerServer: SocketHandlerServer | null = null;
private datagramHandlerServer: DatagramHandlerServer | null = null;
private metricsAdapter: RustMetricsAdapter;
private routeUpdateLock: Mutex;
private stopping = false;
private certProvisionPromise: Promise<void> | null = null;
constructor(settingsArg: ISmartProxyOptions) {
super();
@@ -45,16 +49,16 @@ export class SmartProxy extends plugins.EventEmitter {
// Apply defaults
this.settings = {
...settingsArg,
initialDataTimeout: settingsArg.initialDataTimeout || 120000,
socketTimeout: settingsArg.socketTimeout || 3600000,
maxConnectionLifetime: settingsArg.maxConnectionLifetime || 86400000,
inactivityTimeout: settingsArg.inactivityTimeout || 14400000,
gracefulShutdownTimeout: settingsArg.gracefulShutdownTimeout || 30000,
initialDataTimeout: settingsArg.initialDataTimeout || 60_000,
socketTimeout: settingsArg.socketTimeout || 60_000,
maxConnectionLifetime: settingsArg.maxConnectionLifetime || 3_600_000,
inactivityTimeout: settingsArg.inactivityTimeout || 75_000,
gracefulShutdownTimeout: settingsArg.gracefulShutdownTimeout || 30_000,
maxConnectionsPerIP: settingsArg.maxConnectionsPerIP || 100,
connectionRateLimitPerMinute: settingsArg.connectionRateLimitPerMinute || 300,
keepAliveTreatment: settingsArg.keepAliveTreatment || 'extended',
keepAliveInactivityMultiplier: settingsArg.keepAliveInactivityMultiplier || 6,
extendedKeepAliveLifetime: settingsArg.extendedKeepAliveLifetime || 7 * 24 * 60 * 60 * 1000,
keepAliveTreatment: settingsArg.keepAliveTreatment || 'standard',
keepAliveInactivityMultiplier: settingsArg.keepAliveInactivityMultiplier || 4,
extendedKeepAliveLifetime: settingsArg.extendedKeepAliveLifetime || 3_600_000,
};
// Normalize ACME options
@@ -143,6 +147,16 @@ export class SmartProxy extends plugins.EventEmitter {
await this.socketHandlerServer.start();
}
// Check if any routes need datagram handler relay (UDP socket-handler routes)
const hasDatagramHandlers = this.settings.routes.some(
(r) => r.action.type === 'socket-handler' && r.action.datagramHandler
);
if (hasDatagramHandlers) {
const dgPath = `/tmp/smartproxy-dgram-relay-${process.pid}.sock`;
this.datagramHandlerServer = new DatagramHandlerServer(dgPath, this.preprocessor);
await this.datagramHandlerServer.start();
}
// Preprocess routes (strip JS functions, convert socket-handler routes)
const rustRoutes = this.preprocessor.preprocessForRust(this.settings.routes);
@@ -165,6 +179,11 @@ export class SmartProxy extends plugins.EventEmitter {
await this.bridge.setSocketHandlerRelay(this.socketHandlerServer.getSocketPath());
}
// Configure datagram handler relay
if (this.datagramHandlerServer) {
await this.bridge.setDatagramHandlerRelay(this.datagramHandlerServer.getSocketPath());
}
// Load default self-signed fallback certificate (domain: '*')
if (!this.settings.disableDefaultCert) {
try {
@@ -191,13 +210,18 @@ export class SmartProxy extends plugins.EventEmitter {
}
}
// Handle certProvisionFunction
await this.provisionCertificatesViaCallback(preloadedDomains);
// Start metrics polling
// Start metrics polling BEFORE cert provisioning — the Rust engine is already
// running and accepting connections, so metrics should be available immediately.
// Cert provisioning can hang indefinitely (e.g. DNS-01 ACME timeouts) and must
// not block metrics collection.
this.metricsAdapter.startPolling();
logger.log('info', 'SmartProxy started (Rust engine)', { component: 'smart-proxy' });
// Fire-and-forget cert provisioning — Rust engine is already running and serving traffic.
// Events (certificate-issued / certificate-failed) fire independently per domain.
this.certProvisionPromise = this.provisionCertificatesViaCallback(preloadedDomains)
.catch((err) => logger.log('error', `Unexpected error in cert provisioning: ${err.message}`, { component: 'smart-proxy' }));
}
/**
@@ -207,6 +231,12 @@ export class SmartProxy extends plugins.EventEmitter {
logger.log('info', 'SmartProxy shutting down...', { component: 'smart-proxy' });
this.stopping = true;
// Wait for in-flight cert provisioning to bail out (it checks this.stopping)
if (this.certProvisionPromise) {
await this.certProvisionPromise;
this.certProvisionPromise = null;
}
// Stop metrics polling
this.metricsAdapter.stopPolling();
@@ -227,6 +257,12 @@ export class SmartProxy extends plugins.EventEmitter {
this.socketHandlerServer = null;
}
// Stop datagram handler relay
if (this.datagramHandlerServer) {
await this.datagramHandlerServer.stop();
this.datagramHandlerServer = null;
}
logger.log('info', 'SmartProxy shutdown complete.', { component: 'smart-proxy' });
}
@@ -234,7 +270,7 @@ export class SmartProxy extends plugins.EventEmitter {
* Update routes atomically.
*/
public async updateRoutes(newRoutes: IRouteConfig[]): Promise<void> {
return this.routeUpdateLock.runExclusive(async () => {
await this.routeUpdateLock.runExclusive(async () => {
// Validate
const validation = RouteValidator.validateRoutes(newRoutes);
if (!validation.valid) {
@@ -267,14 +303,31 @@ export class SmartProxy extends plugins.EventEmitter {
this.socketHandlerServer = null;
}
// Update datagram handler relay if datagram handler routes changed
const hasDatagramHandlers = newRoutes.some(
(r) => r.action.type === 'socket-handler' && r.action.datagramHandler
);
if (hasDatagramHandlers && !this.datagramHandlerServer) {
const dgPath = `/tmp/smartproxy-dgram-relay-${process.pid}.sock`;
this.datagramHandlerServer = new DatagramHandlerServer(dgPath, this.preprocessor);
await this.datagramHandlerServer.start();
await this.bridge.setDatagramHandlerRelay(this.datagramHandlerServer.getSocketPath());
} else if (!hasDatagramHandlers && this.datagramHandlerServer) {
await this.datagramHandlerServer.stop();
this.datagramHandlerServer = null;
}
// Update stored routes
this.settings.routes = newRoutes;
// Handle cert provisioning for new routes
await this.provisionCertificatesViaCallback();
logger.log('info', `Routes updated (${newRoutes.length} routes)`, { component: 'smart-proxy' });
});
// Fire-and-forget cert provisioning outside the mutex — routes are already updated,
// cert provisioning doesn't need the route update lock and may be slow.
this.certProvisionPromise = this.provisionCertificatesViaCallback()
.catch((err) => logger.log('error', `Unexpected error in cert provisioning after route update: ${err.message}`, { component: 'smart-proxy' }));
}
/**
@@ -394,6 +447,7 @@ export class SmartProxy extends plugins.EventEmitter {
keepAliveTreatment: this.settings.keepAliveTreatment,
keepAliveInactivityMultiplier: this.settings.keepAliveInactivityMultiplier,
extendedKeepAliveLifetime: this.settings.extendedKeepAliveLifetime,
proxyIps: this.settings.proxyIPs,
acceptProxyProtocol: this.settings.acceptProxyProtocol,
sendProxyProtocol: this.settings.sendProxyProtocol,
metrics: this.settings.metrics,
@@ -409,7 +463,9 @@ export class SmartProxy extends plugins.EventEmitter {
const provisionFn = this.settings.certProvisionFunction;
if (!provisionFn) return;
const provisionedDomains = new Set<string>(skipDomains);
// Phase 1: Collect all unique (domain, route) pairs that need provisioning
const seen = new Set<string>(skipDomains);
const tasks: Array<{ domain: string; route: IRouteConfig }> = [];
for (const route of this.settings.routes) {
if (route.action.tls?.certificate !== 'auto') continue;
@@ -419,91 +475,139 @@ export class SmartProxy extends plugins.EventEmitter {
const certDomains = this.normalizeDomainsForCertProvisioning(rawDomains);
for (const domain of certDomains) {
if (provisionedDomains.has(domain)) continue;
provisionedDomains.add(domain);
if (seen.has(domain)) continue;
seen.add(domain);
tasks.push({ domain, route });
}
}
// Build eventComms channel for this domain
let expiryDate: string | undefined;
let source = 'certProvisionFunction';
if (tasks.length === 0) return;
const eventComms: ICertProvisionEventComms = {
log: (msg) => logger.log('info', `[certProvision ${domain}] ${msg}`, { component: 'smart-proxy' }),
warn: (msg) => logger.log('warn', `[certProvision ${domain}] ${msg}`, { component: 'smart-proxy' }),
error: (msg) => logger.log('error', `[certProvision ${domain}] ${msg}`, { component: 'smart-proxy' }),
setExpiryDate: (date) => { expiryDate = date.toISOString(); },
setSource: (s) => { source = s; },
};
// Phase 2: Process all domains in parallel with concurrency limit
const concurrency = this.settings.certProvisionConcurrency ?? 4;
const semaphore = new ConcurrencySemaphore(concurrency);
const promises = tasks.map(async ({ domain, route }) => {
await semaphore.acquire();
try {
await this.provisionSingleDomain(domain, route, provisionFn);
} finally {
semaphore.release();
}
});
await Promise.allSettled(promises);
}
/**
* Provision a single domain's certificate via the callback.
* Includes per-domain timeout and shutdown checks.
*/
private async provisionSingleDomain(
domain: string,
route: IRouteConfig,
provisionFn: (domain: string, eventComms: ICertProvisionEventComms) => Promise<TSmartProxyCertProvisionObject>,
): Promise<void> {
if (this.stopping) return;
let expiryDate: string | undefined;
let source = 'certProvisionFunction';
const eventComms: ICertProvisionEventComms = {
log: (msg) => logger.log('info', `[certProvision ${domain}] ${msg}`, { component: 'smart-proxy' }),
warn: (msg) => logger.log('warn', `[certProvision ${domain}] ${msg}`, { component: 'smart-proxy' }),
error: (msg) => logger.log('error', `[certProvision ${domain}] ${msg}`, { component: 'smart-proxy' }),
setExpiryDate: (date) => { expiryDate = date.toISOString(); },
setSource: (s) => { source = s; },
};
const timeoutMs = this.settings.certProvisionTimeout ?? 300_000; // 5 min default
try {
const result: TSmartProxyCertProvisionObject = await this.withTimeout(
provisionFn(domain, eventComms),
timeoutMs,
`Certificate provisioning timed out for ${domain} after ${timeoutMs}ms`,
);
if (this.stopping) return;
if (result === 'http01') {
if (route.name) {
try {
await this.bridge.provisionCertificate(route.name);
logger.log('info', `Triggered Rust ACME for ${domain} (route: ${route.name})`, { component: 'smart-proxy' });
} catch (provisionErr: any) {
logger.log('warn', `Cannot provision cert for ${domain} — callback returned 'http01' but Rust ACME failed: ${provisionErr.message}. ` +
'Note: Rust ACME is disabled when certProvisionFunction is set.', { component: 'smart-proxy' });
}
}
return;
}
if (result && typeof result === 'object') {
if (this.stopping) return;
const certObj = result as plugins.tsclass.network.ICert;
await this.bridge.loadCertificate(
domain,
certObj.publicKey,
certObj.privateKey,
);
logger.log('info', `Certificate loaded via provision function for ${domain}`, { component: 'smart-proxy' });
// Persist to consumer store
if (this.settings.certStore?.save) {
try {
await this.settings.certStore.save(domain, certObj.publicKey, certObj.privateKey);
} catch (storeErr: any) {
logger.log('warn', `certStore.save() failed for ${domain}: ${storeErr.message}`, { component: 'smart-proxy' });
}
}
this.emit('certificate-issued', {
domain,
expiryDate: expiryDate || (certObj.validUntil ? new Date(certObj.validUntil).toISOString() : undefined),
source,
} satisfies ICertificateIssuedEvent);
}
} catch (err: any) {
logger.log('warn', `certProvisionFunction failed for ${domain}: ${err.message}`, { component: 'smart-proxy' });
this.emit('certificate-failed', {
domain,
error: err.message,
source,
} satisfies ICertificateFailedEvent);
// Fallback to ACME if enabled and route has a name
if (this.settings.certProvisionFallbackToAcme !== false && route.name) {
try {
const result: TSmartProxyCertProvisionObject = await provisionFn(domain, eventComms);
if (result === 'http01') {
// Callback wants HTTP-01 for this domain — trigger Rust ACME explicitly
if (route.name) {
try {
await this.bridge.provisionCertificate(route.name);
logger.log('info', `Triggered Rust ACME for ${domain} (route: ${route.name})`, { component: 'smart-proxy' });
} catch (provisionErr: any) {
logger.log('warn', `Cannot provision cert for ${domain} — callback returned 'http01' but Rust ACME failed: ${provisionErr.message}. ` +
'Note: Rust ACME is disabled when certProvisionFunction is set.', { component: 'smart-proxy' });
}
}
continue;
}
// Got a static cert object - load it into Rust
if (result && typeof result === 'object') {
const certObj = result as plugins.tsclass.network.ICert;
await this.bridge.loadCertificate(
domain,
certObj.publicKey,
certObj.privateKey,
);
logger.log('info', `Certificate loaded via provision function for ${domain}`, { component: 'smart-proxy' });
// Persist to consumer store
if (this.settings.certStore?.save) {
try {
await this.settings.certStore.save(domain, certObj.publicKey, certObj.privateKey);
} catch (storeErr: any) {
logger.log('warn', `certStore.save() failed for ${domain}: ${storeErr.message}`, { component: 'smart-proxy' });
}
}
// Emit certificate-issued event
this.emit('certificate-issued', {
domain,
expiryDate: expiryDate || (certObj.validUntil ? new Date(certObj.validUntil).toISOString() : undefined),
source,
} satisfies ICertificateIssuedEvent);
}
} catch (err: any) {
logger.log('warn', `certProvisionFunction failed for ${domain}: ${err.message}`, { component: 'smart-proxy' });
// Emit certificate-failed event
this.emit('certificate-failed', {
domain,
error: err.message,
source,
} satisfies ICertificateFailedEvent);
// Fallback to ACME if enabled and route has a name
if (this.settings.certProvisionFallbackToAcme !== false && route.name) {
try {
await this.bridge.provisionCertificate(route.name);
logger.log('info', `Falling back to Rust ACME for ${domain} (route: ${route.name})`, { component: 'smart-proxy' });
} catch (acmeErr: any) {
logger.log('warn', `ACME fallback also failed for ${domain}: ${acmeErr.message}` +
(this.settings.disableDefaultCert
? ' — TLS will fail for this domain (disableDefaultCert is true)'
: ' — default self-signed fallback cert will be used'), { component: 'smart-proxy' });
}
}
await this.bridge.provisionCertificate(route.name);
logger.log('info', `Falling back to Rust ACME for ${domain} (route: ${route.name})`, { component: 'smart-proxy' });
} catch (acmeErr: any) {
logger.log('warn', `ACME fallback also failed for ${domain}: ${acmeErr.message}` +
(this.settings.disableDefaultCert
? ' — TLS will fail for this domain (disableDefaultCert is true)'
: ' — default self-signed fallback cert will be used'), { component: 'smart-proxy' });
}
}
}
}
/**
 * Race a promise against a timeout. Rejects with the given message if the timeout fires first.
 */
private withTimeout<T>(promise: Promise<T>, ms: number, message: string): Promise<T> {
  let timer: ReturnType<typeof setTimeout> | undefined;
  // A promise that never resolves — it can only reject once the deadline passes.
  const deadline = new Promise<never>((_, reject) => {
    timer = setTimeout(() => reject(new Error(message)), ms);
  });
  // Whichever settles first wins; the timer is cleared afterwards in every case
  // so it cannot keep firing (or keep the process alive) after settlement.
  return Promise.race([promise, deadline]).finally(() => {
    if (timer !== undefined) clearTimeout(timer);
  });
}
/**
* Normalize routing glob patterns into valid domain identifiers for cert provisioning.
* - `*nevermind.cloud` → `['nevermind.cloud', '*.nevermind.cloud']`

View File

@@ -92,6 +92,16 @@ export class SocketHandlerServer {
let metadataBuffer = '';
let metadataParsed = false;
// 10s timeout for metadata parsing phase — if Rust connects but never
// sends the JSON metadata line, don't hold the socket open indefinitely.
socket.setTimeout(10_000);
socket.on('timeout', () => {
if (!metadataParsed) {
logger.log('warn', 'Socket handler metadata timeout, closing', { component: 'socket-handler-server' });
socket.destroy();
}
});
const onData = (chunk: Buffer) => {
if (metadataParsed) return;
@@ -108,6 +118,7 @@ export class SocketHandlerServer {
}
metadataParsed = true;
socket.setTimeout(0); // Clear metadata timeout
socket.removeListener('data', onData);
socket.pause(); // Prevent data loss between handler removal and pipe setup
@@ -254,11 +265,30 @@ export class SocketHandlerServer {
// Connect to the resolved target
const backend = plugins.net.connect(port, host, () => {
// Connection established — set idle timeout on both sides (5 min)
socket.setTimeout(300_000);
backend.setTimeout(300_000);
// Pipe bidirectionally
socket.pipe(backend);
backend.pipe(socket);
});
// Connect timeout: if backend doesn't connect within 30s, destroy both
backend.setTimeout(30_000);
backend.on('timeout', () => {
logger.log('warn', `Dynamic forward timeout to ${host}:${port}`, { component: 'socket-handler-server' });
backend.destroy();
socket.destroy();
});
socket.on('timeout', () => {
logger.log('debug', `Dynamic forward client idle timeout`, { component: 'socket-handler-server' });
socket.destroy();
backend.destroy();
});
backend.on('error', (err) => {
logger.log('error', `Dynamic forward backend error: ${err.message}`, { component: 'socket-handler-server' });
socket.destroy();

View File

@@ -0,0 +1,28 @@
/**
 * Async concurrency semaphore — limits the number of concurrent async operations.
 *
 * Usage: `await sem.acquire()` before starting the operation and call
 * `sem.release()` (ideally in a `finally` block) exactly once when it finishes.
 * Waiters are woken in FIFO order.
 */
export class ConcurrencySemaphore {
  // Number of slots currently held.
  private running = 0;
  // FIFO queue of waiters; each entry claims a slot and resolves its acquire().
  private waitQueue: Array<() => void> = [];

  /**
   * @param maxConcurrency Maximum number of operations allowed to run at once.
   * @throws RangeError if maxConcurrency is not a positive integer — a limit
   *         of 0 (or less) would make every acquire() wait forever, turning a
   *         configuration mistake into a silent deadlock.
   */
  constructor(private readonly maxConcurrency: number) {
    if (!Number.isInteger(maxConcurrency) || maxConcurrency < 1) {
      throw new RangeError(`maxConcurrency must be a positive integer, got ${maxConcurrency}`);
    }
  }

  /**
   * Acquire a slot. Resolves immediately when a slot is free; otherwise the
   * caller is queued until release() frees one.
   */
  async acquire(): Promise<void> {
    if (this.running < this.maxConcurrency) {
      this.running++;
      return;
    }
    return new Promise<void>((resolve) => {
      this.waitQueue.push(() => {
        this.running++;
        resolve();
      });
    });
  }

  /**
   * Release a previously acquired slot and wake the next waiter, if any.
   * Must be called exactly once per successful acquire().
   *
   * @throws Error on an unbalanced release — without this guard `running`
   *         would go negative and silently raise the effective concurrency
   *         limit for all future acquirers.
   */
  release(): void {
    if (this.running <= 0) {
      throw new Error('ConcurrencySemaphore.release() called without a matching acquire()');
    }
    this.running--;
    const next = this.waitQueue.shift();
    if (next) next();
  }
}

View File

@@ -17,6 +17,9 @@ export * from './route-utils.js';
// Export default certificate generator
export { generateDefaultCertificate } from './default-cert-generator.js';
// Export concurrency semaphore
export { ConcurrencySemaphore } from './concurrency-semaphore.js';
// Export additional functions from route-helpers that weren't already exported
export {
createApiGatewayRoute,

View File

@@ -123,10 +123,10 @@ export class RouteValidator {
errors.push(`Invalid action type: ${route.action.type}. Must be one of: ${this.VALID_ACTION_TYPES.join(', ')}`);
}
// Validate socket-handler
// Validate socket-handler (TCP socketHandler or UDP datagramHandler)
if (route.action.type === 'socket-handler') {
if (typeof route.action.socketHandler !== 'function') {
errors.push('socket-handler action requires a socketHandler function');
if (typeof route.action.socketHandler !== 'function' && typeof route.action.datagramHandler !== 'function') {
errors.push('socket-handler action requires a socketHandler or datagramHandler function');
}
}
@@ -620,10 +620,12 @@ export function validateRouteAction(action: IRouteAction): { valid: boolean; err
}
if (action.type === 'socket-handler') {
if (!action.socketHandler) {
errors.push('Socket handler function is required for socket-handler action');
} else if (typeof action.socketHandler !== 'function') {
if (!action.socketHandler && !action.datagramHandler) {
errors.push('Socket handler or datagram handler function is required for socket-handler action');
} else if (action.socketHandler && typeof action.socketHandler !== 'function') {
errors.push('Socket handler must be a function');
} else if (action.datagramHandler && typeof action.datagramHandler !== 'function') {
errors.push('Datagram handler must be a function');
}
}
@@ -714,7 +716,8 @@ export function hasRequiredPropertiesForAction(route: IRouteConfig, actionType:
route.action.targets.length > 0 &&
route.action.targets.every(t => t.host && t.port !== undefined);
case 'socket-handler':
return !!route.action.socketHandler && typeof route.action.socketHandler === 'function';
return (!!route.action.socketHandler && typeof route.action.socketHandler === 'function') ||
(!!route.action.datagramHandler && typeof route.action.datagramHandler === 'function');
default:
return false;
}