Compare commits


49 Commits

Author SHA1 Message Date
e4e4b4f1ec v25.7.2 2026-02-16 13:43:22 +00:00
d361a21543 fix(rustproxy-http): preserve original Host header when proxying and add X-Forwarded-* headers; add TLS WebSocket echo backend helper and integration test for terminate-and-reencrypt websocket 2026-02-16 13:43:22 +00:00
106713a546 v25.7.1 2026-02-16 13:29:45 +00:00
101675b5f8 fix(proxy): use TLS to backends for terminate-and-reencrypt routes 2026-02-16 13:29:45 +00:00
9fac17bc39 v25.7.0 2026-02-16 12:11:49 +00:00
2e3cf515a4 feat(routes): add protocol-based route matching and ensure terminate-and-reencrypt routes HTTP through the full HTTP proxy; update docs and tests 2026-02-16 12:11:49 +00:00
754d32fd34 v25.6.0 2026-02-16 12:02:36 +00:00
f0b7c27996 feat(rustproxy): add protocol-based routing and backend TLS re-encryption support 2026-02-16 12:02:36 +00:00
db932e8acc v25.5.0 2026-02-16 03:00:39 +00:00
455d5bb757 feat(tls): add shared TLS acceptor with SNI resolver and session resumption; prefer shared acceptor and fall back to per-connection when routes specify custom TLS versions 2026-02-16 03:00:39 +00:00
fa2a27df6d v25.4.0 2026-02-16 01:37:43 +00:00
7b2ccbdd11 feat(rustproxy): support dynamically loaded TLS certificates via loadCertificate IPC and include them in listener TLS configs for rebuilds and hot-swap 2026-02-16 01:37:43 +00:00
8409984fcc v25.3.1 2026-02-15 15:05:03 +00:00
af10d189a3 fix(plugins): remove unused dependencies and simplify plugin exports 2026-02-15 15:05:03 +00:00
0b4d180cdf v25.3.0 2026-02-14 14:02:25 +00:00
7b3545d1b5 feat(smart-proxy): add background concurrent certificate provisioning with per-domain timeouts and concurrency control 2026-02-14 14:02:25 +00:00
e837419d5d v25.2.2 2026-02-14 12:42:20 +00:00
487a603fa3 fix(smart-proxy): start metrics polling before certificate provisioning to avoid blocking metrics collection 2026-02-14 12:42:20 +00:00
d6fdd3fc86 v25.2.1 2026-02-14 12:28:42 +00:00
344f224c89 fix(smartproxy): no changes detected in git diff 2026-02-14 12:28:42 +00:00
6bbd2b3ee1 test(metrics): add v25.2.0 end-to-end assertions for per-IP, history, and HTTP request metrics 2026-02-14 12:24:48 +00:00
c44216df28 v25.2.0 2026-02-14 11:15:17 +00:00
f80cdcf41c feat(metrics): add per-IP and HTTP-request metrics, propagate source IP through proxy paths, and expose new metrics to the TS adapter 2026-02-14 11:15:17 +00:00
6c84aedee1 v25.1.0 2026-02-13 23:18:22 +00:00
1f95d2b6c4 feat(metrics): add real-time throughput sampling and byte-counting metrics 2026-02-13 23:18:22 +00:00
37372353d7 v25.0.0 2026-02-13 21:24:16 +00:00
7afa4c4c58 BREAKING CHANGE(certs): accept a second eventComms argument in certProvisionFunction, add cert provisioning event types, and emit certificate lifecycle events 2026-02-13 21:24:16 +00:00
998662e137 v24.0.1 2026-02-13 16:57:46 +00:00
a8f8946a4d fix(proxy): improve proxy robustness: add connect timeouts, graceful shutdown, WebSocket watchdog, and metrics guard 2026-02-13 16:57:46 +00:00
07e464fdac v24.0.0 2026-02-13 16:32:02 +00:00
0e058594c9 BREAKING CHANGE(smart-proxy): move certificate persistence to an in-memory store and introduce consumer-managed certStore API; add default self-signed fallback cert and change ACME account handling 2026-02-13 16:32:02 +00:00
e0af82c1ef v23.1.6 2026-02-13 13:08:30 +00:00
efe3d80713 fix(smart-proxy): disable built-in Rust ACME when a certProvisionFunction is provided and improve certificate provisioning flow 2026-02-13 13:08:30 +00:00
6b04bc612b v23.1.5 2026-02-13 12:02:47 +00:00
e774ec87ca fix(smart-proxy): provision certificates for wildcard domains instead of skipping them 2026-02-13 12:02:47 +00:00
cbde778f09 v23.1.4 2026-02-12 22:35:25 +00:00
bc2bc874a5 fix(tests): make tests more robust and bump small dependencies 2026-02-12 22:35:25 +00:00
fdabf807b0 v23.1.3 2026-02-12 20:17:32 +00:00
81e0e6b4d8 fix(rustproxy): install default rustls crypto provider early; detect and skip raw fast-path for HTTP connections and return proper HTTP 502 when no route matches 2026-02-12 20:17:32 +00:00
28fa69bf59 v23.1.2 2026-02-11 13:48:30 +00:00
5019658032 fix(core): use node: scoped builtin imports and add route unit tests 2026-02-11 13:48:30 +00:00
a9fe365c78 v23.1.1 2026-02-11 12:52:45 +00:00
32e0410227 fix(rust-proxy): increase rust proxy bridge maxPayloadSize to 100 MB and bump dependencies 2026-02-11 12:52:45 +00:00
fd56064495 v23.1.0 2026-02-10 09:43:40 +00:00
3b7e6a6ed7 feat(rust-bridge): integrate tsrust to build and locate cross-compiled Rust binaries; refactor rust-proxy bridge to use typed IPC and streamline process handling; add @push.rocks/smartrust and update build/dev dependencies 2026-02-10 09:43:40 +00:00
131ed8949a v23.0.0 2026-02-09 17:11:37 +00:00
7b3009dc53 BREAKING CHANGE(proxies/nftables-proxy): remove nftables-proxy implementation, models, and utilities from the repository 2026-02-09 17:11:37 +00:00
db2e2fb76e v22.6.0 2026-02-09 16:25:33 +00:00
f7605e042e feat(smart-proxy): add socket-handler relay, fast-path port-only forwarding, metrics and bridge improvements, and various TS/Rust integration fixes 2026-02-09 16:25:33 +00:00
79 changed files with 13713 additions and 5432 deletions

View File

@@ -1,19 +1,20 @@
-----BEGIN CERTIFICATE-----
MIIDCzCCAfOgAwIBAgIUPU4tviz3ZvsMDjCz1NZRT16b0Y4wDQYJKoZIhvcNAQEL
BQAwFTETMBEGA1UEAwwKcHVzaC5yb2NrczAeFw0yNTAyMDMyMzA5MzRaFw0yNjAy
MDMyMzA5MzRaMBUxEzARBgNVBAMMCnB1c2gucm9ja3MwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQCZMkBYD/pYLBv9MiyHTLRT24kQyPeJBtZqryibi1jk
BT1ZgNl3yo5U6kjj/nYBU/oy7M4OFC0xyaJQ4wpvLHu7xzREqwT9N9WcDcxaahUi
P8+PsjGyznPrtXa1ASzGAYMNvXyWWp3351UWZHMEs6eY/Y7i8m4+0NwP5h8RNBCF
KSFS41Ee9rNAMCnQSHZv1vIzKeVYPmYnCVmL7X2kQb+gS6Rvq5sEGLLKMC5QtTwI
rdkPGpx4xZirIyf8KANbt0sShwUDpiCSuOCtpze08jMzoHLG9Nv97cJQjb/BhiES
hLL+YjfAUFjq0rQ38zFKLJ87QB9Jym05mY6IadGQLXVXAgMBAAGjUzBRMB0GA1Ud
DgQWBBQjpowWjrql/Eo2EVjl29xcjuCgkTAfBgNVHSMEGDAWgBQjpowWjrql/Eo2
EVjl29xcjuCgkTAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAY
44vqbaf6ewFrZC0f3Kk4A10lC6qjWkcDFfw+JE8nzt+4+xPqp1eWgZKF2rONyAv2
nG41Xygt19ByancXLU44KB24LX8F1GV5Oo7CGBA+xtoSPc0JulXw9fGclZDC6XiR
P/+vhGgCHicbfP2O+N00pOifrTtf2tmOT4iPXRRo4TxmPzuCd+ZJTlBhPlKCmICq
yGdAiEo6HsSiP+M5qVlNx8s57MhQYk5TpgmI6FU4mO7zfDfSatFonlg+aDbrnaqJ
v/+km02M+oB460GmKwsSTnThHZgLNCLiKqD8bdziiCQjx5u0GjLI6468o+Aehb8l
l/x9vWTTk/QKq41X5hFk
MIIDQTCCAimgAwIBAgIUJm+igT1AVSuwNzjvqjSF6cysw6MwDQYJKoZIhvcNAQEL
BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTI2MDIxMzIyMzI1MloXDTM2MDIx
MTIyMzI1MlowFDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAyjitkDd4DdlVk4TfVxKUqdxnJCGj9uyrUPAqR8hB6+bR
+8rW63ryBNYNRizxOGw41E19ascNuyA98mUF4oqjid1K4VqDiKzv1Uq/3NUunCw/
rEddR5hCoVkTsBJjzNgBJqncS606v0hfA00cCkpGR+Te7Q/E8T8lApioz1zFQ05Y
C69oeJHIrJcrIkIFAgmXDgRF0Z4ErUeu+wVOWT267uVAYn5AdFMxCSIBsYtPseqy
cC5EQ6BCBtsIGitlRgzLRg957ZZa+SF38ao+/ijYmOLHpQT0mFaUyLT7BKgxguGs
8CHcIxN5Qo27J3sC5ymnrv2uk5DcAOUcxklXUbVCeQIDAQABo4GKMIGHMB0GA1Ud
DgQWBBShZhz7aX/KhleAfYKvTgyG5ANuDjAfBgNVHSMEGDAWgBShZhz7aX/KhleA
fYKvTgyG5ANuDjAPBgNVHRMBAf8EBTADAQH/MDQGA1UdEQQtMCuCCWxvY2FsaG9z
dIIKcHVzaC5yb2Nrc4IMKi5wdXNoLnJvY2tzhwR/AAABMA0GCSqGSIb3DQEBCwUA
A4IBAQAyUvjUszQp4riqa3CfBFFtjh+7DKNuQPOlYAwSEis4l+YK06Glx4fJBHcx
eCPhQ/0wnPzi6CZe3vVRXd5fX27nVs+lMQD6Oc47F8OmTU6NXnb/1AcvrycDsP8D
9Y9qecekbpegrN1W4D46goBAwvrd6Qy0EHi0Z5z02rfyXAdxm0OmdpuWoIMcEgUQ
YyXIq3zSFE6uoO61WdLvBcXN6iaiSTVy0605WncDe2+UT9MeNq6zi1JD34jsgUrd
xq0WRUk2C6C4Irkf00Q12rXeL+Jv5OwyrUUZRvz0gLgG02UUbB/6Ca5GYNXniEuI
Py/EHTqbtjLIs7HxYjQH86FI9fUj
-----END CERTIFICATE-----

View File

@@ -1,28 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCZMkBYD/pYLBv9
MiyHTLRT24kQyPeJBtZqryibi1jkBT1ZgNl3yo5U6kjj/nYBU/oy7M4OFC0xyaJQ
4wpvLHu7xzREqwT9N9WcDcxaahUiP8+PsjGyznPrtXa1ASzGAYMNvXyWWp3351UW
ZHMEs6eY/Y7i8m4+0NwP5h8RNBCFKSFS41Ee9rNAMCnQSHZv1vIzKeVYPmYnCVmL
7X2kQb+gS6Rvq5sEGLLKMC5QtTwIrdkPGpx4xZirIyf8KANbt0sShwUDpiCSuOCt
pze08jMzoHLG9Nv97cJQjb/BhiEShLL+YjfAUFjq0rQ38zFKLJ87QB9Jym05mY6I
adGQLXVXAgMBAAECggEARGCBBq1PBHbfoUH5TQSIAlvdEEBa9+602lZG7jIioVfT
W7Uem5Ctuan+kcDcY9hbNsqqZ+9KgsvoJmlIGXoF2jjeE/4vUmRO9AHWoc5yk2Be
4NjcxN3QMLdEfiLBnLlFCOd4CdX1ZxZ6TG3WRpV3a1pVIeeqHGB1sKT6Xd/atcwG
RvpiXzu0SutGxVb6WE9r6hovZ4fVERCyCRczUGrUH5ICbxf6E7L4u8xjEYR4uEKK
/8ZkDqrWdRASDAdPPMNqnHUEAho/WnxpNeb6B4lvvv2QWxIS9H1OikF/NzWPgVNS
oPpvtJgjyo5xdgLm3zE4lcSPNVSrh1TBXuAn9TG4WQKBgQDScPFkUNBqjC5iPMof
bqDHlhlptrHmiv9LC0lgjEDPgIEQfjLfdCugwDk32QyAcb5B60upDYeqCFDkfV/C
T536qxevYPjPAjahLPHqMxkWpjvtY6NOTgbbcpVtblU2Fj8R8qbyPNADG31LicU9
GVPtQ4YcVaMWCYbg5107+9dFWQKBgQC6XK+foKK+81RFdrqaNNgebTWTsANnBcZe
xl0bj6oL5yY0IzroxHvgcNS7UMriWCu+K2xfkUBdMmxU773VN5JQ5k15ezjgtrvc
8oAaEsxYP4su12JSTC/zsBANUgrNbFj8++qqKYWt2aQc2O/kbZ4MNfekIVFc8AjM
2X9PxvxKLwKBgHXL7QO3TQLnVyt8VbQEjBFMzwriznB7i+4o8jkOKVU93IEr8zQr
5iQElcLSR3I6uUJTALYvsaoXH5jXKVwujwL69LYiNQRDe+r6qqvrUHbiNJdsd8Rk
XuhGGqj34tD04Pcd+h+MtO+YWqmHBBZwcA9XBeIkebbjPFH2kLT8AwN5AoGAYQy9
hMJxnkE3hIkk+gNE/OtgeE20J+Vw/ZANkrnJEzPHyGUEW41e+W2oyvdzAFZsSTdx
037f5ujIU58Z27x53NliRT4vS4693H0Iyws5EUfeIoGVuUflvODWKymraHjhCrXh
6cV/0R5DAabTnsCbCr7b/MRBC8YQvyUQ0KnOXo8CgYBQYGpvJnSWyvsCjtb6apTP
drjcBhVd0aSBpLGtDdtUCV4oLl9HPy+cLzcGaqckBqCwEq5DKruhMEf7on56bUMd
m/3ItFk1TnhysAeJHb3zLqmJ9CKBitpqLlsOE7MEXVNmbTYeXU10Uo9yOfyt1i7T
su+nT5VtyPkmF/l4wZl5+g==
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDKOK2QN3gN2VWT
hN9XEpSp3GckIaP27KtQ8CpHyEHr5tH7ytbrevIE1g1GLPE4bDjUTX1qxw27ID3y
ZQXiiqOJ3UrhWoOIrO/VSr/c1S6cLD+sR11HmEKhWROwEmPM2AEmqdxLrTq/SF8D
TRwKSkZH5N7tD8TxPyUCmKjPXMVDTlgLr2h4kcislysiQgUCCZcOBEXRngStR677
BU5ZPbru5UBifkB0UzEJIgGxi0+x6rJwLkRDoEIG2wgaK2VGDMtGD3ntllr5IXfx
qj7+KNiY4selBPSYVpTItPsEqDGC4azwIdwjE3lCjbsnewLnKaeu/a6TkNwA5RzG
SVdRtUJ5AgMBAAECggEAEM8piTp9I5yQVxr1RCv+mMiB99BGjHrTij6uawlXxnPJ
Ol574zbU6Yc/8vh/JB8l0arvzQmHCAoX8B9K4BABZE81X1paYujqJi8ImAMN9Owe
LlQ/yhjbWAVbJDiBHHjLjrLRpaY8gwQxZqk5FpdiNG1vROIZzypeKZM2PAdke9HA
PvJtsyfXdEz+jb5EUgaadyn7aquR6y607a8m55y34POLOcssteUOje4GdrTekHK0
62E+iEnawBjIs7gBzJf0j1XjFNq3aAeLrn8gFCEb+yK7X++8FJ8YjwsqS5V1aMsR
1PZguW0jCzYHATc2OcIozlvdBriPxy7eX8Y3MFvNMQKBgQD22ReUyKX5TKA/fg3z
S/QGfYqd4T35jkwK1MaXOuFOBzNyTMT6ZJkbjxPOYPB0uakcfIlva8bI77mE5vRe
PWYlvitp9Zz3v2kt/WgnwG32ZdVedPjEoi9aitUXmiiIoxdPVAUAgLPFFN65Sr2G
2NM/vduZcAPUr0UWnFx4dlpo8QKBgQDRuAV44Y+1396511oW4OR8ofWFECMc5uEV
wQ26EpbilEYhRSBty+1PAK5AcEGybeVtUn9RSmx0Ef1L15wnzP/C886LIzkaig/9
xs0yudXgOFdBAzYQKnK2lZmSKkzcUFJtifat3E+ZMCo/duhzXpzecg/lVNGh6gcx
xbtphJCyCQKBgEO8zvvFE8aVgGPr82gQL6aYTLGGXbtdkQBn4xcc0TbYQwXaizMq
59joKkc30sQ1LnLiudQZfzMklYQi3Gv/7UfuJ3usKqbRn8s+/pXp+ELlLuf8sUdE
OjpeXptbckQMfRkHtVet+abbU0MFf3zBgza6osg4NNToQ80wmy9zStwBAoGAGLeD
jZeoBFt6OJT0/TVMOJQuB5y7RrC/Xnz+TSvbtKCdE1a+V7JtKZ5+6wFP/OOO4q+S
adZHqfZk0Ad9VAOJMUTi1usz07jp4ZMIpC3a0y5QukzSll0qX/KJwvxRSrX8wQQ9
mogYqYlPsWMmSlKgUmdHEFRK0LZwWqFfUTRaiWECgYEA6KR6KMbqnYn5CglHeD42
NmOgFYXljRLIxS1coTiWWQZM/nUyx/tSk+MAS7770USHoZhAfh6lmEa/AeKSoLVl
Su3yzgtKk1/doAtbiWD8TasHAhacwWmzTuZtH5cZUgW3QIVJg6ADi6m8zswqKxIS
qfU/1N4aHp832v4ggRe/Og0=
-----END PRIVATE KEY-----

View File

@@ -1,5 +1,222 @@
# Changelog
## 2026-02-16 - 25.7.2 - fix(rustproxy-http)
preserve original Host header when proxying and add X-Forwarded-* headers; add TLS WebSocket echo backend helper and integration test for terminate-and-reencrypt websocket
- Preserve the client's original Host header instead of replacing it with backend host:port when proxying requests.
- Add standard reverse-proxy headers: X-Forwarded-For (appends client IP), X-Forwarded-Host, and X-Forwarded-Proto for upstream requests.
- Ensure raw TCP/HTTP upstream requests copy original headers and skip X-Forwarded-* (which are added explicitly).
- Add start_tls_ws_echo_backend test helper to start a TLS WebSocket echo backend for tests.
- Add integration test test_terminate_and_reencrypt_websocket to verify WS upgrade through terminate-and-reencrypt TLS path.
- Rename unused parameter upstream to _upstream in proxy_service functions to avoid warnings.
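For illustration, a minimal Node.js sketch (not part of the repository) of what a backend behind the proxy can now expect to receive; only the header names come from the entry above, everything else is assumed:

```typescript
import * as http from 'node:http';

// Hypothetical backend sitting behind SmartProxy after this change.
// It logs the forwarding headers described in the changelog entry above.
const backend = http.createServer((req, res) => {
  console.log('Host:             ', req.headers['host']);              // original client Host, preserved by the proxy
  console.log('X-Forwarded-For:  ', req.headers['x-forwarded-for']);   // client IP appended by the proxy
  console.log('X-Forwarded-Host: ', req.headers['x-forwarded-host']);
  console.log('X-Forwarded-Proto:', req.headers['x-forwarded-proto']); // e.g. 'https' when the proxy terminated TLS
  res.end('ok');
});

backend.listen(8080);
```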
## 2026-02-16 - 25.7.1 - fix(proxy)
use TLS to backends for terminate-and-reencrypt routes
- Set upstream.use_tls = true when a route's TLS mode is TerminateAndReencrypt so the proxy re-encrypts to backend servers.
- Add start_tls_http_backend test helper and update integration tests to run TLS-enabled backend servers validating re-encryption behavior.
- Make the selected upstream mutable to allow toggling the use_tls flag during request handling.
## 2026-02-16 - 25.7.0 - feat(routes)
add protocol-based route matching and ensure terminate-and-reencrypt routes HTTP through the full HTTP proxy; update docs and tests
- Introduce a new 'protocol' match field for routes (supports 'http' and 'tcp') and preserve it through cloning/merging.
- Add Rust integration test verifying terminate-and-reencrypt decrypts TLS and routes HTTP traffic via the HTTP proxy (per-request Host/path routing) instead of raw tunneling.
- Add TypeScript unit tests covering protocol field validation, preservation, interaction with terminate-and-reencrypt, cloning, merging, and matching behavior.
- Update README with a Protocol-Specific Routing section and clarify terminate-and-reencrypt behavior (HTTP routed via HTTP proxy; non-HTTP uses raw TLS-to-TLS tunnel).
- Example config: include health check thresholds (unhealthyThreshold and healthyThreshold) in the sample healthCheck settings.
## 2026-02-16 - 25.6.0 - feat(rustproxy)
add protocol-based routing and backend TLS re-encryption support
- Introduce optional route_match.protocol ("http" | "tcp") in Rust and TypeScript route types to allow protocol-restricted routing.
- RouteManager: respect protocol field during matching and treat TLS connections without SNI as not matching domain-restricted routes (except wildcard-only routes).
- HTTP proxy: add BackendStream abstraction to unify plain TCP and tokio-rustls TLS backend streams, and support connecting to upstreams over TLS (upstream.use_tls) with an InsecureBackendVerifier for internal/self-signed backends.
- WebSocket and HTTP forwarding updated to use BackendStream so upstream TLS is handled transparently.
- Passthrough listener: perform post-termination protocol detection for TerminateAndReencrypt; route HTTP flows into HttpProxyService and handle non-HTTP as TLS-to-TLS tunnel.
- Add tests for protocol matching, TLS/no-SNI behavior, and other routing edge cases.
- Add rustls and tokio-rustls dependencies (Cargo.toml/Cargo.lock updates).
## 2026-02-16 - 25.5.0 - feat(tls)
add shared TLS acceptor with SNI resolver and session resumption; prefer shared acceptor and fall back to per-connection when routes specify custom TLS versions
- Add CertResolver that pre-parses PEM certs/keys into CertifiedKey instances for SNI-based lookup and cheap runtime resolution
- Introduce build_shared_tls_acceptor to create a shared ServerConfig with session cache (4096) and Ticketer for session ticket resumption
- Add ArcSwap<Option<TlsAcceptor>> shared_tls_acceptor to tcp_listener for hot-reloadable, pre-built acceptor and update accept loop/handlers to use it
- set_tls_configs now attempts to build and store the shared TLS acceptor, falling back to per-connection acceptors on failure; raw PEM configs are still retained for route-level fallbacks
- Add get_tls_acceptor helper: prefer shared acceptor for performance and session resumption, but build per-connection acceptor when a route requests custom TLS versions
## 2026-02-16 - 25.4.0 - feat(rustproxy)
support dynamically loaded TLS certificates via loadCertificate IPC and include them in listener TLS configs for rebuilds and hot-swap
- Adds loaded_certs: HashMap<String, TlsCertConfig> to RustProxy to store certificates loaded at runtime
- Merge loaded_certs into tls_configs in rebuild and listener hot-swap paths so dynamically loaded certs are served immediately
- Persist loaded certificates on loadCertificate so future rebuilds include them
## 2026-02-15 - 25.3.1 - fix(plugins)
remove unused dependencies and simplify plugin exports
- Removed multiple dependencies from package.json to reduce dependency footprint: @push.rocks/lik, @push.rocks/smartacme, @push.rocks/smartdelay, @push.rocks/smartfile, @push.rocks/smartnetwork, @push.rocks/smartpromise, @push.rocks/smartrequest, @push.rocks/smartrx, @push.rocks/smartstring, @push.rocks/taskbuffer, @types/minimatch, @types/ws, pretty-ms, ws
- ts/plugins.ts: stopped importing/exporting node:https and many push.rocks and third-party modules; plugins now only re-export core node modules (without https), tsclass, smartcrypto, smartlog (+ destination-local), smartrust, and minimatch
- Intended effect: trim surface area and remove unused/optional integrations; patch-level change (no feature/API additions)
## 2026-02-14 - 25.3.0 - feat(smart-proxy)
add background concurrent certificate provisioning with per-domain timeouts and concurrency control
- Add ISmartProxyOptions settings: certProvisionTimeout (ms) and certProvisionConcurrency (default 4)
- Run certProvisionFunction as fire-and-forget background tasks (stores promise on start/route-update and awaited on stop)
- Provision certificates in parallel with a concurrency limit using a new ConcurrencySemaphore utility
- Introduce per-domain timeout handling (default 300000ms) via withTimeout and surface timeout errors as certificate-failed events
- Refactor provisioning into provisionSingleDomain to isolate domain handling, ACME fallback preserved
- Run provisioning outside route update mutex so route updates are not blocked by slow provisioning
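The pattern described above can be sketched roughly as follows; this is an illustrative stand-in, not the package's actual ConcurrencySemaphore or withTimeout implementation:

```typescript
// Illustrative sketch of concurrency-limited provisioning with per-domain timeouts.
// SimpleSemaphore and withTimeout are stand-ins; the package's own utilities may differ.
class SimpleSemaphore {
  private queue: Array<() => void> = [];
  private active = 0;
  constructor(private limit: number) {}

  async acquire(): Promise<void> {
    if (this.active < this.limit) {
      this.active++;
      return;
    }
    // Wait until a finishing task hands its slot over via release().
    await new Promise<void>((resolve) => this.queue.push(resolve));
  }

  release(): void {
    const next = this.queue.shift();
    if (next) next();       // pass the slot directly to the next waiter
    else this.active--;     // otherwise free the slot
  }
}

const withTimeout = <T>(promise: Promise<T>, ms: number, domain: string): Promise<T> =>
  Promise.race([
    promise,
    new Promise<T>((_, reject) =>
      setTimeout(() => reject(new Error(`provisioning for ${domain} timed out after ${ms}ms`)), ms),
    ),
  ]);

async function provisionAllDomains(
  domains: string[],
  provision: (domain: string) => Promise<void>,
) {
  const semaphore = new SimpleSemaphore(4); // certProvisionConcurrency default
  await Promise.all(
    domains.map(async (domain) => {
      await semaphore.acquire();
      try {
        await withTimeout(provision(domain), 300_000, domain); // certProvisionTimeout default
      } catch (err) {
        console.error('certificate-failed', domain, err); // surfaced as a certificate-failed event
      } finally {
        semaphore.release();
      }
    }),
  );
}
```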
## 2026-02-14 - 25.2.2 - fix(smart-proxy)
start metrics polling before certificate provisioning to avoid blocking metrics collection
- Start metrics polling immediately after Rust engine startup so metrics are available without waiting for certificate provisioning.
- Run certProvisionFunction after startup because ACME/DNS-01 provisioning can hang or be slow and must not block observability.
- Code change in ts/proxies/smart-proxy/smart-proxy.ts: metricsAdapter.startPolling() moved to run before provisionCertificatesViaCallback().
## 2026-02-14 - 25.2.1 - fix(smartproxy)
no changes detected in git diff
- The provided diff contains no file changes; no code or documentation updates to release.
## 2026-02-14 - 25.2.0 - feat(metrics)
add per-IP and HTTP-request metrics, propagate source IP through proxy paths, and expose new metrics to the TS adapter
- Add per-IP tracking and IpMetrics in MetricsCollector (active/total connections, bytes, throughput).
- Add HTTP request counters and tracking (total_http_requests, http_requests_per_sec, recent counters and tests).
- Include throughput history (ThroughputSample serialization, retention and snapshotting) and expose history in snapshots.
- Propagate source IP through HTTP and passthrough code paths: CountingBody.record_bytes and MetricsCollector methods now accept source_ip; connection_opened/closed calls include source IP.
- Introduce ForwardMetricsCtx to carry metrics context (collector, route_id, source_ip) into passthrough forwarding routines; update ConnectionGuard to include source_ip.
- TypeScript adapter (rust-metrics-adapter.ts) updated to return per-IP counts, top IPs, per-IP throughput, throughput history mapping, and HTTP request rates/total where available.
- Numerous unit tests added for per-IP tracking, HTTP request tracking, throughput history and ThroughputTracker.history behavior.
## 2026-02-13 - 25.1.0 - feat(metrics)
add real-time throughput sampling and byte-counting metrics
- Add CountingBody wrapper to count HTTP request and response bytes and report them to MetricsCollector.
- Implement lock-free hot-path byte recording and a cold-path sampling API (sample_all) in MetricsCollector with throughput history and configurable retention (default 3600s).
- Spawn a background sampling task in RustProxy (configurable sample_interval_ms) and tear it down on stop so throughput trackers are regularly sampled.
- Instrument passthrough TCP forwarding and socket-relay paths to record per-chunk bytes (lock-free) so long-lived connections contribute to throughput measurements.
- Wrap HTTP request/response bodies with CountingBody in proxy_service to capture bytes_in/bytes_out and report on body completion; connection_closed handling updated accordingly.
- Expose recent throughput metrics to the TypeScript adapter (throughputRecentIn/Out) and pass metrics settings from the TS SmartProxy into Rust.
- Add http-body dependency and update Cargo.toml/Cargo.lock entries for the new body wrapper usage.
- Add unit tests for MetricsCollector throughput tracking and a new end-to-end throughput test (test.throughput.ts).
- Update test certificates (assets/certs cert.pem and key.pem) used by TLS tests.
## 2026-02-13 - 25.0.0 - BREAKING CHANGE(certs)
accept a second eventComms argument in certProvisionFunction, add cert provisioning event types, and emit certificate lifecycle events
- Breaking API change: certProvisionFunction signature changed from (domain: string) => Promise<TSmartProxyCertProvisionObject> to (domain: string, eventComms: ICertProvisionEventComms) => Promise<TSmartProxyCertProvisionObject>. Custom provisioners must accept (or safely ignore) the new second argument.
- New types added and exported: ICertProvisionEventComms, ICertificateIssuedEvent, ICertificateFailedEvent.
- smart-proxy now constructs an eventComms channel that allows provisioners to log/warn/error and set expiry date and source for the issued event.
- Emits 'certificate-issued' (domain, expiryDate, source, isRenewal?) on successful provisioning and 'certificate-failed' (domain, error, source) on failures.
- Updated public exports to include the new types so they are available to consumers.
- Removed readme.byte-counting-audit.md (documentation file deleted).
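A hedged migration sketch for custom provisioners; the eventComms method names and the returned object shape below are assumptions beyond what the entry states (log/warn/error plus expiry date and source):

```typescript
// Sketch only: adapts a custom provisioner to the new (domain, eventComms) signature.
// Method and field names on eventComms other than log/warn/error are assumed for illustration.
declare const myAcmeClient: {
  provision(domain: string): Promise<{ certPem: string; keyPem: string; expiresAt: Date }>;
};

const certProvisionFunction = async (domain: string, eventComms: any) => {
  eventComms?.log?.(`provisioning certificate for ${domain}`);
  try {
    const cert = await myAcmeClient.provision(domain); // your own provisioning logic
    eventComms?.setExpiryDate?.(cert.expiresAt);        // feeds the 'certificate-issued' event (assumed name)
    eventComms?.setSource?.('custom-acme');             // assumed name
    return { domain, publicKey: cert.certPem, privateKey: cert.keyPem };
  } catch (err) {
    eventComms?.error?.(`provisioning failed for ${domain}: ${err}`);
    throw err; // surfaces as a 'certificate-failed' event
  }
};
```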
## 2026-02-13 - 24.0.1 - fix(proxy)
improve proxy robustness: add connect timeouts, graceful shutdown, WebSocket watchdog, and metrics guard
- Add tokio-util CancellationToken to HTTP handlers to support graceful shutdown (stop accepting new requests while letting in-flight requests finish).
- Introduce configurable upstream connect timeout (DEFAULT_CONNECT_TIMEOUT) and return 504 Gateway Timeout on connect timeouts to avoid hanging connections.
- Add WebSocket watchdog with inactivity and max-lifetime checks, activity tracking via AtomicU64, and cancellation-driven tunnel aborts.
- Add ConnectionGuard RAII in passthrough listener to ensure metrics.connection_closed() is called on all exit paths and disarm the guard when handing off to the HTTP proxy.
- Expose HttpProxyService::with_connect_timeout and wire connection timeout from ConnectionConfig into listeners.
- Add tokio-util workspace dependency (CancellationToken) and related code changes across rustproxy-http and rustproxy-passthrough.
## 2026-02-13 - 24.0.0 - BREAKING CHANGE(smart-proxy)
move certificate persistence to an in-memory store and introduce consumer-managed certStore API; add default self-signed fallback cert and change ACME account handling
- Cert persistence removed from Rust side: CertStore is now an in-memory cache (no filesystem reads/writes). Rust no longer persists or loads certs from disk.
- ACME account credentials are no longer persisted by the library; AcmeClient uses ephemeral accounts only and account persistence APIs were removed.
- TypeScript API changes: removed certificateStore option and added ISmartProxyCertStore + certStore option for consumer-provided persistence (loadAll, save, optional remove).
- Default self-signed fallback certificate added (generateDefaultCertificate) and loaded as '*' unless disableDefaultCert is set.
- SmartProxy now pre-loads certificates from consumer certStore on startup and persists certificates by calling certStore.save() after provisioning.
- provisionCertificatesViaCallback signature changed to accept preloaded domains (prevents re-provisioning), and ACME fallback behavior adjusted with clearer logging.
- Rust cert manager methods made infallible for cache-only operations (load_static/store no longer return errors for cache insertions); removed store-backed load_all/remove/base_dir APIs.
- TCP listener tls_configs concurrency improved: switched to ArcSwap<HashMap<...>> so accept loops see hot-reloads immediately.
- Removed dependencies related to filesystem cert persistence from the tls crate (serde_json, tempfile) and corresponding Cargo.lock changes and test updates.
## 2026-02-13 - 23.1.6 - fix(smart-proxy)
disable built-in Rust ACME when a certProvisionFunction is provided and improve certificate provisioning flow
- Pass an optional ACME override into buildRustConfig so Rust ACME can be disabled per-run
- Disable Rust ACME when certProvisionFunction is configured to avoid provisioning race conditions
- Normalize routing glob patterns into concrete domain identifiers for certificate provisioning (expand leading-star globs and warn on unsupported patterns)
- Deduplicate domains during provisioning to avoid repeated attempts
- When the callback returns 'http01', explicitly trigger Rust ACME for the route via bridge.provisionCertificate and log success/failure
## 2026-02-13 - 23.1.5 - fix(smart-proxy)
provision certificates for wildcard domains instead of skipping them
- Removed early continue that skipped domains containing '*' in the domain loop
- Now calls provisionFn for wildcard domains so certificate provisioning can proceed for wildcard hosts
- Fixes cases where wildcard domains never had certificates requested
## 2026-02-12 - 23.1.4 - fix(tests)
make tests more robust and bump small dependencies
- Bump dependencies: @push.rocks/smartrust ^1.2.1 and minimatch ^10.2.0
- Replace hardcoded ports with named constants (ECHO_PORT, PROXY_PORT, PROXY_PORT_1/2) to avoid collisions between tests
- Add server 'error' handlers and reject listen promises on server errors to prevent silent hangs
- Reduce test timeouts and intervals (shorter test durations, more frequent pings) to speed up test runs
- Ensure proxy is stopped between tests and remove forced process.exit; export tap.start() consistently
- Adjust assertions to match the new shorter ping/response counts
## 2026-02-12 - 23.1.3 - fix(rustproxy)
install default rustls crypto provider early; detect and skip raw fast-path for HTTP connections and return proper HTTP 502 when no route matches
- Install ring-based rustls crypto provider at startup to prevent panics from instant-acme/hyper-rustls calling ClientConfig::builder() before TLS listeners are initialized
- Add a non-blocking 10ms peek to detect HTTP traffic in the TCP passthrough fast-path to avoid misrouting HTTP and ensure HTTP proxy handles CORS, errors, and request-level routing
- Skip the fast-path and fall back to the HTTP proxy when HTTP is detected (with a debug log)
- When no route matches for detected HTTP connections, send an HTTP 502 Bad Gateway response and close the connection instead of silently dropping it
## 2026-02-11 - 23.1.2 - fix(core)
use node: scoped builtin imports and add route unit tests
- Replaced bare Node built-in imports (events, fs, http, https, net, path, tls, url, http2, buffer, crypto) with 'node:' specifiers for ESM/bundler compatibility (files updated include ts/plugins.ts, ts/core/models/socket-types.ts, ts/core/utils/enhanced-connection-pool.ts, ts/core/utils/socket-tracker.ts, ts/protocols/common/fragment-handler.ts, ts/protocols/tls/sni/client-hello-parser.ts, ts/protocols/tls/sni/sni-extraction.ts, ts/protocols/websocket/utils.ts, ts/tls/sni/sni-handler.ts).
- Added new unit tests (test/test.bun.ts and test/test.deno.ts) covering route helpers, validators, matching, merging and cloning to improve test coverage.
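The import change itself is mechanical, for example:

```typescript
// Before: bare builtin specifiers
// import * as net from 'net';
// import * as tls from 'tls';

// After: explicit 'node:' scheme, unambiguous for ESM loaders and bundlers
import * as net from 'node:net';
import * as tls from 'node:tls';

console.log(net.isIP('127.0.0.1'), typeof tls.connect); // usage is unchanged
```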
## 2026-02-11 - 23.1.1 - fix(rust-proxy)
increase rust proxy bridge maxPayloadSize to 100 MB and bump dependencies
- Set maxPayloadSize to 100 * 1024 * 1024 (100 MB) in ts/proxies/smart-proxy/rust-proxy-bridge.ts to support large route configs
- Bump devDependency @types/node from ^25.2.2 to ^25.2.3
- Bump dependency @push.rocks/smartrust from ^1.1.1 to ^1.2.0
## 2026-02-10 - 23.1.0 - feat(rust-bridge)
integrate tsrust to build and locate cross-compiled Rust binaries; refactor rust-proxy bridge to use typed IPC and streamline process handling; add @push.rocks/smartrust and update build/dev dependencies
- Add tsrust to the build script and include dist_rust candidates when locating the Rust binary (enables cross-compiled artifacts produced by tsrust).
- Remove the old rust-binary-locator and refactor rust-proxy-bridge to use explicit, typed IPC command definitions and improved process spawn/cleanup logic.
- Introduce @push.rocks/smartrust for type-safe JSON IPC and export it via plugins; update README with expanded metrics documentation and change initialDataTimeout default from 60s to 120s.
- Add rust/.cargo/config.toml with aarch64 linker configuration to support cross-compilation for arm64.
- Bump several devDependencies and runtime dependencies (e.g. @git.zone/tsbuild, @git.zone/tstest, @push.rocks/smartserve, @push.rocks/taskbuffer, ws, minimatch, etc.).
- Update runtime message guiding local builds to use 'pnpm build' (tsrust) instead of direct cargo invocation.
## 2026-02-09 - 23.0.0 - BREAKING CHANGE(proxies/nftables-proxy)
remove nftables-proxy implementation, models, and utilities from the repository
- Deleted nftables-proxy module files under ts/proxies/nftables-proxy (index, models, utils, command executor, validators, etc.)
- Removed nftables-proxy exports from ts/index.ts and ts/proxies/index.ts
- Updated smart-proxy types to drop dependency on nftables proxy models
- Breaking change: any consumers importing nftables-proxy will no longer find those exports; update imports or install/use the extracted/alternative package if applicable
## 2026-02-09 - 22.6.0 - feat(smart-proxy)
add socket-handler relay, fast-path port-only forwarding, metrics and bridge improvements, and various TS/Rust integration fixes
- Add Unix-domain socket relay for socket-handler routes so Rust can hand off matched connections to TypeScript handlers (metadata JSON + initial bytes, relay implementation in Rust and SocketHandlerServer in TS).
- Implement fast-path port-only forwarding in the TCP accept/handler path to forward simple non-TLS, port-only routes immediately without peeking at client data (improves server-speaks-first protocol handling).
- Use ArcSwap for route manager hot-reload visibility in accept loops and share socket_handler_relay via Arc<RwLock> so listeners see relay path updates immediately.
- Enhance SNI/HTTP parsing: add extract_http_path and extract_http_host to aid domain/path matching from initial data.
- Improve RustProxy shutdown/kill handling: remove listeners, reject pending requests, destroy stdio pipes and unref process to avoid leaking handles.
- Enhance Rust <-> TS metrics bridge and adapter: add immediate poll(), map Rust JSON fields to IMetrics (per-route active/throughput/totals), and use safer polling/unref timers.
- SocketHandlerServer enhancements: track active sockets, destroy on stop, pause/resume to prevent data loss, support async socketHandler callbacks and dynamic function-based target forwarding (resolve host/port functions and forward).
- TypeScript smart-proxy lifecycle tweaks: only set bridge relay after Rust starts, guard unexpected-exit emission when intentionally stopping, stop polling and remove listeners on stop, add stopping flag.
- Misc: README and API ergonomics updates (nft proxy option renames and config comments), various test updates to use stable http.request helper, adjust timeouts/metrics sampling and assertions, and multiple small bugfixes in listeners, timeouts and TLS typings.
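As a rough illustration of the dynamic function-based targets mentioned above, the following hypothetical route uses host/port callbacks and would therefore be relayed through the TypeScript socket handler server; the context field used here is an assumption:

```typescript
import type { IRouteConfig } from '@push.rocks/smartproxy'; // type export assumed

// Hypothetical dynamic route: function-based host/port cannot be serialized to Rust,
// so connections for this route are handed back to TypeScript via the socket relay.
const dynamicRoute: IRouteConfig = {
  name: 'dynamic-backend',
  match: { ports: 8080 },
  action: {
    type: 'forward',
    targets: [{
      host: (context: any) =>
        context?.clientIp?.startsWith('10.') ? 'internal-backend' : 'public-backend',
      port: 'preserve', // keep the client-facing port when connecting to the backend
    }],
  },
};
```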
## 2026-02-09 - 22.5.0 - feat(rustproxy)
introduce a Rust-powered proxy engine and workspace with core crates for proxy functionality, ACME/TLS support, passthrough and HTTP proxies, metrics, nftables integration, routing/security, management IPC, tests, and README updates

deno.lock (generated, 7069 changed lines); diff suppressed because it is too large

View File

@@ -40,5 +40,8 @@
},
"@ship.zone/szci": {
"npmGlobalTools": []
},
"@git.zone/tsrust": {
"targets": ["linux_amd64", "linux_arm64"]
}
}

View File

@@ -1,6 +1,6 @@
{
"name": "@push.rocks/smartproxy",
"version": "22.5.0",
"version": "25.7.2",
"private": false,
"description": "A powerful proxy package with unified route-based configuration for high traffic management. Features include SSL/TLS support, flexible routing patterns, WebSocket handling, advanced security options, and automatic ACME certificate management.",
"main": "dist_ts/index.js",
@@ -10,38 +10,26 @@
"license": "MIT",
"scripts": {
"test": "(tstest test/**/test*.ts --verbose --timeout 60 --logfile)",
"build": "(tsbuild tsfolders --allowimplicitany)",
"build": "(tsbuild tsfolders --allowimplicitany) && (tsrust)",
"format": "(gitzone format)",
"buildDocs": "tsdoc"
},
"devDependencies": {
"@git.zone/tsbuild": "^3.1.2",
"@git.zone/tsrun": "^2.0.0",
"@git.zone/tstest": "^3.1.3",
"@push.rocks/smartserve": "^1.4.0",
"@types/node": "^24.10.2",
"@git.zone/tsbuild": "^4.1.2",
"@git.zone/tsrun": "^2.0.1",
"@git.zone/tsrust": "^1.3.0",
"@git.zone/tstest": "^3.1.8",
"@push.rocks/smartserve": "^2.0.1",
"@types/node": "^25.2.3",
"typescript": "^5.9.3",
"why-is-node-running": "^3.2.2"
},
"dependencies": {
"@push.rocks/lik": "^6.2.2",
"@push.rocks/smartacme": "^8.0.0",
"@push.rocks/smartcrypto": "^2.0.4",
"@push.rocks/smartdelay": "^3.0.5",
"@push.rocks/smartfile": "^13.1.0",
"@push.rocks/smartlog": "^3.1.10",
"@push.rocks/smartnetwork": "^4.4.0",
"@push.rocks/smartpromise": "^4.2.3",
"@push.rocks/smartrequest": "^5.0.1",
"@push.rocks/smartrx": "^3.0.10",
"@push.rocks/smartstring": "^4.1.0",
"@push.rocks/taskbuffer": "^3.5.0",
"@push.rocks/smartrust": "^1.2.1",
"@tsclass/tsclass": "^9.3.0",
"@types/minimatch": "^6.0.0",
"@types/ws": "^8.18.1",
"minimatch": "^10.1.1",
"pretty-ms": "^9.3.0",
"ws": "^8.18.3"
"minimatch": "^10.2.0"
},
"files": [
"ts/**/*",

pnpm-lock.yaml (generated, 2772 changed lines); diff suppressed because it is too large

View File

@@ -1,169 +0,0 @@
# SmartProxy Byte Counting Audit Report
## Executive Summary
After a comprehensive audit of the SmartProxy codebase, I can confirm that **byte counting is implemented correctly** with no instances of double counting. Each byte transferred through the proxy is counted exactly once in each direction.
## Byte Counting Implementation
### 1. Core Tracking Mechanisms
SmartProxy uses two complementary tracking systems:
1. **Connection Records** (`IConnectionRecord`):
- `bytesReceived`: Total bytes received from client
- `bytesSent`: Total bytes sent to client
2. **MetricsCollector**:
- Global throughput tracking via `ThroughputTracker`
- Per-connection byte tracking for route/IP metrics
- Called via `recordBytes(connectionId, bytesIn, bytesOut)`
### 2. Where Bytes Are Counted
Bytes are counted in only two files:
#### a) `route-connection-handler.ts`
- **Line 351**: TLS alert bytes when no SNI is provided
- **Lines 1286-1301**: Data forwarding callbacks in `setupBidirectionalForwarding()`
#### b) `http-proxy-bridge.ts`
- **Line 127**: Initial TLS chunk for HttpProxy connections
- **Lines 142-154**: Data forwarding callbacks in `setupBidirectionalForwarding()`
## Connection Flow Analysis
### 1. Direct TCP Connection (No TLS)
```
Client → SmartProxy → Target Server
```
1. Connection arrives at `RouteConnectionHandler.handleConnection()`
2. For non-TLS ports, immediately routes via `routeConnection()`
3. `setupDirectConnection()` creates target connection
4. `setupBidirectionalForwarding()` handles all data transfer:
- `onClientData`: `bytesReceived += chunk.length` + `recordBytes(chunk.length, 0)`
- `onServerData`: `bytesSent += chunk.length` + `recordBytes(0, chunk.length)`
**Result**: ✅ Each byte counted exactly once
### 2. TLS Passthrough Connection
```
Client (TLS) → SmartProxy → Target Server (TLS)
```
1. Connection waits for initial data to detect TLS
2. TLS handshake detected, SNI extracted
3. Route matched, `setupDirectConnection()` called
4. Initial chunk stored in `pendingData` (NOT counted yet)
5. On target connect, `pendingData` written to target (still not counted)
6. `setupBidirectionalForwarding()` counts ALL bytes including initial chunk
**Result**: ✅ Each byte counted exactly once
### 3. TLS Termination via HttpProxy
```
Client (TLS) → SmartProxy → HttpProxy (localhost) → Target Server
```
1. TLS connection detected with `tls.mode = "terminate"`
2. `forwardToHttpProxy()` called:
- Initial chunk: `bytesReceived += chunk.length` + `recordBytes(chunk.length, 0)`
3. Proxy connection created to HttpProxy on localhost
4. `setupBidirectionalForwarding()` handles subsequent data
**Result**: ✅ Each byte counted exactly once
### 4. HTTP Connection via HttpProxy
```
Client (HTTP) → SmartProxy → HttpProxy (localhost) → Target Server
```
1. Connection on configured HTTP port (`useHttpProxy` ports)
2. Same flow as TLS termination
3. All byte counting identical to TLS termination
**Result**: ✅ Each byte counted exactly once
### 5. NFTables Forwarding
```
Client → [Kernel NFTables] → Target Server
```
1. Connection detected, route matched with `forwardingEngine: 'nftables'`
2. Connection marked as `usingNetworkProxy = true`
3. NO application-level forwarding (kernel handles packet routing)
4. NO byte counting in application layer
**Result**: ✅ No counting (correct - kernel handles everything)
## Special Cases
### PROXY Protocol
- PROXY protocol headers sent to backend servers are NOT counted in client metrics
- Only actual client data is counted
- **Correct behavior**: Protocol overhead is not client data
### TLS Alerts
- TLS alerts (e.g., for missing SNI) are counted as sent bytes
- **Correct behavior**: Alerts are actual data sent to the client
### Initial Chunks
- **Direct connections**: Stored in `pendingData`, counted when forwarded
- **HttpProxy connections**: Counted immediately upon receipt
- **Both approaches**: Count each byte exactly once
## Verification Methodology
1. **Code Analysis**: Searched for all instances of:
- `bytesReceived +=` and `bytesSent +=`
- `recordBytes()` calls
- Data forwarding implementations
2. **Flow Tracing**: Followed data path for each connection type from entry to exit
3. **Handler Review**: Examined all forwarding handlers to ensure no additional counting
## Findings
### ✅ No Double Counting Detected
- Each byte is counted exactly once in the direction it flows
- Connection records and metrics are updated consistently
- No overlapping or duplicate counting logic found
### Areas of Excellence
1. **Centralized Counting**: All byte counting happens in just two files
2. **Consistent Pattern**: Uses `setupBidirectionalForwarding()` with callbacks
3. **Clear Separation**: Forwarding handlers don't interfere with proxy metrics
## Recommendations
1. **Debug Logging**: Add optional debug logging to verify byte counts in production:
```typescript
if (settings.debugByteCount) {
logger.log('debug', `Bytes counted: ${connectionId} +${bytes} (total: ${record.bytesReceived})`);
}
```
2. **Unit Tests**: Create specific tests to ensure byte counting accuracy:
- Test initial chunk handling
- Test PROXY protocol overhead exclusion
- Test HttpProxy forwarding accuracy
3. **Protocol Overhead Tracking**: Consider separately tracking:
- PROXY protocol headers
- TLS handshake bytes
- HTTP headers vs body
4. **NFTables Documentation**: Clearly document that NFTables-forwarded connections are not included in application metrics
## Conclusion
SmartProxy's byte counting implementation is **robust and accurate**. The design ensures that each byte is counted exactly once, with clear separation between connection tracking and metrics collection. No remediation is required.

readme.md (231 changed lines)
View File

@@ -27,7 +27,7 @@ Whether you're building microservices, deploying edge infrastructure, or need a
| 🦀 **Rust-Powered Engine** | All networking handled by a high-performance Rust binary via IPC |
| 🔀 **Unified Route-Based Config** | Clean match/action patterns for intuitive traffic routing |
| 🔒 **Automatic SSL/TLS** | Zero-config HTTPS with Let's Encrypt ACME integration |
| 🎯 **Flexible Matching** | Route by port, domain, path, client IP, TLS version, headers, or custom logic |
| 🎯 **Flexible Matching** | Route by port, domain, path, protocol, client IP, TLS version, headers, or custom logic |
| 🚄 **High-Performance** | Choose between user-space or kernel-level (NFTables) forwarding |
| ⚖️ **Load Balancing** | Round-robin, least-connections, IP-hash with health checks |
| 🛡️ **Enterprise Security** | IP filtering, rate limiting, basic auth, JWT auth, connection limits |
@@ -36,6 +36,7 @@ Whether you're building microservices, deploying edge infrastructure, or need a
| 📊 **Live Metrics** | Real-time throughput, connection counts, and performance data |
| 🔧 **Dynamic Management** | Add/remove ports and routes at runtime without restarts |
| 🔄 **PROXY Protocol** | Full PROXY protocol v1/v2 support for preserving client information |
| 💾 **Consumer Cert Storage** | Bring your own persistence — SmartProxy never writes certs to disk |
## 🚀 Quick Start
@@ -88,7 +89,7 @@ SmartProxy uses a powerful **match/action** pattern that makes routing predictab
```
Every route consists of:
- **Match** — What traffic to capture (ports, domains, paths, IPs, headers)
- **Match** — What traffic to capture (ports, domains, paths, protocol, IPs, headers)
- **Action** — What to do with it (`forward` or `socket-handler`)
- **Security** (optional) — IP allow/block lists, rate limits, authentication
- **Headers** (optional) — Request/response header manipulation with template variables
@@ -102,7 +103,7 @@ SmartProxy supports three TLS handling modes:
|------|-------------|----------|
| `passthrough` | Forward encrypted traffic as-is (SNI-based routing) | Backend handles TLS |
| `terminate` | Decrypt at proxy, forward plain HTTP to backend | Standard reverse proxy |
| `terminate-and-reencrypt` | Decrypt, then re-encrypt to backend | Zero-trust environments |
| `terminate-and-reencrypt` | Decrypt at proxy, re-encrypt to backend. HTTP traffic gets full per-request routing (Host header, path matching) via the HTTP proxy; non-HTTP traffic uses a raw TLS-to-TLS tunnel | Zero-trust / defense-in-depth environments |
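For example, a terminate-and-reencrypt route might look like this (a minimal sketch following the route shape shown later in this README):

```typescript
const reencryptRoute: IRouteConfig = {
  name: 'secure-api',
  match: { ports: 443, domains: 'api.example.com' },
  action: {
    type: 'forward',
    targets: [{ host: 'api-backend.internal', port: 8443 }],
    // TLS is terminated at the proxy and re-encrypted towards the backend;
    // HTTP requests still get per-request Host/path routing via the HTTP proxy.
    tls: { mode: 'terminate-and-reencrypt', certificate: 'auto' },
  },
};
```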
## 💡 Common Use Cases
@@ -134,13 +135,13 @@ const proxy = new SmartProxy({
],
{
tls: { mode: 'terminate', certificate: 'auto' },
loadBalancing: {
algorithm: 'round-robin',
healthCheck: {
path: '/health',
interval: 30000,
timeout: 5000
}
algorithm: 'round-robin',
healthCheck: {
path: '/health',
interval: 30000,
timeout: 5000,
unhealthyThreshold: 3,
healthyThreshold: 2
}
}
)
@@ -214,8 +215,8 @@ const echoRoute = createSocketHandlerRoute(
const customRoute = createSocketHandlerRoute(
'custom.example.com',
9999,
async (socket, context) => {
console.log(`Connection from ${context.clientIp}`);
async (socket) => {
console.log(`New connection on custom protocol`);
socket.write('Welcome to my custom protocol!\n');
socket.on('data', (data) => {
@@ -261,8 +262,7 @@ const proxy = new SmartProxy({
{
ports: 443,
certificate: 'auto',
preserveSourceIP: true, // Backend sees real client IP
maxRate: '1gbps' // QoS rate limiting
preserveSourceIP: true // Backend sees real client IP
}
)
]
@@ -318,6 +318,42 @@ const proxy = new SmartProxy({
> **Note:** Routes with dynamic functions (host/port callbacks) are automatically relayed through the TypeScript socket handler server, since JavaScript functions can't be serialized to Rust.
### 🔀 Protocol-Specific Routing
Restrict routes to specific application-layer protocols. When `protocol` is set, the Rust engine detects the protocol after connection (or after TLS termination) and only matches routes that accept that protocol:
```typescript
// HTTP-only route (rejects raw TCP connections)
const httpOnlyRoute: IRouteConfig = {
name: 'http-api',
match: {
ports: 443,
domains: 'api.example.com',
protocol: 'http', // Only match HTTP/1.1, HTTP/2, and WebSocket upgrades
},
action: {
type: 'forward',
targets: [{ host: 'api-backend', port: 8080 }],
tls: { mode: 'terminate', certificate: 'auto' }
}
};
// Raw TCP route (rejects HTTP traffic)
const tcpOnlyRoute: IRouteConfig = {
name: 'database-proxy',
match: {
ports: 5432,
protocol: 'tcp', // Only match non-HTTP TCP streams
},
action: {
type: 'forward',
targets: [{ host: 'db-server', port: 5432 }]
}
};
```
> **Note:** Omitting `protocol` (the default) matches any protocol. For TLS routes, protocol detection happens *after* TLS termination — during the initial SNI-based route match, `protocol` is not yet known and the route is allowed to match. The protocol restriction is enforced after the proxy peeks at the decrypted data.
### 🔒 Security Controls
Comprehensive per-route security options:
@@ -379,7 +415,9 @@ await proxy.updateRoutes([...newRoutes]);
// Get real-time metrics
const metrics = proxy.getMetrics();
console.log(`Active connections: ${metrics.connections.active()}`);
console.log(`Requests/sec: ${metrics.throughput.requestsPerSecond()}`);
console.log(`Bytes in: ${metrics.totals.bytesIn()}`);
console.log(`Requests/sec: ${metrics.requests.perSecond()}`);
console.log(`Throughput in: ${metrics.throughput.instant().in} bytes/sec`);
// Get detailed statistics from the Rust engine
const stats = await proxy.getStatistics();
@@ -455,6 +493,51 @@ const proxy = new SmartProxy({
});
```
### 💾 Consumer-Managed Certificate Storage
SmartProxy **never writes certificates to disk**. Instead, you own all persistence through the `certStore` interface. This gives you full control — store certs in a database, cloud KMS, encrypted vault, or wherever makes sense for your infrastructure:
```typescript
const proxy = new SmartProxy({
routes: [...],
certProvisionFunction: async (domain) => myAcme.provision(domain),
// Your persistence layer — SmartProxy calls these hooks
certStore: {
// Called once on startup to pre-load persisted certs
loadAll: async () => {
const certs = await myDb.getAllCerts();
return certs.map(c => ({
domain: c.domain,
publicKey: c.certPem,
privateKey: c.keyPem,
ca: c.caPem, // optional
}));
},
// Called after each successful cert provision
save: async (domain, publicKey, privateKey, ca) => {
await myDb.upsertCert({ domain, certPem: publicKey, keyPem: privateKey, caPem: ca });
},
// Optional: called when a cert should be removed
remove: async (domain) => {
await myDb.deleteCert(domain);
},
},
});
```
**Startup flow:**
1. Rust engine starts
2. Default self-signed `*` fallback cert is loaded (unless `disableDefaultCert: true`)
3. `certStore.loadAll()` is called → all returned certs are loaded into the Rust TLS stack
4. `certProvisionFunction` runs for any remaining `certificate: 'auto'` routes (skipping domains already loaded from the store)
5. After each successful provision, `certStore.save()` is called
This means your second startup is instant — no re-provisioning needed for domains that already have valid certs in your store.
## 🏛️ Architecture
SmartProxy uses a hybrid **Rust + TypeScript** architecture:
@@ -487,8 +570,8 @@ SmartProxy uses a hybrid **Rust + TypeScript** architecture:
- **Rust Engine** handles all networking, TLS, HTTP proxying, connection management, security, and metrics
- **TypeScript** provides the npm API, configuration types, route helpers, validation, and socket handler callbacks
- **IPC** — JSON commands/events over stdin/stdout for seamless cross-language communication
- **Socket Relay** — a Unix domain socket server for routes requiring TypeScript-side handling (socket handlers, dynamic host/port functions)
- **IPC** — The TypeScript wrapper uses JSON commands/events over stdin/stdout to communicate with the Rust binary
- **Socket Relay** — A Unix domain socket server for routes requiring TypeScript-side handling (socket handlers, dynamic host/port functions)
## 🎯 Route Configuration Reference
@@ -496,12 +579,13 @@ SmartProxy uses a hybrid **Rust + TypeScript** architecture:
```typescript
interface IRouteMatch {
ports: number | number[] | Array<{ from: number; to: number }>; // Port(s) to listen on
ports: number | number[] | Array<{ from: number; to: number }>; // Required — port(s) to listen on
domains?: string | string[]; // 'example.com', '*.example.com'
path?: string; // '/api/*', '/users/:id'
clientIp?: string[]; // ['10.0.0.0/8', '192.168.*']
tlsVersion?: string[]; // ['TLSv1.2', 'TLSv1.3']
headers?: Record<string, string | RegExp>; // Match by HTTP headers
protocol?: 'http' | 'tcp'; // Match specific protocol ('http' includes h2 + WebSocket upgrades)
}
```
@@ -516,11 +600,16 @@ interface IRouteMatch {
```typescript
interface IRouteTarget {
host: string | string[] | ((context: IRouteContext) => string);
host: string | string[] | ((context: IRouteContext) => string | string[]);
port: number | 'preserve' | ((context: IRouteContext) => number);
tls?: { ... }; // Per-target TLS override
priority?: number; // Target priority
match?: ITargetMatch; // Sub-match within a route (by port, path, headers, method)
tls?: IRouteTls; // Per-target TLS override
priority?: number; // Target priority
match?: ITargetMatch; // Sub-match within a route (by port, path, headers, method)
websocket?: IRouteWebSocket;
loadBalancing?: IRouteLoadBalancing;
sendProxyProtocol?: boolean;
headers?: IRouteHeaders;
advanced?: IRouteAdvanced;
}
```
@@ -529,7 +618,7 @@ interface IRouteTarget {
```typescript
interface IRouteTls {
mode: 'passthrough' | 'terminate' | 'terminate-and-reencrypt';
certificate: 'auto' | {
certificate?: 'auto' | {
key: string;
cert: string;
ca?: string;
@@ -543,7 +632,7 @@ interface IRouteTls {
renewBeforeDays?: number;
};
versions?: string[];
ciphers?: string[];
ciphers?: string;
honorCipherOrder?: boolean;
sessionTimeout?: number;
}
@@ -569,10 +658,10 @@ interface IRouteLoadBalancing {
algorithm: 'round-robin' | 'least-connections' | 'ip-hash';
healthCheck?: {
path: string;
interval: number; // ms
timeout: number; // ms
unhealthyThreshold?: number;
healthyThreshold?: number;
interval: number; // ms
timeout: number; // ms
unhealthyThreshold: number;
healthyThreshold: number;
};
}
```
@@ -612,6 +701,7 @@ import {
createPortMappingRoute, // Port mapping with context
createOffsetPortMappingRoute, // Simple port offset
createDynamicRoute, // Dynamic host/port via functions
createPortOffset, // Port offset factory
// Security Modifiers
addRateLimiting, // Add rate limiting to any route
@@ -679,7 +769,6 @@ interface ISmartProxyOptions {
port?: number; // HTTP-01 challenge port (default: 80)
renewThresholdDays?: number; // Days before expiry to renew (default: 30)
autoRenew?: boolean; // Enable auto-renewal (default: true)
certificateStore?: string; // Directory to store certs (default: './certs')
renewCheckIntervalHours?: number; // Renewal check interval (default: 24)
};
@@ -687,6 +776,12 @@ interface ISmartProxyOptions {
certProvisionFunction?: (domain: string) => Promise<ICert | 'http01'>;
certProvisionFallbackToAcme?: boolean; // Fall back to ACME on failure (default: true)
// Consumer-managed certificate persistence (see "Consumer-Managed Certificate Storage")
certStore?: ISmartProxyCertStore;
// Self-signed fallback
disableDefaultCert?: boolean; // Disable '*' self-signed fallback (default: false)
// Global defaults
defaults?: {
target?: { host: string; port: number };
@@ -725,33 +820,62 @@ interface ISmartProxyOptions {
// Behavior
enableDetailedLogging?: boolean; // Verbose connection logging
enableTlsDebugLogging?: boolean; // TLS handshake debug logging
// Rust binary
rustBinaryPath?: string; // Custom path to the Rust binary
}
```
### NfTablesProxy Class
A standalone class for managing nftables NAT rules directly (Linux only, requires root):
### ISmartProxyCertStore Interface
```typescript
import { NfTablesProxy } from '@push.rocks/smartproxy';
interface ISmartProxyCertStore {
/** Called once on startup to pre-load persisted certs */
loadAll: () => Promise<Array<{
domain: string;
publicKey: string;
privateKey: string;
ca?: string;
}>>;
const nftProxy = new NfTablesProxy({
fromPorts: [80, 443],
toHost: 'backend-server',
toPorts: [8080, 8443],
protocol: 'tcp',
preserveSourceIP: true,
enableIPv6: true,
maxRate: '1gbps',
useIPSets: true
});
/** Called after each successful cert provision */
save: (domain: string, publicKey: string, privateKey: string, ca?: string) => Promise<void>;
await nftProxy.start(); // Apply nftables rules
const status = nftProxy.getStatus();
await nftProxy.stop(); // Remove rules
/** Optional: remove a cert from storage */
remove?: (domain: string) => Promise<void>;
}
```
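To make the interface concrete, here is a hedged sketch of a JSON-file-per-domain store; the directory layout and file naming are assumptions, and only the method shapes come from the interface above:

```typescript
import { promises as fs } from 'fs';
import * as path from 'path';

const certDir = './cert-store';

const fileCertStore = {
  // Pre-load every persisted cert at startup.
  async loadAll() {
    await fs.mkdir(certDir, { recursive: true });
    const files = await fs.readdir(certDir);
    const certs: Array<{ domain: string; publicKey: string; privateKey: string; ca?: string }> = [];
    for (const file of files.filter((f) => f.endsWith('.json'))) {
      certs.push(JSON.parse(await fs.readFile(path.join(certDir, file), 'utf8')));
    }
    return certs;
  },
  // Persist a freshly provisioned cert.
  // Note: sanitize wildcard domains before using them as file names.
  async save(domain: string, publicKey: string, privateKey: string, ca?: string) {
    await fs.mkdir(certDir, { recursive: true });
    const record = { domain, publicKey, privateKey, ...(ca ? { ca } : {}) };
    await fs.writeFile(path.join(certDir, `${domain}.json`), JSON.stringify(record, null, 2));
  },
  // Optional cleanup hook.
  async remove(domain: string) {
    await fs.rm(path.join(certDir, `${domain}.json`), { force: true });
  },
};
```

Pass an object like this as `certStore` in the SmartProxy options to avoid re-provisioning certificates on every restart.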
### IMetrics Interface
The `getMetrics()` method returns a cached metrics adapter that polls the Rust engine:
```typescript
const metrics = proxy.getMetrics();
// Connection metrics
metrics.connections.active(); // Current active connections
metrics.connections.total(); // Total connections since start
metrics.connections.byRoute(); // Map<routeName, activeCount>
metrics.connections.byIP(); // Map<ip, activeCount>
metrics.connections.topIPs(10); // Top N IPs by connection count
// Throughput (bytes/sec)
metrics.throughput.instant(); // { in: number, out: number }
metrics.throughput.recent(); // Recent average
metrics.throughput.average(); // Overall average
metrics.throughput.byRoute(); // Map<routeName, { in, out }>
// Request rates
metrics.requests.perSecond(); // Requests per second
metrics.requests.perMinute(); // Requests per minute
metrics.requests.total(); // Total requests
// Cumulative totals
metrics.totals.bytesIn(); // Total bytes received
metrics.totals.bytesOut(); // Total bytes sent
metrics.totals.connections(); // Total connections
// Percentiles
metrics.percentiles.connectionDuration(); // { p50, p95, p99 }
metrics.percentiles.bytesTransferred(); // { in: { p50, p95, p99 }, out: { p50, p95, p99 } }
```
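As a usage sketch, the adapter can be polled on a timer; the interval, log format, and the `proxy` handle below are illustrative only:

```typescript
const metrics = proxy.getMetrics();

setInterval(() => {
  const tp = metrics.throughput.instant();
  console.log(
    `active=${metrics.connections.active()} ` +
      `rps=${metrics.requests.perSecond()} ` +
      `in=${tp.in} B/s out=${tp.out} B/s`,
  );
  // byRoute() is documented above as a Map, so it can be iterated directly.
  for (const [route, active] of metrics.connections.byRoute()) {
    console.log(`  route ${route}: ${active} active connections`);
  }
}, 10_000);
```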
## 🐛 Troubleshooting
@@ -770,13 +894,13 @@ await nftProxy.stop(); // Remove rules
- ✅ Enable debug logging with `enableDetailedLogging: true`
### Rust Binary Not Found
SmartProxy searches for the Rust binary in this order:
1. `SMARTPROXY_RUST_BINARY` environment variable
2. Platform-specific npm package (`@push.rocks/smartproxy-linux-x64`, etc.)
3. Local dev build (`./rust/target/release/rustproxy`)
4. System PATH (`rustproxy`)
Set `rustBinaryPath` in options to override.
3. `dist_rust/rustproxy` relative to the package root (built by `tsrust`)
4. Local dev build (`./rust/target/release/rustproxy`)
5. System PATH (`rustproxy`)
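To pin the binary location explicitly, either export `SMARTPROXY_RUST_BINARY` before starting the process or set `rustBinaryPath`; the path below is only an example and the constructor shape is assumed:

```typescript
import { SmartProxy } from '@push.rocks/smartproxy';

// Equivalent shell form: export SMARTPROXY_RUST_BINARY=/opt/smartproxy/rustproxy
const proxy = new SmartProxy({
  rustBinaryPath: '/opt/smartproxy/rustproxy',
  routes: [],
});
```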
### Performance Tuning
- ✅ Use NFTables forwarding for high-traffic routes (Linux only)
@@ -796,6 +920,7 @@ Set `rustBinaryPath` in options to override.
7. **✅ Validate Routes** — Use `RouteValidator.validateRoutes()` to catch config errors before deployment
8. **🔀 Atomic Updates** — Use `updateRoutes()` for hot-reloading routes (mutex-locked, no downtime; see the sketch after this list)
9. **🎮 Use Socket Handlers** — For protocols beyond HTTP, implement custom socket handlers instead of fighting the proxy model
10. **💾 Use `certStore`** — Persist certs in your own storage to avoid re-provisioning on every restart
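As a combined sketch of practices 7 and 8, validate a new route table before swapping it in; `proxy` and `newRoutes` are assumed to exist, and the `{ valid, errors }` result shape is an assumption rather than a documented return type:

```typescript
import { RouteValidator } from '@push.rocks/smartproxy';

const result = RouteValidator.validateRoutes(newRoutes);
if (result.valid) {
  await proxy.updateRoutes(newRoutes); // atomic, mutex-locked hot reload
} else {
  console.error('Route configuration rejected:', result.errors);
}
```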
## License and Legal Information

2
rust/.cargo/config.toml Normal file
View File

@@ -0,0 +1,2 @@
[target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc"

44
rust/Cargo.lock generated
View File

@@ -285,12 +285,6 @@ dependencies = [
"windows-sys 0.61.2",
]
[[package]]
name = "fastrand"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
[[package]]
name = "find-msvc-tools"
version = "0.1.9"
@@ -618,12 +612,6 @@ version = "0.2.180"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc"
[[package]]
name = "linux-raw-sys"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039"
[[package]]
name = "lock_api"
version = "0.4.14"
@@ -866,19 +854,6 @@ dependencies = [
"windows-sys 0.52.0",
]
[[package]]
name = "rustix"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34"
dependencies = [
"bitflags",
"errno",
"libc",
"linux-raw-sys",
"windows-sys 0.61.2",
]
[[package]]
name = "rustls"
version = "0.23.36"
@@ -986,16 +961,20 @@ dependencies = [
"arc-swap",
"bytes",
"dashmap",
"http-body",
"http-body-util",
"hyper",
"hyper-util",
"regex",
"rustls",
"rustproxy-config",
"rustproxy-metrics",
"rustproxy-routing",
"rustproxy-security",
"thiserror 2.0.18",
"tokio",
"tokio-rustls",
"tokio-util",
"tracing",
]
@@ -1084,8 +1063,6 @@ dependencies = [
"rustls",
"rustproxy-config",
"serde",
"serde_json",
"tempfile",
"thiserror 2.0.18",
"tokio",
"tracing",
@@ -1260,19 +1237,6 @@ dependencies = [
"unicode-ident",
]
[[package]]
name = "tempfile"
version = "3.24.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "655da9c7eb6305c55742045d5a8d2037996d61d8de95806335c7c86ce0f82e9c"
dependencies = [
"fastrand",
"getrandom 0.3.4",
"once_cell",
"rustix",
"windows-sys 0.61.2",
]
[[package]]
name = "thiserror"
version = "1.0.69"

View File

@@ -29,6 +29,7 @@ serde_json = "1"
# HTTP proxy engine (hyper-based)
hyper = { version = "1", features = ["http1", "http2", "server", "client"] }
hyper-util = { version = "0.1", features = ["tokio", "http1", "http2", "client-legacy", "server-auto"] }
http-body = "1"
http-body-util = "0.1"
bytes = "1"

View File

@@ -17,6 +17,7 @@ pub fn create_http_route(
client_ip: None,
tls_version: None,
headers: None,
protocol: None,
},
action: RouteAction {
action_type: RouteActionType::Forward,
@@ -108,6 +109,7 @@ pub fn create_http_to_https_redirect(
client_ip: None,
tls_version: None,
headers: None,
protocol: None,
},
action: RouteAction {
action_type: RouteActionType::Forward,
@@ -200,6 +202,7 @@ pub fn create_load_balancer_route(
client_ip: None,
tls_version: None,
headers: None,
protocol: None,
},
action: RouteAction {
action_type: RouteActionType::Forward,

View File

@@ -29,9 +29,6 @@ pub struct AcmeOptions {
/// Enable automatic renewal (default: true)
#[serde(skip_serializing_if = "Option::is_none")]
pub auto_renew: Option<bool>,
/// Directory to store certificates (default: './certs')
#[serde(skip_serializing_if = "Option::is_none")]
pub certificate_store: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub skip_configured_certs: Option<bool>,
/// How often to check for renewals (default: 24)
@@ -361,7 +358,6 @@ mod tests {
use_production: None,
renew_threshold_days: None,
auto_renew: None,
certificate_store: None,
skip_configured_certs: None,
renew_check_interval_hours: None,
}),

View File

@@ -114,6 +114,10 @@ pub struct RouteMatch {
/// Match specific HTTP headers
#[serde(skip_serializing_if = "Option::is_none")]
pub headers: Option<HashMap<String, String>>,
/// Match specific protocol: "http" (includes h2 + websocket) or "tcp"
#[serde(skip_serializing_if = "Option::is_none")]
pub protocol: Option<String>,
}
// ─── Target Match ────────────────────────────────────────────────────

View File

@@ -14,11 +14,15 @@ rustproxy-metrics = { workspace = true }
hyper = { workspace = true }
hyper-util = { workspace = true }
regex = { workspace = true }
http-body = { workspace = true }
http-body-util = { workspace = true }
bytes = { workspace = true }
tokio = { workspace = true }
rustls = { workspace = true }
tokio-rustls = { workspace = true }
tracing = { workspace = true }
thiserror = { workspace = true }
anyhow = { workspace = true }
arc-swap = { workspace = true }
dashmap = { workspace = true }
tokio-util = { workspace = true }

View File

@@ -0,0 +1,126 @@
//! A body wrapper that counts bytes flowing through and reports them to MetricsCollector.
use std::pin::Pin;
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use std::task::{Context, Poll};
use bytes::Bytes;
use http_body::Frame;
use rustproxy_metrics::MetricsCollector;
/// Wraps any `http_body::Body` and counts data bytes passing through.
///
/// When the body is fully consumed or dropped, accumulated byte counts
/// are reported to the `MetricsCollector`.
///
/// The inner body is pinned on the heap to support `!Unpin` types like `hyper::body::Incoming`.
pub struct CountingBody<B> {
inner: Pin<Box<B>>,
counted_bytes: AtomicU64,
metrics: Arc<MetricsCollector>,
route_id: Option<String>,
source_ip: Option<String>,
/// Whether we count bytes as "in" (request body) or "out" (response body).
direction: Direction,
/// Whether we've already reported the bytes (to avoid double-reporting on drop).
reported: bool,
}
/// Which direction the bytes flow.
#[derive(Clone, Copy)]
pub enum Direction {
/// Request body: bytes flowing from client → upstream (counted as bytes_in)
In,
/// Response body: bytes flowing from upstream → client (counted as bytes_out)
Out,
}
impl<B> CountingBody<B> {
/// Create a new CountingBody wrapping an inner body.
pub fn new(
inner: B,
metrics: Arc<MetricsCollector>,
route_id: Option<String>,
source_ip: Option<String>,
direction: Direction,
) -> Self {
Self {
inner: Box::pin(inner),
counted_bytes: AtomicU64::new(0),
metrics,
route_id,
source_ip,
direction,
reported: false,
}
}
/// Report accumulated bytes to the metrics collector.
fn report(&mut self) {
if self.reported {
return;
}
self.reported = true;
let bytes = self.counted_bytes.load(Ordering::Relaxed);
if bytes == 0 {
return;
}
let route_id = self.route_id.as_deref();
let source_ip = self.source_ip.as_deref();
match self.direction {
Direction::In => self.metrics.record_bytes(bytes, 0, route_id, source_ip),
Direction::Out => self.metrics.record_bytes(0, bytes, route_id, source_ip),
}
}
}
impl<B> Drop for CountingBody<B> {
fn drop(&mut self) {
self.report();
}
}
// CountingBody is Unpin because inner is Pin<Box<B>> (always Unpin).
impl<B> Unpin for CountingBody<B> {}
impl<B> http_body::Body for CountingBody<B>
where
B: http_body::Body<Data = Bytes>,
{
type Data = Bytes;
type Error = B::Error;
fn poll_frame(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> {
let this = self.get_mut();
match this.inner.as_mut().poll_frame(cx) {
Poll::Ready(Some(Ok(frame))) => {
if let Some(data) = frame.data_ref() {
this.counted_bytes.fetch_add(data.len() as u64, Ordering::Relaxed);
}
Poll::Ready(Some(Ok(frame)))
}
Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))),
Poll::Ready(None) => {
// Body is fully consumed — report now
this.report();
Poll::Ready(None)
}
Poll::Pending => Poll::Pending,
}
}
fn is_end_stream(&self) -> bool {
self.inner.is_end_stream()
}
fn size_hint(&self) -> http_body::SizeHint {
self.inner.size_hint()
}
}

View File

@@ -3,12 +3,14 @@
//! Hyper-based HTTP proxy service for RustProxy.
//! Handles HTTP request parsing, route-based forwarding, and response filtering.
pub mod counting_body;
pub mod proxy_service;
pub mod request_filter;
pub mod response_filter;
pub mod template;
pub mod upstream_selector;
pub use counting_body::*;
pub use proxy_service::*;
pub use template::*;
pub use upstream_selector::*;

View File

@@ -6,6 +6,7 @@
use std::collections::HashMap;
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use bytes::Bytes;
use http_body_util::{BodyExt, Full, combinators::BoxBody};
@@ -14,20 +15,155 @@ use hyper::{Request, Response, StatusCode};
use hyper_util::rt::TokioIo;
use regex::Regex;
use tokio::net::TcpStream;
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, warn};
use std::pin::Pin;
use std::task::{Context, Poll};
use rustproxy_routing::RouteManager;
use rustproxy_metrics::MetricsCollector;
use crate::counting_body::{CountingBody, Direction};
use crate::request_filter::RequestFilter;
use crate::response_filter::ResponseFilter;
use crate::upstream_selector::UpstreamSelector;
/// Default upstream connect timeout (30 seconds).
const DEFAULT_CONNECT_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(30);
/// Default WebSocket inactivity timeout (1 hour).
const DEFAULT_WS_INACTIVITY_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(3600);
/// Default WebSocket max lifetime (24 hours).
const DEFAULT_WS_MAX_LIFETIME: std::time::Duration = std::time::Duration::from_secs(86400);
/// Backend stream that can be either plain TCP or TLS-wrapped.
/// Used for `terminate-and-reencrypt` mode where the backend requires TLS.
pub(crate) enum BackendStream {
Plain(TcpStream),
Tls(tokio_rustls::client::TlsStream<TcpStream>),
}
impl tokio::io::AsyncRead for BackendStream {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut tokio::io::ReadBuf<'_>,
) -> Poll<std::io::Result<()>> {
match self.get_mut() {
BackendStream::Plain(s) => Pin::new(s).poll_read(cx, buf),
BackendStream::Tls(s) => Pin::new(s).poll_read(cx, buf),
}
}
}
impl tokio::io::AsyncWrite for BackendStream {
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<std::io::Result<usize>> {
match self.get_mut() {
BackendStream::Plain(s) => Pin::new(s).poll_write(cx, buf),
BackendStream::Tls(s) => Pin::new(s).poll_write(cx, buf),
}
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
match self.get_mut() {
BackendStream::Plain(s) => Pin::new(s).poll_flush(cx),
BackendStream::Tls(s) => Pin::new(s).poll_flush(cx),
}
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
match self.get_mut() {
BackendStream::Plain(s) => Pin::new(s).poll_shutdown(cx),
BackendStream::Tls(s) => Pin::new(s).poll_shutdown(cx),
}
}
}
/// Connect to a backend over TLS. Uses InsecureBackendVerifier for internal backends
/// with self-signed certs (same pattern as tls_handler::connect_tls).
async fn connect_tls_backend(
host: &str,
port: u16,
) -> Result<tokio_rustls::client::TlsStream<TcpStream>, Box<dyn std::error::Error + Send + Sync>> {
let _ = rustls::crypto::ring::default_provider().install_default();
let config = rustls::ClientConfig::builder()
.dangerous()
.with_custom_certificate_verifier(Arc::new(InsecureBackendVerifier))
.with_no_client_auth();
let connector = tokio_rustls::TlsConnector::from(Arc::new(config));
let stream = TcpStream::connect(format!("{}:{}", host, port)).await?;
stream.set_nodelay(true)?;
let server_name = rustls::pki_types::ServerName::try_from(host.to_string())?;
let tls_stream = connector.connect(server_name, stream).await?;
debug!("Backend TLS connection established to {}:{}", host, port);
Ok(tls_stream)
}
/// Insecure certificate verifier for backend TLS connections.
/// Internal backends may use self-signed certs.
#[derive(Debug)]
struct InsecureBackendVerifier;
impl rustls::client::danger::ServerCertVerifier for InsecureBackendVerifier {
fn verify_server_cert(
&self,
_end_entity: &rustls::pki_types::CertificateDer<'_>,
_intermediates: &[rustls::pki_types::CertificateDer<'_>],
_server_name: &rustls::pki_types::ServerName<'_>,
_ocsp_response: &[u8],
_now: rustls::pki_types::UnixTime,
) -> Result<rustls::client::danger::ServerCertVerified, rustls::Error> {
Ok(rustls::client::danger::ServerCertVerified::assertion())
}
fn verify_tls12_signature(
&self,
_message: &[u8],
_cert: &rustls::pki_types::CertificateDer<'_>,
_dss: &rustls::DigitallySignedStruct,
) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
}
fn verify_tls13_signature(
&self,
_message: &[u8],
_cert: &rustls::pki_types::CertificateDer<'_>,
_dss: &rustls::DigitallySignedStruct,
) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
}
fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
vec![
rustls::SignatureScheme::RSA_PKCS1_SHA256,
rustls::SignatureScheme::RSA_PKCS1_SHA384,
rustls::SignatureScheme::RSA_PKCS1_SHA512,
rustls::SignatureScheme::ECDSA_NISTP256_SHA256,
rustls::SignatureScheme::ECDSA_NISTP384_SHA384,
rustls::SignatureScheme::ED25519,
rustls::SignatureScheme::RSA_PSS_SHA256,
rustls::SignatureScheme::RSA_PSS_SHA384,
rustls::SignatureScheme::RSA_PSS_SHA512,
]
}
}
/// HTTP proxy service that processes HTTP traffic.
pub struct HttpProxyService {
route_manager: Arc<RouteManager>,
metrics: Arc<MetricsCollector>,
upstream_selector: UpstreamSelector,
/// Timeout for connecting to upstream backends.
connect_timeout: std::time::Duration,
}
impl HttpProxyService {
@@ -36,6 +172,21 @@ impl HttpProxyService {
route_manager,
metrics,
upstream_selector: UpstreamSelector::new(),
connect_timeout: DEFAULT_CONNECT_TIMEOUT,
}
}
/// Create with a custom connect timeout.
pub fn with_connect_timeout(
route_manager: Arc<RouteManager>,
metrics: Arc<MetricsCollector>,
connect_timeout: std::time::Duration,
) -> Self {
Self {
route_manager,
metrics,
upstream_selector: UpstreamSelector::new(),
connect_timeout,
}
}
@@ -45,41 +196,59 @@ impl HttpProxyService {
stream: TcpStream,
peer_addr: std::net::SocketAddr,
port: u16,
cancel: CancellationToken,
) {
self.handle_io(stream, peer_addr, port).await;
self.handle_io(stream, peer_addr, port, cancel).await;
}
/// Handle an incoming HTTP connection on any IO type (plain TCP or TLS-terminated).
///
/// Uses HTTP/1.1 with upgrade support. For clients that negotiate HTTP/2,
/// use `handle_io_auto` instead.
/// Uses HTTP/1.1 with upgrade support. Responds to graceful shutdown via the
/// cancel token — in-flight requests complete, but no new requests are accepted.
pub async fn handle_io<I>(
self: Arc<Self>,
stream: I,
peer_addr: std::net::SocketAddr,
port: u16,
cancel: CancellationToken,
)
where
I: tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin + Send + 'static,
{
let io = TokioIo::new(stream);
let cancel_inner = cancel.clone();
let service = hyper::service::service_fn(move |req: Request<Incoming>| {
let svc = Arc::clone(&self);
let peer = peer_addr;
let cn = cancel_inner.clone();
async move {
svc.handle_request(req, peer, port).await
svc.handle_request(req, peer, port, cn).await
}
});
// Use http1::Builder with upgrades for WebSocket support
let conn = hyper::server::conn::http1::Builder::new()
let mut conn = hyper::server::conn::http1::Builder::new()
.keep_alive(true)
.serve_connection(io, service)
.with_upgrades();
if let Err(e) = conn.await {
debug!("HTTP connection error from {}: {}", peer_addr, e);
// Use select to support graceful shutdown via cancellation token
let conn_pin = std::pin::Pin::new(&mut conn);
tokio::select! {
result = conn_pin => {
if let Err(e) = result {
debug!("HTTP connection error from {}: {}", peer_addr, e);
}
}
_ = cancel.cancelled() => {
// Graceful shutdown: let in-flight request finish, stop accepting new ones
let conn_pin = std::pin::Pin::new(&mut conn);
conn_pin.graceful_shutdown();
if let Err(e) = conn.await {
debug!("HTTP connection error during shutdown from {}: {}", peer_addr, e);
}
}
}
}
@@ -89,6 +258,7 @@ impl HttpProxyService {
req: Request<Incoming>,
peer_addr: std::net::SocketAddr,
port: u16,
cancel: CancellationToken,
) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
let host = req.headers()
.get("host")
@@ -125,6 +295,7 @@ impl HttpProxyService {
tls_version: None,
headers: Some(&headers),
is_tls: false,
protocol: Some("http"),
};
let route_match = match self.route_manager.find_route(&ctx) {
@@ -136,12 +307,14 @@ impl HttpProxyService {
};
let route_id = route_match.route.id.as_deref();
self.metrics.connection_opened(route_id);
let ip_str = peer_addr.ip().to_string();
self.metrics.record_http_request();
self.metrics.connection_opened(route_id, Some(&ip_str));
// Apply request filters (IP check, rate limiting, auth)
if let Some(ref security) = route_match.route.security {
if let Some(response) = RequestFilter::apply(security, &req, &peer_addr) {
self.metrics.connection_closed(route_id);
self.metrics.connection_closed(route_id, Some(&ip_str));
return Ok(response);
}
}
@@ -149,7 +322,7 @@ impl HttpProxyService {
// Check for test response (returns immediately, no upstream needed)
if let Some(ref advanced) = route_match.route.action.advanced {
if let Some(ref test_response) = advanced.test_response {
self.metrics.connection_closed(route_id);
self.metrics.connection_closed(route_id, Some(&ip_str));
return Ok(Self::build_test_response(test_response));
}
}
@@ -157,7 +330,7 @@ impl HttpProxyService {
// Check for static file serving
if let Some(ref advanced) = route_match.route.action.advanced {
if let Some(ref static_files) = advanced.static_files {
self.metrics.connection_closed(route_id);
self.metrics.connection_closed(route_id, Some(&ip_str));
return Ok(Self::serve_static_file(&path, static_files));
}
}
@@ -166,12 +339,20 @@ impl HttpProxyService {
let target = match route_match.target {
Some(t) => t,
None => {
self.metrics.connection_closed(route_id);
self.metrics.connection_closed(route_id, Some(&ip_str));
return Ok(error_response(StatusCode::BAD_GATEWAY, "No target available"));
}
};
let upstream = self.upstream_selector.select(target, &peer_addr, port);
let mut upstream = self.upstream_selector.select(target, &peer_addr, port);
// If the route uses terminate-and-reencrypt, always re-encrypt to backend
if let Some(ref tls) = route_match.route.action.tls {
if tls.mode == rustproxy_config::TlsMode::TerminateAndReencrypt {
upstream.use_tls = true;
}
}
let upstream_key = format!("{}:{}", upstream.host, upstream.port);
self.upstream_selector.connection_started(&upstream_key);
@@ -184,7 +365,7 @@ impl HttpProxyService {
if is_websocket {
let result = self.handle_websocket_upgrade(
req, peer_addr, &upstream, route_match.route, route_id, &upstream_key,
req, peer_addr, &upstream, route_match.route, route_id, &upstream_key, cancel, &ip_str,
).await;
// Note: for WebSocket, connection_ended is called inside
// the spawned tunnel task when the connection closes.
@@ -223,26 +404,103 @@ impl HttpProxyService {
}
}
// Connect to upstream
let upstream_stream = match TcpStream::connect(format!("{}:{}", upstream.host, upstream.port)).await {
Ok(s) => s,
Err(e) => {
error!("Failed to connect to upstream {}:{}: {}", upstream.host, upstream.port, e);
self.upstream_selector.connection_ended(&upstream_key);
self.metrics.connection_closed(route_id);
return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend unavailable"));
// Add standard reverse-proxy headers (X-Forwarded-*)
{
let original_host = parts.headers.get("host")
.and_then(|h| h.to_str().ok())
.unwrap_or("");
let forwarded_proto = if route_match.route.action.tls.as_ref()
.map(|t| matches!(t.mode,
rustproxy_config::TlsMode::Terminate
| rustproxy_config::TlsMode::TerminateAndReencrypt))
.unwrap_or(false)
{
"https"
} else {
"http"
};
// X-Forwarded-For: append client IP to existing chain
let client_ip = peer_addr.ip().to_string();
let xff_value = if let Some(existing) = upstream_headers.get("x-forwarded-for") {
format!("{}, {}", existing.to_str().unwrap_or(""), client_ip)
} else {
client_ip
};
if let Ok(val) = hyper::header::HeaderValue::from_str(&xff_value) {
upstream_headers.insert(
hyper::header::HeaderName::from_static("x-forwarded-for"),
val,
);
}
// X-Forwarded-Host: original Host header
if let Ok(val) = hyper::header::HeaderValue::from_str(original_host) {
upstream_headers.insert(
hyper::header::HeaderName::from_static("x-forwarded-host"),
val,
);
}
// X-Forwarded-Proto: original client protocol
if let Ok(val) = hyper::header::HeaderValue::from_str(forwarded_proto) {
upstream_headers.insert(
hyper::header::HeaderName::from_static("x-forwarded-proto"),
val,
);
}
}
// Connect to upstream with timeout (TLS if upstream.use_tls is set)
let backend = if upstream.use_tls {
match tokio::time::timeout(
self.connect_timeout,
connect_tls_backend(&upstream.host, upstream.port),
).await {
Ok(Ok(tls)) => BackendStream::Tls(tls),
Ok(Err(e)) => {
error!("Failed TLS connect to upstream {}:{}: {}", upstream.host, upstream.port, e);
self.upstream_selector.connection_ended(&upstream_key);
self.metrics.connection_closed(route_id, Some(&ip_str));
return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend TLS unavailable"));
}
Err(_) => {
error!("Upstream TLS connect timeout for {}:{}", upstream.host, upstream.port);
self.upstream_selector.connection_ended(&upstream_key);
self.metrics.connection_closed(route_id, Some(&ip_str));
return Ok(error_response(StatusCode::GATEWAY_TIMEOUT, "Backend TLS connect timeout"));
}
}
} else {
match tokio::time::timeout(
self.connect_timeout,
TcpStream::connect(format!("{}:{}", upstream.host, upstream.port)),
).await {
Ok(Ok(s)) => {
s.set_nodelay(true).ok();
BackendStream::Plain(s)
}
Ok(Err(e)) => {
error!("Failed to connect to upstream {}:{}: {}", upstream.host, upstream.port, e);
self.upstream_selector.connection_ended(&upstream_key);
self.metrics.connection_closed(route_id, Some(&ip_str));
return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend unavailable"));
}
Err(_) => {
error!("Upstream connect timeout for {}:{}", upstream.host, upstream.port);
self.upstream_selector.connection_ended(&upstream_key);
self.metrics.connection_closed(route_id, Some(&ip_str));
return Ok(error_response(StatusCode::GATEWAY_TIMEOUT, "Backend connect timeout"));
}
}
};
upstream_stream.set_nodelay(true).ok();
let io = TokioIo::new(upstream_stream);
let io = TokioIo::new(backend);
let result = if use_h2 {
// HTTP/2 backend
self.forward_h2(io, parts, body, upstream_headers, &upstream_path, &upstream, route_match.route, route_id).await
self.forward_h2(io, parts, body, upstream_headers, &upstream_path, &upstream, route_match.route, route_id, &ip_str).await
} else {
// HTTP/1.1 backend (default)
self.forward_h1(io, parts, body, upstream_headers, &upstream_path, &upstream, route_match.route, route_id).await
self.forward_h1(io, parts, body, upstream_headers, &upstream_path, &upstream, route_match.route, route_id, &ip_str).await
};
self.upstream_selector.connection_ended(&upstream_key);
result
@@ -251,20 +509,21 @@ impl HttpProxyService {
/// Forward request to backend via HTTP/1.1 with body streaming.
async fn forward_h1(
&self,
io: TokioIo<TcpStream>,
io: TokioIo<BackendStream>,
parts: hyper::http::request::Parts,
body: Incoming,
upstream_headers: hyper::HeaderMap,
upstream_path: &str,
upstream: &crate::upstream_selector::UpstreamSelection,
_upstream: &crate::upstream_selector::UpstreamSelection,
route: &rustproxy_config::RouteConfig,
route_id: Option<&str>,
source_ip: &str,
) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
let (mut sender, conn) = match hyper::client::conn::http1::handshake(io).await {
Ok(h) => h,
Err(e) => {
error!("Upstream handshake failed: {}", e);
self.metrics.connection_closed(route_id);
self.metrics.connection_closed(route_id, Some(source_ip));
return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend handshake failed"));
}
};
@@ -282,46 +541,51 @@ impl HttpProxyService {
if let Some(headers) = upstream_req.headers_mut() {
*headers = upstream_headers;
if let Ok(host_val) = hyper::header::HeaderValue::from_str(
&format!("{}:{}", upstream.host, upstream.port)
) {
headers.insert(hyper::header::HOST, host_val);
}
}
// Wrap the request body in CountingBody to track bytes_in
let counting_req_body = CountingBody::new(
body,
Arc::clone(&self.metrics),
route_id.map(|s| s.to_string()),
Some(source_ip.to_string()),
Direction::In,
);
// Stream the request body through to upstream
let upstream_req = upstream_req.body(body).unwrap();
let upstream_req = upstream_req.body(counting_req_body).unwrap();
let upstream_response = match sender.send_request(upstream_req).await {
Ok(resp) => resp,
Err(e) => {
error!("Upstream request failed: {}", e);
self.metrics.connection_closed(route_id);
self.metrics.connection_closed(route_id, Some(source_ip));
return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend request failed"));
}
};
self.build_streaming_response(upstream_response, route, route_id).await
self.build_streaming_response(upstream_response, route, route_id, source_ip).await
}
/// Forward request to backend via HTTP/2 with body streaming.
async fn forward_h2(
&self,
io: TokioIo<TcpStream>,
io: TokioIo<BackendStream>,
parts: hyper::http::request::Parts,
body: Incoming,
upstream_headers: hyper::HeaderMap,
upstream_path: &str,
upstream: &crate::upstream_selector::UpstreamSelection,
_upstream: &crate::upstream_selector::UpstreamSelection,
route: &rustproxy_config::RouteConfig,
route_id: Option<&str>,
source_ip: &str,
) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
let exec = hyper_util::rt::TokioExecutor::new();
let (mut sender, conn) = match hyper::client::conn::http2::handshake(exec, io).await {
Ok(h) => h,
Err(e) => {
error!("HTTP/2 upstream handshake failed: {}", e);
self.metrics.connection_closed(route_id);
self.metrics.connection_closed(route_id, Some(source_ip));
return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend H2 handshake failed"));
}
};
@@ -338,34 +602,43 @@ impl HttpProxyService {
if let Some(headers) = upstream_req.headers_mut() {
*headers = upstream_headers;
if let Ok(host_val) = hyper::header::HeaderValue::from_str(
&format!("{}:{}", upstream.host, upstream.port)
) {
headers.insert(hyper::header::HOST, host_val);
}
}
// Wrap the request body in CountingBody to track bytes_in
let counting_req_body = CountingBody::new(
body,
Arc::clone(&self.metrics),
route_id.map(|s| s.to_string()),
Some(source_ip.to_string()),
Direction::In,
);
// Stream the request body through to upstream
let upstream_req = upstream_req.body(body).unwrap();
let upstream_req = upstream_req.body(counting_req_body).unwrap();
let upstream_response = match sender.send_request(upstream_req).await {
Ok(resp) => resp,
Err(e) => {
error!("HTTP/2 upstream request failed: {}", e);
self.metrics.connection_closed(route_id);
self.metrics.connection_closed(route_id, Some(source_ip));
return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend H2 request failed"));
}
};
self.build_streaming_response(upstream_response, route, route_id).await
self.build_streaming_response(upstream_response, route, route_id, source_ip).await
}
/// Build the client-facing response from an upstream response, streaming the body.
///
/// The response body is wrapped in a `CountingBody` that counts bytes as they
/// stream from upstream to client. When the body is fully consumed (or dropped),
/// it reports byte counts to the metrics collector; `connection_closed` is recorded
/// below once the streaming body has been handed back to hyper.
async fn build_streaming_response(
&self,
upstream_response: Response<Incoming>,
route: &rustproxy_config::RouteConfig,
route_id: Option<&str>,
source_ip: &str,
) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
let (resp_parts, resp_body) = upstream_response.into_parts();
@@ -377,10 +650,23 @@ impl HttpProxyService {
ResponseFilter::apply_headers(route, headers, None);
}
self.metrics.connection_closed(route_id);
// Wrap the response body in CountingBody to track bytes_out.
// CountingBody reports the accumulated byte counts to the metrics collector
// once the body stream completes or is dropped.
let counting_body = CountingBody::new(
resp_body,
Arc::clone(&self.metrics),
route_id.map(|s| s.to_string()),
Some(source_ip.to_string()),
Direction::Out,
);
// Stream the response body directly from upstream to client
let body: BoxBody<Bytes, hyper::Error> = BoxBody::new(resp_body);
// Close the connection metric now — the HTTP request/response cycle is done
// from the proxy's perspective once we hand the streaming body to hyper.
// Bytes will still be counted as they flow.
self.metrics.connection_closed(route_id, Some(source_ip));
let body: BoxBody<Bytes, hyper::Error> = BoxBody::new(counting_body);
Ok(response.body(body).unwrap())
}
@@ -394,6 +680,8 @@ impl HttpProxyService {
route: &rustproxy_config::RouteConfig,
route_id: Option<&str>,
upstream_key: &str,
cancel: CancellationToken,
source_ip: &str,
) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
use tokio::io::{AsyncReadExt, AsyncWriteExt};
@@ -409,7 +697,7 @@ impl HttpProxyService {
.unwrap_or("");
if !allowed_origins.is_empty() && !allowed_origins.iter().any(|o| o == "*" || o == origin) {
self.upstream_selector.connection_ended(upstream_key);
self.metrics.connection_closed(route_id);
self.metrics.connection_closed(route_id, Some(source_ip));
return Ok(error_response(StatusCode::FORBIDDEN, "Origin not allowed"));
}
}
@@ -417,18 +705,49 @@ impl HttpProxyService {
info!("WebSocket upgrade from {} -> {}:{}", peer_addr, upstream.host, upstream.port);
let mut upstream_stream = match TcpStream::connect(
format!("{}:{}", upstream.host, upstream.port)
).await {
Ok(s) => s,
Err(e) => {
error!("WebSocket: failed to connect upstream {}:{}: {}", upstream.host, upstream.port, e);
self.upstream_selector.connection_ended(upstream_key);
self.metrics.connection_closed(route_id);
return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend unavailable"));
// Connect to upstream with timeout (TLS if upstream.use_tls is set)
let mut upstream_stream: BackendStream = if upstream.use_tls {
match tokio::time::timeout(
self.connect_timeout,
connect_tls_backend(&upstream.host, upstream.port),
).await {
Ok(Ok(tls)) => BackendStream::Tls(tls),
Ok(Err(e)) => {
error!("WebSocket: failed TLS connect upstream {}:{}: {}", upstream.host, upstream.port, e);
self.upstream_selector.connection_ended(upstream_key);
self.metrics.connection_closed(route_id, Some(source_ip));
return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend TLS unavailable"));
}
Err(_) => {
error!("WebSocket: upstream TLS connect timeout for {}:{}", upstream.host, upstream.port);
self.upstream_selector.connection_ended(upstream_key);
self.metrics.connection_closed(route_id, Some(source_ip));
return Ok(error_response(StatusCode::GATEWAY_TIMEOUT, "Backend TLS connect timeout"));
}
}
} else {
match tokio::time::timeout(
self.connect_timeout,
TcpStream::connect(format!("{}:{}", upstream.host, upstream.port)),
).await {
Ok(Ok(s)) => {
s.set_nodelay(true).ok();
BackendStream::Plain(s)
}
Ok(Err(e)) => {
error!("WebSocket: failed to connect upstream {}:{}: {}", upstream.host, upstream.port, e);
self.upstream_selector.connection_ended(upstream_key);
self.metrics.connection_closed(route_id, Some(source_ip));
return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend unavailable"));
}
Err(_) => {
error!("WebSocket: upstream connect timeout for {}:{}", upstream.host, upstream.port);
self.upstream_selector.connection_ended(upstream_key);
self.metrics.connection_closed(route_id, Some(source_ip));
return Ok(error_response(StatusCode::GATEWAY_TIMEOUT, "Backend connect timeout"));
}
}
};
upstream_stream.set_nodelay(true).ok();
let path = req.uri().path().to_string();
let upstream_path = {
@@ -455,13 +774,44 @@ impl HttpProxyService {
parts.method, upstream_path
);
let upstream_host = format!("{}:{}", upstream.host, upstream.port);
// Copy all original headers (preserving the client's Host header).
// Skip X-Forwarded-* since we set them ourselves below.
for (name, value) in parts.headers.iter() {
if name == hyper::header::HOST {
raw_request.push_str(&format!("host: {}\r\n", upstream_host));
} else {
raw_request.push_str(&format!("{}: {}\r\n", name, value.to_str().unwrap_or("")));
let name_str = name.as_str();
if name_str == "x-forwarded-for"
|| name_str == "x-forwarded-host"
|| name_str == "x-forwarded-proto"
{
continue;
}
raw_request.push_str(&format!("{}: {}\r\n", name, value.to_str().unwrap_or("")));
}
// Add standard reverse-proxy headers (X-Forwarded-*)
{
let original_host = parts.headers.get("host")
.and_then(|h| h.to_str().ok())
.unwrap_or("");
let forwarded_proto = if route.action.tls.as_ref()
.map(|t| matches!(t.mode,
rustproxy_config::TlsMode::Terminate
| rustproxy_config::TlsMode::TerminateAndReencrypt))
.unwrap_or(false)
{
"https"
} else {
"http"
};
let client_ip = peer_addr.ip().to_string();
let xff_value = if let Some(existing) = parts.headers.get("x-forwarded-for") {
format!("{}, {}", existing.to_str().unwrap_or(""), client_ip)
} else {
client_ip
};
raw_request.push_str(&format!("x-forwarded-for: {}\r\n", xff_value));
raw_request.push_str(&format!("x-forwarded-host: {}\r\n", original_host));
raw_request.push_str(&format!("x-forwarded-proto: {}\r\n", forwarded_proto));
}
if let Some(ref route_headers) = route.headers {
@@ -486,7 +836,7 @@ impl HttpProxyService {
if let Err(e) = upstream_stream.write_all(raw_request.as_bytes()).await {
error!("WebSocket: failed to send upgrade request to upstream: {}", e);
self.upstream_selector.connection_ended(upstream_key);
self.metrics.connection_closed(route_id);
self.metrics.connection_closed(route_id, Some(source_ip));
return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend write failed"));
}
@@ -497,7 +847,7 @@ impl HttpProxyService {
Ok(0) => {
error!("WebSocket: upstream closed before completing handshake");
self.upstream_selector.connection_ended(upstream_key);
self.metrics.connection_closed(route_id);
self.metrics.connection_closed(route_id, Some(source_ip));
return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend closed"));
}
Ok(_) => {
@@ -511,14 +861,14 @@ impl HttpProxyService {
if response_buf.len() > 8192 {
error!("WebSocket: upstream response headers too large");
self.upstream_selector.connection_ended(upstream_key);
self.metrics.connection_closed(route_id);
self.metrics.connection_closed(route_id, Some(source_ip));
return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend response too large"));
}
}
Err(e) => {
error!("WebSocket: failed to read upstream response: {}", e);
self.upstream_selector.connection_ended(upstream_key);
self.metrics.connection_closed(route_id);
self.metrics.connection_closed(route_id, Some(source_ip));
return Ok(error_response(StatusCode::BAD_GATEWAY, "Backend read failed"));
}
}
@@ -536,7 +886,7 @@ impl HttpProxyService {
if status_code != 101 {
debug!("WebSocket: upstream rejected upgrade with status {}", status_code);
self.upstream_selector.connection_ended(upstream_key);
self.metrics.connection_closed(route_id);
self.metrics.connection_closed(route_id, Some(source_ip));
return Ok(error_response(
StatusCode::from_u16(status_code).unwrap_or(StatusCode::BAD_GATEWAY),
"WebSocket upgrade rejected by backend",
@@ -570,6 +920,7 @@ impl HttpProxyService {
let metrics = Arc::clone(&self.metrics);
let route_id_owned = route_id.map(|s| s.to_string());
let source_ip_owned = source_ip.to_string();
let upstream_selector = self.upstream_selector.clone();
let upstream_key_owned = upstream_key.to_string();
@@ -580,7 +931,7 @@ impl HttpProxyService {
debug!("WebSocket: client upgrade failed: {}", e);
upstream_selector.connection_ended(&upstream_key_owned);
if let Some(ref rid) = route_id_owned {
metrics.connection_closed(Some(rid.as_str()));
metrics.connection_closed(Some(rid.as_str()), Some(&source_ip_owned));
}
return;
}
@@ -591,6 +942,11 @@ impl HttpProxyService {
let (mut cr, mut cw) = tokio::io::split(client_io);
let (mut ur, mut uw) = tokio::io::split(upstream_stream);
// Shared activity tracker for the watchdog
let last_activity = Arc::new(AtomicU64::new(0));
let start = std::time::Instant::now();
let la1 = Arc::clone(&last_activity);
let c2u = tokio::spawn(async move {
let mut buf = vec![0u8; 65536];
let mut total = 0u64;
@@ -603,11 +959,13 @@ impl HttpProxyService {
break;
}
total += n as u64;
la1.store(start.elapsed().as_millis() as u64, Ordering::Relaxed);
}
let _ = uw.shutdown().await;
total
});
let la2 = Arc::clone(&last_activity);
let u2c = tokio::spawn(async move {
let mut buf = vec![0u8; 65536];
let mut total = 0u64;
@@ -620,20 +978,66 @@ impl HttpProxyService {
break;
}
total += n as u64;
la2.store(start.elapsed().as_millis() as u64, Ordering::Relaxed);
}
let _ = cw.shutdown().await;
total
});
// Watchdog: monitors inactivity, max lifetime, and cancellation
let la_watch = Arc::clone(&last_activity);
let c2u_handle = c2u.abort_handle();
let u2c_handle = u2c.abort_handle();
let inactivity_timeout = DEFAULT_WS_INACTIVITY_TIMEOUT;
let max_lifetime = DEFAULT_WS_MAX_LIFETIME;
let watchdog = tokio::spawn(async move {
let check_interval = std::time::Duration::from_secs(5);
let mut last_seen = 0u64;
loop {
tokio::select! {
_ = tokio::time::sleep(check_interval) => {}
_ = cancel.cancelled() => {
debug!("WebSocket tunnel cancelled by shutdown");
c2u_handle.abort();
u2c_handle.abort();
break;
}
}
// Check max lifetime
if start.elapsed() >= max_lifetime {
debug!("WebSocket tunnel exceeded max lifetime, closing");
c2u_handle.abort();
u2c_handle.abort();
break;
}
// Check inactivity
let current = la_watch.load(Ordering::Relaxed);
if current == last_seen {
let elapsed_since_activity = start.elapsed().as_millis() as u64 - current;
if elapsed_since_activity >= inactivity_timeout.as_millis() as u64 {
debug!("WebSocket tunnel inactive for {}ms, closing", elapsed_since_activity);
c2u_handle.abort();
u2c_handle.abort();
break;
}
}
last_seen = current;
}
});
let bytes_in = c2u.await.unwrap_or(0);
let bytes_out = u2c.await.unwrap_or(0);
watchdog.abort();
debug!("WebSocket tunnel closed: {} bytes in, {} bytes out", bytes_in, bytes_out);
upstream_selector.connection_ended(&upstream_key_owned);
if let Some(ref rid) = route_id_owned {
metrics.record_bytes(bytes_in, bytes_out, Some(rid.as_str()));
metrics.connection_closed(Some(rid.as_str()));
metrics.record_bytes(bytes_in, bytes_out, Some(rid.as_str()), Some(&source_ip_owned));
metrics.connection_closed(Some(rid.as_str()), Some(&source_ip_owned));
}
});
@@ -812,6 +1216,7 @@ impl Default for HttpProxyService {
route_manager: Arc::new(RouteManager::new(vec![])),
metrics: Arc::new(MetricsCollector::new()),
upstream_selector: UpstreamSelector::new(),
connect_timeout: DEFAULT_CONNECT_TIMEOUT,
}
}
}

View File

@@ -1,6 +1,9 @@
use dashmap::DashMap;
use serde::{Deserialize, Serialize};
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Mutex;
use crate::throughput::{ThroughputSample, ThroughputTracker};
/// Aggregated metrics snapshot.
#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -12,7 +15,14 @@ pub struct Metrics {
pub bytes_out: u64,
pub throughput_in_bytes_per_sec: u64,
pub throughput_out_bytes_per_sec: u64,
pub throughput_recent_in_bytes_per_sec: u64,
pub throughput_recent_out_bytes_per_sec: u64,
pub routes: std::collections::HashMap<String, RouteMetrics>,
pub ips: std::collections::HashMap<String, IpMetrics>,
pub throughput_history: Vec<ThroughputSample>,
pub total_http_requests: u64,
pub http_requests_per_sec: u64,
pub http_requests_per_sec_recent: u64,
}
/// Per-route metrics.
@@ -25,6 +35,20 @@ pub struct RouteMetrics {
pub bytes_out: u64,
pub throughput_in_bytes_per_sec: u64,
pub throughput_out_bytes_per_sec: u64,
pub throughput_recent_in_bytes_per_sec: u64,
pub throughput_recent_out_bytes_per_sec: u64,
}
/// Per-IP metrics.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct IpMetrics {
pub active_connections: u64,
pub total_connections: u64,
pub bytes_in: u64,
pub bytes_out: u64,
pub throughput_in_bytes_per_sec: u64,
pub throughput_out_bytes_per_sec: u64,
}
/// Statistics snapshot.
@@ -38,7 +62,18 @@ pub struct Statistics {
pub uptime_seconds: u64,
}
/// Default retention for throughput samples (1 hour).
const DEFAULT_RETENTION_SECONDS: usize = 3600;
/// Maximum number of IPs to include in a snapshot (top by active connections).
const MAX_IPS_IN_SNAPSHOT: usize = 100;
/// Metrics collector tracking connections and throughput.
///
/// Design: The hot path (`record_bytes`) is entirely lock-free — it only touches
/// `AtomicU64` counters. The cold path (`sample_all`, called at 1Hz) drains
/// those atomics and feeds the throughput trackers under a Mutex. This avoids
/// contention when `record_bytes` is called per-chunk in the TCP copy loop.
pub struct MetricsCollector {
active_connections: AtomicU64,
total_connections: AtomicU64,
@@ -51,10 +86,38 @@ pub struct MetricsCollector {
/// Per-route byte counters
route_bytes_in: DashMap<String, AtomicU64>,
route_bytes_out: DashMap<String, AtomicU64>,
// ── Per-IP tracking ──
ip_connections: DashMap<String, AtomicU64>,
ip_total_connections: DashMap<String, AtomicU64>,
ip_bytes_in: DashMap<String, AtomicU64>,
ip_bytes_out: DashMap<String, AtomicU64>,
ip_pending_tp: DashMap<String, (AtomicU64, AtomicU64)>,
ip_throughput: DashMap<String, Mutex<ThroughputTracker>>,
// ── HTTP request tracking ──
total_http_requests: AtomicU64,
pending_http_requests: AtomicU64,
http_request_throughput: Mutex<ThroughputTracker>,
// ── Lock-free pending throughput counters (hot path) ──
global_pending_tp_in: AtomicU64,
global_pending_tp_out: AtomicU64,
route_pending_tp: DashMap<String, (AtomicU64, AtomicU64)>,
// ── Throughput history — only locked during sampling (cold path) ──
global_throughput: Mutex<ThroughputTracker>,
route_throughput: DashMap<String, Mutex<ThroughputTracker>>,
retention_seconds: usize,
}
impl MetricsCollector {
pub fn new() -> Self {
Self::with_retention(DEFAULT_RETENTION_SECONDS)
}
/// Create a MetricsCollector with a custom retention period for throughput history.
pub fn with_retention(retention_seconds: usize) -> Self {
Self {
active_connections: AtomicU64::new(0),
total_connections: AtomicU64::new(0),
@@ -64,11 +127,26 @@ impl MetricsCollector {
route_total_connections: DashMap::new(),
route_bytes_in: DashMap::new(),
route_bytes_out: DashMap::new(),
ip_connections: DashMap::new(),
ip_total_connections: DashMap::new(),
ip_bytes_in: DashMap::new(),
ip_bytes_out: DashMap::new(),
ip_pending_tp: DashMap::new(),
ip_throughput: DashMap::new(),
total_http_requests: AtomicU64::new(0),
pending_http_requests: AtomicU64::new(0),
http_request_throughput: Mutex::new(ThroughputTracker::new(retention_seconds)),
global_pending_tp_in: AtomicU64::new(0),
global_pending_tp_out: AtomicU64::new(0),
route_pending_tp: DashMap::new(),
global_throughput: Mutex::new(ThroughputTracker::new(retention_seconds)),
route_throughput: DashMap::new(),
retention_seconds,
}
}
/// Record a new connection.
pub fn connection_opened(&self, route_id: Option<&str>) {
pub fn connection_opened(&self, route_id: Option<&str>, source_ip: Option<&str>) {
self.active_connections.fetch_add(1, Ordering::Relaxed);
self.total_connections.fetch_add(1, Ordering::Relaxed);
@@ -82,10 +160,21 @@ impl MetricsCollector {
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(1, Ordering::Relaxed);
}
if let Some(ip) = source_ip {
self.ip_connections
.entry(ip.to_string())
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(1, Ordering::Relaxed);
self.ip_total_connections
.entry(ip.to_string())
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(1, Ordering::Relaxed);
}
}
/// Record a connection closing.
pub fn connection_closed(&self, route_id: Option<&str>) {
pub fn connection_closed(&self, route_id: Option<&str>, source_ip: Option<&str>) {
self.active_connections.fetch_sub(1, Ordering::Relaxed);
if let Some(route_id) = route_id {
@@ -96,13 +185,34 @@ impl MetricsCollector {
}
}
}
if let Some(ip) = source_ip {
if let Some(counter) = self.ip_connections.get(ip) {
let val = counter.load(Ordering::Relaxed);
if val > 0 {
counter.fetch_sub(1, Ordering::Relaxed);
}
// Clean up zero-count entries to prevent memory growth
if val <= 1 {
drop(counter);
self.ip_connections.remove(ip);
}
}
}
}
/// Record bytes transferred.
pub fn record_bytes(&self, bytes_in: u64, bytes_out: u64, route_id: Option<&str>) {
/// Record bytes transferred (lock-free hot path).
///
/// Called per-chunk in the TCP copy loop. Only touches AtomicU64 counters —
/// no Mutex is taken. The throughput trackers are fed during `sample_all()`.
pub fn record_bytes(&self, bytes_in: u64, bytes_out: u64, route_id: Option<&str>, source_ip: Option<&str>) {
self.total_bytes_in.fetch_add(bytes_in, Ordering::Relaxed);
self.total_bytes_out.fetch_add(bytes_out, Ordering::Relaxed);
// Accumulate into lock-free pending throughput counters
self.global_pending_tp_in.fetch_add(bytes_in, Ordering::Relaxed);
self.global_pending_tp_out.fetch_add(bytes_out, Ordering::Relaxed);
if let Some(route_id) = route_id {
self.route_bytes_in
.entry(route_id.to_string())
@@ -112,6 +222,123 @@ impl MetricsCollector {
.entry(route_id.to_string())
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(bytes_out, Ordering::Relaxed);
// Accumulate into per-route pending throughput counters (lock-free)
let entry = self.route_pending_tp
.entry(route_id.to_string())
.or_insert_with(|| (AtomicU64::new(0), AtomicU64::new(0)));
entry.0.fetch_add(bytes_in, Ordering::Relaxed);
entry.1.fetch_add(bytes_out, Ordering::Relaxed);
}
if let Some(ip) = source_ip {
self.ip_bytes_in
.entry(ip.to_string())
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(bytes_in, Ordering::Relaxed);
self.ip_bytes_out
.entry(ip.to_string())
.or_insert_with(|| AtomicU64::new(0))
.fetch_add(bytes_out, Ordering::Relaxed);
// Accumulate into per-IP pending throughput counters (lock-free)
let entry = self.ip_pending_tp
.entry(ip.to_string())
.or_insert_with(|| (AtomicU64::new(0), AtomicU64::new(0)));
entry.0.fetch_add(bytes_in, Ordering::Relaxed);
entry.1.fetch_add(bytes_out, Ordering::Relaxed);
}
}
/// Record an HTTP request (called once per request in the HTTP proxy).
pub fn record_http_request(&self) {
self.total_http_requests.fetch_add(1, Ordering::Relaxed);
self.pending_http_requests.fetch_add(1, Ordering::Relaxed);
}
/// Take a throughput sample on all trackers (cold path, call at 1Hz or configured interval).
///
/// Drains the lock-free pending counters and feeds the accumulated bytes
/// into the throughput trackers (under Mutex). This is the only place
/// the Mutex is locked.
pub fn sample_all(&self) {
// Drain global pending bytes and feed into the tracker
let global_in = self.global_pending_tp_in.swap(0, Ordering::Relaxed);
let global_out = self.global_pending_tp_out.swap(0, Ordering::Relaxed);
if let Ok(mut tracker) = self.global_throughput.lock() {
tracker.record_bytes(global_in, global_out);
tracker.sample();
}
// Drain per-route pending bytes; collect into a Vec to avoid holding DashMap shards
let mut route_samples: Vec<(String, u64, u64)> = Vec::new();
for entry in self.route_pending_tp.iter() {
let route_id = entry.key().clone();
let pending_in = entry.value().0.swap(0, Ordering::Relaxed);
let pending_out = entry.value().1.swap(0, Ordering::Relaxed);
route_samples.push((route_id, pending_in, pending_out));
}
// Feed pending bytes into route trackers and sample
let retention = self.retention_seconds;
for (route_id, pending_in, pending_out) in &route_samples {
// Ensure the tracker exists
self.route_throughput
.entry(route_id.clone())
.or_insert_with(|| Mutex::new(ThroughputTracker::new(retention)));
// Now get a separate ref and lock it
if let Some(tracker_ref) = self.route_throughput.get(route_id) {
if let Ok(mut tracker) = tracker_ref.value().lock() {
tracker.record_bytes(*pending_in, *pending_out);
tracker.sample();
}
}
}
// Also sample any route trackers that had no new pending bytes
// (to keep their sample window advancing)
for entry in self.route_throughput.iter() {
if !self.route_pending_tp.contains_key(entry.key()) {
if let Ok(mut tracker) = entry.value().lock() {
tracker.sample();
}
}
}
// Drain per-IP pending bytes and feed into IP throughput trackers
let mut ip_samples: Vec<(String, u64, u64)> = Vec::new();
for entry in self.ip_pending_tp.iter() {
let ip = entry.key().clone();
let pending_in = entry.value().0.swap(0, Ordering::Relaxed);
let pending_out = entry.value().1.swap(0, Ordering::Relaxed);
ip_samples.push((ip, pending_in, pending_out));
}
for (ip, pending_in, pending_out) in &ip_samples {
self.ip_throughput
.entry(ip.clone())
.or_insert_with(|| Mutex::new(ThroughputTracker::new(retention)));
if let Some(tracker_ref) = self.ip_throughput.get(ip) {
if let Ok(mut tracker) = tracker_ref.value().lock() {
tracker.record_bytes(*pending_in, *pending_out);
tracker.sample();
}
}
}
// Sample idle IP trackers
for entry in self.ip_throughput.iter() {
if !self.ip_pending_tp.contains_key(entry.key()) {
if let Ok(mut tracker) = entry.value().lock() {
tracker.sample();
}
}
}
// Drain pending HTTP request count and feed into HTTP throughput tracker
let pending_reqs = self.pending_http_requests.swap(0, Ordering::Relaxed);
if let Ok(mut tracker) = self.http_request_throughput.lock() {
// Use bytes_in field to track request count (each request = 1 "byte")
tracker.record_bytes(pending_reqs, 0);
tracker.sample();
}
}
@@ -135,10 +362,22 @@ impl MetricsCollector {
self.total_bytes_out.load(Ordering::Relaxed)
}
/// Get a full metrics snapshot including per-route data.
/// Get a full metrics snapshot including per-route and per-IP data.
pub fn snapshot(&self) -> Metrics {
let mut routes = std::collections::HashMap::new();
// Get global throughput (instant = last 1 sample, recent = last 10 samples)
let (global_tp_in, global_tp_out, global_recent_in, global_recent_out, throughput_history) =
self.global_throughput
.lock()
.map(|t| {
let (i_in, i_out) = t.instant();
let (r_in, r_out) = t.recent();
let history = t.history(60);
(i_in, i_out, r_in, r_out, history)
})
.unwrap_or((0, 0, 0, 0, Vec::new()));
// Collect per-route metrics
for entry in self.route_total_connections.iter() {
let route_id = entry.key().clone();
@@ -156,24 +395,92 @@ impl MetricsCollector {
.map(|c| c.load(Ordering::Relaxed))
.unwrap_or(0);
let (route_tp_in, route_tp_out, route_recent_in, route_recent_out) = self.route_throughput
.get(&route_id)
.and_then(|entry| entry.value().lock().ok().map(|t| {
let (i_in, i_out) = t.instant();
let (r_in, r_out) = t.recent();
(i_in, i_out, r_in, r_out)
}))
.unwrap_or((0, 0, 0, 0));
routes.insert(route_id, RouteMetrics {
active_connections: active,
total_connections: total,
bytes_in,
bytes_out,
throughput_in_bytes_per_sec: 0,
throughput_out_bytes_per_sec: 0,
throughput_in_bytes_per_sec: route_tp_in,
throughput_out_bytes_per_sec: route_tp_out,
throughput_recent_in_bytes_per_sec: route_recent_in,
throughput_recent_out_bytes_per_sec: route_recent_out,
});
}
// Collect per-IP metrics — only IPs with active connections or total > 0,
// capped at top MAX_IPS_IN_SNAPSHOT sorted by active count
let mut ip_entries: Vec<(String, u64, u64, u64, u64, u64, u64)> = Vec::new();
for entry in self.ip_total_connections.iter() {
let ip = entry.key().clone();
let total = entry.value().load(Ordering::Relaxed);
let active = self.ip_connections
.get(&ip)
.map(|c| c.load(Ordering::Relaxed))
.unwrap_or(0);
let bytes_in = self.ip_bytes_in
.get(&ip)
.map(|c| c.load(Ordering::Relaxed))
.unwrap_or(0);
let bytes_out = self.ip_bytes_out
.get(&ip)
.map(|c| c.load(Ordering::Relaxed))
.unwrap_or(0);
let (tp_in, tp_out) = self.ip_throughput
.get(&ip)
.and_then(|entry| entry.value().lock().ok().map(|t| t.instant()))
.unwrap_or((0, 0));
ip_entries.push((ip, active, total, bytes_in, bytes_out, tp_in, tp_out));
}
// Sort by active connections descending, then cap
ip_entries.sort_by(|a, b| b.1.cmp(&a.1));
ip_entries.truncate(MAX_IPS_IN_SNAPSHOT);
let mut ips = std::collections::HashMap::new();
for (ip, active, total, bytes_in, bytes_out, tp_in, tp_out) in ip_entries {
ips.insert(ip, IpMetrics {
active_connections: active,
total_connections: total,
bytes_in,
bytes_out,
throughput_in_bytes_per_sec: tp_in,
throughput_out_bytes_per_sec: tp_out,
});
}
// HTTP request rates
let (http_rps, http_rps_recent) = self.http_request_throughput
.lock()
.map(|t| {
let (instant, _) = t.instant();
let (recent, _) = t.recent();
(instant, recent)
})
.unwrap_or((0, 0));
Metrics {
active_connections: self.active_connections(),
total_connections: self.total_connections(),
bytes_in: self.total_bytes_in(),
bytes_out: self.total_bytes_out(),
throughput_in_bytes_per_sec: 0,
throughput_out_bytes_per_sec: 0,
throughput_in_bytes_per_sec: global_tp_in,
throughput_out_bytes_per_sec: global_tp_out,
throughput_recent_in_bytes_per_sec: global_recent_in,
throughput_recent_out_bytes_per_sec: global_recent_out,
routes,
ips,
throughput_history,
total_http_requests: self.total_http_requests.load(Ordering::Relaxed),
http_requests_per_sec: http_rps,
http_requests_per_sec_recent: http_rps_recent,
}
}
}
@@ -198,10 +505,10 @@ mod tests {
#[test]
fn test_connection_opened_increments() {
let collector = MetricsCollector::new();
collector.connection_opened(None);
collector.connection_opened(None, None);
assert_eq!(collector.active_connections(), 1);
assert_eq!(collector.total_connections(), 1);
collector.connection_opened(None);
collector.connection_opened(None, None);
assert_eq!(collector.active_connections(), 2);
assert_eq!(collector.total_connections(), 2);
}
@@ -209,10 +516,10 @@ mod tests {
#[test]
fn test_connection_closed_decrements() {
let collector = MetricsCollector::new();
collector.connection_opened(None);
collector.connection_opened(None);
collector.connection_opened(None, None);
collector.connection_opened(None, None);
assert_eq!(collector.active_connections(), 2);
collector.connection_closed(None);
collector.connection_closed(None, None);
assert_eq!(collector.active_connections(), 1);
// total_connections should stay at 2
assert_eq!(collector.total_connections(), 2);
@@ -221,23 +528,23 @@ mod tests {
#[test]
fn test_route_specific_tracking() {
let collector = MetricsCollector::new();
collector.connection_opened(Some("route-a"));
collector.connection_opened(Some("route-a"));
collector.connection_opened(Some("route-b"));
collector.connection_opened(Some("route-a"), None);
collector.connection_opened(Some("route-a"), None);
collector.connection_opened(Some("route-b"), None);
assert_eq!(collector.active_connections(), 3);
assert_eq!(collector.total_connections(), 3);
collector.connection_closed(Some("route-a"));
collector.connection_closed(Some("route-a"), None);
assert_eq!(collector.active_connections(), 2);
}
#[test]
fn test_record_bytes() {
let collector = MetricsCollector::new();
collector.record_bytes(100, 200, Some("route-a"));
collector.record_bytes(50, 75, Some("route-a"));
collector.record_bytes(25, 30, None);
collector.record_bytes(100, 200, Some("route-a"), None);
collector.record_bytes(50, 75, Some("route-a"), None);
collector.record_bytes(25, 30, None, None);
let total_in = collector.total_bytes_in.load(Ordering::Relaxed);
let total_out = collector.total_bytes_out.load(Ordering::Relaxed);
@@ -248,4 +555,114 @@ mod tests {
let route_in = collector.route_bytes_in.get("route-a").unwrap();
assert_eq!(route_in.load(Ordering::Relaxed), 150);
}
#[test]
fn test_throughput_tracking() {
let collector = MetricsCollector::with_retention(60);
// Open a connection so the route appears in the snapshot
collector.connection_opened(Some("route-a"), None);
// Record some bytes
collector.record_bytes(1000, 2000, Some("route-a"), None);
collector.record_bytes(500, 750, None, None);
// Take a sample (simulates the 1Hz tick)
collector.sample_all();
// Check global throughput
let snapshot = collector.snapshot();
assert_eq!(snapshot.throughput_in_bytes_per_sec, 1500);
assert_eq!(snapshot.throughput_out_bytes_per_sec, 2750);
// Check per-route throughput
let route_a = snapshot.routes.get("route-a").unwrap();
assert_eq!(route_a.throughput_in_bytes_per_sec, 1000);
assert_eq!(route_a.throughput_out_bytes_per_sec, 2000);
}
#[test]
fn test_throughput_zero_before_sampling() {
let collector = MetricsCollector::with_retention(60);
collector.record_bytes(1000, 2000, None, None);
// Without sampling, throughput should be 0
let snapshot = collector.snapshot();
assert_eq!(snapshot.throughput_in_bytes_per_sec, 0);
assert_eq!(snapshot.throughput_out_bytes_per_sec, 0);
}
#[test]
fn test_per_ip_tracking() {
let collector = MetricsCollector::with_retention(60);
collector.connection_opened(Some("route-a"), Some("1.2.3.4"));
collector.connection_opened(Some("route-a"), Some("1.2.3.4"));
collector.connection_opened(Some("route-b"), Some("5.6.7.8"));
// Check IP active connections (drop DashMap refs immediately to avoid deadlock)
assert_eq!(
collector.ip_connections.get("1.2.3.4").unwrap().load(Ordering::Relaxed),
2
);
assert_eq!(
collector.ip_connections.get("5.6.7.8").unwrap().load(Ordering::Relaxed),
1
);
// Record bytes per IP
collector.record_bytes(100, 200, Some("route-a"), Some("1.2.3.4"));
collector.record_bytes(300, 400, Some("route-b"), Some("5.6.7.8"));
collector.sample_all();
let snapshot = collector.snapshot();
assert_eq!(snapshot.ips.len(), 2);
let ip1_metrics = snapshot.ips.get("1.2.3.4").unwrap();
assert_eq!(ip1_metrics.active_connections, 2);
assert_eq!(ip1_metrics.bytes_in, 100);
// Close connections
collector.connection_closed(Some("route-a"), Some("1.2.3.4"));
assert_eq!(
collector.ip_connections.get("1.2.3.4").unwrap().load(Ordering::Relaxed),
1
);
// Close last connection for IP — should be cleaned up
collector.connection_closed(Some("route-a"), Some("1.2.3.4"));
assert!(collector.ip_connections.get("1.2.3.4").is_none());
}
#[test]
fn test_http_request_tracking() {
let collector = MetricsCollector::with_retention(60);
collector.record_http_request();
collector.record_http_request();
collector.record_http_request();
assert_eq!(collector.total_http_requests.load(Ordering::Relaxed), 3);
collector.sample_all();
let snapshot = collector.snapshot();
assert_eq!(snapshot.total_http_requests, 3);
assert_eq!(snapshot.http_requests_per_sec, 3);
}
#[test]
fn test_throughput_history_in_snapshot() {
let collector = MetricsCollector::with_retention(60);
for i in 1..=5 {
collector.record_bytes(i * 100, i * 200, None, None);
collector.sample_all();
}
let snapshot = collector.snapshot();
assert_eq!(snapshot.throughput_history.len(), 5);
// History should be chronological (oldest first)
assert_eq!(snapshot.throughput_history[0].bytes_in, 100);
assert_eq!(snapshot.throughput_history[4].bytes_in, 500);
}
}
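A note on how the pieces above fit together at runtime: the snapshot code only reads whatever the trackers last sampled, so something must call sample_all() roughly once per second. A minimal sketch of that driver loop, assuming a tokio runtime and the MetricsCollector API shown in this diff (the interval task itself is illustrative wiring, not code from the repository):

use std::sync::Arc;
use std::time::Duration;

async fn run_metrics_sampler(collector: Arc<MetricsCollector>) {
    let mut tick = tokio::time::interval(Duration::from_secs(1));
    loop {
        tick.tick().await;
        // Freeze the current per-second rates into the trackers and history ring.
        collector.sample_all();
        // Any consumer (e.g. a stats IPC handler) can now take a consistent view.
        let snap = collector.snapshot();
        tracing::trace!(
            "active={} http_rps={} in_bps={}",
            snap.active_connections,
            snap.http_requests_per_sec,
            snap.throughput_in_bytes_per_sec
        );
    }
}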

View File

@@ -1,8 +1,10 @@
use serde::{Deserialize, Serialize};
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{Instant, SystemTime, UNIX_EPOCH};
/// A single throughput sample.
#[derive(Debug, Clone, Copy)]
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ThroughputSample {
pub timestamp_ms: u64,
pub bytes_in: u64,
@@ -106,6 +108,27 @@ impl ThroughputTracker {
self.throughput(10)
}
/// Return the last N samples in chronological order (oldest first).
pub fn history(&self, window_seconds: usize) -> Vec<ThroughputSample> {
let window = window_seconds.min(self.count);
if window == 0 {
return Vec::new();
}
let mut result = Vec::with_capacity(window);
for i in 0..window {
let idx = if self.write_index >= i + 1 {
self.write_index - i - 1
} else {
self.capacity - (i + 1 - self.write_index)
};
if idx < self.samples.len() {
result.push(self.samples[idx]);
}
}
result.reverse(); // Return oldest-first (chronological)
result
}
/// How long this tracker has been alive.
pub fn uptime(&self) -> std::time::Duration {
self.created_at.elapsed()
@@ -170,4 +193,40 @@ mod tests {
std::thread::sleep(std::time::Duration::from_millis(10));
assert!(tracker.uptime().as_millis() >= 10);
}
#[test]
fn test_history_returns_chronological() {
let mut tracker = ThroughputTracker::new(60);
for i in 1..=5 {
tracker.record_bytes(i * 100, i * 200);
tracker.sample();
}
let history = tracker.history(5);
assert_eq!(history.len(), 5);
// First sample should have 100 bytes_in, last should have 500
assert_eq!(history[0].bytes_in, 100);
assert_eq!(history[4].bytes_in, 500);
}
#[test]
fn test_history_wraps_around() {
let mut tracker = ThroughputTracker::new(3); // Small capacity
for i in 1..=5 {
tracker.record_bytes(i * 100, i * 200);
tracker.sample();
}
// Only last 3 should be retained
let history = tracker.history(10); // Ask for more than available
assert_eq!(history.len(), 3);
assert_eq!(history[0].bytes_in, 300);
assert_eq!(history[1].bytes_in, 400);
assert_eq!(history[2].bytes_in, 500);
}
#[test]
fn test_history_empty() {
let tracker = ThroughputTracker::new(60);
let history = tracker.history(10);
assert!(history.is_empty());
}
}
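history() walks the ring buffer backwards from write_index and reverses the result, so callers get an oldest-first slice whether or not the buffer has wrapped. A small usage sketch based on the API shown above (values are illustrative):

let mut tracker = ThroughputTracker::new(60); // keep at most 60 one-second samples
for i in 1..=90u64 {
    tracker.record_bytes(i * 10, i * 20);
    tracker.sample(); // one sample per "second"
}
// The buffer wrapped: only the newest 60 samples survive, oldest first.
let recent = tracker.history(60);
assert_eq!(recent.len(), 60);
assert!(recent.first().unwrap().bytes_in <= recent.last().unwrap().bytes_in);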

View File

@@ -5,13 +5,14 @@ use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use tracing::debug;
use super::connection_record::ConnectionRecord;
use rustproxy_metrics::MetricsCollector;
/// Statistics for a forwarded connection.
#[derive(Debug, Default)]
pub struct ForwardStats {
pub bytes_in: AtomicU64,
pub bytes_out: AtomicU64,
/// Context for forwarding metrics: bundles the collector, route id, and source IP
/// instead of passing them as an ever-growing tuple of arguments.
#[derive(Clone)]
pub struct ForwardMetricsCtx {
pub collector: Arc<MetricsCollector>,
pub route_id: Option<String>,
pub source_ip: Option<String>,
}
/// Perform bidirectional TCP forwarding between client and backend.
@@ -68,6 +69,10 @@ pub async fn forward_bidirectional(
/// Perform bidirectional TCP forwarding with inactivity and max lifetime timeouts.
///
/// When `metrics` is provided, bytes are reported to the MetricsCollector
/// per-chunk (lock-free) as they flow through the copy loops, enabling
/// real-time throughput sampling for long-lived connections.
///
/// Returns (bytes_from_client, bytes_from_backend) when the connection closes or times out.
pub async fn forward_bidirectional_with_timeouts(
client: TcpStream,
@@ -76,10 +81,14 @@ pub async fn forward_bidirectional_with_timeouts(
inactivity_timeout: std::time::Duration,
max_lifetime: std::time::Duration,
cancel: CancellationToken,
metrics: Option<ForwardMetricsCtx>,
) -> std::io::Result<(u64, u64)> {
// Send initial data (peeked bytes) to backend
if let Some(data) = initial_data {
backend.write_all(data).await?;
if let Some(ref ctx) = metrics {
ctx.collector.record_bytes(data.len() as u64, 0, ctx.route_id.as_deref(), ctx.source_ip.as_deref());
}
}
let (mut client_read, mut client_write) = client.into_split();
@@ -90,6 +99,7 @@ pub async fn forward_bidirectional_with_timeouts(
let la1 = Arc::clone(&last_activity);
let initial_len = initial_data.map_or(0u64, |d| d.len() as u64);
let metrics_c2b = metrics.clone();
let c2b = tokio::spawn(async move {
let mut buf = vec![0u8; 65536];
let mut total = initial_len;
@@ -103,12 +113,16 @@ pub async fn forward_bidirectional_with_timeouts(
}
total += n as u64;
la1.store(start.elapsed().as_millis() as u64, Ordering::Relaxed);
if let Some(ref ctx) = metrics_c2b {
ctx.collector.record_bytes(n as u64, 0, ctx.route_id.as_deref(), ctx.source_ip.as_deref());
}
}
let _ = backend_write.shutdown().await;
total
});
let la2 = Arc::clone(&last_activity);
let metrics_b2c = metrics;
let b2c = tokio::spawn(async move {
let mut buf = vec![0u8; 65536];
let mut total = 0u64;
@@ -122,6 +136,9 @@ pub async fn forward_bidirectional_with_timeouts(
}
total += n as u64;
la2.store(start.elapsed().as_millis() as u64, Ordering::Relaxed);
if let Some(ref ctx) = metrics_b2c {
ctx.collector.record_bytes(0, n as u64, ctx.route_id.as_deref(), ctx.source_ip.as_deref());
}
}
let _ = client_write.shutdown().await;
total
@@ -173,153 +190,3 @@ pub async fn forward_bidirectional_with_timeouts(
watchdog.abort();
Ok((bytes_in, bytes_out))
}
/// Forward bidirectional with a callback for byte counting.
pub async fn forward_bidirectional_with_stats(
client: TcpStream,
backend: TcpStream,
initial_data: Option<&[u8]>,
stats: Arc<ForwardStats>,
) -> std::io::Result<()> {
let (bytes_in, bytes_out) = forward_bidirectional(client, backend, initial_data).await?;
stats.bytes_in.fetch_add(bytes_in, Ordering::Relaxed);
stats.bytes_out.fetch_add(bytes_out, Ordering::Relaxed);
Ok(())
}
/// Perform bidirectional TCP forwarding with inactivity / lifetime timeouts,
/// updating a `ConnectionRecord` with byte counts and activity timestamps
/// in real time for zombie detection.
///
/// When `record` is `None`, this behaves identically to
/// `forward_bidirectional_with_timeouts`.
///
/// The record's `client_closed` / `backend_closed` flags are set when the
/// respective copy loop terminates, giving the zombie scanner visibility
/// into half-open connections.
pub async fn forward_bidirectional_with_record(
client: TcpStream,
mut backend: TcpStream,
initial_data: Option<&[u8]>,
inactivity_timeout: std::time::Duration,
max_lifetime: std::time::Duration,
cancel: CancellationToken,
record: Option<Arc<ConnectionRecord>>,
) -> std::io::Result<(u64, u64)> {
// Send initial data (peeked bytes) to backend
if let Some(data) = initial_data {
backend.write_all(data).await?;
if let Some(ref r) = record {
r.record_bytes_in(data.len() as u64);
}
}
let (mut client_read, mut client_write) = client.into_split();
let (mut backend_read, mut backend_write) = backend.into_split();
let last_activity = Arc::new(AtomicU64::new(0));
let start = std::time::Instant::now();
let la1 = Arc::clone(&last_activity);
let initial_len = initial_data.map_or(0u64, |d| d.len() as u64);
let rec1 = record.clone();
let c2b = tokio::spawn(async move {
let mut buf = vec![0u8; 65536];
let mut total = initial_len;
loop {
let n = match client_read.read(&mut buf).await {
Ok(0) | Err(_) => break,
Ok(n) => n,
};
if backend_write.write_all(&buf[..n]).await.is_err() {
break;
}
total += n as u64;
let now_ms = start.elapsed().as_millis() as u64;
la1.store(now_ms, Ordering::Relaxed);
if let Some(ref r) = rec1 {
r.record_bytes_in(n as u64);
}
}
let _ = backend_write.shutdown().await;
// Mark client side as closed
if let Some(ref r) = rec1 {
r.client_closed.store(true, Ordering::Relaxed);
}
total
});
let la2 = Arc::clone(&last_activity);
let rec2 = record.clone();
let b2c = tokio::spawn(async move {
let mut buf = vec![0u8; 65536];
let mut total = 0u64;
loop {
let n = match backend_read.read(&mut buf).await {
Ok(0) | Err(_) => break,
Ok(n) => n,
};
if client_write.write_all(&buf[..n]).await.is_err() {
break;
}
total += n as u64;
let now_ms = start.elapsed().as_millis() as u64;
la2.store(now_ms, Ordering::Relaxed);
if let Some(ref r) = rec2 {
r.record_bytes_out(n as u64);
}
}
let _ = client_write.shutdown().await;
// Mark backend side as closed
if let Some(ref r) = rec2 {
r.backend_closed.store(true, Ordering::Relaxed);
}
total
});
// Watchdog: inactivity, max lifetime, and cancellation
let la_watch = Arc::clone(&last_activity);
let c2b_handle = c2b.abort_handle();
let b2c_handle = b2c.abort_handle();
let watchdog = tokio::spawn(async move {
let check_interval = std::time::Duration::from_secs(5);
let mut last_seen = 0u64;
loop {
tokio::select! {
_ = cancel.cancelled() => {
debug!("Connection cancelled by shutdown");
c2b_handle.abort();
b2c_handle.abort();
break;
}
_ = tokio::time::sleep(check_interval) => {
// Check max lifetime
if start.elapsed() >= max_lifetime {
debug!("Connection exceeded max lifetime, closing");
c2b_handle.abort();
b2c_handle.abort();
break;
}
// Check inactivity
let current = la_watch.load(Ordering::Relaxed);
if current == last_seen {
let elapsed_since_activity = start.elapsed().as_millis() as u64 - current;
if elapsed_since_activity >= inactivity_timeout.as_millis() as u64 {
debug!("Connection inactive for {}ms, closing", elapsed_since_activity);
c2b_handle.abort();
b2c_handle.abort();
break;
}
}
last_seen = current;
}
}
}
});
let bytes_in = c2b.await.unwrap_or(0);
let bytes_out = b2c.await.unwrap_or(0);
watchdog.abort();
Ok((bytes_in, bytes_out))
}
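For reference, a call-site sketch showing how the new metrics parameter is threaded through forward_bidirectional_with_timeouts. It assumes two already-connected TcpStreams plus the ForwardMetricsCtx and MetricsCollector types from this diff; the route id, IP, and timeout values are hypothetical:

use std::sync::Arc;
use std::time::Duration;
use tokio_util::sync::CancellationToken;

async fn forward_with_metrics(
    client: tokio::net::TcpStream,
    backend: tokio::net::TcpStream,
    metrics: Arc<MetricsCollector>,
) -> std::io::Result<(u64, u64)> {
    let ctx = ForwardMetricsCtx {
        collector: Arc::clone(&metrics),
        route_id: Some("route-a".to_string()),      // hypothetical route id
        source_ip: Some("203.0.113.7".to_string()), // hypothetical client IP
    };
    // Bytes are reported per chunk inside the copy loops, so no trailing
    // record_bytes() call is needed at this call site.
    forward_bidirectional_with_timeouts(
        client, backend, None,           // no peeked initial data
        Duration::from_secs(60),         // inactivity timeout (illustrative)
        Duration::from_secs(3600),       // max lifetime (illustrative)
        CancellationToken::new(),
        Some(ctx),
    ).await
}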

View File

@@ -146,6 +146,41 @@ pub fn is_tls(data: &[u8]) -> bool {
data.len() >= 3 && data[0] == 0x16 && data[1] == 0x03
}
/// Extract the HTTP request path from initial data.
/// E.g., from "GET /foo/bar HTTP/1.1\r\n..." returns Some("/foo/bar").
pub fn extract_http_path(data: &[u8]) -> Option<String> {
let text = std::str::from_utf8(data).ok()?;
// Find first space (after method)
let method_end = text.find(' ')?;
let rest = &text[method_end + 1..];
// Find end of path (next space before "HTTP/...")
let path_end = rest.find(' ').unwrap_or(rest.len());
let path = &rest[..path_end];
// Strip query string for path matching
let path = path.split('?').next().unwrap_or(path);
if path.starts_with('/') {
Some(path.to_string())
} else {
None
}
}
/// Extract the HTTP Host header from initial data.
/// E.g., from "GET / HTTP/1.1\r\nHost: example.com\r\n..." returns Some("example.com").
pub fn extract_http_host(data: &[u8]) -> Option<String> {
let text = std::str::from_utf8(data).ok()?;
for line in text.split("\r\n") {
if let Some(value) = line.strip_prefix("Host: ").or_else(|| line.strip_prefix("host: ")) {
// Strip port if present
let host = value.split(':').next().unwrap_or(value).trim();
if !host.is_empty() {
return Some(host.to_lowercase());
}
}
}
None
}
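Both extractors operate on the raw peeked bytes, before any HTTP parser is involved; the listener feeds their results into route matching as the effective domain and path. A quick illustrative check of their behaviour (request bytes are made up):

let peeked = b"GET /api/users?id=7 HTTP/1.1\r\nHost: Example.COM:8080\r\n\r\n";
assert_eq!(extract_http_path(peeked).as_deref(), Some("/api/users"));  // query string stripped
assert_eq!(extract_http_host(peeked).as_deref(), Some("example.com")); // port stripped, lowercased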
/// Check if the initial bytes look like HTTP.
pub fn is_http(data: &[u8]) -> bool {
if data.len() < 4 {

View File

@@ -1,10 +1,13 @@
use std::collections::HashMap;
use std::sync::Arc;
use arc_swap::ArcSwap;
use tokio::net::TcpListener;
use tokio_rustls::TlsAcceptor;
use tokio_util::sync::CancellationToken;
use tracing::{info, error, debug, warn};
use thiserror::Error;
use rustproxy_config::RouteActionType;
use rustproxy_routing::RouteManager;
use rustproxy_metrics::MetricsCollector;
use rustproxy_http::HttpProxyService;
@@ -13,6 +16,40 @@ use crate::forwarder;
use crate::tls_handler;
use crate::connection_tracker::ConnectionTracker;
/// RAII guard that decrements the active connection metric on drop.
/// Ensures connection_closed is called on ALL exit paths — normal, error, or panic.
struct ConnectionGuard {
metrics: Arc<MetricsCollector>,
route_id: Option<String>,
source_ip: Option<String>,
disarmed: bool,
}
impl ConnectionGuard {
fn new(metrics: Arc<MetricsCollector>, route_id: Option<&str>, source_ip: Option<&str>) -> Self {
Self {
metrics,
route_id: route_id.map(|s| s.to_string()),
source_ip: source_ip.map(|s| s.to_string()),
disarmed: false,
}
}
/// Disarm the guard — prevents the Drop from running.
/// Use when handing off to a path that manages its own cleanup (e.g., HTTP proxy).
fn disarm(mut self) {
self.disarmed = true;
}
}
impl Drop for ConnectionGuard {
fn drop(&mut self) {
if !self.disarmed {
self.metrics.connection_closed(self.route_id.as_deref(), self.source_ip.as_deref());
}
}
}
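A sketch of the usage pattern the guard is designed for (the real call sites appear further down in handle_connection; the route id and IP here are hypothetical): open the metric, bind the guard, and disarm only when handing the connection to a component that does its own accounting.

fn track_connection_sketch(metrics: Arc<MetricsCollector>, hand_off_to_http_proxy: bool) {
    let route_id = Some("route-a");
    let source_ip = Some("203.0.113.7");
    metrics.connection_opened(route_id, source_ip);
    let guard = ConnectionGuard::new(Arc::clone(&metrics), route_id, source_ip);
    if hand_off_to_http_proxy {
        // The HTTP proxy tracks per-request metrics itself; avoid double counting.
        guard.disarm();
        return;
    }
    // ... forward bytes; early returns, ?, and panics all still run Drop ...
    // guard dropped here -> metrics.connection_closed(route_id, source_ip)
}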
#[derive(Debug, Error)]
pub enum ListenerError {
#[error("Failed to bind port {port}: {source}")]
@@ -82,12 +119,14 @@ impl Default for ConnectionConfig {
pub struct TcpListenerManager {
/// Active listeners indexed by port
listeners: HashMap<u16, tokio::task::JoinHandle<()>>,
/// Shared route manager
route_manager: Arc<RouteManager>,
/// Shared route manager (ArcSwap for hot-reload visibility in accept loops)
route_manager: Arc<ArcSwap<RouteManager>>,
/// Shared metrics collector
metrics: Arc<MetricsCollector>,
/// TLS acceptors indexed by domain
tls_configs: Arc<HashMap<String, TlsCertConfig>>,
/// Raw PEM TLS configs indexed by domain (kept for fallback with custom TLS versions)
tls_configs: Arc<ArcSwap<HashMap<String, TlsCertConfig>>>,
/// Shared TLS acceptor (pre-parsed certs + session cache). None when no certs configured.
shared_tls_acceptor: Arc<ArcSwap<Option<TlsAcceptor>>>,
/// HTTP proxy service for HTTP-level forwarding
http_proxy: Arc<HttpProxyService>,
/// Connection configuration
@@ -96,52 +135,60 @@ pub struct TcpListenerManager {
conn_tracker: Arc<ConnectionTracker>,
/// Cancellation token for graceful shutdown
cancel_token: CancellationToken,
/// Path to Unix domain socket for relaying socket-handler connections to TypeScript.
socket_handler_relay: Arc<std::sync::RwLock<Option<String>>>,
}
impl TcpListenerManager {
pub fn new(route_manager: Arc<RouteManager>) -> Self {
let metrics = Arc::new(MetricsCollector::new());
let http_proxy = Arc::new(HttpProxyService::new(
let conn_config = ConnectionConfig::default();
let http_proxy = Arc::new(HttpProxyService::with_connect_timeout(
Arc::clone(&route_manager),
Arc::clone(&metrics),
std::time::Duration::from_millis(conn_config.connection_timeout_ms),
));
let conn_config = ConnectionConfig::default();
let conn_tracker = Arc::new(ConnectionTracker::new(
conn_config.max_connections_per_ip,
conn_config.connection_rate_limit_per_minute,
));
Self {
listeners: HashMap::new(),
route_manager,
route_manager: Arc::new(ArcSwap::from(route_manager)),
metrics,
tls_configs: Arc::new(HashMap::new()),
tls_configs: Arc::new(ArcSwap::from(Arc::new(HashMap::new()))),
shared_tls_acceptor: Arc::new(ArcSwap::from(Arc::new(None))),
http_proxy,
conn_config: Arc::new(conn_config),
conn_tracker,
cancel_token: CancellationToken::new(),
socket_handler_relay: Arc::new(std::sync::RwLock::new(None)),
}
}
/// Create with a metrics collector.
pub fn with_metrics(route_manager: Arc<RouteManager>, metrics: Arc<MetricsCollector>) -> Self {
let http_proxy = Arc::new(HttpProxyService::new(
let conn_config = ConnectionConfig::default();
let http_proxy = Arc::new(HttpProxyService::with_connect_timeout(
Arc::clone(&route_manager),
Arc::clone(&metrics),
std::time::Duration::from_millis(conn_config.connection_timeout_ms),
));
let conn_config = ConnectionConfig::default();
let conn_tracker = Arc::new(ConnectionTracker::new(
conn_config.max_connections_per_ip,
conn_config.connection_rate_limit_per_minute,
));
Self {
listeners: HashMap::new(),
route_manager,
route_manager: Arc::new(ArcSwap::from(route_manager)),
metrics,
tls_configs: Arc::new(HashMap::new()),
tls_configs: Arc::new(ArcSwap::from(Arc::new(HashMap::new()))),
shared_tls_acceptor: Arc::new(ArcSwap::from(Arc::new(None))),
http_proxy,
conn_config: Arc::new(conn_config),
conn_tracker,
cancel_token: CancellationToken::new(),
socket_handler_relay: Arc::new(std::sync::RwLock::new(None)),
}
}
@@ -155,8 +202,33 @@ impl TcpListenerManager {
}
/// Set TLS certificate configurations.
pub fn set_tls_configs(&mut self, configs: HashMap<String, TlsCertConfig>) {
self.tls_configs = Arc::new(configs);
/// Builds a shared TLS acceptor with pre-parsed certs and session resumption support.
/// Uses ArcSwap so running accept loops immediately see the new certs.
pub fn set_tls_configs(&self, configs: HashMap<String, TlsCertConfig>) {
if !configs.is_empty() {
match tls_handler::CertResolver::new(&configs)
.and_then(tls_handler::build_shared_tls_acceptor)
{
Ok(acceptor) => {
info!("Built shared TLS acceptor for {} domain(s)", configs.len());
self.shared_tls_acceptor.store(Arc::new(Some(acceptor)));
}
Err(e) => {
warn!("Failed to build shared TLS acceptor: {}, falling back to per-connection", e);
self.shared_tls_acceptor.store(Arc::new(None));
}
}
} else {
self.shared_tls_acceptor.store(Arc::new(None));
}
// Keep raw PEM configs for fallback (routes with custom TLS versions)
self.tls_configs.store(Arc::new(configs));
}
/// Set the shared socket-handler relay path.
/// This allows RustProxy to share the relay path Arc with the listener.
pub fn set_socket_handler_relay(&mut self, relay: Arc<std::sync::RwLock<Option<String>>>) {
self.socket_handler_relay = relay;
}
/// Start listening on a port.
@@ -172,18 +244,20 @@ impl TcpListenerManager {
info!("Listening on port {}", port);
let route_manager = Arc::clone(&self.route_manager);
let route_manager_swap = Arc::clone(&self.route_manager);
let metrics = Arc::clone(&self.metrics);
let tls_configs = Arc::clone(&self.tls_configs);
let shared_tls_acceptor = Arc::clone(&self.shared_tls_acceptor);
let http_proxy = Arc::clone(&self.http_proxy);
let conn_config = Arc::clone(&self.conn_config);
let conn_tracker = Arc::clone(&self.conn_tracker);
let cancel = self.cancel_token.clone();
let relay = Arc::clone(&self.socket_handler_relay);
let handle = tokio::spawn(async move {
Self::accept_loop(
listener, port, route_manager, metrics, tls_configs,
http_proxy, conn_config, conn_tracker, cancel,
listener, port, route_manager_swap, metrics, tls_configs,
shared_tls_acceptor, http_proxy, conn_config, conn_tracker, cancel, relay,
).await;
});
@@ -255,8 +329,9 @@ impl TcpListenerManager {
}
/// Update the route manager (for hot-reload).
/// Uses ArcSwap so running accept loops immediately see the new routes.
pub fn update_route_manager(&mut self, route_manager: Arc<RouteManager>) {
self.route_manager = route_manager;
self.route_manager.store(route_manager);
}
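A control-plane hot reload then reduces to rebuilding the RouteManager and storing it; a minimal sketch, with the config-parsing step assumed to happen elsewhere:

fn apply_route_reload(
    manager: &mut TcpListenerManager,
    new_routes: Vec<rustproxy_config::RouteConfig>,
) {
    // Publish the rebuilt RouteManager; accept loops observe it on their next
    // route_manager_swap.load_full() without restarting any listener.
    manager.update_route_manager(Arc::new(RouteManager::new(new_routes)));
}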
/// Get a reference to the metrics collector.
@@ -268,13 +343,15 @@ impl TcpListenerManager {
async fn accept_loop(
listener: TcpListener,
port: u16,
route_manager: Arc<RouteManager>,
route_manager_swap: Arc<ArcSwap<RouteManager>>,
metrics: Arc<MetricsCollector>,
tls_configs: Arc<HashMap<String, TlsCertConfig>>,
tls_configs: Arc<ArcSwap<HashMap<String, TlsCertConfig>>>,
shared_tls_acceptor: Arc<ArcSwap<Option<TlsAcceptor>>>,
http_proxy: Arc<HttpProxyService>,
conn_config: Arc<ConnectionConfig>,
conn_tracker: Arc<ConnectionTracker>,
cancel: CancellationToken,
socket_handler_relay: Arc<std::sync::RwLock<Option<String>>>,
) {
loop {
tokio::select! {
@@ -296,18 +373,23 @@ impl TcpListenerManager {
conn_tracker.connection_opened(&ip);
let rm = Arc::clone(&route_manager);
// Load the latest route manager from ArcSwap on each connection
let rm = route_manager_swap.load_full();
let m = Arc::clone(&metrics);
let tc = Arc::clone(&tls_configs);
// Load the latest TLS configs from ArcSwap on each connection
let tc = tls_configs.load_full();
// Load the latest shared TLS acceptor from ArcSwap
let sa = shared_tls_acceptor.load_full();
let hp = Arc::clone(&http_proxy);
let cc = Arc::clone(&conn_config);
let ct = Arc::clone(&conn_tracker);
let cn = cancel.clone();
let sr = Arc::clone(&socket_handler_relay);
debug!("Accepted connection from {} on port {}", peer_addr, port);
tokio::spawn(async move {
let result = Self::handle_connection(
stream, port, peer_addr, rm, m, tc, hp, cc, cn,
stream, port, peer_addr, rm, m, tc, sa, hp, cc, cn, sr,
).await;
if let Err(e) = result {
debug!("Connection error from {}: {}", peer_addr, e);
@@ -333,14 +415,149 @@ impl TcpListenerManager {
route_manager: Arc<RouteManager>,
metrics: Arc<MetricsCollector>,
tls_configs: Arc<HashMap<String, TlsCertConfig>>,
shared_tls_acceptor: Arc<Option<TlsAcceptor>>,
http_proxy: Arc<HttpProxyService>,
conn_config: Arc<ConnectionConfig>,
cancel: CancellationToken,
socket_handler_relay: Arc<std::sync::RwLock<Option<String>>>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
use tokio::io::AsyncReadExt;
stream.set_nodelay(true)?;
// Extract source IP once for all metric calls
let ip_str = peer_addr.ip().to_string();
// === Fast path: try port-only matching before peeking at data ===
// This handles "server-speaks-first" protocols where the client
// doesn't send initial data (e.g., SMTP, greeting-based protocols).
// If a route matches by port alone and doesn't need domain/path/TLS info,
// we can forward immediately without waiting for client data.
//
// IMPORTANT: HTTP connections must NOT use this path — they need the HTTP
// proxy for proper error responses, CORS handling, and request-level routing.
// We detect HTTP via a non-blocking peek before committing to raw forwarding.
{
let quick_ctx = rustproxy_routing::MatchContext {
port,
domain: None,
path: None,
client_ip: Some(&ip_str),
tls_version: None,
headers: None,
is_tls: false,
protocol: None,
};
if let Some(quick_match) = route_manager.find_route(&quick_ctx) {
let rm = &quick_match.route.route_match;
let has_no_domain = rm.domains.is_none();
let has_no_path = rm.path.is_none();
let is_forward = quick_match.route.action.action_type == RouteActionType::Forward;
let has_no_tls = quick_match.route.action.tls.is_none();
// Only use fast path for simple port-only forward routes with no TLS
if has_no_domain && has_no_path && is_forward && has_no_tls {
// Non-blocking peek: if client has already sent data that looks
// like HTTP, skip fast path and let the normal path handle it
// through the HTTP proxy (for CORS, error responses, path routing).
let is_likely_http = {
let mut probe = [0u8; 16];
// Brief peek: HTTP clients send the request line immediately after connect,
// while server-speaks-first protocols (SMTP etc.) send nothing until they
// receive the server greeting, so a 10ms window cleanly separates the two.
match tokio::time::timeout(
std::time::Duration::from_millis(10),
stream.peek(&mut probe),
).await {
Ok(Ok(n)) if n > 0 => sni_parser::is_http(&probe[..n]),
_ => false,
}
};
if is_likely_http {
debug!("Fast-path skipped: HTTP detected from {}, using HTTP proxy", peer_addr);
// Fall through to normal path for HTTP proxy handling
} else if let Some(target) = quick_match.target {
let target_host = target.host.first().to_string();
let target_port = target.port.resolve(port);
let route_id = quick_match.route.id.as_deref();
// Check route-level IP security
if let Some(ref security) = quick_match.route.security {
if !rustproxy_http::request_filter::RequestFilter::check_ip_security(
security, &peer_addr.ip(),
) {
debug!("Connection from {} blocked by route security", peer_addr);
return Ok(());
}
}
metrics.connection_opened(route_id, Some(&ip_str));
let _fast_guard = ConnectionGuard::new(Arc::clone(&metrics), route_id, Some(&ip_str));
let connect_timeout = std::time::Duration::from_millis(conn_config.connection_timeout_ms);
let inactivity_timeout = std::time::Duration::from_millis(conn_config.socket_timeout_ms);
let max_lifetime = std::time::Duration::from_millis(conn_config.max_connection_lifetime_ms);
debug!(
"Fast-path forward (no peek): {} -> {}:{}",
peer_addr, target_host, target_port
);
let backend = match tokio::time::timeout(
connect_timeout,
tokio::net::TcpStream::connect(format!("{}:{}", target_host, target_port)),
).await {
Ok(Ok(s)) => s,
Ok(Err(e)) => return Err(e.into()),
Err(_) => return Err("Backend connection timeout".into()),
};
backend.set_nodelay(true)?;
// Send PROXY protocol header if configured
let should_send_proxy = conn_config.send_proxy_protocol
|| quick_match.route.action.send_proxy_protocol.unwrap_or(false)
|| target.send_proxy_protocol.unwrap_or(false);
if should_send_proxy {
use tokio::io::AsyncWriteExt;
let dest = std::net::SocketAddr::new(
target_host.parse().unwrap_or(std::net::IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED)),
target_port,
);
let header = crate::proxy_protocol::generate_v1(&peer_addr, &dest);
let mut backend_w = backend;
backend_w.write_all(header.as_bytes()).await?;
let (_bytes_in, _bytes_out) = forwarder::forward_bidirectional_with_timeouts(
stream, backend_w, None,
inactivity_timeout, max_lifetime, cancel,
Some(forwarder::ForwardMetricsCtx {
collector: Arc::clone(&metrics),
route_id: route_id.map(|s| s.to_string()),
source_ip: Some(ip_str.clone()),
}),
).await?;
} else {
let (_bytes_in, _bytes_out) = forwarder::forward_bidirectional_with_timeouts(
stream, backend, None,
inactivity_timeout, max_lifetime, cancel,
Some(forwarder::ForwardMetricsCtx {
collector: Arc::clone(&metrics),
route_id: route_id.map(|s| s.to_string()),
source_ip: Some(ip_str.clone()),
}),
).await?;
}
return Ok(());
}
}
}
}
// === End fast path ===
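Condensing the conditions above: the fast path only engages for port-only forward routes with no TLS and no sign of HTTP in the probe; everything else falls through to the peek-based path below. A hypothetical predicate capturing the same checks:

fn fast_path_eligible(route: &rustproxy_config::RouteConfig, probe_looks_like_http: bool) -> bool {
    route.route_match.domains.is_none()
        && route.route_match.path.is_none()
        && route.action.action_type == rustproxy_config::RouteActionType::Forward
        && route.action.tls.is_none()
        && !probe_looks_like_http
}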
// Handle PROXY protocol if configured
let mut effective_peer_addr = peer_addr;
if conn_config.accept_proxy_protocol {
@@ -412,15 +629,30 @@ impl TcpListenerManager {
None
};
// Extract HTTP path and host from initial data for route matching
let http_path = if is_http {
sni_parser::extract_http_path(initial_data)
} else {
None
};
let http_host = if is_http && domain.is_none() {
sni_parser::extract_http_host(initial_data)
} else {
None
};
let effective_domain = domain.as_deref().or(http_host.as_deref());
// Match route
let ctx = rustproxy_routing::MatchContext {
port,
domain: domain.as_deref(),
path: None,
domain: effective_domain,
path: http_path.as_deref(),
client_ip: Some(&peer_addr.ip().to_string()),
tls_version: None,
headers: None,
is_tls,
// For TLS connections, protocol is unknown until after termination
protocol: if is_http { Some("http") } else if !is_tls { Some("tcp") } else { None },
};
let route_match = route_manager.find_route(&ctx);
@@ -429,6 +661,17 @@ impl TcpListenerManager {
Some(rm) => rm,
None => {
debug!("No route matched for port {} domain {:?}", port, domain);
if is_http {
// Send a proper HTTP error instead of dropping the connection
use tokio::io::AsyncWriteExt;
let body = "No route matched";
let resp = format!(
"HTTP/1.1 502 Bad Gateway\r\nContent-Type: text/plain\r\nContent-Length: {}\r\nConnection: close\r\n\r\n{}",
body.len(), body
);
let _ = stream.write_all(resp.as_bytes()).await;
let _ = stream.shutdown().await;
}
return Ok(());
}
};
@@ -446,14 +689,34 @@ impl TcpListenerManager {
}
}
// Track connection in metrics
metrics.connection_opened(route_id);
// Track connection in metrics — guard ensures connection_closed on all exit paths
metrics.connection_opened(route_id, Some(&ip_str));
let _conn_guard = ConnectionGuard::new(Arc::clone(&metrics), route_id, Some(&ip_str));
// Check if this is a socket-handler route that should be relayed to TypeScript
if route_match.route.action.action_type == RouteActionType::SocketHandler {
let relay_path = {
let guard = socket_handler_relay.read().unwrap();
guard.clone()
};
if let Some(relay_socket_path) = relay_path {
return Self::relay_to_socket_handler(
stream, n, port, peer_addr,
&route_match, domain.as_deref(), is_tls,
&relay_socket_path,
&metrics, route_id,
).await;
} else {
debug!("Socket-handler route matched but no relay path configured");
return Ok(());
}
}
let target = match route_match.target {
Some(t) => t,
None => {
debug!("Route matched but no target available");
metrics.connection_closed(route_id);
return Ok(());
}
};
@@ -533,21 +796,21 @@ impl TcpListenerManager {
let mut actual_buf = vec![0u8; n];
stream.read_exact(&mut actual_buf).await?;
let (bytes_in, bytes_out) = forwarder::forward_bidirectional_with_timeouts(
let (_bytes_in, _bytes_out) = forwarder::forward_bidirectional_with_timeouts(
stream, backend, Some(&actual_buf),
inactivity_timeout, max_lifetime, cancel,
Some(forwarder::ForwardMetricsCtx {
collector: Arc::clone(&metrics),
route_id: route_id.map(|s| s.to_string()),
source_ip: Some(ip_str.clone()),
}),
).await?;
metrics.record_bytes(bytes_in, bytes_out, route_id);
Ok(())
}
Some(rustproxy_config::TlsMode::Terminate) => {
let tls_config = Self::find_tls_config(&domain, &tls_configs)?;
// TLS accept with timeout, applying route-level TLS settings
// Use shared acceptor (session resumption) or fall back to per-connection
let route_tls = route_match.route.action.tls.as_ref();
let acceptor = tls_handler::build_tls_acceptor_with_config(
&tls_config.cert_pem, &tls_config.key_pem, route_tls,
)?;
let acceptor = Self::get_tls_acceptor(&domain, &tls_configs, &*shared_tls_acceptor, route_tls)?;
let tls_stream = match tokio::time::timeout(
std::time::Duration::from_millis(conn_config.initial_data_timeout_ms),
tls_handler::accept_tls(stream, &acceptor),
@@ -567,12 +830,23 @@ impl TcpListenerManager {
}
};
// Check protocol restriction from route config
if let Some(ref required_protocol) = route_match.route.route_match.protocol {
let detected = if peeked { "http" } else { "tcp" };
if required_protocol != detected {
debug!("Protocol mismatch: route requires '{}', got '{}'", required_protocol, detected);
return Err("Protocol mismatch".into());
}
}
if peeked {
debug!(
"TLS Terminate + HTTP: {} -> {}:{} (domain: {:?})",
peer_addr, target_host, target_port, domain
);
http_proxy.handle_io(buf_stream, peer_addr, port).await;
// HTTP proxy manages its own per-request metrics — disarm TCP-level guard
_conn_guard.disarm();
http_proxy.handle_io(buf_stream, peer_addr, port, cancel.clone()).await;
} else {
debug!(
"TLS Terminate + TCP: {} -> {}:{} (domain: {:?})",
@@ -592,27 +866,80 @@ impl TcpListenerManager {
let (tls_read, tls_write) = tokio::io::split(buf_stream);
let (backend_read, backend_write) = tokio::io::split(backend);
let (bytes_in, bytes_out) = Self::forward_bidirectional_split_with_timeouts(
let (_bytes_in, _bytes_out) = Self::forward_bidirectional_split_with_timeouts(
tls_read, tls_write, backend_read, backend_write,
inactivity_timeout, max_lifetime,
Some(forwarder::ForwardMetricsCtx {
collector: Arc::clone(&metrics),
route_id: route_id.map(|s| s.to_string()),
source_ip: Some(ip_str.clone()),
}),
).await;
metrics.record_bytes(bytes_in, bytes_out, route_id);
}
Ok(())
}
Some(rustproxy_config::TlsMode::TerminateAndReencrypt) => {
// Inline TLS accept + HTTP detection (same pattern as Terminate mode)
let route_tls = route_match.route.action.tls.as_ref();
Self::handle_tls_terminate_reencrypt(
stream, n, &domain, &target_host, target_port,
peer_addr, &tls_configs, &metrics, route_id, &conn_config, route_tls,
).await
let acceptor = Self::get_tls_acceptor(&domain, &tls_configs, &*shared_tls_acceptor, route_tls)?;
let tls_stream = match tokio::time::timeout(
std::time::Duration::from_millis(conn_config.initial_data_timeout_ms),
tls_handler::accept_tls(stream, &acceptor),
).await {
Ok(Ok(s)) => s,
Ok(Err(e)) => return Err(e),
Err(_) => return Err("TLS handshake timeout".into()),
};
// Peek at decrypted data to detect protocol
let mut buf_stream = tokio::io::BufReader::new(tls_stream);
let is_http_data = {
use tokio::io::AsyncBufReadExt;
match buf_stream.fill_buf().await {
Ok(data) => sni_parser::is_http(data),
Err(_) => false,
}
};
// Check protocol restriction from route config
if let Some(ref required_protocol) = route_match.route.route_match.protocol {
let detected = if is_http_data { "http" } else { "tcp" };
if required_protocol != detected {
debug!("Protocol mismatch: route requires '{}', got '{}'", required_protocol, detected);
return Err("Protocol mismatch".into());
}
}
if is_http_data {
// HTTP: full per-request routing via HttpProxyService
// (backend TLS handled by HttpProxyService when upstream.use_tls is set)
debug!(
"TLS Terminate+Reencrypt + HTTP: {} (domain: {:?})",
peer_addr, domain
);
_conn_guard.disarm();
http_proxy.handle_io(buf_stream, peer_addr, port, cancel.clone()).await;
} else {
// Non-HTTP: TLS-to-TLS tunnel (existing behavior for raw TCP protocols)
debug!(
"TLS Terminate+Reencrypt + TCP: {} -> {}:{}",
peer_addr, target_host, target_port
);
Self::handle_tls_reencrypt_tunnel(
buf_stream, &target_host, target_port,
peer_addr, Arc::clone(&metrics), route_id,
&conn_config,
).await?;
}
Ok(())
}
None => {
if is_http {
// Plain HTTP - use HTTP proxy for request-level routing
debug!("HTTP proxy: {} on port {}", peer_addr, port);
http_proxy.handle_connection(stream, peer_addr, port).await;
// HTTP proxy manages its own per-request metrics — disarm TCP-level guard
_conn_guard.disarm();
http_proxy.handle_connection(stream, peer_addr, port, cancel.clone()).await;
Ok(())
} else {
// Plain TCP forwarding (non-HTTP)
@@ -640,54 +967,117 @@ impl TcpListenerManager {
let mut actual_buf = vec![0u8; n];
stream.read_exact(&mut actual_buf).await?;
let (bytes_in, bytes_out) = forwarder::forward_bidirectional_with_timeouts(
let (_bytes_in, _bytes_out) = forwarder::forward_bidirectional_with_timeouts(
stream, backend, Some(&actual_buf),
inactivity_timeout, max_lifetime, cancel,
Some(forwarder::ForwardMetricsCtx {
collector: Arc::clone(&metrics),
route_id: route_id.map(|s| s.to_string()),
source_ip: Some(ip_str.clone()),
}),
).await?;
metrics.record_bytes(bytes_in, bytes_out, route_id);
Ok(())
}
}
};
metrics.connection_closed(route_id);
// ConnectionGuard handles metrics.connection_closed() on drop
result
}
/// Handle TLS terminate-and-reencrypt: accept TLS from client, connect TLS to backend.
async fn handle_tls_terminate_reencrypt(
stream: tokio::net::TcpStream,
_peek_len: usize,
domain: &Option<String>,
/// Relay a connection to the TypeScript socket-handler via Unix domain socket.
///
/// Protocol:
/// 1. Connect to the Unix socket at `relay_path`
/// 2. Send a JSON metadata line (terminated by \n)
/// 3. Forward the initial peeked bytes
/// 4. Bidirectional relay between the TCP stream and Unix socket
async fn relay_to_socket_handler(
mut stream: tokio::net::TcpStream,
peek_len: usize,
port: u16,
peer_addr: std::net::SocketAddr,
route_match: &rustproxy_routing::RouteMatchResult<'_>,
domain: Option<&str>,
is_tls: bool,
relay_path: &str,
metrics: &MetricsCollector,
route_id: Option<&str>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::UnixStream;
// Connect to the TypeScript socket handler server
let mut unix_stream = match UnixStream::connect(relay_path).await {
Ok(s) => s,
Err(e) => {
error!("Failed to connect to socket handler relay at {}: {}", relay_path, e);
return Err(e.into());
}
};
// Build metadata JSON
let route_key = route_match.route.name.as_deref()
.or(route_match.route.id.as_deref())
.unwrap_or("unknown");
let metadata = serde_json::json!({
"routeKey": route_key,
"remoteIP": peer_addr.ip().to_string(),
"remotePort": peer_addr.port(),
"localPort": port,
"isTLS": is_tls,
"domain": domain,
});
// Send metadata line (JSON + newline)
let mut metadata_line = serde_json::to_string(&metadata)?;
metadata_line.push('\n');
unix_stream.write_all(metadata_line.as_bytes()).await?;
// Read the initial peeked data from the TCP stream (peek doesn't consume)
let mut initial_buf = vec![0u8; peek_len];
stream.read_exact(&mut initial_buf).await?;
// Forward initial data to the Unix socket
unix_stream.write_all(&initial_buf).await?;
// Bidirectional relay between TCP client and Unix socket handler
let initial_len = initial_buf.len() as u64;
match tokio::io::copy_bidirectional(&mut stream, &mut unix_stream).await {
Ok((c2s, s2c)) => {
// Include initial data bytes that were forwarded before copy_bidirectional
let total_in = c2s + initial_len;
debug!("Socket handler relay complete for {}: {} bytes in, {} bytes out",
route_key, total_in, s2c);
let ip = peer_addr.ip().to_string();
metrics.record_bytes(total_in, s2c, route_id, Some(&ip));
}
Err(e) => {
// Still record the initial data even on error
if initial_len > 0 {
let ip = peer_addr.ip().to_string();
metrics.record_bytes(initial_len, 0, route_id, Some(&ip));
}
debug!("Socket handler relay ended for {}: {}", route_key, e);
}
}
Ok(())
}
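For completeness, the receiving side of this framing reads one newline-terminated JSON metadata line and then treats the remainder of the Unix stream as the raw client bytes. The real consumer is the TypeScript socket-handler, which is not part of this diff; below is a hypothetical Rust echo helper that only illustrates the framing:

use tokio::io::{AsyncBufReadExt, AsyncReadExt, AsyncWriteExt, BufReader};
use tokio::net::UnixListener;

async fn run_echo_socket_handler(path: &str) -> std::io::Result<()> {
    let listener = UnixListener::bind(path)?;
    loop {
        let (stream, _) = listener.accept().await?;
        tokio::spawn(async move {
            let mut reader = BufReader::new(stream);
            // 1. Metadata line: {"routeKey":...,"remoteIP":...,"localPort":...}\n
            let mut meta_line = String::new();
            if reader.read_line(&mut meta_line).await.is_err() {
                return;
            }
            // 2. Everything after the newline (including the relayed initial
            //    bytes) is the client's stream; echo it back through the relay.
            let mut buf = vec![0u8; 4096];
            loop {
                let n = match reader.read(&mut buf).await {
                    Ok(0) | Err(_) => break,
                    Ok(n) => n,
                };
                if reader.get_mut().write_all(&buf[..n]).await.is_err() {
                    break;
                }
            }
        });
    }
}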
/// Handle non-HTTP TLS-to-TLS tunnel for terminate-and-reencrypt mode.
/// TLS accept has already been done by the caller; this only connects to the
/// backend over TLS and forwards bidirectionally.
async fn handle_tls_reencrypt_tunnel(
buf_stream: tokio::io::BufReader<tokio_rustls::server::TlsStream<tokio::net::TcpStream>>,
target_host: &str,
target_port: u16,
peer_addr: std::net::SocketAddr,
tls_configs: &HashMap<String, TlsCertConfig>,
metrics: &MetricsCollector,
metrics: Arc<MetricsCollector>,
route_id: Option<&str>,
conn_config: &ConnectionConfig,
route_tls: Option<&rustproxy_config::RouteTls>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let tls_config = Self::find_tls_config(domain, tls_configs)?;
let acceptor = tls_handler::build_tls_acceptor_with_config(
&tls_config.cert_pem, &tls_config.key_pem, route_tls,
)?;
// Accept TLS from client with timeout
let client_tls = match tokio::time::timeout(
std::time::Duration::from_millis(conn_config.initial_data_timeout_ms),
tls_handler::accept_tls(stream, &acceptor),
).await {
Ok(Ok(s)) => s,
Ok(Err(e)) => return Err(e),
Err(_) => return Err("TLS handshake timeout".into()),
};
debug!(
"TLS Terminate+Reencrypt: {} -> {}:{} (domain: {:?})",
peer_addr, target_host, target_port, domain
);
// Connect to backend over TLS with timeout
let backend_tls = match tokio::time::timeout(
std::time::Duration::from_millis(conn_config.connection_timeout_ms),
@@ -698,8 +1088,9 @@ impl TcpListenerManager {
Err(_) => return Err("Backend TLS connection timeout".into()),
};
// Forward between two TLS streams
let (client_read, client_write) = tokio::io::split(client_tls);
// Forward between decrypted client stream and backend TLS stream
// (BufReader preserves any already-buffered data from the peek)
let (client_read, client_write) = tokio::io::split(buf_stream);
let (backend_read, backend_write) = tokio::io::split(backend_tls);
let base_inactivity_ms = conn_config.socket_timeout_ms;
@@ -728,15 +1119,43 @@ impl TcpListenerManager {
}
};
let (bytes_in, bytes_out) = Self::forward_bidirectional_split_with_timeouts(
let (_bytes_in, _bytes_out) = Self::forward_bidirectional_split_with_timeouts(
client_read, client_write, backend_read, backend_write,
inactivity_timeout, max_lifetime,
Some(forwarder::ForwardMetricsCtx {
collector: metrics,
route_id: route_id.map(|s| s.to_string()),
source_ip: Some(peer_addr.ip().to_string()),
}),
).await;
metrics.record_bytes(bytes_in, bytes_out, route_id);
Ok(())
}
/// Get a TLS acceptor, preferring the shared one (with session resumption)
/// and falling back to per-connection when custom TLS versions are configured.
fn get_tls_acceptor(
domain: &Option<String>,
tls_configs: &HashMap<String, TlsCertConfig>,
shared_tls_acceptor: &Option<TlsAcceptor>,
route_tls: Option<&rustproxy_config::RouteTls>,
) -> Result<TlsAcceptor, Box<dyn std::error::Error + Send + Sync>> {
let has_custom_versions = route_tls
.and_then(|t| t.versions.as_ref())
.map(|v| !v.is_empty())
.unwrap_or(false);
if !has_custom_versions {
if let Some(shared) = shared_tls_acceptor {
return Ok(shared.clone()); // TlsAcceptor wraps Arc<ServerConfig>, clone is cheap
}
}
// Fallback: per-connection acceptor (custom TLS versions or shared build failed)
let tls_config = Self::find_tls_config(domain, tls_configs)?;
tls_handler::build_tls_acceptor_with_config(&tls_config.cert_pem, &tls_config.key_pem, route_tls)
}
/// Find the TLS config for a given domain.
fn find_tls_config<'a>(
domain: &Option<String>,
@@ -767,6 +1186,9 @@ impl TcpListenerManager {
}
/// Forward bidirectional between two split streams with inactivity and lifetime timeouts.
///
/// When `metrics` is provided, bytes are reported per-chunk (lock-free) for
/// real-time throughput measurement.
async fn forward_bidirectional_split_with_timeouts<R1, W1, R2, W2>(
mut client_read: R1,
mut client_write: W1,
@@ -774,6 +1196,7 @@ impl TcpListenerManager {
mut backend_write: W2,
inactivity_timeout: std::time::Duration,
max_lifetime: std::time::Duration,
metrics: Option<forwarder::ForwardMetricsCtx>,
) -> (u64, u64)
where
R1: tokio::io::AsyncRead + Unpin + Send + 'static,
@@ -789,6 +1212,7 @@ impl TcpListenerManager {
let start = std::time::Instant::now();
let la1 = Arc::clone(&last_activity);
let metrics_c2b = metrics.clone();
let c2b = tokio::spawn(async move {
let mut buf = vec![0u8; 65536];
let mut total = 0u64;
@@ -805,12 +1229,16 @@ impl TcpListenerManager {
start.elapsed().as_millis() as u64,
Ordering::Relaxed,
);
if let Some(ref ctx) = metrics_c2b {
ctx.collector.record_bytes(n as u64, 0, ctx.route_id.as_deref(), ctx.source_ip.as_deref());
}
}
let _ = backend_write.shutdown().await;
total
});
let la2 = Arc::clone(&last_activity);
let metrics_b2c = metrics;
let b2c = tokio::spawn(async move {
let mut buf = vec![0u8; 65536];
let mut total = 0u64;
@@ -827,6 +1255,9 @@ impl TcpListenerManager {
start.elapsed().as_millis() as u64,
Ordering::Relaxed,
);
if let Some(ref ctx) = metrics_b2c {
ctx.collector.record_bytes(0, n as u64, ctx.route_id.as_deref(), ctx.source_ip.as_deref());
}
}
let _ = client_write.shutdown().await;
total

View File

@@ -1,17 +1,99 @@
use std::collections::HashMap;
use std::io::BufReader;
use std::sync::Arc;
use rustls::pki_types::{CertificateDer, PrivateKeyDer};
use rustls::server::ResolvesServerCert;
use rustls::sign::CertifiedKey;
use rustls::ServerConfig;
use tokio::net::TcpStream;
use tokio_rustls::{TlsAcceptor, TlsConnector, server::TlsStream as ServerTlsStream};
use tracing::debug;
use tracing::{debug, info};
use crate::tcp_listener::TlsCertConfig;
/// Ensure the default crypto provider is installed.
fn ensure_crypto_provider() {
let _ = rustls::crypto::ring::default_provider().install_default();
}
/// SNI-based certificate resolver with pre-parsed CertifiedKeys.
/// Enables shared ServerConfig across connections — avoids per-connection PEM parsing
/// and enables TLS session resumption.
#[derive(Debug)]
pub struct CertResolver {
certs: HashMap<String, Arc<CertifiedKey>>,
fallback: Option<Arc<CertifiedKey>>,
}
impl CertResolver {
/// Build a resolver from PEM-encoded cert/key configs.
/// Parses all PEM data upfront so connections only do a cheap HashMap lookup.
pub fn new(configs: &HashMap<String, TlsCertConfig>) -> Result<Self, Box<dyn std::error::Error + Send + Sync>> {
ensure_crypto_provider();
let provider = rustls::crypto::ring::default_provider();
let mut certs = HashMap::new();
let mut fallback = None;
for (domain, cfg) in configs {
let cert_chain = load_certs(&cfg.cert_pem)?;
let key = load_private_key(&cfg.key_pem)?;
let ck = Arc::new(CertifiedKey::from_der(cert_chain, key, &provider)
.map_err(|e| format!("CertifiedKey for {}: {}", domain, e))?);
if domain == "*" {
fallback = Some(Arc::clone(&ck));
}
certs.insert(domain.clone(), ck);
}
// If no explicit "*" fallback, use the first available cert
if fallback.is_none() {
fallback = certs.values().next().map(Arc::clone);
}
Ok(Self { certs, fallback })
}
}
impl ResolvesServerCert for CertResolver {
fn resolve(&self, client_hello: rustls::server::ClientHello<'_>) -> Option<Arc<CertifiedKey>> {
let domain = match client_hello.server_name() {
Some(name) => name,
None => return self.fallback.clone(),
};
// Exact match
if let Some(ck) = self.certs.get(domain) {
return Some(Arc::clone(ck));
}
// Wildcard: sub.example.com → *.example.com
if let Some(dot) = domain.find('.') {
let wc = format!("*.{}", &domain[dot + 1..]);
if let Some(ck) = self.certs.get(&wc) {
return Some(Arc::clone(ck));
}
}
self.fallback.clone()
}
}
/// Build a shared TLS acceptor with SNI resolution, session cache, and session tickets.
/// The returned acceptor can be reused across all connections (cheap Arc clone).
pub fn build_shared_tls_acceptor(resolver: CertResolver) -> Result<TlsAcceptor, Box<dyn std::error::Error + Send + Sync>> {
ensure_crypto_provider();
let mut config = ServerConfig::builder()
.with_no_client_auth()
.with_cert_resolver(Arc::new(resolver));
// Shared session cache — enables session ID resumption across connections
config.session_storage = rustls::server::ServerSessionMemoryCache::new(4096);
// Session ticket resumption (12-hour lifetime, Chacha20Poly1305 encrypted)
config.ticketer = rustls::crypto::ring::Ticketer::new()
.map_err(|e| format!("Ticketer: {}", e))?;
info!("Built shared TLS config with session cache (4096) and ticket support");
Ok(TlsAcceptor::from(Arc::new(config)))
}
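Wiring these together is the two-step call that set_tls_configs performs in the listener: parse every PEM once into a CertResolver, then wrap it in a shared acceptor. A sketch, assuming the TlsCertConfig values already hold PEM strings:

use std::collections::HashMap;

fn build_acceptor(
    configs: &HashMap<String, TlsCertConfig>,
) -> Result<TlsAcceptor, Box<dyn std::error::Error + Send + Sync>> {
    // All PEM parsing happens here; connections only do a HashMap lookup per SNI.
    let resolver = CertResolver::new(configs)?;
    // The returned acceptor wraps an Arc<ServerConfig>, so cloning it per
    // connection is cheap and preserves the shared session cache and tickets.
    build_shared_tls_acceptor(resolver)
}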
/// Build a TLS acceptor from PEM-encoded cert and key data.
pub fn build_tls_acceptor(cert_pem: &str, key_pem: &str) -> Result<TlsAcceptor, Box<dyn std::error::Error + Send + Sync>> {
build_tls_acceptor_with_config(cert_pem, key_pem, None)

View File

@@ -12,6 +12,8 @@ pub struct MatchContext<'a> {
pub tls_version: Option<&'a str>,
pub headers: Option<&'a HashMap<String, String>>,
pub is_tls: bool,
/// Detected protocol: "http" or "tcp". None when unknown (e.g. pre-TLS-termination).
pub protocol: Option<&'a str>,
}
/// Result of a route match.
@@ -87,9 +89,17 @@ impl RouteManager {
if !matchers::domain_matches_any(&patterns, domain) {
return false;
}
} else if ctx.is_tls {
// TLS connection without SNI cannot match a domain-restricted route.
// This prevents session-ticket resumption from misrouting when clients
// omit SNI (RFC 8446 recommends but doesn't mandate SNI on resumption).
// Wildcard-only routes (domains: ["*"]) still match since they accept all.
let patterns = domains.to_vec();
let is_wildcard_only = patterns.iter().all(|d| *d == "*");
if !is_wildcard_only {
return false;
}
}
// If no domain provided but route requires domain, it depends on context
// For TLS passthrough, we need SNI; for other cases we may still match
}
// Path matching
@@ -137,6 +147,17 @@ impl RouteManager {
}
}
// Protocol matching
if let Some(ref required_protocol) = rm.protocol {
if let Some(protocol) = ctx.protocol {
if required_protocol != protocol {
return false;
}
}
// If protocol not yet known (None), allow match — protocol will be
// validated after detection (post-TLS-termination peek)
}
true
}
@@ -277,6 +298,7 @@ mod tests {
client_ip: None,
tls_version: None,
headers: None,
protocol: None,
},
action: RouteAction {
action_type: RouteActionType::Forward,
@@ -327,6 +349,7 @@ mod tests {
tls_version: None,
headers: None,
is_tls: false,
protocol: None,
};
let result = manager.find_route(&ctx);
@@ -349,6 +372,7 @@ mod tests {
tls_version: None,
headers: None,
is_tls: false,
protocol: None,
};
let result = manager.find_route(&ctx).unwrap();
@@ -372,6 +396,7 @@ mod tests {
tls_version: None,
headers: None,
is_tls: false,
protocol: None,
};
assert!(manager.find_route(&ctx).is_none());
@@ -457,6 +482,116 @@ mod tests {
tls_version: None,
headers: None,
is_tls: false,
protocol: None,
};
assert!(manager.find_route(&ctx).is_some());
}
#[test]
fn test_tls_no_sni_rejects_domain_restricted_route() {
let routes = vec![make_route(443, Some("example.com"), 0)];
let manager = RouteManager::new(routes);
// TLS connection without SNI should NOT match a domain-restricted route
let ctx = MatchContext {
port: 443,
domain: None,
path: None,
client_ip: None,
tls_version: None,
headers: None,
is_tls: true,
protocol: None,
};
assert!(manager.find_route(&ctx).is_none());
}
#[test]
fn test_tls_no_sni_rejects_wildcard_subdomain_route() {
let routes = vec![make_route(443, Some("*.example.com"), 0)];
let manager = RouteManager::new(routes);
// TLS connection without SNI should NOT match *.example.com
let ctx = MatchContext {
port: 443,
domain: None,
path: None,
client_ip: None,
tls_version: None,
headers: None,
is_tls: true,
protocol: None,
};
assert!(manager.find_route(&ctx).is_none());
}
#[test]
fn test_tls_no_sni_matches_wildcard_only_route() {
let routes = vec![make_route(443, Some("*"), 0)];
let manager = RouteManager::new(routes);
// TLS connection without SNI SHOULD match a wildcard-only route
let ctx = MatchContext {
port: 443,
domain: None,
path: None,
client_ip: None,
tls_version: None,
headers: None,
is_tls: true,
protocol: None,
};
assert!(manager.find_route(&ctx).is_some());
}
#[test]
fn test_tls_no_sni_skips_domain_restricted_matches_fallback() {
// Two routes: first is domain-restricted, second is wildcard catch-all
let routes = vec![
make_route(443, Some("specific.com"), 10),
make_route(443, Some("*"), 0),
];
let manager = RouteManager::new(routes);
// TLS without SNI should skip specific.com and fall through to wildcard
let ctx = MatchContext {
port: 443,
domain: None,
path: None,
client_ip: None,
tls_version: None,
headers: None,
is_tls: true,
protocol: None,
};
let result = manager.find_route(&ctx);
assert!(result.is_some());
let matched_domains = result.unwrap().route.route_match.domains.as_ref()
.map(|d| d.to_vec()).unwrap();
assert!(matched_domains.contains(&"*"));
}
#[test]
fn test_non_tls_no_domain_still_matches_domain_restricted() {
// Non-TLS (plain HTTP) without domain should still match domain-restricted routes
// (the HTTP proxy layer handles Host-based routing)
let routes = vec![make_route(80, Some("example.com"), 0)];
let manager = RouteManager::new(routes);
let ctx = MatchContext {
port: 80,
domain: None,
path: None,
client_ip: None,
tls_version: None,
headers: None,
is_tls: false,
protocol: None,
};
assert!(manager.find_route(&ctx).is_some());
@@ -475,6 +610,7 @@ mod tests {
tls_version: None,
headers: None,
is_tls: false,
protocol: None,
};
assert!(manager.find_route(&ctx).is_some());
@@ -525,6 +661,7 @@ mod tests {
tls_version: None,
headers: None,
is_tls: false,
protocol: None,
};
let result = manager.find_route(&ctx).unwrap();
assert_eq!(result.target.unwrap().host.first(), "api-backend");
@@ -538,8 +675,102 @@ mod tests {
tls_version: None,
headers: None,
is_tls: false,
protocol: None,
};
let result = manager.find_route(&ctx).unwrap();
assert_eq!(result.target.unwrap().host.first(), "default-backend");
}
fn make_route_with_protocol(port: u16, domain: Option<&str>, protocol: Option<&str>) -> RouteConfig {
let mut route = make_route(port, domain, 0);
route.route_match.protocol = protocol.map(|s| s.to_string());
route
}
#[test]
fn test_protocol_http_matches_http() {
let routes = vec![make_route_with_protocol(80, None, Some("http"))];
let manager = RouteManager::new(routes);
let ctx = MatchContext {
port: 80,
domain: None,
path: None,
client_ip: None,
tls_version: None,
headers: None,
is_tls: false,
protocol: Some("http"),
};
assert!(manager.find_route(&ctx).is_some());
}
#[test]
fn test_protocol_http_rejects_tcp() {
let routes = vec![make_route_with_protocol(80, None, Some("http"))];
let manager = RouteManager::new(routes);
let ctx = MatchContext {
port: 80,
domain: None,
path: None,
client_ip: None,
tls_version: None,
headers: None,
is_tls: false,
protocol: Some("tcp"),
};
assert!(manager.find_route(&ctx).is_none());
}
#[test]
fn test_protocol_none_matches_any() {
// Route with no protocol restriction matches any protocol
let routes = vec![make_route_with_protocol(80, None, None)];
let manager = RouteManager::new(routes);
let ctx_http = MatchContext {
port: 80,
domain: None,
path: None,
client_ip: None,
tls_version: None,
headers: None,
is_tls: false,
protocol: Some("http"),
};
assert!(manager.find_route(&ctx_http).is_some());
let ctx_tcp = MatchContext {
port: 80,
domain: None,
path: None,
client_ip: None,
tls_version: None,
headers: None,
is_tls: false,
protocol: Some("tcp"),
};
assert!(manager.find_route(&ctx_tcp).is_some());
}
#[test]
fn test_protocol_http_matches_when_unknown() {
// Route with protocol: "http" should match when ctx.protocol is None
// (pre-TLS-termination, protocol not yet known)
let routes = vec![make_route_with_protocol(443, None, Some("http"))];
let manager = RouteManager::new(routes);
let ctx = MatchContext {
port: 443,
domain: None,
path: None,
client_ip: None,
tls_version: None,
headers: None,
is_tls: true,
protocol: None,
};
assert!(manager.find_route(&ctx).is_some());
}
}
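Because the protocol of a TLS connection is only known after termination, matching is effectively two-phase: the listener first matches with protocol: None (which the rule above lets through), then re-validates against route_match.protocol once it has peeked the decrypted bytes. A hypothetical helper condensing that second check, mirroring the listener code in this diff:

fn protocol_allowed(required: Option<&str>, detected: &str) -> bool {
    match required {
        None => true,              // route has no protocol restriction
        Some(p) => p == detected,  // "http" or "tcp"
    }
}

// After TLS termination and a fill_buf() peek:
// let detected = if sni_parser::is_http(data) { "http" } else { "tcp" };
// if !protocol_allowed(route.route_match.protocol.as_deref(), detected) {
//     return Err("Protocol mismatch".into());
// }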

View File

@@ -15,8 +15,6 @@ tracing = { workspace = true }
thiserror = { workspace = true }
anyhow = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
rcgen = { workspace = true }
[dev-dependencies]
tempfile = { workspace = true }

View File

@@ -1,16 +1,15 @@
//! ACME (Let's Encrypt) integration using instant-acme.
//!
//! This module handles HTTP-01 challenge creation and certificate provisioning.
//! Supports persisting ACME account credentials to disk for reuse across restarts.
//! Account credentials are ephemeral — the consumer owns all persistence.
use std::path::{Path, PathBuf};
use instant_acme::{
Account, NewAccount, NewOrder, Identifier, ChallengeType, OrderStatus,
AccountCredentials,
};
use rcgen::{CertificateParams, KeyPair};
use thiserror::Error;
use tracing::{debug, info, warn};
use tracing::{debug, info};
#[derive(Debug, Error)]
pub enum AcmeError {
@@ -26,8 +25,6 @@ pub enum AcmeError {
NoHttp01Challenge,
#[error("Timeout waiting for order: {0}")]
Timeout(String),
#[error("Account persistence error: {0}")]
Persistence(String),
}
/// Pending HTTP-01 challenge that needs to be served.
@@ -41,8 +38,6 @@ pub struct PendingChallenge {
pub struct AcmeClient {
use_production: bool,
email: String,
/// Optional directory where account.json is persisted.
account_dir: Option<PathBuf>,
}
impl AcmeClient {
@@ -50,56 +45,15 @@ impl AcmeClient {
Self {
use_production,
email,
account_dir: None,
}
}
/// Create a new client with account persistence at the given directory.
pub fn with_persistence(email: String, use_production: bool, account_dir: impl AsRef<Path>) -> Self {
Self {
use_production,
email,
account_dir: Some(account_dir.as_ref().to_path_buf()),
}
}
/// Get or create an ACME account, persisting credentials if account_dir is set.
/// Create a new ACME account (ephemeral — not persisted).
async fn get_or_create_account(&self) -> Result<Account, AcmeError> {
let directory_url = self.directory_url();
// Try to restore from persisted credentials
if let Some(ref dir) = self.account_dir {
let account_file = dir.join("account.json");
if account_file.exists() {
match std::fs::read_to_string(&account_file) {
Ok(json) => {
match serde_json::from_str::<AccountCredentials>(&json) {
Ok(credentials) => {
match Account::from_credentials(credentials).await {
Ok(account) => {
debug!("Restored ACME account from {}", account_file.display());
return Ok(account);
}
Err(e) => {
warn!("Failed to restore ACME account, creating new: {}", e);
}
}
}
Err(e) => {
warn!("Invalid account.json, creating new account: {}", e);
}
}
}
Err(e) => {
warn!("Could not read account.json: {}", e);
}
}
}
}
// Create a new account
let contact = format!("mailto:{}", self.email);
let (account, credentials) = Account::create(
let (account, _credentials) = Account::create(
&NewAccount {
contact: &[&contact],
terms_of_service_agreed: true,
@@ -113,27 +67,6 @@ impl AcmeClient {
debug!("ACME account created");
// Persist credentials if we have a directory
if let Some(ref dir) = self.account_dir {
if let Err(e) = std::fs::create_dir_all(dir) {
warn!("Failed to create account directory {}: {}", dir.display(), e);
} else {
let account_file = dir.join("account.json");
match serde_json::to_string_pretty(&credentials) {
Ok(json) => {
if let Err(e) = std::fs::write(&account_file, &json) {
warn!("Failed to persist ACME account to {}: {}", account_file.display(), e);
} else {
info!("ACME account credentials persisted to {}", account_file.display());
}
}
Err(e) => {
warn!("Failed to serialize account credentials: {}", e);
}
}
}
}
Ok(account)
}
@@ -158,7 +91,7 @@ impl AcmeClient {
{
info!("Starting ACME provisioning for {} via {}", domain, self.directory_url());
// 1. Get or create ACME account (with persistence)
// 1. Get or create ACME account
let account = self.get_or_create_account().await?;
// 2. Create order
@@ -339,22 +272,4 @@ mod tests {
assert!(!client.directory_url().contains("staging"));
assert!(client.is_production());
}
#[test]
fn test_with_persistence_sets_account_dir() {
let tmp = tempfile::tempdir().unwrap();
let client = AcmeClient::with_persistence(
"test@example.com".to_string(),
false,
tmp.path(),
);
assert!(client.account_dir.is_some());
assert_eq!(client.account_dir.unwrap(), tmp.path());
}
#[test]
fn test_without_persistence_no_account_dir() {
let client = AcmeClient::new("test@example.com".to_string(), false);
assert!(client.account_dir.is_none());
}
}

View File

@@ -9,8 +9,6 @@ use crate::acme::AcmeClient;
pub enum CertManagerError {
#[error("ACME provisioning failed for {domain}: {message}")]
AcmeFailure { domain: String, message: String },
#[error("Certificate store error: {0}")]
Store(#[from] crate::cert_store::CertStoreError),
#[error("No ACME email configured")]
NoEmail,
}
@@ -46,25 +44,19 @@ impl CertManager {
/// Create an ACME client using this manager's configuration.
/// Returns None if no ACME email is configured.
/// Account credentials are persisted in the cert store base directory.
pub fn acme_client(&self) -> Option<AcmeClient> {
self.acme_email.as_ref().map(|email| {
AcmeClient::with_persistence(
email.clone(),
self.use_production,
self.store.base_dir(),
)
AcmeClient::new(email.clone(), self.use_production)
})
}
/// Load a static certificate into the store.
/// Load a static certificate into the store (infallible — pure cache insert).
pub fn load_static(
&mut self,
domain: String,
bundle: CertBundle,
) -> Result<(), CertManagerError> {
self.store.store(domain, bundle)?;
Ok(())
) {
self.store.store(domain, bundle);
}
/// Check and return domains that need certificate renewal.
@@ -153,19 +145,12 @@ impl CertManager {
},
};
self.store.store(domain.to_string(), bundle.clone())?;
self.store.store(domain.to_string(), bundle.clone());
info!("Certificate renewed and stored for {}", domain);
Ok(bundle)
}
/// Load all certificates from disk.
pub fn load_all(&mut self) -> Result<usize, CertManagerError> {
let loaded = self.store.load_all()?;
info!("Loaded {} certificates from store", loaded);
Ok(loaded)
}
/// Whether this manager has an ACME email configured.
pub fn has_acme(&self) -> bool {
self.acme_email.is_some()

View File

@@ -1,21 +1,7 @@
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use serde::{Deserialize, Serialize};
use thiserror::Error;
#[derive(Debug, Error)]
pub enum CertStoreError {
#[error("Certificate not found for domain: {0}")]
NotFound(String),
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
#[error("Invalid certificate: {0}")]
Invalid(String),
#[error("JSON error: {0}")]
Json(#[from] serde_json::Error),
}
/// Certificate metadata stored alongside certs on disk.
/// Certificate metadata stored alongside certs.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CertMetadata {
@@ -45,27 +31,18 @@ pub struct CertBundle {
pub metadata: CertMetadata,
}
/// Filesystem-backed certificate store.
/// In-memory certificate store.
///
/// File layout per domain:
/// ```text
/// {base_dir}/{domain}/
/// key.pem
/// cert.pem
/// ca.pem (optional)
/// metadata.json
/// ```
/// All persistence is owned by the consumer (TypeScript side).
/// This struct is a thin HashMap wrapper used as a runtime cache.
pub struct CertStore {
base_dir: PathBuf,
/// In-memory cache of loaded certs
cache: HashMap<String, CertBundle>,
}
impl CertStore {
/// Create a new cert store at the given directory.
pub fn new(base_dir: impl AsRef<Path>) -> Self {
/// Create a new empty cert store.
pub fn new() -> Self {
Self {
base_dir: base_dir.as_ref().to_path_buf(),
cache: HashMap::new(),
}
}
@@ -75,33 +52,9 @@ impl CertStore {
self.cache.get(domain)
}
/// Store a certificate to both cache and filesystem.
pub fn store(&mut self, domain: String, bundle: CertBundle) -> Result<(), CertStoreError> {
// Sanitize domain for directory name (replace wildcards)
let dir_name = domain.replace('*', "_wildcard_");
let cert_dir = self.base_dir.join(&dir_name);
// Create directory
std::fs::create_dir_all(&cert_dir)?;
// Write key
std::fs::write(cert_dir.join("key.pem"), &bundle.key_pem)?;
// Write cert
std::fs::write(cert_dir.join("cert.pem"), &bundle.cert_pem)?;
// Write CA cert if present
if let Some(ref ca) = bundle.ca_pem {
std::fs::write(cert_dir.join("ca.pem"), ca)?;
}
// Write metadata
let metadata_json = serde_json::to_string_pretty(&bundle.metadata)?;
std::fs::write(cert_dir.join("metadata.json"), metadata_json)?;
// Update cache
/// Store a certificate in the cache.
pub fn store(&mut self, domain: String, bundle: CertBundle) {
self.cache.insert(domain, bundle);
Ok(())
}
/// Check if a certificate exists for a domain.
@@ -109,68 +62,6 @@ impl CertStore {
self.cache.contains_key(domain)
}
/// Load all certificates from the base directory.
pub fn load_all(&mut self) -> Result<usize, CertStoreError> {
if !self.base_dir.exists() {
return Ok(0);
}
let entries = std::fs::read_dir(&self.base_dir)?;
let mut loaded = 0;
for entry in entries {
let entry = entry?;
let path = entry.path();
if !path.is_dir() {
continue;
}
let metadata_path = path.join("metadata.json");
let key_path = path.join("key.pem");
let cert_path = path.join("cert.pem");
// All three files must exist
if !metadata_path.exists() || !key_path.exists() || !cert_path.exists() {
continue;
}
// Load metadata
let metadata_str = std::fs::read_to_string(&metadata_path)?;
let metadata: CertMetadata = serde_json::from_str(&metadata_str)?;
// Load key and cert
let key_pem = std::fs::read_to_string(&key_path)?;
let cert_pem = std::fs::read_to_string(&cert_path)?;
// Load CA cert if present
let ca_path = path.join("ca.pem");
let ca_pem = if ca_path.exists() {
Some(std::fs::read_to_string(&ca_path)?)
} else {
None
};
let domain = metadata.domain.clone();
let bundle = CertBundle {
key_pem,
cert_pem,
ca_pem,
metadata,
};
self.cache.insert(domain, bundle);
loaded += 1;
}
Ok(loaded)
}
/// Get the base directory.
pub fn base_dir(&self) -> &Path {
&self.base_dir
}
/// Get the number of cached certificates.
pub fn count(&self) -> usize {
self.cache.len()
@@ -181,17 +72,15 @@ impl CertStore {
self.cache.iter()
}
/// Remove a certificate from cache and filesystem.
pub fn remove(&mut self, domain: &str) -> Result<bool, CertStoreError> {
let removed = self.cache.remove(domain).is_some();
if removed {
let dir_name = domain.replace('*', "_wildcard_");
let cert_dir = self.base_dir.join(&dir_name);
if cert_dir.exists() {
std::fs::remove_dir_all(&cert_dir)?;
}
}
Ok(removed)
/// Remove a certificate from the cache.
pub fn remove(&mut self, domain: &str) -> bool {
self.cache.remove(domain).is_some()
}
}
impl Default for CertStore {
fn default() -> Self {
Self::new()
}
}
@@ -215,100 +104,71 @@ mod tests {
}
#[test]
fn test_store_and_load_roundtrip() {
let tmp = tempfile::tempdir().unwrap();
let mut store = CertStore::new(tmp.path());
fn test_store_and_get() {
let mut store = CertStore::new();
let bundle = make_test_bundle("example.com");
store.store("example.com".to_string(), bundle.clone()).unwrap();
store.store("example.com".to_string(), bundle.clone());
// Verify files exist
let cert_dir = tmp.path().join("example.com");
assert!(cert_dir.join("key.pem").exists());
assert!(cert_dir.join("cert.pem").exists());
assert!(cert_dir.join("metadata.json").exists());
assert!(!cert_dir.join("ca.pem").exists()); // No CA cert
// Load into a fresh store
let mut store2 = CertStore::new(tmp.path());
let loaded = store2.load_all().unwrap();
assert_eq!(loaded, 1);
let loaded_bundle = store2.get("example.com").unwrap();
assert_eq!(loaded_bundle.key_pem, bundle.key_pem);
assert_eq!(loaded_bundle.cert_pem, bundle.cert_pem);
assert_eq!(loaded_bundle.metadata.domain, "example.com");
assert_eq!(loaded_bundle.metadata.source, CertSource::Static);
let loaded = store.get("example.com").unwrap();
assert_eq!(loaded.key_pem, bundle.key_pem);
assert_eq!(loaded.cert_pem, bundle.cert_pem);
assert_eq!(loaded.metadata.domain, "example.com");
assert_eq!(loaded.metadata.source, CertSource::Static);
}
#[test]
fn test_store_with_ca_cert() {
let tmp = tempfile::tempdir().unwrap();
let mut store = CertStore::new(tmp.path());
let mut store = CertStore::new();
let mut bundle = make_test_bundle("secure.com");
bundle.ca_pem = Some("-----BEGIN CERTIFICATE-----\nca-cert\n-----END CERTIFICATE-----\n".to_string());
store.store("secure.com".to_string(), bundle).unwrap();
store.store("secure.com".to_string(), bundle);
let cert_dir = tmp.path().join("secure.com");
assert!(cert_dir.join("ca.pem").exists());
let mut store2 = CertStore::new(tmp.path());
store2.load_all().unwrap();
let loaded = store2.get("secure.com").unwrap();
let loaded = store.get("secure.com").unwrap();
assert!(loaded.ca_pem.is_some());
}
#[test]
fn test_load_all_multiple_certs() {
let tmp = tempfile::tempdir().unwrap();
let mut store = CertStore::new(tmp.path());
fn test_multiple_certs() {
let mut store = CertStore::new();
store.store("a.com".to_string(), make_test_bundle("a.com")).unwrap();
store.store("b.com".to_string(), make_test_bundle("b.com")).unwrap();
store.store("c.com".to_string(), make_test_bundle("c.com")).unwrap();
store.store("a.com".to_string(), make_test_bundle("a.com"));
store.store("b.com".to_string(), make_test_bundle("b.com"));
store.store("c.com".to_string(), make_test_bundle("c.com"));
let mut store2 = CertStore::new(tmp.path());
let loaded = store2.load_all().unwrap();
assert_eq!(loaded, 3);
assert!(store2.has("a.com"));
assert!(store2.has("b.com"));
assert!(store2.has("c.com"));
}
#[test]
fn test_load_all_missing_directory() {
let mut store = CertStore::new("/nonexistent/path/to/certs");
let loaded = store.load_all().unwrap();
assert_eq!(loaded, 0);
assert_eq!(store.count(), 3);
assert!(store.has("a.com"));
assert!(store.has("b.com"));
assert!(store.has("c.com"));
}
#[test]
fn test_remove_cert() {
let tmp = tempfile::tempdir().unwrap();
let mut store = CertStore::new(tmp.path());
let mut store = CertStore::new();
store.store("remove-me.com".to_string(), make_test_bundle("remove-me.com")).unwrap();
store.store("remove-me.com".to_string(), make_test_bundle("remove-me.com"));
assert!(store.has("remove-me.com"));
let removed = store.remove("remove-me.com").unwrap();
let removed = store.remove("remove-me.com");
assert!(removed);
assert!(!store.has("remove-me.com"));
assert!(!tmp.path().join("remove-me.com").exists());
}
#[test]
fn test_wildcard_domain_storage() {
let tmp = tempfile::tempdir().unwrap();
let mut store = CertStore::new(tmp.path());
fn test_remove_nonexistent() {
let mut store = CertStore::new();
assert!(!store.remove("nonexistent.com"));
}
store.store("*.example.com".to_string(), make_test_bundle("*.example.com")).unwrap();
#[test]
fn test_wildcard_domain() {
let mut store = CertStore::new();
// Directory should use sanitized name
assert!(tmp.path().join("_wildcard_.example.com").exists());
store.store("*.example.com".to_string(), make_test_bundle("*.example.com"));
assert!(store.has("*.example.com"));
let mut store2 = CertStore::new(tmp.path());
store2.load_all().unwrap();
assert!(store2.has("*.example.com"));
let loaded = store.get("*.example.com").unwrap();
assert_eq!(loaded.metadata.domain, "*.example.com");
}
}
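
With persistence removed, the store's consumer-facing surface is just a handful of cache operations. The snippet below is an editorial sketch rather than code from the crate: the `use` path is omitted because it is not shown in this diff, and `bundle` stands in for a `CertBundle` obtained however the caller gets its PEM data (for example, pushed from the TypeScript side over IPC).

```rust
// Editorial sketch; assumes the CertStore/CertBundle types shown in the diff
// above, with their crate path omitted.
fn cache_round_trip(mut store: CertStore, bundle: CertBundle) {
    // Insert: infallible, purely in-memory.
    store.store("example.com".to_string(), bundle);
    assert!(store.has("example.com"));
    assert_eq!(store.count(), 1);

    // Lookup borrows straight out of the cache.
    let cached = store.get("example.com").expect("just inserted");
    println!("cached cert for {}", cached.metadata.domain);

    // Removal is a cache eviction only; any on-disk copy is the consumer's responsibility.
    assert!(store.remove("example.com"));
    assert!(!store.has("example.com"));
}
```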

View File

@@ -71,11 +71,14 @@ pub struct RustProxy {
cert_manager: Option<Arc<tokio::sync::Mutex<CertManager>>>,
challenge_server: Option<challenge_server::ChallengeServer>,
renewal_handle: Option<tokio::task::JoinHandle<()>>,
sampling_handle: Option<tokio::task::JoinHandle<()>>,
nft_manager: Option<NftManager>,
started: bool,
started_at: Option<Instant>,
/// Path to a Unix domain socket for relaying socket-handler connections back to TypeScript.
socket_handler_relay_path: Option<String>,
/// Shared path to a Unix domain socket for relaying socket-handler connections back to TypeScript.
socket_handler_relay: Arc<std::sync::RwLock<Option<String>>>,
/// Dynamically loaded certificates (via loadCertificate IPC), independent of CertManager.
loaded_certs: HashMap<String, TlsCertConfig>,
}
impl RustProxy {
@@ -100,18 +103,24 @@ impl RustProxy {
let cert_manager = Self::build_cert_manager(&options)
.map(|cm| Arc::new(tokio::sync::Mutex::new(cm)));
let retention = options.metrics.as_ref()
.and_then(|m| m.retention_seconds)
.unwrap_or(3600) as usize;
Ok(Self {
options,
route_table: ArcSwap::from(Arc::new(route_manager)),
listener_manager: None,
metrics: Arc::new(MetricsCollector::new()),
metrics: Arc::new(MetricsCollector::with_retention(retention)),
cert_manager,
challenge_server: None,
renewal_handle: None,
sampling_handle: None,
nft_manager: None,
started: false,
started_at: None,
socket_handler_relay_path: None,
socket_handler_relay: Arc::new(std::sync::RwLock::new(None)),
loaded_certs: HashMap::new(),
})
}
@@ -184,15 +193,12 @@ impl RustProxy {
return None;
}
let store_path = acme.certificate_store
.as_deref()
.unwrap_or("./certs");
let email = acme.email.clone()
.or_else(|| acme.account_email.clone());
let use_production = acme.use_production.unwrap_or(false);
let renew_before_days = acme.renew_threshold_days.unwrap_or(30);
let store = CertStore::new(store_path);
let store = CertStore::new();
Some(CertManager::new(store, email, use_production, renew_before_days))
}
@@ -222,19 +228,6 @@ impl RustProxy {
info!("Starting RustProxy...");
// Load persisted certificates
if let Some(ref cm) = self.cert_manager {
let mut cm = cm.lock().await;
match cm.load_all() {
Ok(count) => {
if count > 0 {
info!("Loaded {} persisted certificates", count);
}
}
Err(e) => warn!("Failed to load persisted certificates: {}", e),
}
}
// Auto-provision certificates for routes with certificate: 'auto'
self.auto_provision_certificates().await;
@@ -259,6 +252,9 @@ impl RustProxy {
);
listener.set_connection_config(conn_config);
// Share the socket-handler relay path with the listener
listener.set_socket_handler_relay(Arc::clone(&self.socket_handler_relay));
// Extract TLS configurations from routes and cert manager
let mut tls_configs = Self::extract_tls_configs(&self.options.routes);
@@ -275,6 +271,13 @@ impl RustProxy {
}
}
// Merge dynamically loaded certs (from loadCertificate IPC)
for (d, c) in &self.loaded_certs {
if !tls_configs.contains_key(d) {
tls_configs.insert(d.clone(), c.clone());
}
}
if !tls_configs.is_empty() {
debug!("Loaded TLS certificates for {} domains", tls_configs.len());
listener.set_tls_configs(tls_configs);
@@ -289,6 +292,21 @@ impl RustProxy {
self.started = true;
self.started_at = Some(Instant::now());
// Start the throughput sampling task
let metrics = Arc::clone(&self.metrics);
let interval_ms = self.options.metrics.as_ref()
.and_then(|m| m.sample_interval_ms)
.unwrap_or(1000);
self.sampling_handle = Some(tokio::spawn(async move {
let mut interval = tokio::time::interval(
std::time::Duration::from_millis(interval_ms)
);
loop {
interval.tick().await;
metrics.sample_all();
}
}));
// Apply NFTables rules for routes using nftables forwarding engine
self.apply_nftables_rules(&self.options.routes.clone()).await;
@@ -393,9 +411,7 @@ impl RustProxy {
};
let mut cm = cm_arc.lock().await;
if let Err(e) = cm.load_static(domain.clone(), bundle) {
error!("Failed to store certificate for {}: {}", domain, e);
}
cm.load_static(domain.clone(), bundle);
info!("Certificate provisioned for {}", domain);
}
@@ -493,6 +509,11 @@ impl RustProxy {
info!("Stopping RustProxy...");
// Stop sampling task
if let Some(handle) = self.sampling_handle.take() {
handle.abort();
}
// Stop renewal timer
if let Some(handle) = self.renewal_handle.take() {
handle.abort();
@@ -565,6 +586,12 @@ impl RustProxy {
}
}
}
// Merge dynamically loaded certs (from loadCertificate IPC)
for (d, c) in &self.loaded_certs {
if !tls_configs.contains_key(d) {
tls_configs.insert(d.clone(), c.clone());
}
}
listener.set_tls_configs(tls_configs);
// Add new ports
@@ -729,14 +756,16 @@ impl RustProxy {
}
/// Set the Unix domain socket path for relaying socket-handler connections to TypeScript.
/// The path is shared with the TcpListenerManager via Arc<RwLock>, so updates
/// take effect immediately for all new connections.
pub fn set_socket_handler_relay_path(&mut self, path: Option<String>) {
info!("Socket handler relay path set to: {:?}", path);
self.socket_handler_relay_path = path;
*self.socket_handler_relay.write().unwrap() = path;
}
/// Get the current socket handler relay path.
pub fn get_socket_handler_relay_path(&self) -> Option<&str> {
self.socket_handler_relay_path.as_deref()
pub fn get_socket_handler_relay_path(&self) -> Option<String> {
self.socket_handler_relay.read().unwrap().clone()
}
/// Load a certificate for a domain and hot-swap the TLS configuration.
@@ -770,10 +799,15 @@ impl RustProxy {
};
let mut cm = cm_arc.lock().await;
cm.load_static(domain.to_string(), bundle)
.map_err(|e| anyhow::anyhow!("Failed to store certificate: {}", e))?;
cm.load_static(domain.to_string(), bundle);
}
// Persist in loaded_certs so future rebuild calls include this cert
self.loaded_certs.insert(domain.to_string(), TlsCertConfig {
cert_pem: cert_pem.clone(),
key_pem: key_pem.clone(),
});
// Hot-swap TLS config on the listener
if let Some(ref mut listener) = self.listener_manager {
let mut tls_configs = Self::extract_tls_configs(&self.options.routes);
@@ -797,6 +831,13 @@ impl RustProxy {
}
}
// Merge dynamically loaded certs from previous loadCertificate calls
for (d, c) in &self.loaded_certs {
if !tls_configs.contains_key(d) {
tls_configs.insert(d.clone(), c.clone());
}
}
listener.set_tls_configs(tls_configs);
}
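
The relay-path change above replaces a plain `Option<String>` field with a handle shared with the listener manager. The following is a generic, self-contained illustration of that pattern using hypothetical component names (not the proxy's own types): one side writes through the `Arc<RwLock<...>>`, and every later read on the other side sees the new value with no listener rebuild required.

```rust
use std::sync::{Arc, RwLock};

// Hypothetical stand-ins for the control side and the listener side.
struct Control { relay: Arc<RwLock<Option<String>>> }
struct Listener { relay: Arc<RwLock<Option<String>>> }

impl Control {
    fn set_relay_path(&self, path: Option<String>) {
        // Takes effect for all future reads on the other side.
        *self.relay.write().unwrap() = path;
    }
}

impl Listener {
    fn relay_path_for_new_connection(&self) -> Option<String> {
        self.relay.read().unwrap().clone()
    }
}

fn main() {
    let shared = Arc::new(RwLock::new(None));
    let control = Control { relay: Arc::clone(&shared) };
    let listener = Listener { relay: Arc::clone(&shared) };

    assert_eq!(listener.relay_path_for_new_connection(), None);
    control.set_relay_path(Some("/tmp/relay.sock".to_string()));
    assert_eq!(
        listener.relay_path_for_new_connection(),
        Some("/tmp/relay.sock".to_string())
    );
}
```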

View File

@@ -29,6 +29,11 @@ struct Cli {
#[tokio::main]
async fn main() -> Result<()> {
// Install the default CryptoProvider early, before any TLS or ACME code runs.
// This prevents panics from instant-acme/hyper-rustls calling ClientConfig::builder()
// before TLS listeners have started. Idempotent — later calls harmlessly return Err.
let _ = rustls::crypto::ring::default_provider().install_default();
let cli = Cli::parse();
// Initialize tracing - write to stderr so stdout is reserved for management IPC

View File

@@ -185,6 +185,76 @@ pub async fn wait_for_port(port: u16, timeout_ms: u64) -> bool {
false
}
/// Start a TLS HTTP echo backend: accepts TLS, then responds with HTTP JSON
/// containing request details. Combines TLS acceptance with HTTP echo behavior.
pub async fn start_tls_http_backend(
port: u16,
backend_name: &str,
cert_pem: &str,
key_pem: &str,
) -> JoinHandle<()> {
use std::sync::Arc;
let acceptor = rustproxy_passthrough::build_tls_acceptor(cert_pem, key_pem)
.expect("Failed to build TLS acceptor");
let acceptor = Arc::new(acceptor);
let name = backend_name.to_string();
let listener = TcpListener::bind(format!("127.0.0.1:{}", port))
.await
.unwrap_or_else(|_| panic!("Failed to bind TLS HTTP backend on port {}", port));
tokio::spawn(async move {
loop {
let (stream, _) = match listener.accept().await {
Ok(conn) => conn,
Err(_) => break,
};
let acc = acceptor.clone();
let backend = name.clone();
tokio::spawn(async move {
let mut tls_stream = match acc.accept(stream).await {
Ok(s) => s,
Err(_) => return,
};
let mut buf = vec![0u8; 16384];
let n = match tls_stream.read(&mut buf).await {
Ok(0) | Err(_) => return,
Ok(n) => n,
};
let req_str = String::from_utf8_lossy(&buf[..n]);
// Parse first line: METHOD PATH HTTP/x.x
let first_line = req_str.lines().next().unwrap_or("");
let parts: Vec<&str> = first_line.split_whitespace().collect();
let method = parts.first().copied().unwrap_or("UNKNOWN");
let path = parts.get(1).copied().unwrap_or("/");
// Extract Host header
let host = req_str
.lines()
.find(|l| l.to_lowercase().starts_with("host:"))
.map(|l| l[5..].trim())
.unwrap_or("unknown");
let body = format!(
r#"{{"method":"{}","path":"{}","host":"{}","backend":"{}"}}"#,
method, path, host, backend
);
let response = format!(
"HTTP/1.1 200 OK\r\nContent-Type: application/json\r\nContent-Length: {}\r\nConnection: close\r\n\r\n{}",
body.len(),
body,
);
let _ = tls_stream.write_all(response.as_bytes()).await;
let _ = tls_stream.shutdown().await;
});
}
})
}
/// Helper to create a minimal route config for testing.
pub fn make_test_route(
port: u16,
@@ -201,6 +271,7 @@ pub fn make_test_route(
client_ip: None,
tls_version: None,
headers: None,
protocol: None,
},
action: rustproxy_config::RouteAction {
action_type: rustproxy_config::RouteActionType::Forward,
@@ -381,6 +452,86 @@ pub fn make_tls_terminate_route(
route
}
/// Start a TLS WebSocket echo backend: accepts TLS, performs WS handshake, then echoes data.
/// Combines TLS acceptance (like `start_tls_http_backend`) with WebSocket echo (like `start_ws_echo_backend`).
pub async fn start_tls_ws_echo_backend(
port: u16,
cert_pem: &str,
key_pem: &str,
) -> JoinHandle<()> {
use std::sync::Arc;
let acceptor = rustproxy_passthrough::build_tls_acceptor(cert_pem, key_pem)
.expect("Failed to build TLS acceptor");
let acceptor = Arc::new(acceptor);
let listener = TcpListener::bind(format!("127.0.0.1:{}", port))
.await
.unwrap_or_else(|_| panic!("Failed to bind TLS WS echo backend on port {}", port));
tokio::spawn(async move {
loop {
let (stream, _) = match listener.accept().await {
Ok(conn) => conn,
Err(_) => break,
};
let acc = acceptor.clone();
tokio::spawn(async move {
let mut tls_stream = match acc.accept(stream).await {
Ok(s) => s,
Err(_) => return,
};
// Read the HTTP upgrade request
let mut buf = vec![0u8; 4096];
let n = match tls_stream.read(&mut buf).await {
Ok(0) | Err(_) => return,
Ok(n) => n,
};
let req_str = String::from_utf8_lossy(&buf[..n]);
// Extract Sec-WebSocket-Key for handshake
let ws_key = req_str
.lines()
.find(|l| l.to_lowercase().starts_with("sec-websocket-key:"))
.map(|l| l.split(':').nth(1).unwrap_or("").trim().to_string())
.unwrap_or_default();
// Send 101 Switching Protocols
let accept_response = format!(
"HTTP/1.1 101 Switching Protocols\r\n\
Upgrade: websocket\r\n\
Connection: Upgrade\r\n\
Sec-WebSocket-Accept: {}\r\n\
\r\n",
ws_key
);
if tls_stream
.write_all(accept_response.as_bytes())
.await
.is_err()
{
return;
}
// Echo all data back (raw TCP after upgrade)
let mut echo_buf = vec![0u8; 65536];
loop {
let n = match tls_stream.read(&mut echo_buf).await {
Ok(0) | Err(_) => break,
Ok(n) => n,
};
if tls_stream.write_all(&echo_buf[..n]).await.is_err() {
break;
}
}
});
}
})
}
/// Helper to create a TLS passthrough route for testing.
pub fn make_tls_passthrough_route(
port: u16,

View File

@@ -407,6 +407,305 @@ async fn test_websocket_through_proxy() {
proxy.stop().await.unwrap();
}
/// Test that terminate-and-reencrypt mode routes HTTP traffic through the
/// full HTTP proxy with per-request Host-based routing.
///
/// This verifies the new behavior: after TLS termination, HTTP data is detected
/// and routed through HttpProxyService (like nginx) instead of being blindly tunneled.
#[tokio::test]
async fn test_terminate_and_reencrypt_http_routing() {
let backend1_port = next_port();
let backend2_port = next_port();
let proxy_port = next_port();
let (cert1, key1) = generate_self_signed_cert("alpha.example.com");
let (cert2, key2) = generate_self_signed_cert("beta.example.com");
// Generate separate backend certs (backends are independent TLS servers)
let (backend_cert1, backend_key1) = generate_self_signed_cert("localhost");
let (backend_cert2, backend_key2) = generate_self_signed_cert("localhost");
// Start TLS HTTP echo backends (proxy re-encrypts to these)
let _b1 = start_tls_http_backend(backend1_port, "alpha", &backend_cert1, &backend_key1).await;
let _b2 = start_tls_http_backend(backend2_port, "beta", &backend_cert2, &backend_key2).await;
// Create terminate-and-reencrypt routes
let mut route1 = make_tls_terminate_route(
proxy_port, "alpha.example.com", "127.0.0.1", backend1_port, &cert1, &key1,
);
route1.action.tls.as_mut().unwrap().mode = rustproxy_config::TlsMode::TerminateAndReencrypt;
let mut route2 = make_tls_terminate_route(
proxy_port, "beta.example.com", "127.0.0.1", backend2_port, &cert2, &key2,
);
route2.action.tls.as_mut().unwrap().mode = rustproxy_config::TlsMode::TerminateAndReencrypt;
let options = RustProxyOptions {
routes: vec![route1, route2],
..Default::default()
};
let mut proxy = RustProxy::new(options).unwrap();
proxy.start().await.unwrap();
assert!(wait_for_port(proxy_port, 2000).await);
// Test alpha domain - HTTP request through TLS terminate-and-reencrypt
let alpha_result = with_timeout(async {
let _ = rustls::crypto::ring::default_provider().install_default();
let tls_config = rustls::ClientConfig::builder()
.dangerous()
.with_custom_certificate_verifier(std::sync::Arc::new(InsecureVerifier))
.with_no_client_auth();
let connector = tokio_rustls::TlsConnector::from(std::sync::Arc::new(tls_config));
let stream = tokio::net::TcpStream::connect(format!("127.0.0.1:{}", proxy_port))
.await
.unwrap();
let server_name = rustls::pki_types::ServerName::try_from("alpha.example.com".to_string()).unwrap();
let mut tls_stream = connector.connect(server_name, stream).await.unwrap();
let request = "GET /api/data HTTP/1.1\r\nHost: alpha.example.com\r\nConnection: close\r\n\r\n";
tls_stream.write_all(request.as_bytes()).await.unwrap();
let mut response = Vec::new();
tls_stream.read_to_end(&mut response).await.unwrap();
String::from_utf8_lossy(&response).to_string()
}, 10)
.await
.unwrap();
let alpha_body = extract_body(&alpha_result);
assert!(
alpha_body.contains(r#""backend":"alpha"#),
"Expected alpha backend, got: {}",
alpha_body
);
assert!(
alpha_body.contains(r#""method":"GET"#),
"Expected GET method, got: {}",
alpha_body
);
assert!(
alpha_body.contains(r#""path":"/api/data"#),
"Expected /api/data path, got: {}",
alpha_body
);
// Verify original Host header is preserved (not replaced with backend IP:port)
assert!(
alpha_body.contains(r#""host":"alpha.example.com"#),
"Expected original Host header alpha.example.com, got: {}",
alpha_body
);
// Test beta domain - different host goes to different backend
let beta_result = with_timeout(async {
let _ = rustls::crypto::ring::default_provider().install_default();
let tls_config = rustls::ClientConfig::builder()
.dangerous()
.with_custom_certificate_verifier(std::sync::Arc::new(InsecureVerifier))
.with_no_client_auth();
let connector = tokio_rustls::TlsConnector::from(std::sync::Arc::new(tls_config));
let stream = tokio::net::TcpStream::connect(format!("127.0.0.1:{}", proxy_port))
.await
.unwrap();
let server_name = rustls::pki_types::ServerName::try_from("beta.example.com".to_string()).unwrap();
let mut tls_stream = connector.connect(server_name, stream).await.unwrap();
let request = "GET /other HTTP/1.1\r\nHost: beta.example.com\r\nConnection: close\r\n\r\n";
tls_stream.write_all(request.as_bytes()).await.unwrap();
let mut response = Vec::new();
tls_stream.read_to_end(&mut response).await.unwrap();
String::from_utf8_lossy(&response).to_string()
}, 10)
.await
.unwrap();
let beta_body = extract_body(&beta_result);
assert!(
beta_body.contains(r#""backend":"beta"#),
"Expected beta backend, got: {}",
beta_body
);
assert!(
beta_body.contains(r#""path":"/other"#),
"Expected /other path, got: {}",
beta_body
);
// Verify original Host header is preserved for beta too
assert!(
beta_body.contains(r#""host":"beta.example.com"#),
"Expected original Host header beta.example.com, got: {}",
beta_body
);
proxy.stop().await.unwrap();
}
/// Test that WebSocket upgrade works through terminate-and-reencrypt mode.
///
/// Verifies the full chain: client→TLS→proxy terminates→re-encrypts→TLS→backend WebSocket.
/// The proxy's `handle_websocket_upgrade` checks `upstream.use_tls` and calls
/// `connect_tls_backend()` when true. This test covers that path.
#[tokio::test]
async fn test_terminate_and_reencrypt_websocket() {
let backend_port = next_port();
let proxy_port = next_port();
let domain = "ws.example.com";
// Frontend cert (client→proxy TLS)
let (frontend_cert, frontend_key) = generate_self_signed_cert(domain);
// Backend cert (proxy→backend TLS)
let (backend_cert, backend_key) = generate_self_signed_cert("localhost");
// Start TLS WebSocket echo backend
let _backend = start_tls_ws_echo_backend(backend_port, &backend_cert, &backend_key).await;
// Create terminate-and-reencrypt route
let mut route = make_tls_terminate_route(
proxy_port,
domain,
"127.0.0.1",
backend_port,
&frontend_cert,
&frontend_key,
);
route.action.tls.as_mut().unwrap().mode = rustproxy_config::TlsMode::TerminateAndReencrypt;
let options = RustProxyOptions {
routes: vec![route],
..Default::default()
};
let mut proxy = RustProxy::new(options).unwrap();
proxy.start().await.unwrap();
assert!(wait_for_port(proxy_port, 2000).await);
let result = with_timeout(
async {
let _ = rustls::crypto::ring::default_provider().install_default();
let tls_config = rustls::ClientConfig::builder()
.dangerous()
.with_custom_certificate_verifier(std::sync::Arc::new(InsecureVerifier))
.with_no_client_auth();
let connector =
tokio_rustls::TlsConnector::from(std::sync::Arc::new(tls_config));
let stream = tokio::net::TcpStream::connect(format!("127.0.0.1:{}", proxy_port))
.await
.unwrap();
let server_name =
rustls::pki_types::ServerName::try_from(domain.to_string()).unwrap();
let mut tls_stream = connector.connect(server_name, stream).await.unwrap();
// Send WebSocket upgrade request through TLS
let request = format!(
"GET /ws HTTP/1.1\r\n\
Host: {}\r\n\
Upgrade: websocket\r\n\
Connection: Upgrade\r\n\
Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\r\n\
Sec-WebSocket-Version: 13\r\n\
\r\n",
domain
);
tls_stream.write_all(request.as_bytes()).await.unwrap();
// Read the 101 response (byte-by-byte until \r\n\r\n)
let mut response_buf = Vec::with_capacity(4096);
let mut temp = [0u8; 1];
loop {
let n = tls_stream.read(&mut temp).await.unwrap();
if n == 0 {
break;
}
response_buf.push(temp[0]);
if response_buf.len() >= 4 {
let len = response_buf.len();
if response_buf[len - 4..] == *b"\r\n\r\n" {
break;
}
}
}
let response_str = String::from_utf8_lossy(&response_buf).to_string();
assert!(
response_str.contains("101"),
"Expected 101 Switching Protocols, got: {}",
response_str
);
assert!(
response_str.to_lowercase().contains("upgrade: websocket"),
"Expected Upgrade header, got: {}",
response_str
);
// After upgrade, send data and verify echo
let test_data = b"Hello TLS WebSocket!";
tls_stream.write_all(test_data).await.unwrap();
// Read echoed data
let mut echo_buf = vec![0u8; 256];
let n = tls_stream.read(&mut echo_buf).await.unwrap();
let echoed = &echo_buf[..n];
assert_eq!(echoed, test_data, "Expected echo of sent data");
"ok".to_string()
},
10,
)
.await
.unwrap();
assert_eq!(result, "ok");
proxy.stop().await.unwrap();
}
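
The doc comment on this test names the internal branch it covers: on a WebSocket upgrade, the proxy dials the backend over TLS when the route marks the upstream for re-encryption. As a purely editorial illustration of that decision (hypothetical types and names, not the proxy's `handle_websocket_upgrade` source; the SNI choice in particular is an assumption of the sketch):

```rust
// Hypothetical stand-ins, for illustration only.
struct UpstreamTarget {
    host: String,
    port: u16,
    use_tls: bool, // set for terminate-and-reencrypt routes
}

enum BackendDial {
    PlainTcp { addr: String },
    Tls { addr: String, sni: String },
}

fn plan_backend_dial(upstream: &UpstreamTarget) -> BackendDial {
    let addr = format!("{}:{}", upstream.host, upstream.port);
    if upstream.use_tls {
        // Re-encrypt: open a fresh TLS session to the backend.
        // Using the upstream host as SNI is an assumption of this sketch.
        BackendDial::Tls { addr, sni: upstream.host.clone() }
    } else {
        // Plain forward: raw TCP to the backend.
        BackendDial::PlainTcp { addr }
    }
}

fn main() {
    let reencrypt = UpstreamTarget { host: "127.0.0.1".into(), port: 9443, use_tls: true };
    match plan_backend_dial(&reencrypt) {
        BackendDial::Tls { addr, .. } => println!("TLS to {}", addr),
        BackendDial::PlainTcp { addr } => println!("plain TCP to {}", addr),
    }
}
```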
/// Test that the protocol field on route config is accepted and processed.
#[tokio::test]
async fn test_protocol_field_in_route_config() {
let backend_port = next_port();
let proxy_port = next_port();
let _backend = start_http_echo_backend(backend_port, "main").await;
// Create a route with protocol: "http" - should only match HTTP traffic
let mut route = make_test_route(proxy_port, None, "127.0.0.1", backend_port);
route.route_match.protocol = Some("http".to_string());
let options = RustProxyOptions {
routes: vec![route],
..Default::default()
};
let mut proxy = RustProxy::new(options).unwrap();
proxy.start().await.unwrap();
assert!(wait_for_port(proxy_port, 2000).await);
// HTTP request should match the route and get proxied
let result = with_timeout(async {
let response = send_http_request(proxy_port, "example.com", "GET", "/test").await;
extract_body(&response).to_string()
}, 10)
.await
.unwrap();
assert!(
result.contains(r#""backend":"main"#),
"Expected main backend, got: {}",
result
);
assert!(
result.contains(r#""path":"/test"#),
"Expected /test path, got: {}",
result
);
proxy.stop().await.unwrap();
}
/// InsecureVerifier for test TLS client connections.
#[derive(Debug)]
struct InsecureVerifier;

View File

@@ -1,7 +1,31 @@
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as plugins from '../ts/plugins.js';
import * as http from 'http';
import { SmartProxy, SocketHandlers } from '../ts/index.js';
/**
* Helper to make HTTP requests using Node's http module (unlike fetch/undici,
* http.request doesn't keep the event loop alive via a connection pool).
*/
function httpRequest(url: string, options: { method?: string; headers?: Record<string, string> } = {}): Promise<{ status: number; headers: http.IncomingHttpHeaders; body: string }> {
return new Promise((resolve, reject) => {
const parsed = new URL(url);
const req = http.request({
hostname: parsed.hostname,
port: parsed.port,
path: parsed.pathname + parsed.search,
method: options.method || 'GET',
headers: options.headers,
}, (res) => {
let body = '';
res.on('data', (chunk: Buffer) => { body += chunk.toString(); });
res.on('end', () => resolve({ status: res.statusCode!, headers: res.headers, body }));
});
req.on('error', reject);
req.end();
});
}
tap.test('should handle HTTP requests on port 80 for ACME challenges', async (tools) => {
tools.timeout(10000);
@@ -37,23 +61,14 @@ tap.test('should handle HTTP requests on port 80 for ACME challenges', async (to
const proxy = new SmartProxy(settings);
// Mock NFTables manager
(proxy as any).nftablesManager = {
ensureNFTablesSetup: async () => {},
stop: async () => {}
};
await proxy.start();
// Make an HTTP request to the challenge endpoint
const response = await fetch('http://localhost:18080/.well-known/acme-challenge/test-token', {
method: 'GET'
});
const response = await httpRequest('http://localhost:18080/.well-known/acme-challenge/test-token');
// Verify response
expect(response.status).toEqual(200);
const body = await response.text();
expect(body).toEqual('challenge-response-for-test-token');
expect(response.body).toEqual('challenge-response-for-test-token');
// Verify request was handled
expect(handledRequests.length).toEqual(1);
@@ -95,16 +110,10 @@ tap.test('should parse HTTP headers correctly', async (tools) => {
const proxy = new SmartProxy(settings);
// Mock NFTables manager
(proxy as any).nftablesManager = {
ensureNFTablesSetup: async () => {},
stop: async () => {}
};
await proxy.start();
// Make request with custom headers
const response = await fetch('http://localhost:18081/test', {
const response = await httpRequest('http://localhost:18081/test', {
method: 'POST',
headers: {
'X-Custom-Header': 'test-value',
@@ -113,7 +122,7 @@ tap.test('should parse HTTP headers correctly', async (tools) => {
});
expect(response.status).toEqual(200);
const body = await response.json();
const body = JSON.parse(response.body);
// Verify headers were parsed correctly
expect(capturedContext.headers['x-custom-header']).toEqual('test-value');

test/test.bun.ts (new file, 123 lines)
View File

@@ -0,0 +1,123 @@
import { tap, expect } from '@git.zone/tstest/tapbundle';
import {
createHttpsTerminateRoute,
createCompleteHttpsServer,
createHttpRoute,
} from '../ts/proxies/smart-proxy/utils/route-helpers.js';
import {
mergeRouteConfigs,
cloneRoute,
routeMatchesPath,
} from '../ts/proxies/smart-proxy/utils/route-utils.js';
import {
validateRoutes,
validateRouteConfig,
} from '../ts/proxies/smart-proxy/utils/route-validator.js';
import type { IRouteConfig } from '../ts/proxies/smart-proxy/models/route-types.js';
tap.test('route creation - createHttpsTerminateRoute produces correct structure', async () => {
const route = createHttpsTerminateRoute('secure.example.com', { host: '127.0.0.1', port: 8443 });
expect(route).toHaveProperty('match');
expect(route).toHaveProperty('action');
expect(route.action.type).toEqual('forward');
expect(route.action.tls).toBeDefined();
expect(route.action.tls!.mode).toEqual('terminate');
expect(route.match.domains).toEqual('secure.example.com');
});
tap.test('route creation - createCompleteHttpsServer returns redirect and main route', async () => {
const routes = createCompleteHttpsServer('app.example.com', { host: '127.0.0.1', port: 3000 });
expect(routes).toBeArray();
expect(routes.length).toBeGreaterThanOrEqual(2);
// Should have an HTTP→HTTPS redirect and an HTTPS route
const hasRedirect = routes.some((r) => r.action.type === 'forward' && r.action.redirect !== undefined);
const hasHttps = routes.some((r) => r.action.tls?.mode === 'terminate');
expect(hasRedirect || hasHttps).toBeTrue();
});
tap.test('route validation - validateRoutes on a set of routes', async () => {
const routes: IRouteConfig[] = [
createHttpRoute('a.com', { host: '127.0.0.1', port: 3000 }),
createHttpRoute('b.com', { host: '127.0.0.1', port: 4000 }),
];
const result = validateRoutes(routes);
expect(result.valid).toBeTrue();
expect(result.errors).toHaveLength(0);
});
tap.test('route validation - validateRoutes catches invalid route in set', async () => {
const routes: any[] = [
createHttpRoute('valid.com', { host: '127.0.0.1', port: 3000 }),
{ match: { ports: 80 } }, // missing action
];
const result = validateRoutes(routes);
expect(result.valid).toBeFalse();
expect(result.errors.length).toBeGreaterThan(0);
});
tap.test('path matching - routeMatchesPath with exact path', async () => {
const route = createHttpRoute('example.com', { host: '127.0.0.1', port: 3000 });
route.match.path = '/api';
expect(routeMatchesPath(route, '/api')).toBeTrue();
expect(routeMatchesPath(route, '/other')).toBeFalse();
});
tap.test('path matching - route without path matches everything', async () => {
const route = createHttpRoute('example.com', { host: '127.0.0.1', port: 3000 });
// No path set, should match any path
expect(routeMatchesPath(route, '/anything')).toBeTrue();
expect(routeMatchesPath(route, '/')).toBeTrue();
});
tap.test('route merging - mergeRouteConfigs combines routes', async () => {
const base = createHttpRoute('example.com', { host: '127.0.0.1', port: 3000 });
base.priority = 10;
base.name = 'base-route';
const merged = mergeRouteConfigs(base, {
priority: 50,
name: 'merged-route',
});
expect(merged.priority).toEqual(50);
expect(merged.name).toEqual('merged-route');
// Original route fields should be preserved
expect(merged.match.domains).toEqual('example.com');
expect(merged.action.targets![0].host).toEqual('127.0.0.1');
});
tap.test('route merging - mergeRouteConfigs does not mutate original', async () => {
const base = createHttpRoute('example.com', { host: '127.0.0.1', port: 3000 });
base.name = 'original';
const merged = mergeRouteConfigs(base, { name: 'changed' });
expect(base.name).toEqual('original');
expect(merged.name).toEqual('changed');
});
tap.test('route cloning - cloneRoute produces independent copy', async () => {
const original = createHttpRoute('example.com', { host: '127.0.0.1', port: 3000 });
original.priority = 42;
original.name = 'original-route';
const cloned = cloneRoute(original);
// Should be equal in value
expect(cloned.match.domains).toEqual('example.com');
expect(cloned.priority).toEqual(42);
expect(cloned.name).toEqual('original-route');
expect(cloned.action.targets![0].host).toEqual('127.0.0.1');
expect(cloned.action.targets![0].port).toEqual(3000);
// Should be independent - modifying clone shouldn't affect original
cloned.name = 'cloned-route';
cloned.priority = 99;
expect(original.name).toEqual('original-route');
expect(original.priority).toEqual(42);
});
export default tap.start();

View File

@@ -84,17 +84,15 @@ tap.test('should forward TCP connections correctly', async () => {
socket.on('error', reject);
});
// Test data transmission
// Test data transmission - wait for welcome message first
await new Promise<void>((resolve) => {
client.on('data', (data) => {
client.once('data', (data) => {
const response = data.toString();
console.log('Received:', response);
expect(response).toContain('Connected to TCP test server');
client.end();
resolve();
});
client.write('Hello from client');
});
await smartProxy.stop();
@@ -146,15 +144,13 @@ tap.test('should handle TLS passthrough correctly', async () => {
// Test data transmission over TLS
await new Promise<void>((resolve) => {
client.on('data', (data) => {
client.once('data', (data) => {
const response = data.toString();
console.log('TLS Received:', response);
expect(response).toContain('Connected to TLS test server');
client.end();
resolve();
});
client.write('Hello from TLS client');
});
await smartProxy.stop();
@@ -222,15 +218,13 @@ tap.test('should handle SNI-based forwarding', async () => {
});
await new Promise<void>((resolve) => {
clientA.on('data', (data) => {
clientA.once('data', (data) => {
const response = data.toString();
console.log('Domain A response:', response);
expect(response).toContain('Connected to TLS test server');
clientA.end();
resolve();
});
clientA.write('Hello from domain A');
});
// Test domain B should also use TLS since it's on port 8443
@@ -251,7 +245,7 @@ tap.test('should handle SNI-based forwarding', async () => {
});
await new Promise<void>((resolve) => {
clientB.on('data', (data) => {
clientB.once('data', (data) => {
const response = data.toString();
console.log('Domain B response:', response);
// Should be forwarded to TLS server
@@ -259,8 +253,6 @@ tap.test('should handle SNI-based forwarding', async () => {
clientB.end();
resolve();
});
clientB.write('Hello from domain B');
});
await smartProxy.stop();

test/test.deno.ts (new file, 111 lines)
View File

@@ -0,0 +1,111 @@
import { tap, expect } from '@git.zone/tstest/tapbundle';
import {
createHttpRoute,
createHttpsTerminateRoute,
createLoadBalancerRoute,
} from '../ts/proxies/smart-proxy/utils/route-helpers.js';
import {
findMatchingRoutes,
findBestMatchingRoute,
routeMatchesDomain,
routeMatchesPort,
routeMatchesPath,
} from '../ts/proxies/smart-proxy/utils/route-utils.js';
import {
validateRouteConfig,
isValidDomain,
isValidPort,
} from '../ts/proxies/smart-proxy/utils/route-validator.js';
import type { IRouteConfig } from '../ts/proxies/smart-proxy/models/route-types.js';
tap.test('route creation - createHttpRoute produces correct structure', async () => {
const route = createHttpRoute('example.com', { host: '127.0.0.1', port: 3000 });
expect(route).toHaveProperty('match');
expect(route).toHaveProperty('action');
expect(route.match.domains).toEqual('example.com');
expect(route.action.type).toEqual('forward');
expect(route.action.targets).toBeArray();
expect(route.action.targets![0].host).toEqual('127.0.0.1');
expect(route.action.targets![0].port).toEqual(3000);
});
tap.test('route creation - createHttpRoute with array of domains', async () => {
const route = createHttpRoute(['a.com', 'b.com'], { host: 'localhost', port: 8080 });
expect(route.match.domains).toEqual(['a.com', 'b.com']);
});
tap.test('route validation - validateRouteConfig accepts valid route', async () => {
const route = createHttpRoute('valid.example.com', { host: '10.0.0.1', port: 8080 });
const result = validateRouteConfig(route);
expect(result.valid).toBeTrue();
expect(result.errors).toHaveLength(0);
});
tap.test('route validation - validateRouteConfig rejects missing action', async () => {
const badRoute = { match: { ports: 80 } } as any;
const result = validateRouteConfig(badRoute);
expect(result.valid).toBeFalse();
expect(result.errors.length).toBeGreaterThan(0);
});
tap.test('route validation - isValidDomain checks correctly', async () => {
expect(isValidDomain('example.com')).toBeTrue();
expect(isValidDomain('*.example.com')).toBeTrue();
expect(isValidDomain('')).toBeFalse();
});
tap.test('route validation - isValidPort checks correctly', async () => {
expect(isValidPort(80)).toBeTrue();
expect(isValidPort(443)).toBeTrue();
expect(isValidPort(0)).toBeFalse();
expect(isValidPort(70000)).toBeFalse();
expect(isValidPort(-1)).toBeFalse();
});
tap.test('domain matching - exact domain', async () => {
const route = createHttpRoute('example.com', { host: '127.0.0.1', port: 3000 });
expect(routeMatchesDomain(route, 'example.com')).toBeTrue();
expect(routeMatchesDomain(route, 'other.com')).toBeFalse();
});
tap.test('domain matching - wildcard domain', async () => {
const route = createHttpRoute('*.example.com', { host: '127.0.0.1', port: 3000 });
expect(routeMatchesDomain(route, 'sub.example.com')).toBeTrue();
expect(routeMatchesDomain(route, 'example.com')).toBeFalse();
});
tap.test('port matching - single port', async () => {
const route = createHttpRoute('example.com', { host: '127.0.0.1', port: 3000 });
// createHttpRoute defaults to port 80
expect(routeMatchesPort(route, 80)).toBeTrue();
expect(routeMatchesPort(route, 443)).toBeFalse();
});
tap.test('route finding - findBestMatchingRoute selects by priority', async () => {
const lowPriority = createHttpRoute('example.com', { host: '127.0.0.1', port: 3000 });
lowPriority.priority = 10;
const highPriority = createHttpRoute('example.com', { host: '127.0.0.1', port: 4000 });
highPriority.priority = 100;
const routes: IRouteConfig[] = [lowPriority, highPriority];
const best = findBestMatchingRoute(routes, { domain: 'example.com', port: 80 });
expect(best).toBeDefined();
expect(best!.priority).toEqual(100);
expect(best!.action.targets![0].port).toEqual(4000);
});
tap.test('route finding - findMatchingRoutes returns all matches', async () => {
const route1 = createHttpRoute('example.com', { host: '127.0.0.1', port: 3000 });
const route2 = createHttpRoute('example.com', { host: '127.0.0.1', port: 4000 });
const route3 = createHttpRoute('other.com', { host: '127.0.0.1', port: 5000 });
const matches = findMatchingRoutes([route1, route2, route3], { domain: 'example.com', port: 80 });
expect(matches).toHaveLength(2);
});
export default tap.start();

View File

@@ -6,6 +6,9 @@ import { SmartProxy } from '../ts/index.js';
let testProxy: SmartProxy;
let targetServer: net.Server;
const ECHO_PORT = 47200;
const PROXY_PORT = 47201;
// Create a simple echo server as target
tap.test('setup test environment', async () => {
// Create target server that echoes data back
@@ -23,9 +26,13 @@ tap.test('setup test environment', async () => {
});
});
await new Promise<void>((resolve) => {
targetServer.listen(9876, () => {
console.log('Target server listening on port 9876');
await new Promise<void>((resolve, reject) => {
targetServer.on('error', (err) => {
console.error(`Echo server error: ${err.message}`);
reject(err);
});
targetServer.listen(ECHO_PORT, () => {
console.log(`Target server listening on port ${ECHO_PORT}`);
resolve();
});
});
@@ -35,13 +42,13 @@ tap.test('setup test environment', async () => {
routes: [{
name: 'tcp-forward-test',
match: {
ports: 8888 // Plain TCP port
ports: PROXY_PORT // Plain TCP port
},
action: {
type: 'forward',
targets: [{
host: 'localhost',
port: 9876
port: ECHO_PORT
}]
// No TLS configuration - just plain TCP forwarding
}
@@ -49,7 +56,7 @@ tap.test('setup test environment', async () => {
defaults: {
target: {
host: 'localhost',
port: 9876
port: ECHO_PORT
}
},
enableDetailedLogging: true,
@@ -64,7 +71,7 @@ tap.test('setup test environment', async () => {
});
tap.test('should keep WebSocket-like connection open for extended period', async (tools) => {
tools.timeout(60000); // 60 second test timeout
tools.timeout(15000); // 15 second test timeout
const client = new net.Socket();
let messagesReceived = 0;
@@ -72,7 +79,7 @@ tap.test('should keep WebSocket-like connection open for extended period', async
// Connect to proxy
await new Promise<void>((resolve, reject) => {
client.connect(8888, 'localhost', () => {
client.connect(PROXY_PORT, 'localhost', () => {
console.log('Client connected to proxy');
resolve();
});
@@ -99,19 +106,19 @@ tap.test('should keep WebSocket-like connection open for extended period', async
expect(messagesReceived).toEqual(1);
// Simulate WebSocket-like keep-alive pattern
// Send periodic messages over 60 seconds
// Send periodic messages over 5 seconds
const startTime = Date.now();
const pingInterval = setInterval(() => {
if (!connectionClosed && Date.now() - startTime < 60000) {
if (!connectionClosed && Date.now() - startTime < 5000) {
console.log('Sending ping...');
client.write('PING\n');
} else {
clearInterval(pingInterval);
}
}, 10000); // Every 10 seconds
}, 1000); // Every 1 second
// Wait for 55 seconds (must complete within 60s runner timeout)
await new Promise(resolve => setTimeout(resolve, 55000));
// Wait for 5 seconds — sufficient to verify the connection stays open
await new Promise(resolve => setTimeout(resolve, 5000));
// Clean up interval
clearInterval(pingInterval);
@@ -119,8 +126,8 @@ tap.test('should keep WebSocket-like connection open for extended period', async
// Connection should still be open
expect(connectionClosed).toEqual(false);
// Should have received responses (1 hello + 6 pings)
expect(messagesReceived).toBeGreaterThan(5);
// Should have received responses (1 hello + ~5 pings)
expect(messagesReceived).toBeGreaterThan(3);
// Close connection gracefully
client.end();

View File

@@ -5,8 +5,8 @@ import * as net from 'net';
let smartProxyInstance: SmartProxy;
let echoServer: net.Server;
const echoServerPort = 9876;
const proxyPort = 8080;
const echoServerPort = 47300;
const proxyPort = 47301;
// Create an echo server for testing
tap.test('should create echo server for testing', async () => {
@@ -16,7 +16,11 @@ tap.test('should create echo server for testing', async () => {
});
});
await new Promise<void>((resolve) => {
await new Promise<void>((resolve, reject) => {
echoServer.on('error', (err) => {
console.error(`Echo server error: ${err.message}`);
reject(err);
});
echoServer.listen(echoServerPort, () => {
console.log(`Echo server listening on port ${echoServerPort}`);
resolve();
@@ -27,28 +31,21 @@ tap.test('should create echo server for testing', async () => {
tap.test('should create SmartProxy instance with new metrics', async () => {
smartProxyInstance = new SmartProxy({
routes: [{
id: 'test-route', // id is needed for per-route metrics tracking in Rust
name: 'test-route',
match: {
ports: [proxyPort],
domains: '*'
ports: [proxyPort]
// No domains — port-only route uses fast-path (no data peeking)
},
action: {
type: 'forward',
targets: [{
host: 'localhost',
port: echoServerPort
}],
tls: {
mode: 'passthrough'
}
}]
// No TLS — plain TCP forwarding
}
}],
defaults: {
target: {
host: 'localhost',
port: echoServerPort
}
},
metrics: {
enabled: true,
sampleIntervalMs: 100, // Sample every 100ms for faster testing
@@ -86,12 +83,11 @@ tap.test('should verify new metrics API structure', async () => {
expect(metrics.throughput).toHaveProperty('byIP');
});
tap.test('should track throughput correctly', async (tools) => {
tap.test('should track active connections', async (tools) => {
const metrics = smartProxyInstance.getMetrics();
// Initial state - no connections yet
// Initial state - no connections
expect(metrics.connections.active()).toEqual(0);
expect(metrics.throughput.instant()).toEqual({ in: 0, out: 0 });
// Create a test connection
const client = new net.Socket();
@@ -101,21 +97,14 @@ tap.test('should track throughput correctly', async (tools) => {
console.log('Connected to proxy');
resolve();
});
client.on('error', reject);
});
// Send some data
// Send some data and wait for echo
const testData = Buffer.from('Hello, World!'.repeat(100)); // ~1.3KB
await new Promise<void>((resolve) => {
client.write(testData, () => {
console.log('Data sent');
resolve();
});
client.write(testData, () => resolve());
});
// Wait for echo response
await new Promise<void>((resolve) => {
client.once('data', (data) => {
console.log(`Received ${data.length} bytes back`);
@@ -123,49 +112,74 @@ tap.test('should track throughput correctly', async (tools) => {
});
});
// Wait for metrics to be sampled
await tools.delayFor(200);
// Wait for metrics to be polled
await tools.delayFor(500);
// Check metrics
// Active connection count should be 1
expect(metrics.connections.active()).toEqual(1);
expect(metrics.requests.total()).toBeGreaterThan(0);
// Total connections should be tracked
expect(metrics.connections.total()).toBeGreaterThan(0);
// Check throughput - should show bytes transferred
const instant = metrics.throughput.instant();
console.log('Instant throughput:', instant);
// Per-route tracking should show the connection
const byRoute = metrics.connections.byRoute();
console.log('Connections by route:', Array.from(byRoute.entries()));
expect(byRoute.get('test-route')).toEqual(1);
// Should have recorded some throughput
expect(instant.in).toBeGreaterThan(0);
expect(instant.out).toBeGreaterThan(0);
// Check totals
expect(metrics.totals.bytesIn()).toBeGreaterThan(0);
expect(metrics.totals.bytesOut()).toBeGreaterThan(0);
// Clean up
// Clean up - close the connection
client.destroy();
// Wait for connection cleanup with retry
for (let i = 0; i < 10; i++) {
// Wait for connection cleanup
for (let i = 0; i < 20; i++) {
await tools.delayFor(100);
if (metrics.connections.active() === 0) break;
}
// Verify connection was cleaned up
expect(metrics.connections.active()).toEqual(0);
});
tap.test('should track multiple connections and routes', async (tools) => {
tap.test('should track bytes after connection closes', async (tools) => {
const metrics = smartProxyInstance.getMetrics();
// Ensure we start with 0 connections
const initialActive = metrics.connections.active();
if (initialActive > 0) {
console.log(`Warning: Starting with ${initialActive} active connections, waiting for cleanup...`);
for (let i = 0; i < 10; i++) {
await tools.delayFor(100);
if (metrics.connections.active() === 0) break;
}
// Create a connection, send data, then close it
const client = new net.Socket();
await new Promise<void>((resolve, reject) => {
client.connect(proxyPort, 'localhost', () => resolve());
client.on('error', reject);
});
// Send some data
const testData = Buffer.from('Hello, World!'.repeat(100)); // ~1.3KB
await new Promise<void>((resolve) => {
client.write(testData, () => resolve());
});
// Wait for echo
await new Promise<void>((resolve) => {
client.once('data', () => resolve());
});
// Close the connection — Rust records bytes on connection close
client.destroy();
// Wait for connection to fully close and metrics to poll
for (let i = 0; i < 20; i++) {
await tools.delayFor(100);
if (metrics.connections.active() === 0 && metrics.totals.bytesIn() > 0) break;
}
// Now bytes should be recorded
console.log('Total bytes in:', metrics.totals.bytesIn());
console.log('Total bytes out:', metrics.totals.bytesOut());
expect(metrics.totals.bytesIn()).toBeGreaterThan(0);
expect(metrics.totals.bytesOut()).toBeGreaterThan(0);
});
tap.test('should track multiple connections', async (tools) => {
const metrics = smartProxyInstance.getMetrics();
// Ensure we start with 0 active connections
for (let i = 0; i < 20; i++) {
await tools.delayFor(100);
if (metrics.connections.active() === 0) break;
}
// Create multiple connections
@@ -174,61 +188,40 @@ tap.test('should track multiple connections and routes', async (tools) => {
for (let i = 0; i < connectionCount; i++) {
const client = new net.Socket();
await new Promise<void>((resolve, reject) => {
client.connect(proxyPort, 'localhost', () => {
resolve();
});
client.connect(proxyPort, 'localhost', () => resolve());
client.on('error', reject);
});
clients.push(client);
}
// Allow connections to be fully established and tracked
await tools.delayFor(100);
// Allow connections to be fully established and metrics polled
await tools.delayFor(500);
// Verify active connections
console.log('Active connections:', metrics.connections.active());
expect(metrics.connections.active()).toEqual(connectionCount);
// Send data on each connection
const dataPromises = clients.map((client, index) => {
return new Promise<void>((resolve) => {
const data = Buffer.from(`Connection ${index}: `.repeat(50));
client.write(data, () => {
client.once('data', () => resolve());
});
});
});
await Promise.all(dataPromises);
await tools.delayFor(200);
// Check metrics by route
// Per-route should track all connections
const routeConnections = metrics.connections.byRoute();
console.log('Connections by route:', Array.from(routeConnections.entries()));
expect(routeConnections.get('test-route')).toEqual(connectionCount);
// Check top IPs
const topIPs = metrics.connections.topIPs(5);
console.log('Top IPs:', topIPs);
expect(topIPs.length).toBeGreaterThan(0);
expect(topIPs[0].count).toEqual(connectionCount);
// Clean up all connections
clients.forEach(client => client.destroy());
await tools.delayFor(100);
for (let i = 0; i < 20; i++) {
await tools.delayFor(100);
if (metrics.connections.active() === 0) break;
}
expect(metrics.connections.active()).toEqual(0);
});
tap.test('should provide throughput history', async (tools) => {
tap.test('should provide throughput data', async (tools) => {
const metrics = smartProxyInstance.getMetrics();
// Create a connection and send data periodically
const client = new net.Socket();
await new Promise<void>((resolve, reject) => {
client.connect(proxyPort, 'localhost', () => resolve());
client.on('error', reject);
@@ -241,17 +234,16 @@ tap.test('should provide throughput history', async (tools) => {
await tools.delayFor(100);
}
// Get throughput history
const history = metrics.throughput.history(2); // Last 2 seconds
console.log('Throughput history entries:', history.length);
console.log('Sample history entry:', history[0]);
// Close connection so bytes are recorded
client.destroy();
expect(history.length).toBeGreaterThan(0);
expect(history[0]).toHaveProperty('timestamp');
expect(history[0]).toHaveProperty('in');
expect(history[0]).toHaveProperty('out');
// Wait for metrics to update
for (let i = 0; i < 20; i++) {
await tools.delayFor(100);
if (metrics.totals.bytesIn() > 0) break;
}
// Verify different time windows show different rates
// Verify different time windows are available (all return same data from Rust for now)
const instant = metrics.throughput.instant();
const recent = metrics.throughput.recent();
const average = metrics.throughput.average();
@@ -261,8 +253,9 @@ tap.test('should provide throughput history', async (tools) => {
console.log(' Recent (10s):', recent);
console.log(' Average (60s):', average);
// Clean up
client.destroy();
// Total bytes should have accumulated
expect(metrics.totals.bytesIn()).toBeGreaterThan(0);
expect(metrics.totals.bytesOut()).toBeGreaterThan(0);
});
tap.test('should clean up resources', async () => {

View File

@@ -5,19 +5,27 @@ import { SmartProxy } from '../ts/proxies/smart-proxy/smart-proxy.js';
let echoServer: net.Server;
let proxy: SmartProxy;
const ECHO_PORT = 47400;
const PROXY_PORT_1 = 47401;
const PROXY_PORT_2 = 47402;
tap.test('port forwarding should not immediately close connections', async (tools) => {
// Set a timeout for this test
tools.timeout(10000); // 10 seconds
// Create an echo server
echoServer = await new Promise<net.Server>((resolve) => {
echoServer = await new Promise<net.Server>((resolve, reject) => {
const server = net.createServer((socket) => {
socket.on('data', (data) => {
socket.write(`ECHO: ${data}`);
});
});
server.listen(8888, () => {
console.log('Echo server listening on port 8888');
server.on('error', (err) => {
console.error(`Echo server error: ${err.message}`);
reject(err);
});
server.listen(ECHO_PORT, () => {
console.log(`Echo server listening on port ${ECHO_PORT}`);
resolve(server);
});
});
@@ -26,10 +34,10 @@ tap.test('port forwarding should not immediately close connections', async (tool
proxy = new SmartProxy({
routes: [{
name: 'test-forward',
match: { ports: 9999 },
match: { ports: PROXY_PORT_1 },
action: {
type: 'forward',
targets: [{ host: 'localhost', port: 8888 }]
targets: [{ host: 'localhost', port: ECHO_PORT }]
}
}]
});
@@ -37,7 +45,7 @@ tap.test('port forwarding should not immediately close connections', async (tool
await proxy.start();
// Test connection through proxy
const client = net.createConnection(9999, 'localhost');
const client = net.createConnection(PROXY_PORT_1, 'localhost');
const result = await new Promise<string>((resolve, reject) => {
client.on('data', (data) => {
@@ -52,6 +60,9 @@ tap.test('port forwarding should not immediately close connections', async (tool
});
expect(result).toEqual('ECHO: Hello');
// Stop proxy from test 1 before test 2 reassigns the variable
await proxy.stop();
});
tap.test('TLS passthrough should work correctly', async () => {
@@ -59,7 +70,7 @@ tap.test('TLS passthrough should work correctly', async () => {
proxy = new SmartProxy({
routes: [{
name: 'tls-test',
match: { ports: 8443, domains: 'test.example.com' },
match: { ports: PROXY_PORT_2, domains: 'test.example.com' },
action: {
type: 'forward',
tls: { mode: 'passthrough' },
@@ -85,16 +96,6 @@ tap.test('cleanup', async () => {
});
});
}
if (proxy) {
await proxy.stop();
console.log('Proxy stopped');
}
});
export default tap.start().then(() => {
// Force exit after tests complete
setTimeout(() => {
console.log('Forcing process exit');
process.exit(0);
}, 1000);
});
export default tap.start();

View File

@@ -562,4 +562,168 @@ tap.test('Route Integration - Combining Multiple Route Types', async () => {
}
});
// --------------------------------- Protocol Match Field Tests ---------------------------------
tap.test('Routes: Should accept protocol field on route match', async () => {
// Create a route with protocol: 'http'
const httpOnlyRoute: IRouteConfig = {
match: {
ports: 443,
domains: 'api.example.com',
protocol: 'http',
},
action: {
type: 'forward',
targets: [{ host: 'backend', port: 8080 }],
tls: {
mode: 'terminate',
certificate: 'auto',
},
},
name: 'HTTP-only Route',
};
// Validate the route - protocol field should not cause errors
const validation = validateRouteConfig(httpOnlyRoute);
expect(validation.valid).toBeTrue();
// Verify the protocol field is preserved
expect(httpOnlyRoute.match.protocol).toEqual('http');
});
tap.test('Routes: Should accept protocol tcp on route match', async () => {
// Create a route with protocol: 'tcp'
const tcpOnlyRoute: IRouteConfig = {
match: {
ports: 443,
domains: 'db.example.com',
protocol: 'tcp',
},
action: {
type: 'forward',
targets: [{ host: 'db-server', port: 5432 }],
tls: {
mode: 'passthrough',
},
},
name: 'TCP-only Route',
};
const validation = validateRouteConfig(tcpOnlyRoute);
expect(validation.valid).toBeTrue();
expect(tcpOnlyRoute.match.protocol).toEqual('tcp');
});
tap.test('Routes: Protocol field should work with terminate-and-reencrypt', async () => {
// Create a terminate-and-reencrypt route that only accepts HTTP
const reencryptRoute = createHttpsTerminateRoute(
'secure.example.com',
{ host: 'backend', port: 443 },
{ reencrypt: true, certificate: 'auto', name: 'Reencrypt HTTP Route' }
);
// Set protocol restriction to http
reencryptRoute.match.protocol = 'http';
// Validate the route
const validation = validateRouteConfig(reencryptRoute);
expect(validation.valid).toBeTrue();
// Verify TLS mode
expect(reencryptRoute.action.tls?.mode).toEqual('terminate-and-reencrypt');
// Verify protocol field is preserved
expect(reencryptRoute.match.protocol).toEqual('http');
});
tap.test('Routes: Protocol field should not affect domain/port matching', async () => {
// Routes with and without protocol field should both match the same domain/port
const routeWithProtocol: IRouteConfig = {
match: {
ports: 443,
domains: 'example.com',
protocol: 'http',
},
action: {
type: 'forward',
targets: [{ host: 'backend', port: 8080 }],
tls: { mode: 'terminate', certificate: 'auto' },
},
name: 'With Protocol',
priority: 10,
};
const routeWithoutProtocol: IRouteConfig = {
match: {
ports: 443,
domains: 'example.com',
},
action: {
type: 'forward',
targets: [{ host: 'fallback', port: 8081 }],
tls: { mode: 'terminate', certificate: 'auto' },
},
name: 'Without Protocol',
priority: 5,
};
const routes = [routeWithProtocol, routeWithoutProtocol];
// Both routes should match the domain/port (protocol is a hint for Rust-side matching)
const matches = findMatchingRoutes(routes, { domain: 'example.com', port: 443 });
expect(matches.length).toEqual(2);
// The one with higher priority should be first
const best = findBestMatchingRoute(routes, { domain: 'example.com', port: 443 });
expect(best).not.toBeUndefined();
expect(best!.name).toEqual('With Protocol');
});
tap.test('Routes: Protocol field preserved through route cloning', async () => {
const original: IRouteConfig = {
match: {
ports: 8443,
domains: 'clone-test.example.com',
protocol: 'http',
},
action: {
type: 'forward',
targets: [{ host: 'backend', port: 3000 }],
tls: { mode: 'terminate-and-reencrypt', certificate: 'auto' },
},
name: 'Clone Test',
};
const cloned = cloneRoute(original);
// Verify protocol is preserved in clone
expect(cloned.match.protocol).toEqual('http');
expect(cloned.action.tls?.mode).toEqual('terminate-and-reencrypt');
// Modifying the clone should not affect the original
cloned.match.protocol = 'tcp';
expect(original.match.protocol).toEqual('http');
});
tap.test('Routes: Protocol field preserved through route merging', async () => {
const base: IRouteConfig = {
match: {
ports: 443,
domains: 'merge-test.example.com',
protocol: 'http',
},
action: {
type: 'forward',
targets: [{ host: 'backend', port: 3000 }],
tls: { mode: 'terminate-and-reencrypt', certificate: 'auto' },
},
name: 'Merge Base',
};
// Merge with override that changes name but not protocol
const merged = mergeRouteConfigs(base, { name: 'Merged Route' });
expect(merged.match.protocol).toEqual('http');
expect(merged.name).toEqual('Merged Route');
});
export default tap.start();
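
Taken together, these tests describe how a single listening port can be split by protocol. The following is a minimal sketch of such a configuration, assuming only the route options exercised above; the backend hosts and ports are hypothetical placeholders.

import { SmartProxy } from '@push.rocks/smartproxy';
import type { IRouteConfig } from '@push.rocks/smartproxy';

// HTTP traffic on 443 is terminated and re-encrypted to an HTTPS backend,
// while everything else on the same port stays a raw TCP/TLS passthrough.
const httpRoute: IRouteConfig = {
  name: 'https-api',
  priority: 10,
  match: { ports: 443, domains: 'api.example.com', protocol: 'http' },
  action: {
    type: 'forward',
    targets: [{ host: 'backend', port: 443 }], // hypothetical backend
    tls: { mode: 'terminate-and-reencrypt', certificate: 'auto' },
  },
};

const tcpRoute: IRouteConfig = {
  name: 'tcp-fallback',
  priority: 5,
  match: { ports: 443, domains: 'api.example.com', protocol: 'tcp' },
  action: {
    type: 'forward',
    targets: [{ host: 'tcp-backend', port: 5432 }], // hypothetical backend
    tls: { mode: 'passthrough' },
  },
};

const proxy = new SmartProxy({ routes: [httpRoute, tcpRoute] });
await proxy.start();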

View File

@@ -200,10 +200,11 @@ tap.test('route security with block list should work', async () => {
client.connect(9993, '127.0.0.1');
});
// Should connect then be immediately closed by security
// Connection should be blocked by security - either closed or error
expect(events).toContain('connected');
expect(events).toContain('closed');
expect(result).toEqual('closed');
// Rust drops the stream immediately; client may see 'closed', 'error', or both
const wasBlocked = result === 'closed' || result === 'error';
expect(wasBlocked).toEqual(true);
expect(targetServerConnections).toEqual(0);
// Clean up

View File

@@ -13,11 +13,11 @@ tap.test('route security should be correctly configured', async () => {
targets: [{
host: '127.0.0.1',
port: 8991
}],
security: {
ipAllowList: ['192.168.1.1'],
ipBlockList: ['10.0.0.1']
}
}]
},
security: {
ipAllowList: ['192.168.1.1'],
ipBlockList: ['10.0.0.1']
}
}];
@@ -30,32 +30,9 @@ tap.test('route security should be correctly configured', async () => {
// The proxy should be created successfully
expect(proxy).toBeInstanceOf(smartproxy.SmartProxy);
// Test that security manager exists and has the isIPAuthorized method
const securityManager = (proxy as any).securityManager;
expect(securityManager).toBeDefined();
expect(typeof securityManager.isIPAuthorized).toEqual('function');
// Test IP authorization logic directly
const isLocalhostAllowed = securityManager.isIPAuthorized(
'127.0.0.1',
['192.168.1.1'], // Allow list
[] // Block list
);
expect(isLocalhostAllowed).toBeFalse();
const isAllowedIPAllowed = securityManager.isIPAuthorized(
'192.168.1.1',
['192.168.1.1'], // Allow list
[] // Block list
);
expect(isAllowedIPAllowed).toBeTrue();
const isBlockedIPAllowed = securityManager.isIPAuthorized(
'10.0.0.1',
['0.0.0.0/0'], // Allow all
['10.0.0.1'] // But block this specific IP
);
expect(isBlockedIPAllowed).toBeFalse();
// Verify route configuration was preserved
expect(proxy.settings.routes[0].security?.ipAllowList).toContain('192.168.1.1');
expect(proxy.settings.routes[0].security?.ipBlockList).toContain('10.0.0.1');
});
export default tap.start();
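
For reference, the shape asserted above (security attached to the route itself) looks like this in a runnable configuration; a minimal sketch with placeholder ports and IPs:

import { SmartProxy } from '@push.rocks/smartproxy';

const proxy = new SmartProxy({
  routes: [{
    name: 'secured-route',
    match: { ports: 8990 }, // placeholder port
    action: {
      type: 'forward',
      targets: [{ host: '127.0.0.1', port: 8991 }],
    },
    // Per-route security block, as checked via proxy.settings.routes[0].security above.
    security: {
      ipAllowList: ['203.0.113.10'], // placeholder allow list
      ipBlockList: ['10.0.0.1'],     // placeholder block list
    },
  }],
});

await proxy.start();
// Connections from IPs outside the allow list are rejected (see the block-list test above).
await proxy.stop();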

View File

@@ -94,7 +94,7 @@ tap.test('setup port proxy test environment', async () => {
tap.test('should start port proxy', async () => {
await smartProxy.start();
// Check if the proxy is listening by verifying the ports are active
expect(smartProxy.getListeningPorts().length).toBeGreaterThan(0);
expect((await smartProxy.getListeningPorts()).length).toBeGreaterThan(0);
});
// Test basic TCP forwarding.
@@ -237,7 +237,7 @@ tap.test('should handle connection timeouts', async () => {
tap.test('should stop port proxy', async () => {
await smartProxy.stop();
// Verify that there are no listening ports after stopping
expect(smartProxy.getListeningPorts().length).toEqual(0);
expect((await smartProxy.getListeningPorts()).length).toEqual(0);
// Remove from tracking
const index = allProxies.indexOf(smartProxy);

test/test.throughput.ts (new file, 699 lines)
View File

@@ -0,0 +1,699 @@
import { expect, tap } from '@git.zone/tstest/tapbundle';
import { SmartProxy } from '../ts/index.js';
import type { IRouteConfig } from '../ts/index.js';
import * as net from 'net';
import * as http from 'http';
import * as tls from 'tls';
import * as https from 'https';
import * as fs from 'fs';
import * as path from 'path';
import { fileURLToPath } from 'url';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// ────────────────────────────────────────────────────────────────────────────
// Port assignments (unique to avoid conflicts with other tests)
// ────────────────────────────────────────────────────────────────────────────
const TCP_ECHO_PORT = 47500;
const HTTP_ECHO_PORT = 47501;
const TLS_ECHO_PORT = 47502;
const PROXY_TCP_PORT = 47510;
const PROXY_HTTP_PORT = 47511;
const PROXY_TLS_PASS_PORT = 47512;
const PROXY_TLS_TERM_PORT = 47513;
const PROXY_SOCKET_PORT = 47514;
const PROXY_MULTI_A_PORT = 47515;
const PROXY_MULTI_B_PORT = 47516;
const PROXY_TP_HTTP_PORT = 47517;
// ────────────────────────────────────────────────────────────────────────────
// Test certificates
// ────────────────────────────────────────────────────────────────────────────
const CERT_PEM = fs.readFileSync(path.join(__dirname, '..', 'assets', 'certs', 'cert.pem'), 'utf8');
const KEY_PEM = fs.readFileSync(path.join(__dirname, '..', 'assets', 'certs', 'key.pem'), 'utf8');
// ────────────────────────────────────────────────────────────────────────────
// Backend servers
// ────────────────────────────────────────────────────────────────────────────
let tcpEchoServer: net.Server;
let httpEchoServer: http.Server;
let tlsEchoServer: tls.Server;
// Helper: force-poll the metrics adapter
async function pollMetrics(proxy: SmartProxy): Promise<void> {
await (proxy as any).metricsAdapter.poll();
}
// ════════════════════════════════════════════════════════════════════════════
// Setup: backend servers
// ════════════════════════════════════════════════════════════════════════════
tap.test('setup - TCP echo server', async () => {
tcpEchoServer = net.createServer((socket) => {
socket.on('data', (data) => socket.write(data));
socket.on('error', () => {});
});
await new Promise<void>((resolve) => {
tcpEchoServer.listen(TCP_ECHO_PORT, () => {
console.log(`TCP echo server on port ${TCP_ECHO_PORT}`);
resolve();
});
});
});
tap.test('setup - HTTP echo server', async () => {
httpEchoServer = http.createServer((req, res) => {
let body = '';
req.on('data', (chunk) => (body += chunk));
req.on('end', () => {
res.writeHead(200, { 'Content-Type': 'text/plain' });
res.end(`echo:${body}`);
});
});
await new Promise<void>((resolve) => {
httpEchoServer.listen(HTTP_ECHO_PORT, () => {
console.log(`HTTP echo server on port ${HTTP_ECHO_PORT}`);
resolve();
});
});
});
tap.test('setup - TLS echo server', async () => {
tlsEchoServer = tls.createServer(
{ cert: CERT_PEM, key: KEY_PEM },
(socket) => {
socket.on('data', (data) => socket.write(data));
socket.on('error', () => {});
},
);
await new Promise<void>((resolve) => {
tlsEchoServer.listen(TLS_ECHO_PORT, () => {
console.log(`TLS echo server on port ${TLS_ECHO_PORT}`);
resolve();
});
});
});
// ════════════════════════════════════════════════════════════════════════════
// Group 1: TCP Forward (plain TCP passthrough — no domain, no TLS)
// ════════════════════════════════════════════════════════════════════════════
tap.test('TCP forward - real-time byte tracking', async (tools) => {
const proxy = new SmartProxy({
routes: [
{
id: 'tcp-forward',
name: 'tcp-forward',
match: { ports: PROXY_TCP_PORT },
action: {
type: 'forward',
targets: [{ host: 'localhost', port: TCP_ECHO_PORT }],
},
},
],
metrics: { enabled: true, sampleIntervalMs: 100, retentionSeconds: 60 },
});
await proxy.start();
// Connect and send data
const client = new net.Socket();
await new Promise<void>((resolve, reject) => {
client.connect(PROXY_TCP_PORT, 'localhost', () => resolve());
client.on('error', reject);
});
let received = 0;
client.on('data', (data) => (received += data.length));
// Send 10 KB in chunks over 1 second
const chunk = Buffer.alloc(1024, 'A');
for (let i = 0; i < 10; i++) {
client.write(chunk);
await tools.delayFor(100);
}
// Wait for echo data and sampling to accumulate
await tools.delayFor(500);
// === Key assertion: metrics visible WHILE the connection is still open ===
// Before this change, TCP bytes were only reported after connection close.
// Now bytes are reported per-chunk in real-time.
await pollMetrics(proxy);
const mDuring = proxy.getMetrics();
const bytesInDuring = mDuring.totals.bytesIn();
const bytesOutDuring = mDuring.totals.bytesOut();
console.log(`TCP forward (during) — bytesIn: ${bytesInDuring}, bytesOut: ${bytesOutDuring}`);
expect(bytesInDuring).toBeGreaterThan(0);
expect(bytesOutDuring).toBeGreaterThan(0);
// Check that throughput is non-zero during active TCP traffic
const tpDuring = mDuring.throughput.recent();
console.log(`TCP forward (during) — recent throughput: in=${tpDuring.in}, out=${tpDuring.out}`);
expect(tpDuring.in + tpDuring.out).toBeGreaterThan(0);
// Close connection
client.destroy();
await tools.delayFor(500);
// Final check
await pollMetrics(proxy);
const m = proxy.getMetrics();
const bytesIn = m.totals.bytesIn();
const bytesOut = m.totals.bytesOut();
console.log(`TCP forward (final) — bytesIn: ${bytesIn}, bytesOut: ${bytesOut}`);
expect(bytesIn).toBeGreaterThanOrEqual(bytesInDuring);
expect(bytesOut).toBeGreaterThanOrEqual(bytesOutDuring);
// Check per-route tracking
const byRoute = m.throughput.byRoute();
console.log('TCP forward — throughput byRoute:', Array.from(byRoute.entries()));
// ── v25.2.0: Per-IP tracking (TCP connections) ──
const byIP = m.connections.byIP();
console.log('TCP forward — connections byIP:', Array.from(byIP.entries()));
expect(byIP.size).toBeGreaterThan(0);
const topIPs = m.connections.topIPs(10);
console.log('TCP forward — topIPs:', topIPs);
expect(topIPs.length).toBeGreaterThan(0);
expect(topIPs[0].ip).toBeTruthy();
// ── v25.2.0: Throughput history ──
const history = m.throughput.history(10);
console.log('TCP forward — throughput history length:', history.length);
expect(history.length).toBeGreaterThan(0);
expect(history[0].timestamp).toBeGreaterThan(0);
await proxy.stop();
await tools.delayFor(200);
});
// ════════════════════════════════════════════════════════════════════════════
// Group 2: HTTP Forward (plain HTTP proxy)
// ════════════════════════════════════════════════════════════════════════════
tap.test('HTTP forward - byte totals tracking', async (tools) => {
const proxy = new SmartProxy({
routes: [
{
id: 'http-forward',
name: 'http-forward',
match: { ports: PROXY_HTTP_PORT },
action: {
type: 'forward',
targets: [{ host: 'localhost', port: HTTP_ECHO_PORT }],
},
},
],
metrics: { enabled: true, sampleIntervalMs: 100, retentionSeconds: 60 },
});
await proxy.start();
await tools.delayFor(300);
// Send 10 HTTP requests with 1 KB body each
for (let i = 0; i < 10; i++) {
const body = 'X'.repeat(1024);
await new Promise<void>((resolve, reject) => {
const req = http.request(
{
hostname: 'localhost',
port: PROXY_HTTP_PORT,
path: '/echo',
method: 'POST',
headers: { 'Content-Type': 'text/plain', 'Content-Length': String(body.length) },
},
(res) => {
let data = '';
res.on('data', (chunk) => (data += chunk));
res.on('end', () => resolve());
},
);
req.on('error', reject);
req.setTimeout(5000, () => {
req.destroy();
reject(new Error('HTTP request timeout'));
});
req.end(body);
});
}
// Wait for sampling + poll
await tools.delayFor(500);
await pollMetrics(proxy);
const m = proxy.getMetrics();
const bytesIn = m.totals.bytesIn();
const bytesOut = m.totals.bytesOut();
console.log(`HTTP forward — bytesIn: ${bytesIn}, bytesOut: ${bytesOut}`);
// Both directions should have bytes (CountingBody tracks request + response)
expect(bytesIn).toBeGreaterThan(0);
expect(bytesOut).toBeGreaterThan(0);
// ── v25.2.0: Per-IP tracking (HTTP connections) ──
const byIP = m.connections.byIP();
console.log('HTTP forward — connections byIP:', Array.from(byIP.entries()));
expect(byIP.size).toBeGreaterThan(0);
const topIPs = m.connections.topIPs(10);
console.log('HTTP forward — topIPs:', topIPs);
expect(topIPs.length).toBeGreaterThan(0);
expect(topIPs[0].ip).toBeTruthy();
// ── v25.2.0: HTTP request counting ──
const totalReqs = m.requests.total();
const rps = m.requests.perSecond();
console.log(`HTTP forward — requests total: ${totalReqs}, perSecond: ${rps}`);
expect(totalReqs).toBeGreaterThan(0);
await proxy.stop();
await tools.delayFor(200);
});
// ════════════════════════════════════════════════════════════════════════════
// Group 3: TLS Passthrough (SNI-based, Rust passes encrypted data through)
// ════════════════════════════════════════════════════════════════════════════
tap.test('TLS passthrough - byte totals tracking', async (tools) => {
const proxy = new SmartProxy({
routes: [
{
id: 'tls-passthrough',
name: 'tls-passthrough',
match: { ports: PROXY_TLS_PASS_PORT, domains: 'localhost' },
action: {
type: 'forward',
tls: { mode: 'passthrough' },
targets: [{ host: 'localhost', port: TLS_ECHO_PORT }],
},
},
],
metrics: { enabled: true, sampleIntervalMs: 100, retentionSeconds: 60 },
});
await proxy.start();
await tools.delayFor(300);
// Connect via TLS through the proxy (SNI: localhost)
const tlsClient = tls.connect(
{
host: 'localhost',
port: PROXY_TLS_PASS_PORT,
servername: 'localhost',
rejectUnauthorized: false,
},
);
await new Promise<void>((resolve, reject) => {
tlsClient.on('secureConnect', () => resolve());
tlsClient.on('error', reject);
});
// Send some data
const data = Buffer.alloc(2048, 'B');
tlsClient.write(data);
// Wait for echo
let received = 0;
tlsClient.on('data', (chunk) => (received += chunk.length));
await tools.delayFor(1000);
console.log(`TLS passthrough — received ${received} bytes back`);
expect(received).toBeGreaterThan(0);
tlsClient.destroy();
await tools.delayFor(500);
await pollMetrics(proxy);
const m = proxy.getMetrics();
const bytesIn = m.totals.bytesIn();
const bytesOut = m.totals.bytesOut();
console.log(`TLS passthrough — bytesIn: ${bytesIn}, bytesOut: ${bytesOut}`);
// TLS passthrough tracks encrypted bytes flowing through
expect(bytesIn).toBeGreaterThan(0);
expect(bytesOut).toBeGreaterThan(0);
await proxy.stop();
await tools.delayFor(200);
});
// ════════════════════════════════════════════════════════════════════════════
// Group 4: TLS Terminate + HTTP (Rust terminates TLS, forwards to HTTP backend)
// ════════════════════════════════════════════════════════════════════════════
tap.test('TLS terminate + HTTP forward - byte totals tracking', async (tools) => {
const proxy = new SmartProxy({
routes: [
{
id: 'tls-terminate',
name: 'tls-terminate',
match: { ports: PROXY_TLS_TERM_PORT, domains: 'localhost' },
action: {
type: 'forward',
tls: {
mode: 'terminate',
certificate: {
cert: CERT_PEM,
key: KEY_PEM,
},
},
targets: [{ host: 'localhost', port: HTTP_ECHO_PORT }],
},
},
],
metrics: { enabled: true, sampleIntervalMs: 100, retentionSeconds: 60 },
disableDefaultCert: true,
});
await proxy.start();
await tools.delayFor(300);
// Send HTTPS request through the proxy
const body = 'Z'.repeat(2048);
await new Promise<void>((resolve, reject) => {
const req = https.request(
{
hostname: 'localhost',
port: PROXY_TLS_TERM_PORT,
path: '/echo',
method: 'POST',
headers: { 'Content-Type': 'text/plain', 'Content-Length': String(body.length) },
rejectUnauthorized: false,
},
(res) => {
let data = '';
res.on('data', (chunk) => (data += chunk));
res.on('end', () => {
console.log(`TLS terminate — response: ${data.slice(0, 50)}...`);
resolve();
});
},
);
req.on('error', reject);
req.setTimeout(5000, () => {
req.destroy();
reject(new Error('HTTPS request timeout'));
});
req.end(body);
});
await tools.delayFor(500);
await pollMetrics(proxy);
const m = proxy.getMetrics();
const bytesIn = m.totals.bytesIn();
const bytesOut = m.totals.bytesOut();
console.log(`TLS terminate — bytesIn: ${bytesIn}, bytesOut: ${bytesOut}`);
// TLS terminate: request body (bytesIn) and response body (bytesOut) via CountingBody
expect(bytesIn).toBeGreaterThan(0);
expect(bytesOut).toBeGreaterThan(0);
await proxy.stop();
await tools.delayFor(200);
});
// ════════════════════════════════════════════════════════════════════════════
// Group 5: Socket Handler (JS callback handling)
// ════════════════════════════════════════════════════════════════════════════
tap.test('Socket handler - byte totals tracking', async (tools) => {
const proxy = new SmartProxy({
routes: [
{
id: 'socket-handler',
name: 'socket-handler',
match: { ports: PROXY_SOCKET_PORT },
action: {
type: 'socket-handler',
socketHandler: (socket, _context) => {
socket.on('data', (data) => socket.write(data)); // echo
socket.on('error', () => {});
},
},
},
],
metrics: { enabled: true, sampleIntervalMs: 100, retentionSeconds: 60 },
});
await proxy.start();
await tools.delayFor(300);
// Connect and send data
const client = new net.Socket();
await new Promise<void>((resolve, reject) => {
client.connect(PROXY_SOCKET_PORT, 'localhost', () => resolve());
client.on('error', reject);
});
const data = Buffer.alloc(4096, 'C');
client.write(data);
let received = 0;
client.on('data', (chunk) => (received += chunk.length));
await tools.delayFor(500);
console.log(`Socket handler — received ${received} bytes back`);
client.destroy();
await tools.delayFor(500);
await pollMetrics(proxy);
const m = proxy.getMetrics();
const bytesIn = m.totals.bytesIn();
const bytesOut = m.totals.bytesOut();
console.log(`Socket handler — bytesIn: ${bytesIn}, bytesOut: ${bytesOut}`);
// Socket handler relay now records bytes after copy_bidirectional completes
expect(bytesIn).toBeGreaterThan(0);
expect(bytesOut).toBeGreaterThan(0);
await proxy.stop();
await tools.delayFor(200);
});
// ════════════════════════════════════════════════════════════════════════════
// Group 6: Multi-route throughput isolation
// ════════════════════════════════════════════════════════════════════════════
tap.test('Multi-route throughput isolation', async (tools) => {
const proxy = new SmartProxy({
routes: [
{
id: 'route-alpha',
name: 'route-alpha',
match: { ports: PROXY_MULTI_A_PORT },
action: {
type: 'forward',
targets: [{ host: 'localhost', port: TCP_ECHO_PORT }],
},
},
{
id: 'route-beta',
name: 'route-beta',
match: { ports: PROXY_MULTI_B_PORT },
action: {
type: 'forward',
targets: [{ host: 'localhost', port: TCP_ECHO_PORT }],
},
},
],
metrics: { enabled: true, sampleIntervalMs: 100, retentionSeconds: 60 },
});
await proxy.start();
await tools.delayFor(300);
// Send different amounts to each route
// Route alpha: 8 KB
const clientA = new net.Socket();
await new Promise<void>((resolve, reject) => {
clientA.connect(PROXY_MULTI_A_PORT, 'localhost', () => resolve());
clientA.on('error', reject);
});
clientA.on('data', () => {}); // drain
for (let i = 0; i < 8; i++) {
clientA.write(Buffer.alloc(1024, 'A'));
await tools.delayFor(50);
}
// Route beta: 2 KB
const clientB = new net.Socket();
await new Promise<void>((resolve, reject) => {
clientB.connect(PROXY_MULTI_B_PORT, 'localhost', () => resolve());
clientB.on('error', reject);
});
clientB.on('data', () => {}); // drain
for (let i = 0; i < 2; i++) {
clientB.write(Buffer.alloc(1024, 'B'));
await tools.delayFor(50);
}
await tools.delayFor(500);
// Close both
clientA.destroy();
clientB.destroy();
await tools.delayFor(500);
await pollMetrics(proxy);
const m = proxy.getMetrics();
// Check per-route throughput exists for both
const byRoute = m.throughput.byRoute();
console.log('Multi-route — throughput byRoute:', Array.from(byRoute.entries()));
// Check per-route connection counts
const connByRoute = m.connections.byRoute();
console.log('Multi-route — connections byRoute:', Array.from(connByRoute.entries()));
// Both routes should have tracked data
const totalIn = m.totals.bytesIn();
const totalOut = m.totals.bytesOut();
console.log(`Multi-route — total bytesIn: ${totalIn}, bytesOut: ${totalOut}`);
expect(totalIn).toBeGreaterThan(0);
expect(totalOut).toBeGreaterThan(0);
await proxy.stop();
await tools.delayFor(200);
});
// ════════════════════════════════════════════════════════════════════════════
// Group 7: Throughput sampling over time (HTTP-based for real-time tracking)
//
// Uses HTTP proxy path where CountingBody reports bytes incrementally
// as each request/response body completes. This allows the sampling task
// to capture non-zero throughput during active traffic.
// ════════════════════════════════════════════════════════════════════════════
tap.test('Throughput sampling - values appear during active HTTP traffic', async (tools) => {
const proxy = new SmartProxy({
routes: [
{
id: 'sampling-test',
name: 'sampling-test',
match: { ports: PROXY_TP_HTTP_PORT },
action: {
type: 'forward',
targets: [{ host: 'localhost', port: HTTP_ECHO_PORT }],
},
},
],
metrics: { enabled: true, sampleIntervalMs: 100, retentionSeconds: 60 },
});
await proxy.start();
await tools.delayFor(300);
// Send HTTP requests continuously for ~2 seconds
let sending = true;
let requestCount = 0;
const sendLoop = (async () => {
while (sending) {
const body = 'D'.repeat(5120); // 5 KB per request
try {
await new Promise<void>((resolve, reject) => {
const req = http.request(
{
hostname: 'localhost',
port: PROXY_TP_HTTP_PORT,
path: '/echo',
method: 'POST',
headers: { 'Content-Type': 'text/plain', 'Content-Length': String(body.length) },
},
(res) => {
res.on('data', () => {});
res.on('end', () => resolve());
},
);
req.on('error', reject);
req.setTimeout(3000, () => {
req.destroy();
reject(new Error('timeout'));
});
req.end(body);
});
requestCount++;
} catch {
// Ignore errors during shutdown
break;
}
}
})();
// After 1.5 seconds of active traffic, check throughput
await tools.delayFor(1500);
await pollMetrics(proxy);
const m = proxy.getMetrics();
const tp = m.throughput.instant();
const totalIn = m.totals.bytesIn();
const totalOut = m.totals.bytesOut();
console.log(`Sampling test — after 1.5s of traffic: instant in=${tp.in}, out=${tp.out}`);
console.log(`Sampling test — totals: bytesIn=${totalIn}, bytesOut=${totalOut}, requests=${requestCount}`);
// Totals should definitely be non-zero after 1.5s of HTTP requests
expect(totalIn + totalOut).toBeGreaterThan(0);
// Throughput instant should be non-zero during active traffic.
// The sampling interval is 100ms, so we've had ~15 samples by now.
// Each sample captures bytes from completed HTTP request/response bodies.
// Note: this can occasionally be 0 if sample boundaries don't align, so we
// also check that at least the throughput was non-zero for *some* recent window.
const tpRecent = m.throughput.recent();
console.log(`Sampling test — recent throughput: in=${tpRecent.in}, out=${tpRecent.out}`);
expect(tpRecent.in + tpRecent.out).toBeGreaterThan(0);
// ── v25.2.0: Per-IP tracking ──
const byIP = m.connections.byIP();
console.log('Sampling test — connections byIP:', Array.from(byIP.entries()));
expect(byIP.size).toBeGreaterThan(0);
const topIPs = m.connections.topIPs(10);
console.log('Sampling test — topIPs:', topIPs);
expect(topIPs.length).toBeGreaterThan(0);
expect(topIPs[0].ip).toBeTruthy();
expect(topIPs[0].count).toBeGreaterThanOrEqual(0);
// ── v25.2.0: Throughput history ──
const history = m.throughput.history(10);
console.log(`Sampling test — throughput history: ${history.length} points`);
if (history.length > 0) {
console.log(' first:', history[0], 'last:', history[history.length - 1]);
}
expect(history.length).toBeGreaterThan(0);
expect(history[0].timestamp).toBeGreaterThan(0);
// ── v25.2.0: Per-IP throughput ──
const tpByIP = m.throughput.byIP();
console.log('Sampling test — throughput byIP:', Array.from(tpByIP.entries()));
// ── v25.2.0: HTTP request counting ──
const totalReqs = m.requests.total();
const rps = m.requests.perSecond();
const rpm = m.requests.perMinute();
console.log(`Sampling test — HTTP requests: total=${totalReqs}, perSecond=${rps}, perMinute=${rpm}`);
expect(totalReqs).toBeGreaterThan(0);
// Stop sending
sending = false;
await sendLoop;
// After traffic stops, wait for metrics to settle
await tools.delayFor(500);
await pollMetrics(proxy);
const mAfter = proxy.getMetrics();
const tpAfter = mAfter.throughput.instant();
console.log(`Sampling test — after traffic stops: instant in=${tpAfter.in}, out=${tpAfter.out}`);
await proxy.stop();
await tools.delayFor(200);
});
// ════════════════════════════════════════════════════════════════════════════
// Cleanup
// ════════════════════════════════════════════════════════════════════════════
tap.test('cleanup - close backend servers', async () => {
await new Promise<void>((resolve) => tcpEchoServer.close(() => resolve()));
await new Promise<void>((resolve) => httpEchoServer.close(() => resolve()));
await new Promise<void>((resolve) => tlsEchoServer.close(() => resolve()));
console.log('All backend servers closed');
});
export default tap.start();
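
The metrics surface exercised in this file can be consumed the same way outside of tests. A small monitoring sketch, using only the getMetrics() accessors that appear above (the interval and log format are arbitrary):

import { SmartProxy } from '@push.rocks/smartproxy';

function logMetrics(proxy: SmartProxy): void {
  const m = proxy.getMetrics();
  const tp = m.throughput.recent(); // throughput over the recent (~10s) window
  console.log(
    `active=${m.connections.active()} total=${m.connections.total()} ` +
    `requests=${m.requests.total()} ` +
    `bytesIn=${m.totals.bytesIn()} bytesOut=${m.totals.bytesOut()} ` +
    `tpIn=${tp.in} tpOut=${tp.out}`
  );
  // Per-route connection counts come back as a Map keyed by route name.
  for (const [route, count] of m.connections.byRoute()) {
    console.log(`  route ${route}: ${count} connection(s)`);
  }
  // Top client IPs by connection count.
  for (const { ip, count } of m.connections.topIPs(5)) {
    console.log(`  ip ${ip}: ${count} connection(s)`);
  }
}

// Example wiring (interval is arbitrary); unref so the timer never blocks shutdown.
// const timer = setInterval(() => logMetrics(proxy), 5_000);
// timer.unref();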

View File

@@ -3,6 +3,6 @@
*/
export const commitinfo = {
name: '@push.rocks/smartproxy',
version: '22.5.0',
version: '25.7.2',
description: 'A powerful proxy package with unified route-based configuration for high traffic management. Features include SSL/TLS support, flexible routing patterns, WebSocket handling, advanced security options, and automatic ACME certificate management.'
}

View File

@@ -85,7 +85,6 @@ export interface IAcmeOptions {
renewThresholdDays?: number; // Days before expiry to renew certificates
renewCheckIntervalHours?: number; // How often to check for renewals (in hours)
autoRenew?: boolean; // Whether to automatically renew certificates
certificateStore?: string; // Directory to store certificates
skipConfiguredCerts?: boolean; // Skip domains with existing certificates
domainForwards?: IDomainForwardConfig[]; // Domain-specific forwarding configs
}

View File

@@ -1,4 +1,4 @@
import * as net from 'net';
import * as net from 'node:net';
import { WrappedSocket } from './wrapped-socket.js';
/**

View File

@@ -1,7 +1,7 @@
import { LifecycleComponent } from './lifecycle-component.js';
import { BinaryHeap } from './binary-heap.js';
import { AsyncMutex } from './async-utils.js';
import { EventEmitter } from 'events';
import { EventEmitter } from 'node:events';
/**
* Interface for pooled connection

View File

@@ -3,7 +3,7 @@
* Provides standardized socket cleanup with proper listener and timer management
*/
import type { Socket } from 'net';
import type { Socket } from 'node:net';
export type SocketTracked = {
cleanup: () => void;

View File

@@ -2,16 +2,13 @@
* SmartProxy main module exports
*/
// NFTables proxy exports
export * from './proxies/nftables-proxy/index.js';
// Export SmartProxy elements
export { SmartProxy } from './proxies/smart-proxy/index.js';
export { SharedRouteManager as RouteManager } from './core/routing/route-manager.js';
// Export smart-proxy models
export type { ISmartProxyOptions, IConnectionRecord, IRouteConfig, IRouteMatch, IRouteAction, IRouteTls, IRouteContext } from './proxies/smart-proxy/models/index.js';
export type { TSmartProxyCertProvisionObject } from './proxies/smart-proxy/models/interfaces.js';
export type { TSmartProxyCertProvisionObject, ICertProvisionEventComms, ICertificateIssuedEvent, ICertificateFailedEvent } from './proxies/smart-proxy/models/interfaces.js';
export * from './proxies/smart-proxy/utils/index.js';
// Original: export * from './smartproxy/classes.pp.snihandler.js'

View File

@@ -1,15 +1,14 @@
// node native scope
import { EventEmitter } from 'events';
import * as fs from 'fs';
import * as http from 'http';
import * as https from 'https';
import * as net from 'net';
import * as path from 'path';
import * as tls from 'tls';
import * as url from 'url';
import * as http2 from 'http2';
import { EventEmitter } from 'node:events';
import * as fs from 'node:fs';
import * as http from 'node:http';
import * as net from 'node:net';
import * as path from 'node:path';
import * as tls from 'node:tls';
import * as url from 'node:url';
import * as http2 from 'node:http2';
export { EventEmitter, fs, http, https, net, path, tls, url, http2 };
export { EventEmitter, fs, http, net, path, tls, url, http2 };
// tsclass scope
import * as tsclass from '@tsclass/tsclass';
@@ -17,42 +16,19 @@ import * as tsclass from '@tsclass/tsclass';
export { tsclass };
// pushrocks scope
import * as lik from '@push.rocks/lik';
import * as smartdelay from '@push.rocks/smartdelay';
import * as smartpromise from '@push.rocks/smartpromise';
import * as smartrequest from '@push.rocks/smartrequest';
import * as smartstring from '@push.rocks/smartstring';
import * as smartfile from '@push.rocks/smartfile';
import * as smartcrypto from '@push.rocks/smartcrypto';
import * as smartacme from '@push.rocks/smartacme';
import * as smartacmePlugins from '@push.rocks/smartacme/dist_ts/smartacme.plugins.js';
import * as smartacmeHandlers from '@push.rocks/smartacme/dist_ts/handlers/index.js';
import * as smartlog from '@push.rocks/smartlog';
import * as smartlogDestinationLocal from '@push.rocks/smartlog/destination-local';
import * as taskbuffer from '@push.rocks/taskbuffer';
import * as smartrx from '@push.rocks/smartrx';
import * as smartrust from '@push.rocks/smartrust';
export {
lik,
smartdelay,
smartrequest,
smartpromise,
smartstring,
smartfile,
smartcrypto,
smartacme,
smartacmePlugins,
smartacmeHandlers,
smartlog,
smartlogDestinationLocal,
taskbuffer,
smartrx,
smartrust,
};
// third party scope
import prettyMs from 'pretty-ms';
import * as ws from 'ws';
import wsDefault from 'ws';
import { minimatch } from 'minimatch';
export { prettyMs, ws, wsDefault, minimatch };
export { minimatch };

View File

@@ -5,7 +5,7 @@
* that may span multiple TCP packets.
*/
import { Buffer } from 'buffer';
import { Buffer } from 'node:buffer';
/**
* Fragment tracking information
@@ -49,6 +49,10 @@ export class FragmentHandler {
() => this.cleanup(),
options.cleanupInterval
);
// Don't let this timer prevent process exit
if (this.cleanupTimer.unref) {
this.cleanupTimer.unref();
}
}
}
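
The unref pattern added here keeps housekeeping timers from holding the process open. A standalone sketch of the same idea (the interval and callback body are illustrative):

// A periodic cleanup task that never prevents process exit on its own.
const cleanupTimer = setInterval(() => {
  // ... expire stale fragments, evict idle entries, etc. ...
}, 30_000);

// Guard the call the same way the constructor above does, in case the
// runtime's timer object does not expose unref.
if (cleanupTimer.unref) {
  cleanupTimer.unref();
}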

View File

@@ -1,4 +1,4 @@
import { Buffer } from 'buffer';
import { Buffer } from 'node:buffer';
import {
TlsRecordType,
TlsHandshakeType,

View File

@@ -1,4 +1,4 @@
import { Buffer } from 'buffer';
import { Buffer } from 'node:buffer';
import { TlsExtensionType, TlsUtils } from '../utils/tls-utils.js';
import {
ClientHelloParser,

View File

@@ -2,7 +2,7 @@
* WebSocket Protocol Utilities
*/
import * as crypto from 'crypto';
import * as crypto from 'node:crypto';
import { WEBSOCKET_MAGIC_STRING } from './constants.js';
import type { RawData } from './types.js';

View File

@@ -8,6 +8,3 @@ export { SharedRouteManager as SmartProxyRouteManager } from '../core/routing/ro
export * from './smart-proxy/utils/index.js';
// Export smart-proxy models except IAcmeOptions
export type { ISmartProxyOptions, IConnectionRecord, IRouteConfig, IRouteMatch, IRouteAction, IRouteTls, IRouteContext } from './smart-proxy/models/index.js';
// Export NFTables proxy (no conflicts)
export * from './nftables-proxy/index.js';

View File

@@ -1,6 +0,0 @@
/**
* NfTablesProxy implementation
*/
export * from './nftables-proxy.js';
export * from './models/index.js';
export * from './utils/index.js';

View File

@@ -1,30 +0,0 @@
/**
* Custom error classes for better error handling
*/
export class NftBaseError extends Error {
constructor(message: string) {
super(message);
this.name = 'NftBaseError';
}
}
export class NftValidationError extends NftBaseError {
constructor(message: string) {
super(message);
this.name = 'NftValidationError';
}
}
export class NftExecutionError extends NftBaseError {
constructor(message: string) {
super(message);
this.name = 'NftExecutionError';
}
}
export class NftResourceError extends NftBaseError {
constructor(message: string) {
super(message);
this.name = 'NftResourceError';
}
}

View File

@@ -1,5 +0,0 @@
/**
* Export all models
*/
export * from './interfaces.js';
export * from './errors.js';

View File

@@ -1,94 +0,0 @@
/**
* Interfaces for NfTablesProxy
*/
/**
* Represents a port range for forwarding
*/
export interface PortRange {
from: number;
to: number;
}
// Legacy interface name for backward compatibility
export type IPortRange = PortRange;
/**
* Settings for NfTablesProxy.
*/
export interface NfTableProxyOptions {
// Basic settings
fromPort: number | PortRange | Array<number | PortRange>; // Support single port, port range, or multiple ports/ranges
toPort: number | PortRange | Array<number | PortRange>;
toHost?: string; // Target host for proxying; defaults to 'localhost'
// Advanced settings
preserveSourceIP?: boolean; // If true, the original source IP is preserved
deleteOnExit?: boolean; // If true, clean up rules before process exit
protocol?: 'tcp' | 'udp' | 'all'; // Protocol to forward, defaults to 'tcp'
enableLogging?: boolean; // Enable detailed logging
ipv6Support?: boolean; // Enable IPv6 support
logFormat?: 'plain' | 'json'; // Format for logs
// Source filtering
ipAllowList?: string[]; // If provided, only these IPs are allowed
ipBlockList?: string[]; // If provided, these IPs are blocked
useIPSets?: boolean; // Use nftables sets for efficient IP management
// Rule management
forceCleanSlate?: boolean; // Clear all NfTablesProxy rules before starting
tableName?: string; // Custom table name (defaults to 'portproxy')
// Connection management
maxRetries?: number; // Maximum number of retries for failed commands
retryDelayMs?: number; // Delay between retries in milliseconds
useAdvancedNAT?: boolean; // Use connection tracking for stateful NAT
// Quality of Service
qos?: {
enabled: boolean;
maxRate?: string; // e.g. "10mbps"
priority?: number; // 1 (highest) to 10 (lowest)
markConnections?: boolean; // Mark connections for easier management
};
// Integration with PortProxy/NetworkProxy
netProxyIntegration?: {
enabled: boolean;
redirectLocalhost?: boolean; // Redirect localhost traffic to NetworkProxy
sslTerminationPort?: number; // Port where NetworkProxy handles SSL termination
};
}
// Legacy interface name for backward compatibility
export type INfTableProxySettings = NfTableProxyOptions;
/**
* Interface for status reporting
*/
export interface NfTablesStatus {
active: boolean;
ruleCount: {
total: number;
added: number;
verified: number;
};
tablesConfigured: { family: string; tableName: string }[];
metrics: {
forwardedConnections?: number;
activeConnections?: number;
bytesForwarded?: {
sent: number;
received: number;
};
};
qosEnabled?: boolean;
ipSetsConfigured?: {
name: string;
elementCount: number;
type: string;
}[];
}
// Legacy interface name for backward compatibility
export type INfTablesStatus = NfTablesStatus;

File diff suppressed because it is too large

View File

@@ -1,38 +0,0 @@
/**
* NFTables Proxy Utilities
*
* This module exports utility functions and classes for NFTables operations.
*/
// Command execution
export { NftCommandExecutor } from './nft-command-executor.js';
export type { INftLoggerFn, INftExecutorOptions } from './nft-command-executor.js';
// Port specification normalization
export {
normalizePortSpec,
validatePorts,
formatPortRange,
portSpecToNftExpr,
rangesOverlap,
mergeOverlappingRanges,
countPorts,
isPortInSpec
} from './nft-port-spec-normalizer.js';
// Rule validation
export {
isValidIP,
isValidIPv4,
isValidIPv6,
isValidHostname,
isValidTableName,
isValidRate,
validateIPs,
validateHost,
validateTableName,
validateQosSettings,
validateSettings,
isIPForFamily,
filterIPsByFamily
} from './nft-rule-validator.js';

View File

@@ -1,162 +0,0 @@
/**
* NFTables Command Executor
*
* Handles command execution with retry logic, temp file management,
* and error handling for nftables operations.
*/
import { exec, execSync } from 'child_process';
import { promisify } from 'util';
import { delay } from '../../../core/utils/async-utils.js';
import { AsyncFileSystem } from '../../../core/utils/fs-utils.js';
import { NftExecutionError } from '../models/index.js';
const execAsync = promisify(exec);
export interface INftLoggerFn {
(level: 'info' | 'warn' | 'error' | 'debug', message: string, data?: Record<string, any>): void;
}
export interface INftExecutorOptions {
maxRetries?: number;
retryDelayMs?: number;
tempFilePath?: string;
}
/**
* NFTables command executor with retry logic and temp file support
*/
export class NftCommandExecutor {
private static readonly NFT_CMD = 'nft';
private maxRetries: number;
private retryDelayMs: number;
private tempFilePath: string;
constructor(
private log: INftLoggerFn,
options: INftExecutorOptions = {}
) {
this.maxRetries = options.maxRetries || 3;
this.retryDelayMs = options.retryDelayMs || 1000;
this.tempFilePath = options.tempFilePath || `/tmp/nft-rules-${Date.now()}.nft`;
}
/**
* Execute a command with retry capability
*/
async executeWithRetry(command: string, maxRetries?: number, retryDelayMs?: number): Promise<string> {
const retries = maxRetries ?? this.maxRetries;
const delayMs = retryDelayMs ?? this.retryDelayMs;
let lastError: Error | undefined;
for (let i = 0; i < retries; i++) {
try {
const { stdout } = await execAsync(command);
return stdout;
} catch (err) {
lastError = err as Error;
this.log('warn', `Command failed (attempt ${i+1}/${retries}): ${command}`, { error: lastError.message });
// Wait before retry, unless it's the last attempt
if (i < retries - 1) {
await delay(delayMs);
}
}
}
throw new NftExecutionError(`Failed after ${retries} attempts: ${lastError?.message || 'Unknown error'}`);
}
/**
* Execute system command synchronously (single attempt, no retry)
* Used only for exit handlers where the process is terminating anyway.
*/
executeSync(command: string): string {
try {
return execSync(command, { timeout: 5000 }).toString();
} catch (err) {
this.log('warn', `Sync command failed: ${command}`, { error: (err as Error).message });
throw err;
}
}
/**
* Execute nftables commands with a temporary file
*/
async executeWithTempFile(rulesetContent: string): Promise<void> {
await AsyncFileSystem.writeFile(this.tempFilePath, rulesetContent);
try {
await this.executeWithRetry(
`${NftCommandExecutor.NFT_CMD} -f ${this.tempFilePath}`,
this.maxRetries,
this.retryDelayMs
);
} finally {
// Always clean up the temp file
await AsyncFileSystem.remove(this.tempFilePath);
}
}
/**
* Check if nftables is available
*/
async checkAvailability(): Promise<boolean> {
try {
await this.executeWithRetry(`${NftCommandExecutor.NFT_CMD} --version`, this.maxRetries, this.retryDelayMs);
return true;
} catch (err) {
this.log('error', `nftables is not available: ${(err as Error).message}`);
return false;
}
}
/**
* Check if connection tracking modules are loaded
*/
async checkConntrackModules(): Promise<boolean> {
try {
await this.executeWithRetry('lsmod | grep nf_conntrack', this.maxRetries, this.retryDelayMs);
return true;
} catch (err) {
this.log('warn', 'Connection tracking modules might not be loaded, advanced NAT features may not work');
return false;
}
}
/**
* Run an nft command directly
*/
async nft(args: string): Promise<string> {
return this.executeWithRetry(`${NftCommandExecutor.NFT_CMD} ${args}`, this.maxRetries, this.retryDelayMs);
}
/**
* Run an nft command synchronously (for cleanup on exit)
*/
nftSync(args: string): string {
return this.executeSync(`${NftCommandExecutor.NFT_CMD} ${args}`);
}
/**
* Get the NFT command path
*/
static get nftCmd(): string {
return NftCommandExecutor.NFT_CMD;
}
/**
* Update the temp file path
*/
setTempFilePath(path: string): void {
this.tempFilePath = path;
}
/**
* Update retry settings
*/
setRetryOptions(maxRetries: number, retryDelayMs: number): void {
this.maxRetries = maxRetries;
this.retryDelayMs = retryDelayMs;
}
}

View File

@@ -1,125 +0,0 @@
/**
* NFTables Port Specification Normalizer
*
* Handles normalization and validation of port specifications
* for nftables rules.
*/
import type { PortRange } from '../models/index.js';
import { NftValidationError } from '../models/index.js';
/**
* Normalizes port specifications into an array of port ranges
*/
export function normalizePortSpec(portSpec: number | PortRange | Array<number | PortRange>): PortRange[] {
const result: PortRange[] = [];
if (Array.isArray(portSpec)) {
// If it's an array, process each element
for (const spec of portSpec) {
result.push(...normalizePortSpec(spec));
}
} else if (typeof portSpec === 'number') {
// Single port becomes a range with the same start and end
result.push({ from: portSpec, to: portSpec });
} else {
// Already a range
result.push(portSpec);
}
return result;
}
/**
* Validates port numbers or ranges
*/
export function validatePorts(port: number | PortRange | Array<number | PortRange>): void {
if (Array.isArray(port)) {
port.forEach(p => validatePorts(p));
return;
}
if (typeof port === 'number') {
if (port < 1 || port > 65535) {
throw new NftValidationError(`Invalid port number: ${port}`);
}
} else if (typeof port === 'object') {
if (port.from < 1 || port.from > 65535 || port.to < 1 || port.to > 65535 || port.from > port.to) {
throw new NftValidationError(`Invalid port range: ${port.from}-${port.to}`);
}
}
}
/**
* Format port range for nftables rule
*/
export function formatPortRange(range: PortRange): string {
if (range.from === range.to) {
return String(range.from);
}
return `${range.from}-${range.to}`;
}
/**
* Convert port spec to nftables expression
*/
export function portSpecToNftExpr(portSpec: number | PortRange | Array<number | PortRange>): string {
const ranges = normalizePortSpec(portSpec);
if (ranges.length === 1) {
return formatPortRange(ranges[0]);
}
// Multiple ports/ranges need to use a set
const ports = ranges.map(formatPortRange);
return `{ ${ports.join(', ')} }`;
}
/**
* Check if two port ranges overlap
*/
export function rangesOverlap(range1: PortRange, range2: PortRange): boolean {
return range1.from <= range2.to && range2.from <= range1.to;
}
/**
* Merge overlapping port ranges
*/
export function mergeOverlappingRanges(ranges: PortRange[]): PortRange[] {
if (ranges.length <= 1) return ranges;
// Sort by start port
const sorted = [...ranges].sort((a, b) => a.from - b.from);
const merged: PortRange[] = [sorted[0]];
for (let i = 1; i < sorted.length; i++) {
const current = sorted[i];
const lastMerged = merged[merged.length - 1];
if (current.from <= lastMerged.to + 1) {
// Ranges overlap or are adjacent, merge them
lastMerged.to = Math.max(lastMerged.to, current.to);
} else {
// No overlap, add as new range
merged.push(current);
}
}
return merged;
}
/**
* Calculate the total number of ports in a port specification
*/
export function countPorts(portSpec: number | PortRange | Array<number | PortRange>): number {
const ranges = normalizePortSpec(portSpec);
return ranges.reduce((total, range) => total + (range.to - range.from + 1), 0);
}
/**
* Check if a port is within the given specification
*/
export function isPortInSpec(port: number, portSpec: number | PortRange | Array<number | PortRange>): boolean {
const ranges = normalizePortSpec(portSpec);
return ranges.some(range => port >= range.from && port <= range.to);
}

View File

@@ -1,156 +0,0 @@
/**
* NFTables Rule Validator
*
* Handles validation of settings and inputs for nftables operations.
* Prevents command injection and ensures valid values.
*/
import type { PortRange, NfTableProxyOptions } from '../models/index.js';
import { NftValidationError } from '../models/index.js';
import { validatePorts } from './nft-port-spec-normalizer.js';
// IP address validation patterns
const IPV4_REGEX = /^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/([0-9]|[1-2][0-9]|3[0-2]))?$/;
const IPV6_REGEX = /^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))(\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8]))?$/;
const HOSTNAME_REGEX = /^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$/;
const TABLE_NAME_REGEX = /^[a-zA-Z0-9_]+$/;
const RATE_REGEX = /^[0-9]+[kKmMgG]?bps$/;
/**
* Validates an IP address (IPv4 or IPv6)
*/
export function isValidIP(ip: string): boolean {
return IPV4_REGEX.test(ip) || IPV6_REGEX.test(ip);
}
/**
* Validates an IPv4 address
*/
export function isValidIPv4(ip: string): boolean {
return IPV4_REGEX.test(ip);
}
/**
* Validates an IPv6 address
*/
export function isValidIPv6(ip: string): boolean {
return IPV6_REGEX.test(ip);
}
/**
* Validates a hostname
*/
export function isValidHostname(hostname: string): boolean {
return HOSTNAME_REGEX.test(hostname);
}
/**
* Validates a table name for nftables
*/
export function isValidTableName(tableName: string): boolean {
return TABLE_NAME_REGEX.test(tableName);
}
/**
* Validates a rate specification (e.g., "10mbps")
*/
export function isValidRate(rate: string): boolean {
return RATE_REGEX.test(rate);
}
/**
* Validates an array of IP addresses
*/
export function validateIPs(ips?: string[]): void {
if (!ips) return;
for (const ip of ips) {
if (!isValidIP(ip)) {
throw new NftValidationError(`Invalid IP address format: ${ip}`);
}
}
}
/**
* Validates a host (can be hostname or IP)
*/
export function validateHost(host?: string): void {
if (!host) return;
if (!isValidHostname(host) && !isValidIP(host)) {
throw new NftValidationError(`Invalid host format: ${host}`);
}
}
/**
* Validates a table name
*/
export function validateTableName(tableName?: string): void {
if (!tableName) return;
if (!isValidTableName(tableName)) {
throw new NftValidationError(
`Invalid table name: ${tableName}. Only alphanumeric characters and underscores are allowed.`
);
}
}
/**
* Validates QoS settings
*/
export function validateQosSettings(qos?: NfTableProxyOptions['qos']): void {
if (!qos?.enabled) return;
if (qos.maxRate && !isValidRate(qos.maxRate)) {
throw new NftValidationError(
`Invalid rate format: ${qos.maxRate}. Use format like "10mbps", "1gbps", etc.`
);
}
if (qos.priority !== undefined) {
if (qos.priority < 1 || qos.priority > 10 || !Number.isInteger(qos.priority)) {
throw new NftValidationError(
`Invalid priority: ${qos.priority}. Must be an integer between 1 and 10.`
);
}
}
}
/**
* Validates all NfTablesProxy settings
*/
export function validateSettings(settings: NfTableProxyOptions): void {
// Validate port numbers
validatePorts(settings.fromPort);
validatePorts(settings.toPort);
// Validate IP addresses
validateIPs(settings.ipAllowList);
validateIPs(settings.ipBlockList);
// Validate target host
validateHost(settings.toHost);
// Validate table name
validateTableName(settings.tableName);
// Validate QoS settings
validateQosSettings(settings.qos);
}
/**
* Check if an IP matches the given family (ip or ip6)
*/
export function isIPForFamily(ip: string, family: 'ip' | 'ip6'): boolean {
if (family === 'ip6') {
return ip.includes(':');
}
return ip.includes('.');
}
/**
* Filter IPs by family
*/
export function filterIPsByFamily(ips: string[], family: 'ip' | 'ip6'): string[] {
return ips.filter(ip => isIPForFamily(ip, family));
}

View File

@@ -2,6 +2,6 @@
* SmartProxy models
*/
// Export everything except IAcmeOptions from interfaces
export type { ISmartProxyOptions, IConnectionRecord, TSmartProxyCertProvisionObject } from './interfaces.js';
export type { ISmartProxyOptions, ISmartProxyCertStore, IConnectionRecord, TSmartProxyCertProvisionObject, ICertProvisionEventComms, ICertificateIssuedEvent, ICertificateFailedEvent } from './interfaces.js';
export * from './route-types.js';
export * from './metrics-types.js';

View File

@@ -10,11 +10,23 @@ export interface IAcmeOptions {
useProduction?: boolean; // Use Let's Encrypt production (default: false)
renewThresholdDays?: number; // Days before expiry to renew (default: 30)
autoRenew?: boolean; // Enable automatic renewal (default: true)
certificateStore?: string; // Directory to store certificates (default: './certs')
skipConfiguredCerts?: boolean;
renewCheckIntervalHours?: number; // How often to check for renewals (default: 24)
routeForwards?: any[];
}
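
A hedged example of these ACME options as they might appear in a SmartProxy config; the email address is a placeholder, and the `enabled`/`email` fields are assumed from their use elsewhere in this diff.
// Illustrative only — values shown are the documented defaults where one exists.
const acmeOptions: IAcmeOptions = {
  enabled: true,
  email: 'admin@example.com',   // placeholder
  useProduction: false,         // stay on staging until verified
  renewThresholdDays: 30,
  autoRenew: true,
  renewCheckIntervalHours: 24,
};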
/**
* Consumer-provided certificate storage.
* SmartProxy never writes certs to disk — the consumer owns all persistence.
*/
export interface ISmartProxyCertStore {
/** Load all stored certs on startup (called once before cert provisioning) */
loadAll: () => Promise<Array<{ domain: string; publicKey: string; privateKey: string; ca?: string }>>;
/** Save a cert after successful provisioning */
save: (domain: string, publicKey: string, privateKey: string, ca?: string) => Promise<void>;
/** Remove a cert (optional) */
remove?: (domain: string) => Promise<void>;
}
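
A minimal consumer-side sketch satisfying ISmartProxyCertStore, assuming a plain in-memory Map as the backing storage; a real consumer would persist to its own database or filesystem.
// Illustrative only — the Map is a stand-in for whatever persistence the consumer owns.
const memoryCerts = new Map<string, { publicKey: string; privateKey: string; ca?: string }>();

const certStore: ISmartProxyCertStore = {
  loadAll: async () => Array.from(memoryCerts, ([domain, cert]) => ({ domain, ...cert })),
  save: async (domain, publicKey, privateKey, ca) => {
    memoryCerts.set(domain, { publicKey, privateKey, ca });
  },
  remove: async (domain) => {
    memoryCerts.delete(domain);
  },
};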
import type { IRouteConfig } from './route-types.js';
/**
@@ -22,6 +34,38 @@ import type { IRouteConfig } from './route-types.js';
*/
export type TSmartProxyCertProvisionObject = plugins.tsclass.network.ICert | 'http01';
/**
* Communication channel passed as second argument to certProvisionFunction.
* Allows the callback to report metadata back to SmartProxy for event emission.
*/
export interface ICertProvisionEventComms {
/** Informational log */
log: (message: string) => void;
/** Warning (non-fatal) */
warn: (message: string) => void;
/** Error */
error: (message: string) => void;
/** Set the certificate expiry date (for the issued event) */
setExpiryDate: (date: Date) => void;
/** Set the source/method used for provisioning (e.g. 'smartacme-dns-01') */
setSource: (source: string) => void;
}
/** Payload for 'certificate-issued' and 'certificate-renewed' events */
export interface ICertificateIssuedEvent {
domain: string;
expiryDate?: string; // ISO 8601
source: string; // e.g. 'certProvisionFunction', 'smartacme-dns-01'
isRenewal?: boolean;
}
/** Payload for 'certificate-failed' event */
export interface ICertificateFailedEvent {
domain: string;
error: string;
source: string;
}
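
A hedged sketch of how the comms object and event payloads fit together; `fetchCertSomehow` and the `proxy` instance are placeholders for illustration, not part of this diff.
// Illustrative only — fetchCertSomehow stands in for the consumer's own CA or DNS-01 integration.
const certProvisionFunction = async (
  domain: string,
  comms: ICertProvisionEventComms,
): Promise<TSmartProxyCertProvisionObject> => {
  comms.log(`provisioning ${domain}`);
  const cert = await fetchCertSomehow(domain); // hypothetical helper
  if (!cert) {
    comms.warn('no custom cert available, deferring to HTTP-01');
    return 'http01';
  }
  comms.setSource('smartacme-dns-01');
  comms.setExpiryDate(new Date(cert.validUntil));
  return cert;
};

// The emitted events carry the payloads defined above:
proxy.on('certificate-issued', (evt: ICertificateIssuedEvent) => {
  console.log(`issued ${evt.domain} (expires ${evt.expiryDate}) via ${evt.source}`);
});
proxy.on('certificate-failed', (evt: ICertificateFailedEvent) => {
  console.error(`failed ${evt.domain}: ${evt.error}`);
});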
// Legacy options and type checking functions have been removed
/**
@@ -128,7 +172,7 @@ export interface ISmartProxyOptions {
* Optional certificate provider callback. Return 'http01' to use HTTP-01 challenges,
* or a static certificate object for immediate provisioning.
*/
certProvisionFunction?: (domain: string) => Promise<TSmartProxyCertProvisionObject>;
certProvisionFunction?: (domain: string, eventComms: ICertProvisionEventComms) => Promise<TSmartProxyCertProvisionObject>;
/**
* Whether to fallback to ACME if custom certificate provision fails.
@@ -136,6 +180,35 @@ export interface ISmartProxyOptions {
*/
certProvisionFallbackToAcme?: boolean;
/**
* Per-domain timeout in ms for certProvisionFunction calls.
* If a single domain's provisioning takes longer than this, it's aborted
* and a certificate-failed event is emitted.
* Default: 300000 (5 minutes)
*/
certProvisionTimeout?: number;
/**
* Maximum number of domains to provision certificates for concurrently.
* Prevents overwhelming ACME providers when many domains provision at once.
* Default: 4
*/
certProvisionConcurrency?: number;
/**
* Disable the default self-signed fallback certificate.
* When false (default), a self-signed cert is generated at startup and loaded
* as '*' so TLS handshakes never fail due to missing certs.
*/
disableDefaultCert?: boolean;
/**
* Consumer-provided cert storage. SmartProxy never writes certs to disk.
* On startup, loadAll() is called to pre-load persisted certs.
* After each successful cert provision, save() is called.
*/
certStore?: ISmartProxyCertStore;
/**
* Path to the RustProxy binary. If not set, the binary is located
* automatically via env var, platform package, local build, or PATH.

View File

@@ -1,6 +1,4 @@
import * as plugins from '../../../plugins.js';
// Certificate types removed - use local definition
import type { PortRange } from '../../../proxies/nftables-proxy/models/interfaces.js';
import type { IRouteContext } from '../../../core/models/route-context.js';
// Re-export IRouteContext for convenience
@@ -41,6 +39,7 @@ export interface IRouteMatch {
clientIp?: string[]; // Match specific client IPs
tlsVersion?: string[]; // Match specific TLS versions
headers?: Record<string, string | RegExp>; // Match specific HTTP headers
protocol?: 'http' | 'tcp'; // Match specific protocol (http includes h2 + websocket upgrades)
}
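
A hedged example of the new protocol matcher; the ports field and the forward-action shape follow SmartProxy's usual route conventions and are assumptions here, and the targets are placeholders.
// Illustrative only: split one TLS port by protocol. HTTP (including h2 and
// websocket upgrades) goes through the full HTTP proxy; everything else is raw TCP.
const protocolRoutes: IRouteConfig[] = [
  {
    name: 'web-http',
    match: { ports: 443, domains: 'example.com', protocol: 'http' },
    action: { type: 'forward', targets: [{ host: '127.0.0.1', port: 8080 }] },
  },
  {
    name: 'web-tcp',
    match: { ports: 443, domains: 'example.com', protocol: 'tcp' },
    action: { type: 'forward', targets: [{ host: '127.0.0.1', port: 9000 }] },
  },
];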

View File

@@ -1,112 +0,0 @@
import * as plugins from '../../plugins.js';
import { logger } from '../../core/utils/logger.js';
/**
* Locates the RustProxy binary using a priority-ordered search strategy:
* 1. SMARTPROXY_RUST_BINARY environment variable
* 2. Platform-specific optional npm package
* 3. Local development build at ./rust/target/release/rustproxy
* 4. System PATH
*/
export class RustBinaryLocator {
private cachedPath: string | null = null;
/**
* Find the RustProxy binary path.
* Returns null if no binary is available.
*/
public async findBinary(): Promise<string | null> {
if (this.cachedPath !== null) {
return this.cachedPath;
}
const path = await this.searchBinary();
this.cachedPath = path;
return path;
}
/**
* Clear the cached binary path (e.g., after a failed launch).
*/
public clearCache(): void {
this.cachedPath = null;
}
private async searchBinary(): Promise<string | null> {
// 1. Environment variable override
const envPath = process.env.SMARTPROXY_RUST_BINARY;
if (envPath) {
if (await this.isExecutable(envPath)) {
logger.log('info', `RustProxy binary found via SMARTPROXY_RUST_BINARY: ${envPath}`, { component: 'rust-locator' });
return envPath;
}
logger.log('warn', `SMARTPROXY_RUST_BINARY set but not executable: ${envPath}`, { component: 'rust-locator' });
}
// 2. Platform-specific optional npm package
const platformBinary = await this.findPlatformPackageBinary();
if (platformBinary) {
logger.log('info', `RustProxy binary found in platform package: ${platformBinary}`, { component: 'rust-locator' });
return platformBinary;
}
// 3. Local development build
const localPaths = [
plugins.path.resolve(process.cwd(), 'rust/target/release/rustproxy'),
plugins.path.resolve(process.cwd(), 'rust/target/debug/rustproxy'),
];
for (const localPath of localPaths) {
if (await this.isExecutable(localPath)) {
logger.log('info', `RustProxy binary found at local path: ${localPath}`, { component: 'rust-locator' });
return localPath;
}
}
// 4. System PATH
const systemPath = await this.findInPath('rustproxy');
if (systemPath) {
logger.log('info', `RustProxy binary found in system PATH: ${systemPath}`, { component: 'rust-locator' });
return systemPath;
}
logger.log('error', 'No RustProxy binary found. Set SMARTPROXY_RUST_BINARY, install the platform package, or build with: cd rust && cargo build --release', { component: 'rust-locator' });
return null;
}
private async findPlatformPackageBinary(): Promise<string | null> {
const platform = process.platform;
const arch = process.arch;
const packageName = `@push.rocks/smartproxy-${platform}-${arch}`;
try {
// Try to resolve the platform-specific package
const packagePath = require.resolve(`${packageName}/rustproxy`);
if (await this.isExecutable(packagePath)) {
return packagePath;
}
} catch {
// Package not installed - expected for development
}
return null;
}
private async isExecutable(filePath: string): Promise<boolean> {
try {
await plugins.fs.promises.access(filePath, plugins.fs.constants.X_OK);
return true;
} catch {
return false;
}
}
private async findInPath(binaryName: string): Promise<string | null> {
const pathDirs = (process.env.PATH || '').split(plugins.path.delimiter);
for (const dir of pathDirs) {
const fullPath = plugins.path.join(dir, binaryName);
if (await this.isExecutable(fullPath)) {
return fullPath;
}
}
return null;
}
}

View File

@@ -6,7 +6,11 @@ import type { RustProxyBridge } from './rust-proxy-bridge.js';
*
* Polls the Rust binary periodically via the bridge and caches the result.
* All IMetrics getters read from the cache synchronously.
* Fields not yet in Rust (percentiles, per-IP, history) return zero/empty.
*
* Rust Metrics JSON fields (camelCase via serde):
* activeConnections, totalConnections, bytesIn, bytesOut,
* throughputInBytesPerSec, throughputOutBytesPerSec,
* routes: { [routeName]: { activeConnections, totalConnections, bytesIn, bytesOut, ... } }
*/
export class RustMetricsAdapter implements IMetrics {
private bridge: RustProxyBridge;
@@ -14,30 +18,28 @@ export class RustMetricsAdapter implements IMetrics {
private pollTimer: ReturnType<typeof setInterval> | null = null;
private pollIntervalMs: number;
// Cumulative totals tracked across polls
private cumulativeBytesIn = 0;
private cumulativeBytesOut = 0;
private cumulativeConnections = 0;
constructor(bridge: RustProxyBridge, pollIntervalMs = 1000) {
this.bridge = bridge;
this.pollIntervalMs = pollIntervalMs;
}
/**
* Poll Rust for metrics once. Can be awaited to ensure cache is fresh.
*/
public async poll(): Promise<void> {
try {
this.cache = await this.bridge.getMetrics();
} catch {
// Ignore poll errors (bridge may be shutting down)
}
}
public startPolling(): void {
if (this.pollTimer) return;
this.pollTimer = setInterval(async () => {
try {
this.cache = await this.bridge.getMetrics();
// Update cumulative totals
if (this.cache) {
this.cumulativeBytesIn = this.cache.totalBytesIn ?? this.cache.total_bytes_in ?? 0;
this.cumulativeBytesOut = this.cache.totalBytesOut ?? this.cache.total_bytes_out ?? 0;
this.cumulativeConnections = this.cache.totalConnections ?? this.cache.total_connections ?? 0;
}
} catch {
// Ignore poll errors (bridge may be shutting down)
}
// Immediate first poll so cache is populated ASAP
this.poll();
this.pollTimer = setInterval(() => {
this.poll();
}, this.pollIntervalMs);
if (this.pollTimer.unref) {
this.pollTimer.unref();
@@ -55,28 +57,53 @@ export class RustMetricsAdapter implements IMetrics {
public connections = {
active: (): number => {
return this.cache?.activeConnections ?? this.cache?.active_connections ?? 0;
return this.cache?.activeConnections ?? 0;
},
total: (): number => {
return this.cumulativeConnections;
return this.cache?.totalConnections ?? 0;
},
byRoute: (): Map<string, number> => {
return new Map();
const result = new Map<string, number>();
if (this.cache?.routes) {
for (const [name, rm] of Object.entries(this.cache.routes)) {
result.set(name, (rm as any).activeConnections ?? 0);
}
}
return result;
},
byIP: (): Map<string, number> => {
return new Map();
const result = new Map<string, number>();
if (this.cache?.ips) {
for (const [ip, im] of Object.entries(this.cache.ips)) {
result.set(ip, (im as any).activeConnections ?? 0);
}
}
return result;
},
topIPs: (_limit?: number): Array<{ ip: string; count: number }> => {
return [];
topIPs: (limit: number = 10): Array<{ ip: string; count: number }> => {
const result: Array<{ ip: string; count: number }> = [];
if (this.cache?.ips) {
for (const [ip, im] of Object.entries(this.cache.ips)) {
result.push({ ip, count: (im as any).activeConnections ?? 0 });
}
}
result.sort((a, b) => b.count - a.count);
return result.slice(0, limit);
},
};
public throughput = {
instant: (): IThroughputData => {
return { in: this.cache?.bytesInPerSecond ?? 0, out: this.cache?.bytesOutPerSecond ?? 0 };
return {
in: this.cache?.throughputInBytesPerSec ?? 0,
out: this.cache?.throughputOutBytesPerSec ?? 0,
};
},
recent: (): IThroughputData => {
return this.throughput.instant();
return {
in: this.cache?.throughputRecentInBytesPerSec ?? 0,
out: this.cache?.throughputRecentOutBytesPerSec ?? 0,
};
},
average: (): IThroughputData => {
return this.throughput.instant();
@@ -84,38 +111,61 @@ export class RustMetricsAdapter implements IMetrics {
custom: (_seconds: number): IThroughputData => {
return this.throughput.instant();
},
history: (_seconds: number): Array<IThroughputHistoryPoint> => {
return [];
history: (seconds: number): Array<IThroughputHistoryPoint> => {
if (!this.cache?.throughputHistory) return [];
return this.cache.throughputHistory.slice(-seconds).map((p: any) => ({
timestamp: p.timestampMs,
in: p.bytesIn,
out: p.bytesOut,
}));
},
byRoute: (_windowSeconds?: number): Map<string, IThroughputData> => {
return new Map();
const result = new Map<string, IThroughputData>();
if (this.cache?.routes) {
for (const [name, rm] of Object.entries(this.cache.routes)) {
result.set(name, {
in: (rm as any).throughputInBytesPerSec ?? 0,
out: (rm as any).throughputOutBytesPerSec ?? 0,
});
}
}
return result;
},
byIP: (_windowSeconds?: number): Map<string, IThroughputData> => {
return new Map();
const result = new Map<string, IThroughputData>();
if (this.cache?.ips) {
for (const [ip, im] of Object.entries(this.cache.ips)) {
result.set(ip, {
in: (im as any).throughputInBytesPerSec ?? 0,
out: (im as any).throughputOutBytesPerSec ?? 0,
});
}
}
return result;
},
};
public requests = {
perSecond: (): number => {
return this.cache?.requestsPerSecond ?? 0;
return this.cache?.httpRequestsPerSec ?? 0;
},
perMinute: (): number => {
return (this.cache?.requestsPerSecond ?? 0) * 60;
return (this.cache?.httpRequestsPerSecRecent ?? 0) * 60;
},
total: (): number => {
return this.cache?.totalRequests ?? this.cache?.total_requests ?? 0;
return this.cache?.totalHttpRequests ?? this.cache?.totalConnections ?? 0;
},
};
public totals = {
bytesIn: (): number => {
return this.cumulativeBytesIn;
return this.cache?.bytesIn ?? 0;
},
bytesOut: (): number => {
return this.cumulativeBytesOut;
return this.cache?.bytesOut ?? 0;
},
connections: (): number => {
return this.cumulativeConnections;
return this.cache?.totalConnections ?? 0;
},
};
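
A brief consumer-side sketch of the adapter; `bridge` is assumed to be an already-spawned RustProxyBridge, mirroring how SmartProxy wires it up in this diff.
// Illustrative only — read the cached metrics once polling has started.
const metrics = new RustMetricsAdapter(bridge, 1000);
metrics.startPolling();

setInterval(() => {
  console.log('active connections:', metrics.connections.active());
  console.log('throughput (bytes/sec):', metrics.throughput.instant());
  console.log('top IPs:', metrics.connections.topIPs(5));
  console.log('totals in/out:', metrics.totals.bytesIn(), metrics.totals.bytesOut());
}, 5000);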

View File

@@ -1,278 +1,180 @@
import * as plugins from '../../plugins.js';
import { logger } from '../../core/utils/logger.js';
import { RustBinaryLocator } from './rust-binary-locator.js';
import type { IRouteConfig } from './models/route-types.js';
import { ChildProcess, spawn } from 'child_process';
import { createInterface, Interface as ReadlineInterface } from 'readline';
/**
* Management request sent to the Rust binary via stdin.
* Type-safe command definitions for the Rust proxy IPC protocol.
*/
interface IManagementRequest {
id: string;
method: string;
params: Record<string, any>;
type TSmartProxyCommands = {
start: { params: { config: any }; result: void };
stop: { params: Record<string, never>; result: void };
updateRoutes: { params: { routes: IRouteConfig[] }; result: void };
getMetrics: { params: Record<string, never>; result: any };
getStatistics: { params: Record<string, never>; result: any };
provisionCertificate: { params: { routeName: string }; result: void };
renewCertificate: { params: { routeName: string }; result: void };
getCertificateStatus: { params: { routeName: string }; result: any };
getListeningPorts: { params: Record<string, never>; result: { ports: number[] } };
getNftablesStatus: { params: Record<string, never>; result: any };
setSocketHandlerRelay: { params: { socketPath: string }; result: void };
addListeningPort: { params: { port: number }; result: void };
removeListeningPort: { params: { port: number }; result: void };
loadCertificate: { params: { domain: string; cert: string; key: string; ca?: string }; result: void };
};
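
A hedged illustration of what the command map buys: with TSmartProxyCommands as the type parameter, sendCommand is checked against the params and result types declared above. The calls mirror the convenience wrappers further down in this file; `pem` and `keyPem` are placeholders.
// Illustrative only.
await bridge.sendCommand('loadCertificate', { domain: 'example.com', cert: pem, key: keyPem });
const { ports } = await bridge.sendCommand('getListeningPorts', {} as Record<string, never>);
// await bridge.sendCommand('updateRoutes', { routes: 123 }); // rejected at compile time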
/**
* Get the package root directory using import.meta.url.
* This file is at ts/proxies/smart-proxy/, so package root is 3 levels up.
*/
function getPackageRoot(): string {
const thisDir = plugins.path.dirname(plugins.url.fileURLToPath(import.meta.url));
return plugins.path.resolve(thisDir, '..', '..', '..');
}
/**
* Management response received from the Rust binary via stdout.
* Map Node.js process.platform/process.arch to tsrust's friendly name suffix.
* tsrust names cross-compiled binaries as: rustproxy_linux_amd64, rustproxy_linux_arm64, etc.
*/
interface IManagementResponse {
id: string;
success: boolean;
result?: any;
error?: string;
function getTsrustPlatformSuffix(): string | null {
const archMap: Record<string, string> = { x64: 'amd64', arm64: 'arm64' };
const osMap: Record<string, string> = { linux: 'linux', darwin: 'macos' };
const os = osMap[process.platform];
const arch = archMap[process.arch];
if (os && arch) {
return `${os}_${arch}`;
}
return null;
}
/**
* Management event received from the Rust binary (unsolicited).
* Build local search paths for the Rust binary, including dist_rust/ candidates
* (built by tsrust) and local development build paths.
*/
interface IManagementEvent {
event: string;
data: any;
function buildLocalPaths(): string[] {
const packageRoot = getPackageRoot();
const suffix = getTsrustPlatformSuffix();
const paths: string[] = [];
// dist_rust/ candidates (tsrust cross-compiled output)
if (suffix) {
paths.push(plugins.path.join(packageRoot, 'dist_rust', `rustproxy_${suffix}`));
}
paths.push(plugins.path.join(packageRoot, 'dist_rust', 'rustproxy'));
// Local dev build paths
paths.push(plugins.path.resolve(process.cwd(), 'rust', 'target', 'release', 'rustproxy'));
paths.push(plugins.path.resolve(process.cwd(), 'rust', 'target', 'debug', 'rustproxy'));
return paths;
}
/**
* Bridge between TypeScript SmartProxy and the Rust binary.
* Communicates via JSON-over-stdin/stdout IPC protocol.
* Wraps @push.rocks/smartrust's RustBridge with type-safe command definitions.
*/
export class RustProxyBridge extends plugins.EventEmitter {
private locator = new RustBinaryLocator();
private process: ChildProcess | null = null;
private readline: ReadlineInterface | null = null;
private pendingRequests = new Map<string, {
resolve: (value: any) => void;
reject: (error: Error) => void;
timer: NodeJS.Timeout;
}>();
private requestCounter = 0;
private isRunning = false;
private binaryPath: string | null = null;
private readonly requestTimeoutMs = 30000;
private bridge: plugins.smartrust.RustBridge<TSmartProxyCommands>;
constructor() {
super();
this.bridge = new plugins.smartrust.RustBridge<TSmartProxyCommands>({
binaryName: 'rustproxy',
envVarName: 'SMARTPROXY_RUST_BINARY',
platformPackagePrefix: '@push.rocks/smartproxy',
localPaths: buildLocalPaths(),
maxPayloadSize: 100 * 1024 * 1024, // 100 MB; route configs with many entries can be large
logger: {
log: (level: string, message: string, data?: Record<string, any>) => {
logger.log(level as any, message, data);
},
},
});
// Forward events from the inner bridge
this.bridge.on('exit', (code: number | null, signal: string | null) => {
this.emit('exit', code, signal);
});
}
/**
* Spawn the Rust binary in management mode.
* Returns true if the binary was found and spawned successfully.
*/
public async spawn(): Promise<boolean> {
this.binaryPath = await this.locator.findBinary();
if (!this.binaryPath) {
return false;
}
return new Promise<boolean>((resolve) => {
try {
this.process = spawn(this.binaryPath!, ['--management'], {
stdio: ['pipe', 'pipe', 'pipe'],
env: { ...process.env },
});
// Handle stderr (logging from Rust goes here)
this.process.stderr?.on('data', (data: Buffer) => {
const lines = data.toString().split('\n').filter(l => l.trim());
for (const line of lines) {
logger.log('debug', `[rustproxy] ${line}`, { component: 'rust-bridge' });
}
});
// Handle stdout (JSON IPC)
this.readline = createInterface({ input: this.process.stdout! });
this.readline.on('line', (line: string) => {
this.handleLine(line.trim());
});
// Handle process exit
this.process.on('exit', (code, signal) => {
logger.log('info', `RustProxy process exited (code=${code}, signal=${signal})`, { component: 'rust-bridge' });
this.cleanup();
this.emit('exit', code, signal);
});
this.process.on('error', (err) => {
logger.log('error', `RustProxy process error: ${err.message}`, { component: 'rust-bridge' });
this.cleanup();
resolve(false);
});
// Wait for the 'ready' event from Rust
const readyTimeout = setTimeout(() => {
logger.log('error', 'RustProxy did not send ready event within 10s', { component: 'rust-bridge' });
this.kill();
resolve(false);
}, 10000);
this.once('management:ready', () => {
clearTimeout(readyTimeout);
this.isRunning = true;
logger.log('info', 'RustProxy bridge connected', { component: 'rust-bridge' });
resolve(true);
});
} catch (err: any) {
logger.log('error', `Failed to spawn RustProxy: ${err.message}`, { component: 'rust-bridge' });
resolve(false);
}
});
return this.bridge.spawn();
}
/**
* Send a management command to the Rust process and wait for the response.
*/
public async sendCommand(method: string, params: Record<string, any> = {}): Promise<any> {
if (!this.process || !this.isRunning) {
throw new Error('RustProxy bridge is not running');
}
const id = `req_${++this.requestCounter}`;
const request: IManagementRequest = { id, method, params };
return new Promise<any>((resolve, reject) => {
const timer = setTimeout(() => {
this.pendingRequests.delete(id);
reject(new Error(`RustProxy command '${method}' timed out after ${this.requestTimeoutMs}ms`));
}, this.requestTimeoutMs);
this.pendingRequests.set(id, { resolve, reject, timer });
const json = JSON.stringify(request) + '\n';
this.process!.stdin!.write(json, (err) => {
if (err) {
clearTimeout(timer);
this.pendingRequests.delete(id);
reject(new Error(`Failed to write to RustProxy stdin: ${err.message}`));
}
});
});
}
// Convenience methods for each management command
public async startProxy(config: any): Promise<void> {
await this.sendCommand('start', { config });
}
public async stopProxy(): Promise<void> {
await this.sendCommand('stop');
}
public async updateRoutes(routes: IRouteConfig[]): Promise<void> {
await this.sendCommand('updateRoutes', { routes });
}
public async getMetrics(): Promise<any> {
return this.sendCommand('getMetrics');
}
public async getStatistics(): Promise<any> {
return this.sendCommand('getStatistics');
}
public async provisionCertificate(routeName: string): Promise<void> {
await this.sendCommand('provisionCertificate', { routeName });
}
public async renewCertificate(routeName: string): Promise<void> {
await this.sendCommand('renewCertificate', { routeName });
}
public async getCertificateStatus(routeName: string): Promise<any> {
return this.sendCommand('getCertificateStatus', { routeName });
}
public async getListeningPorts(): Promise<number[]> {
const result = await this.sendCommand('getListeningPorts');
return result?.ports ?? [];
}
public async getNftablesStatus(): Promise<any> {
return this.sendCommand('getNftablesStatus');
}
public async setSocketHandlerRelay(socketPath: string): Promise<void> {
await this.sendCommand('setSocketHandlerRelay', { socketPath });
}
public async addListeningPort(port: number): Promise<void> {
await this.sendCommand('addListeningPort', { port });
}
public async removeListeningPort(port: number): Promise<void> {
await this.sendCommand('removeListeningPort', { port });
}
public async loadCertificate(domain: string, cert: string, key: string, ca?: string): Promise<void> {
await this.sendCommand('loadCertificate', { domain, cert, key, ca });
}
/**
* Kill the Rust process.
* Kill the Rust process and clean up.
*/
public kill(): void {
if (this.process) {
this.process.kill('SIGTERM');
// Force kill after 5 seconds
setTimeout(() => {
if (this.process) {
this.process.kill('SIGKILL');
}
}, 5000).unref();
}
this.bridge.kill();
}
/**
* Whether the bridge is currently running.
*/
public get running(): boolean {
return this.isRunning;
return this.bridge.running;
}
private handleLine(line: string): void {
if (!line) return;
// --- Convenience methods for each management command ---
let parsed: any;
try {
parsed = JSON.parse(line);
} catch {
logger.log('warn', `Non-JSON output from RustProxy: ${line}`, { component: 'rust-bridge' });
return;
}
// Check if it's an event (has 'event' field)
if ('event' in parsed) {
const event = parsed as IManagementEvent;
this.emit(`management:${event.event}`, event.data);
return;
}
// Otherwise it's a response (has 'id' field)
if ('id' in parsed) {
const response = parsed as IManagementResponse;
const pending = this.pendingRequests.get(response.id);
if (pending) {
clearTimeout(pending.timer);
this.pendingRequests.delete(response.id);
if (response.success) {
pending.resolve(response.result);
} else {
pending.reject(new Error(response.error || 'Unknown error from RustProxy'));
}
}
}
public async startProxy(config: any): Promise<void> {
await this.bridge.sendCommand('start', { config });
}
private cleanup(): void {
this.isRunning = false;
this.process = null;
public async stopProxy(): Promise<void> {
await this.bridge.sendCommand('stop', {} as Record<string, never>);
}
if (this.readline) {
this.readline.close();
this.readline = null;
}
public async updateRoutes(routes: IRouteConfig[]): Promise<void> {
await this.bridge.sendCommand('updateRoutes', { routes });
}
// Reject all pending requests
for (const [id, pending] of this.pendingRequests) {
clearTimeout(pending.timer);
pending.reject(new Error('RustProxy process exited'));
}
this.pendingRequests.clear();
public async getMetrics(): Promise<any> {
return this.bridge.sendCommand('getMetrics', {} as Record<string, never>);
}
public async getStatistics(): Promise<any> {
return this.bridge.sendCommand('getStatistics', {} as Record<string, never>);
}
public async provisionCertificate(routeName: string): Promise<void> {
await this.bridge.sendCommand('provisionCertificate', { routeName });
}
public async renewCertificate(routeName: string): Promise<void> {
await this.bridge.sendCommand('renewCertificate', { routeName });
}
public async getCertificateStatus(routeName: string): Promise<any> {
return this.bridge.sendCommand('getCertificateStatus', { routeName });
}
public async getListeningPorts(): Promise<number[]> {
const result = await this.bridge.sendCommand('getListeningPorts', {} as Record<string, never>);
return result?.ports ?? [];
}
public async getNftablesStatus(): Promise<any> {
return this.bridge.sendCommand('getNftablesStatus', {} as Record<string, never>);
}
public async setSocketHandlerRelay(socketPath: string): Promise<void> {
await this.bridge.sendCommand('setSocketHandlerRelay', { socketPath });
}
public async addListeningPort(port: number): Promise<void> {
await this.bridge.sendCommand('addListeningPort', { port });
}
public async removeListeningPort(port: number): Promise<void> {
await this.bridge.sendCommand('removeListeningPort', { port });
}
public async loadCertificate(domain: string, cert: string, key: string, ca?: string): Promise<void> {
await this.bridge.sendCommand('loadCertificate', { domain, cert, key, ca });
}
}

View File

@@ -3,7 +3,6 @@ import { logger } from '../../core/utils/logger.js';
// Rust bridge and helpers
import { RustProxyBridge } from './rust-proxy-bridge.js';
import { RustBinaryLocator } from './rust-binary-locator.js';
import { RoutePreprocessor } from './route-preprocessor.js';
import { SocketHandlerServer } from './socket-handler-server.js';
import { RustMetricsAdapter } from './rust-metrics-adapter.js';
@@ -11,10 +10,12 @@ import { RustMetricsAdapter } from './rust-metrics-adapter.js';
// Route management
import { SharedRouteManager as RouteManager } from '../../core/routing/route-manager.js';
import { RouteValidator } from './utils/route-validator.js';
import { generateDefaultCertificate } from './utils/default-cert-generator.js';
import { Mutex } from './utils/mutex.js';
import { ConcurrencySemaphore } from './utils/concurrency-semaphore.js';
// Types
import type { ISmartProxyOptions, TSmartProxyCertProvisionObject } from './models/interfaces.js';
import type { ISmartProxyOptions, TSmartProxyCertProvisionObject, IAcmeOptions, ICertProvisionEventComms, ICertificateIssuedEvent, ICertificateFailedEvent } from './models/interfaces.js';
import type { IRouteConfig } from './models/route-types.js';
import type { IMetrics } from './models/metrics-types.js';
@@ -37,6 +38,8 @@ export class SmartProxy extends plugins.EventEmitter {
private socketHandlerServer: SocketHandlerServer | null = null;
private metricsAdapter: RustMetricsAdapter;
private routeUpdateLock: Mutex;
private stopping = false;
private certProvisionPromise: Promise<void> | null = null;
constructor(settingsArg: ISmartProxyOptions) {
super();
@@ -68,7 +71,6 @@ export class SmartProxy extends plugins.EventEmitter {
useProduction: this.settings.acme.useProduction || false,
renewThresholdDays: this.settings.acme.renewThresholdDays || 30,
autoRenew: this.settings.acme.autoRenew !== false,
certificateStore: this.settings.acme.certificateStore || './certs',
skipConfiguredCerts: this.settings.acme.skipConfiguredCerts || false,
renewCheckIntervalHours: this.settings.acme.renewCheckIntervalHours || 24,
routeForwards: this.settings.acme.routeForwards || [],
@@ -102,7 +104,10 @@ export class SmartProxy extends plugins.EventEmitter {
this.bridge = new RustProxyBridge();
this.preprocessor = new RoutePreprocessor();
this.metricsAdapter = new RustMetricsAdapter(this.bridge);
this.metricsAdapter = new RustMetricsAdapter(
this.bridge,
this.settings.metrics?.sampleIntervalMs ?? 1000
);
this.routeUpdateLock = new Mutex();
}
@@ -116,45 +121,90 @@ export class SmartProxy extends plugins.EventEmitter {
if (!spawned) {
throw new Error(
'RustProxy binary not found. Set SMARTPROXY_RUST_BINARY env var, install the platform package, ' +
'or build locally with: cd rust && cargo build --release'
'or build locally with: pnpm build'
);
}
// Handle unexpected exit
// Handle unexpected exit (only emits error if not intentionally stopping)
this.bridge.on('exit', (code: number | null, signal: string | null) => {
if (this.stopping) return;
logger.log('error', `RustProxy exited unexpectedly (code=${code}, signal=${signal})`, { component: 'smart-proxy' });
this.emit('error', new Error(`RustProxy exited (code=${code}, signal=${signal})`));
});
// Start socket handler relay if any routes need TS-side handling
// Check if any routes need TS-side handling (socket handlers, dynamic functions)
const hasHandlerRoutes = this.settings.routes.some(
(r) =>
(r.action.type === 'socket-handler' && r.action.socketHandler) ||
r.action.targets?.some((t) => typeof t.host === 'function' || typeof t.port === 'function')
);
// Start socket handler relay server (but don't tell Rust yet - proxy not started)
if (hasHandlerRoutes) {
this.socketHandlerServer = new SocketHandlerServer(this.preprocessor);
await this.socketHandlerServer.start();
await this.bridge.setSocketHandlerRelay(this.socketHandlerServer.getSocketPath());
}
// Preprocess routes (strip JS functions, convert socket-handler routes)
const rustRoutes = this.preprocessor.preprocessForRust(this.settings.routes);
// When certProvisionFunction handles cert provisioning,
// disable Rust's built-in ACME to prevent a race condition.
let acmeForRust = this.settings.acme;
if (this.settings.certProvisionFunction && acmeForRust?.enabled) {
acmeForRust = { ...acmeForRust, enabled: false };
logger.log('info', 'Rust ACME disabled — certProvisionFunction will handle certificate provisioning', { component: 'smart-proxy' });
}
// Build Rust config
const config = this.buildRustConfig(rustRoutes);
const config = this.buildRustConfig(rustRoutes, acmeForRust);
// Start the Rust proxy
await this.bridge.startProxy(config);
// Handle certProvisionFunction
await this.provisionCertificatesViaCallback();
// Now that Rust proxy is running, configure socket handler relay
if (this.socketHandlerServer) {
await this.bridge.setSocketHandlerRelay(this.socketHandlerServer.getSocketPath());
}
// Start metrics polling
// Load default self-signed fallback certificate (domain: '*')
if (!this.settings.disableDefaultCert) {
try {
const defaultCert = generateDefaultCertificate();
await this.bridge.loadCertificate('*', defaultCert.cert, defaultCert.key);
logger.log('info', 'Default self-signed fallback certificate loaded', { component: 'smart-proxy' });
} catch (err: any) {
logger.log('warn', `Failed to generate default certificate: ${err.message}`, { component: 'smart-proxy' });
}
}
// Load consumer-stored certificates
const preloadedDomains = new Set<string>();
if (this.settings.certStore) {
try {
const stored = await this.settings.certStore.loadAll();
for (const entry of stored) {
await this.bridge.loadCertificate(entry.domain, entry.publicKey, entry.privateKey, entry.ca);
preloadedDomains.add(entry.domain);
}
logger.log('info', `Loaded ${stored.length} certificate(s) from consumer store`, { component: 'smart-proxy' });
} catch (err: any) {
logger.log('warn', `Failed to load certificates from consumer store: ${err.message}`, { component: 'smart-proxy' });
}
}
// Start metrics polling BEFORE cert provisioning — the Rust engine is already
// running and accepting connections, so metrics should be available immediately.
// Cert provisioning can hang indefinitely (e.g. DNS-01 ACME timeouts) and must
// not block metrics collection.
this.metricsAdapter.startPolling();
logger.log('info', 'SmartProxy started (Rust engine)', { component: 'smart-proxy' });
// Fire-and-forget cert provisioning — Rust engine is already running and serving traffic.
// Events (certificate-issued / certificate-failed) fire independently per domain.
this.certProvisionPromise = this.provisionCertificatesViaCallback(preloadedDomains)
.catch((err) => logger.log('error', `Unexpected error in cert provisioning: ${err.message}`, { component: 'smart-proxy' }));
}
/**
@@ -162,10 +212,20 @@ export class SmartProxy extends plugins.EventEmitter {
*/
public async stop(): Promise<void> {
logger.log('info', 'SmartProxy shutting down...', { component: 'smart-proxy' });
this.stopping = true;
// Wait for in-flight cert provisioning to bail out (it checks this.stopping)
if (this.certProvisionPromise) {
await this.certProvisionPromise;
this.certProvisionPromise = null;
}
// Stop metrics polling
this.metricsAdapter.stopPolling();
// Remove exit listener before killing to avoid spurious error events
this.bridge.removeAllListeners('exit');
// Stop Rust proxy
try {
await this.bridge.stopProxy();
@@ -187,7 +247,7 @@ export class SmartProxy extends plugins.EventEmitter {
* Update routes atomically.
*/
public async updateRoutes(newRoutes: IRouteConfig[]): Promise<void> {
return this.routeUpdateLock.runExclusive(async () => {
await this.routeUpdateLock.runExclusive(async () => {
// Validate
const validation = RouteValidator.validateRoutes(newRoutes);
if (!validation.valid) {
@@ -223,11 +283,13 @@ export class SmartProxy extends plugins.EventEmitter {
// Update stored routes
this.settings.routes = newRoutes;
// Handle cert provisioning for new routes
await this.provisionCertificatesViaCallback();
logger.log('info', `Routes updated (${newRoutes.length} routes)`, { component: 'smart-proxy' });
});
// Fire-and-forget cert provisioning outside the mutex — routes are already updated,
// cert provisioning doesn't need the route update lock and may be slow.
this.certProvisionPromise = this.provisionCertificatesViaCallback()
.catch((err) => logger.log('error', `Unexpected error in cert provisioning after route update: ${err.message}`, { component: 'smart-proxy' }));
}
/**
@@ -283,6 +345,7 @@ export class SmartProxy extends plugins.EventEmitter {
* Get all currently listening ports (async - calls Rust).
*/
public async getListeningPorts(): Promise<number[]> {
if (!this.bridge.running) return [];
return this.bridge.getListeningPorts();
}
@@ -320,20 +383,20 @@ export class SmartProxy extends plugins.EventEmitter {
/**
* Build the Rust configuration object from TS settings.
*/
private buildRustConfig(routes: IRouteConfig[]): any {
private buildRustConfig(routes: IRouteConfig[], acmeOverride?: IAcmeOptions): any {
const acme = acmeOverride !== undefined ? acmeOverride : this.settings.acme;
return {
routes,
defaults: this.settings.defaults,
acme: this.settings.acme
acme: acme
? {
enabled: this.settings.acme.enabled,
email: this.settings.acme.email,
useProduction: this.settings.acme.useProduction,
port: this.settings.acme.port,
renewThresholdDays: this.settings.acme.renewThresholdDays,
autoRenew: this.settings.acme.autoRenew,
certificateStore: this.settings.acme.certificateStore,
renewCheckIntervalHours: this.settings.acme.renewCheckIntervalHours,
enabled: acme.enabled,
email: acme.email,
useProduction: acme.useProduction,
port: acme.port,
renewThresholdDays: acme.renewThresholdDays,
autoRenew: acme.autoRenew,
renewCheckIntervalHours: acme.renewCheckIntervalHours,
}
: undefined,
connectionTimeout: this.settings.connectionTimeout,
@@ -348,6 +411,7 @@ export class SmartProxy extends plugins.EventEmitter {
extendedKeepAliveLifetime: this.settings.extendedKeepAliveLifetime,
acceptProxyProtocol: this.settings.acceptProxyProtocol,
sendProxyProtocol: this.settings.sendProxyProtocol,
metrics: this.settings.metrics,
};
}
@@ -356,49 +420,192 @@ export class SmartProxy extends plugins.EventEmitter {
* If the callback returns a cert object, load it into Rust.
* If it returns 'http01', let Rust handle ACME.
*/
private async provisionCertificatesViaCallback(): Promise<void> {
private async provisionCertificatesViaCallback(skipDomains: Set<string> = new Set()): Promise<void> {
const provisionFn = this.settings.certProvisionFunction;
if (!provisionFn) return;
// Phase 1: Collect all unique (domain, route) pairs that need provisioning
const seen = new Set<string>(skipDomains);
const tasks: Array<{ domain: string; route: IRouteConfig }> = [];
for (const route of this.settings.routes) {
if (route.action.tls?.certificate !== 'auto') continue;
if (!route.match.domains) continue;
const domains = Array.isArray(route.match.domains) ? route.match.domains : [route.match.domains];
const rawDomains = Array.isArray(route.match.domains) ? route.match.domains : [route.match.domains];
const certDomains = this.normalizeDomainsForCertProvisioning(rawDomains);
for (const domain of domains) {
if (domain.includes('*')) continue;
for (const domain of certDomains) {
if (seen.has(domain)) continue;
seen.add(domain);
tasks.push({ domain, route });
}
}
if (tasks.length === 0) return;
// Phase 2: Process all domains in parallel with concurrency limit
const concurrency = this.settings.certProvisionConcurrency ?? 4;
const semaphore = new ConcurrencySemaphore(concurrency);
const promises = tasks.map(async ({ domain, route }) => {
await semaphore.acquire();
try {
await this.provisionSingleDomain(domain, route, provisionFn);
} finally {
semaphore.release();
}
});
await Promise.allSettled(promises);
}
/**
* Provision a single domain's certificate via the callback.
* Includes per-domain timeout and shutdown checks.
*/
private async provisionSingleDomain(
domain: string,
route: IRouteConfig,
provisionFn: (domain: string, eventComms: ICertProvisionEventComms) => Promise<TSmartProxyCertProvisionObject>,
): Promise<void> {
if (this.stopping) return;
let expiryDate: string | undefined;
let source = 'certProvisionFunction';
const eventComms: ICertProvisionEventComms = {
log: (msg) => logger.log('info', `[certProvision ${domain}] ${msg}`, { component: 'smart-proxy' }),
warn: (msg) => logger.log('warn', `[certProvision ${domain}] ${msg}`, { component: 'smart-proxy' }),
error: (msg) => logger.log('error', `[certProvision ${domain}] ${msg}`, { component: 'smart-proxy' }),
setExpiryDate: (date) => { expiryDate = date.toISOString(); },
setSource: (s) => { source = s; },
};
const timeoutMs = this.settings.certProvisionTimeout ?? 300_000; // 5 min default
try {
const result: TSmartProxyCertProvisionObject = await this.withTimeout(
provisionFn(domain, eventComms),
timeoutMs,
`Certificate provisioning timed out for ${domain} after ${timeoutMs}ms`,
);
if (this.stopping) return;
if (result === 'http01') {
if (route.name) {
try {
await this.bridge.provisionCertificate(route.name);
logger.log('info', `Triggered Rust ACME for ${domain} (route: ${route.name})`, { component: 'smart-proxy' });
} catch (provisionErr: any) {
logger.log('warn', `Cannot provision cert for ${domain} — callback returned 'http01' but Rust ACME failed: ${provisionErr.message}. ` +
'Note: Rust ACME is disabled when certProvisionFunction is set.', { component: 'smart-proxy' });
}
}
return;
}
if (result && typeof result === 'object') {
if (this.stopping) return;
const certObj = result as plugins.tsclass.network.ICert;
await this.bridge.loadCertificate(
domain,
certObj.publicKey,
certObj.privateKey,
);
logger.log('info', `Certificate loaded via provision function for ${domain}`, { component: 'smart-proxy' });
// Persist to consumer store
if (this.settings.certStore?.save) {
try {
await this.settings.certStore.save(domain, certObj.publicKey, certObj.privateKey);
} catch (storeErr: any) {
logger.log('warn', `certStore.save() failed for ${domain}: ${storeErr.message}`, { component: 'smart-proxy' });
}
}
this.emit('certificate-issued', {
domain,
expiryDate: expiryDate || (certObj.validUntil ? new Date(certObj.validUntil).toISOString() : undefined),
source,
} satisfies ICertificateIssuedEvent);
}
} catch (err: any) {
logger.log('warn', `certProvisionFunction failed for ${domain}: ${err.message}`, { component: 'smart-proxy' });
this.emit('certificate-failed', {
domain,
error: err.message,
source,
} satisfies ICertificateFailedEvent);
// Fallback to ACME if enabled and route has a name
if (this.settings.certProvisionFallbackToAcme !== false && route.name) {
try {
const result: TSmartProxyCertProvisionObject = await provisionFn(domain);
if (result === 'http01') {
// Rust handles ACME for this domain
continue;
}
// Got a static cert object - load it into Rust
if (result && typeof result === 'object') {
const certObj = result as plugins.tsclass.network.ICert;
await this.bridge.loadCertificate(
domain,
certObj.publicKey,
certObj.privateKey,
);
logger.log('info', `Certificate loaded via provision function for ${domain}`, { component: 'smart-proxy' });
}
} catch (err: any) {
logger.log('warn', `certProvisionFunction failed for ${domain}: ${err.message}`, { component: 'smart-proxy' });
// Fallback to ACME if enabled
if (this.settings.certProvisionFallbackToAcme !== false) {
logger.log('info', `Falling back to ACME for ${domain}`, { component: 'smart-proxy' });
}
await this.bridge.provisionCertificate(route.name);
logger.log('info', `Falling back to Rust ACME for ${domain} (route: ${route.name})`, { component: 'smart-proxy' });
} catch (acmeErr: any) {
logger.log('warn', `ACME fallback also failed for ${domain}: ${acmeErr.message}` +
(this.settings.disableDefaultCert
? ' — TLS will fail for this domain (disableDefaultCert is true)'
: ' — default self-signed fallback cert will be used'), { component: 'smart-proxy' });
}
}
}
}
/**
* Race a promise against a timeout. Rejects with the given message if the timeout fires first.
*/
private withTimeout<T>(promise: Promise<T>, ms: number, message: string): Promise<T> {
return new Promise<T>((resolve, reject) => {
const timer = setTimeout(() => reject(new Error(message)), ms);
promise.then(
(val) => { clearTimeout(timer); resolve(val); },
(err) => { clearTimeout(timer); reject(err); },
);
});
}
/**
* Normalize routing glob patterns into valid domain identifiers for cert provisioning.
* - `*nevermind.cloud` → `['nevermind.cloud', '*.nevermind.cloud']`
* - `*.lossless.digital` → `['*.lossless.digital']` (already valid wildcard)
* - `code.foss.global` → `['code.foss.global']` (plain domain)
* - `*mid*.example.com` → skipped with warning (unsupported glob)
*/
private normalizeDomainsForCertProvisioning(rawDomains: string[]): string[] {
const result: string[] = [];
for (const raw of rawDomains) {
// Plain domain — no glob characters
if (!raw.includes('*')) {
result.push(raw);
continue;
}
// Valid wildcard: *.example.com
if (raw.startsWith('*.') && !raw.slice(2).includes('*')) {
result.push(raw);
continue;
}
// Routing glob like *example.com (leading star, no dot after it)
// Convert to bare domain + wildcard pair
if (raw.startsWith('*') && !raw.startsWith('*.') && !raw.slice(1).includes('*')) {
const baseDomain = raw.slice(1); // Remove leading *
result.push(baseDomain);
result.push(`*.${baseDomain}`);
continue;
}
// Unsupported glob pattern (e.g. *mid*.example.com)
logger.log('warn', `Skipping unsupported glob pattern for cert provisioning: ${raw}`, { component: 'smart-proxy' });
}
return result;
}
private isValidDomain(domain: string): boolean {
if (!domain || domain.length === 0) return false;
if (domain.includes('*')) return false;

View File

@@ -15,6 +15,7 @@ export class SocketHandlerServer {
private server: plugins.net.Server | null = null;
private socketPath: string;
private preprocessor: RoutePreprocessor;
private activeSockets = new Set<plugins.net.Socket>();
constructor(preprocessor: RoutePreprocessor) {
this.preprocessor = preprocessor;
@@ -41,6 +42,8 @@ export class SocketHandlerServer {
return new Promise<void>((resolve, reject) => {
this.server = plugins.net.createServer((socket) => {
this.activeSockets.add(socket);
socket.on('close', () => this.activeSockets.delete(socket));
this.handleConnection(socket);
});
@@ -61,6 +64,12 @@ export class SocketHandlerServer {
* Stop the server and clean up.
*/
public async stop(): Promise<void> {
// Destroy all active connections first
for (const socket of this.activeSockets) {
socket.destroy();
}
this.activeSockets.clear();
if (this.server) {
return new Promise<void>((resolve) => {
this.server!.close(() => {
@@ -100,6 +109,7 @@ export class SocketHandlerServer {
metadataParsed = true;
socket.removeListener('data', onData);
socket.pause(); // Prevent data loss between handler removal and pipe setup
const metadataJson = metadataBuffer.slice(0, newlineIndex);
const remainingData = metadataBuffer.slice(newlineIndex + 1);
@@ -140,13 +150,6 @@ export class SocketHandlerServer {
return;
}
const handler = originalRoute.action.socketHandler;
if (!handler) {
logger.log('error', `Route ${routeKey} has no socketHandler`, { component: 'socket-handler-server' });
socket.destroy();
return;
}
// Build route context
const context: IRouteContext = {
port: metadata.localPort || 0,
@@ -167,12 +170,110 @@ export class SocketHandlerServer {
socket.unshift(Buffer.from(remainingData, 'utf8'));
}
// Call the handler
try {
handler(socket, context);
} catch (err: any) {
logger.log('error', `Socket handler threw for route ${routeKey}: ${err.message}`, { component: 'socket-handler-server' });
socket.destroy();
const handler = originalRoute.action.socketHandler;
if (handler) {
// Route has an explicit socket handler callback
try {
const result = handler(socket, context);
// If the handler is async, wait for it to finish setup before resuming.
// This prevents data loss when async handlers need to do work before
// attaching their `data` listeners.
if (result && typeof (result as any).then === 'function') {
(result as any).then(() => {
socket.resume();
}).catch((err: any) => {
logger.log('error', `Async socket handler rejected for route ${routeKey}: ${err.message}`, { component: 'socket-handler-server' });
socket.destroy();
});
} else {
// Synchronous handler — listeners are already attached, safe to resume.
socket.resume();
}
} catch (err: any) {
logger.log('error', `Socket handler threw for route ${routeKey}: ${err.message}`, { component: 'socket-handler-server' });
socket.destroy();
}
return;
}
// Route has dynamic host/port functions - resolve and forward
if (originalRoute.action.targets && originalRoute.action.targets.length > 0) {
this.forwardDynamicRoute(socket, originalRoute, context);
return;
}
logger.log('error', `Route ${routeKey} has no socketHandler and no targets`, { component: 'socket-handler-server' });
socket.destroy();
}
/**
* Forward a connection to a dynamically resolved target.
* Used for routes with function-based host/port that Rust cannot handle.
*/
private forwardDynamicRoute(socket: plugins.net.Socket, route: IRouteConfig, context: IRouteContext): void {
const targets = route.action.targets!;
// Pick a target (round-robin would be ideal, but simple random for now)
const target = targets[Math.floor(Math.random() * targets.length)];
// Resolve host
let host: string;
if (typeof target.host === 'function') {
try {
const result = target.host(context);
host = Array.isArray(result) ? result[Math.floor(Math.random() * result.length)] : result;
} catch (err: any) {
logger.log('error', `Dynamic host function failed: ${err.message}`, { component: 'socket-handler-server' });
socket.destroy();
return;
}
} else if (typeof target.host === 'string') {
host = target.host;
} else if (Array.isArray(target.host)) {
host = target.host[Math.floor(Math.random() * target.host.length)];
} else {
host = 'localhost';
}
// Resolve port
let port: number;
if (typeof target.port === 'function') {
try {
port = target.port(context);
} catch (err: any) {
logger.log('error', `Dynamic port function failed: ${err.message}`, { component: 'socket-handler-server' });
socket.destroy();
return;
}
} else if (typeof target.port === 'number') {
port = target.port;
} else {
port = context.port;
}
logger.log('debug', `Dynamic forward: ${context.clientIp} -> ${host}:${port}`, { component: 'socket-handler-server' });
// Connect to the resolved target
const backend = plugins.net.connect(port, host, () => {
// Pipe bidirectionally
socket.pipe(backend);
backend.pipe(socket);
});
backend.on('error', (err) => {
logger.log('error', `Dynamic forward backend error: ${err.message}`, { component: 'socket-handler-server' });
socket.destroy();
});
socket.on('error', () => {
backend.destroy();
});
socket.on('close', () => {
backend.destroy();
});
backend.on('close', () => {
socket.destroy();
});
}
}
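
Hedged examples of the two route shapes this relay serves: an explicit socketHandler callback and a function-based target that Rust hands back for dynamic resolution. Match/action fields beyond those referenced in this file follow SmartProxy's usual conventions and are assumptions.
// Illustrative only.
const echoRoute: IRouteConfig = {
  name: 'echo',
  match: { ports: 7070 },
  action: {
    type: 'socket-handler',
    // Async handlers finish their setup before the relay resumes the socket.
    socketHandler: async (socket, context) => {
      socket.write(`hello ${context.clientIp}\n`);
      socket.on('data', (chunk) => socket.write(chunk)); // echo back
    },
  },
};

const dynamicRoute: IRouteConfig = {
  name: 'per-port-backend',
  match: { ports: [8000, 8001] },
  action: {
    type: 'forward',
    // Function-based port — resolved by forwardDynamicRoute above.
    targets: [{ host: '127.0.0.1', port: (ctx) => ctx.port + 1000 }],
  },
};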

View File

@@ -0,0 +1,28 @@
/**
* Async concurrency semaphore — limits the number of concurrent async operations.
*/
export class ConcurrencySemaphore {
private running = 0;
private waitQueue: Array<() => void> = [];
constructor(private readonly maxConcurrency: number) {}
async acquire(): Promise<void> {
if (this.running < this.maxConcurrency) {
this.running++;
return;
}
return new Promise<void>((resolve) => {
this.waitQueue.push(() => {
this.running++;
resolve();
});
});
}
release(): void {
this.running--;
const next = this.waitQueue.shift();
if (next) next();
}
}
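
Usage mirrors the cert-provisioning loop earlier in this diff: acquire before the work, release in a finally block. `items` and `processItem` are placeholders.
// Illustrative only: run at most 4 tasks concurrently.
const semaphore = new ConcurrencySemaphore(4);

async function limited<T>(task: () => Promise<T>): Promise<T> {
  await semaphore.acquire();
  try {
    return await task();
  } finally {
    semaphore.release();
  }
}

await Promise.allSettled(items.map((item) => limited(() => processItem(item))));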

View File

@@ -0,0 +1,36 @@
import * as plugins from '../../../plugins.js';
/**
* Generate a self-signed fallback certificate (CN=SmartProxy Default Certificate, SAN=*).
* Used as the '*' wildcard fallback so TLS handshakes never reset due to missing certs.
*/
export function generateDefaultCertificate(): { cert: string; key: string } {
const forge = plugins.smartcrypto.nodeForge;
// Generate 2048-bit RSA keypair
const keypair = forge.pki.rsa.generateKeyPair({ bits: 2048 });
// Create self-signed X.509 certificate
const cert = forge.pki.createCertificate();
cert.publicKey = keypair.publicKey;
cert.serialNumber = '01';
cert.validity.notBefore = new Date();
cert.validity.notAfter = new Date();
cert.validity.notAfter.setFullYear(cert.validity.notBefore.getFullYear() + 1);
const attrs = [{ name: 'commonName', value: 'SmartProxy Default Certificate' }];
cert.setSubject(attrs);
cert.setIssuer(attrs);
// Add wildcard SAN
cert.setExtensions([
{ name: 'subjectAltName', altNames: [{ type: 2 /* DNS */, value: '*' }] },
]);
cert.sign(keypair.privateKey, forge.md.sha256.create());
return {
cert: forge.pki.certificateToPem(cert),
key: forge.pki.privateKeyToPem(keypair.privateKey),
};
}
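
As used in the start() sequence earlier in this diff, the result feeds straight into loadCertificate under the '*' wildcard domain:
// Illustrative only — mirrors the startup fallback-cert flow shown above.
const { cert, key } = generateDefaultCertificate();
await bridge.loadCertificate('*', cert, key);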

View File

@@ -14,6 +14,12 @@ export * from './route-validator.js';
// Export route utilities for route operations
export * from './route-utils.js';
// Export default certificate generator
export { generateDefaultCertificate } from './default-cert-generator.js';
// Export concurrency semaphore
export { ConcurrencySemaphore } from './concurrency-semaphore.js';
// Export additional functions from route-helpers that weren't already exported
export {
createApiGatewayRoute,

View File

@@ -1,4 +1,4 @@
import { Buffer } from 'buffer';
import { Buffer } from 'node:buffer';
import {
TlsRecordType,
TlsHandshakeType,