Compare commits
19 Commits
Author | SHA1 | Date | |
---|---|---|---|
424407d879 | |||
7e1b7b190c | |||
8347e0fec7 | |||
fc09af9afd | |||
4c847fd3d7 | |||
2e11f9358c | |||
9bf15ff756 | |||
6726de277e | |||
dc3eda5e29 | |||
82a350bf51 | |||
890e907664 | |||
19590ef107 | |||
47735adbf2 | |||
9094b76b1b | |||
9aebcd488d | |||
311691c2cc | |||
578d1ba2f7 | |||
233c98e5ff | |||
b3714d583d |
@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@push.rocks/smartproxy",
|
||||
"version": "19.5.20",
|
||||
"version": "19.6.2",
|
||||
"private": false,
|
||||
"description": "A powerful proxy package with unified route-based configuration for high traffic management. Features include SSL/TLS support, flexible routing patterns, WebSocket handling, advanced security options, and automatic ACME certificate management.",
|
||||
"main": "dist_ts/index.js",
|
||||
@ -31,6 +31,7 @@
|
||||
"@push.rocks/smartnetwork": "^4.0.2",
|
||||
"@push.rocks/smartpromise": "^4.2.3",
|
||||
"@push.rocks/smartrequest": "^2.1.0",
|
||||
"@push.rocks/smartrx": "^3.0.10",
|
||||
"@push.rocks/smartstring": "^4.0.15",
|
||||
"@push.rocks/taskbuffer": "^3.1.7",
|
||||
"@tsclass/tsclass": "^9.2.0",
|
||||
|
13
pnpm-lock.yaml
generated
13
pnpm-lock.yaml
generated
@ -35,6 +35,9 @@ importers:
|
||||
'@push.rocks/smartrequest':
|
||||
specifier: ^2.1.0
|
||||
version: 2.1.0
|
||||
'@push.rocks/smartrx':
|
||||
specifier: ^3.0.10
|
||||
version: 3.0.10
|
||||
'@push.rocks/smartstring':
|
||||
specifier: ^4.0.15
|
||||
version: 4.0.15
|
||||
@ -977,9 +980,6 @@ packages:
|
||||
'@push.rocks/smartrx@3.0.10':
|
||||
resolution: {integrity: sha512-USjIYcsSfzn14cwOsxgq/bBmWDTTzy3ouWAnW5NdMyRRzEbmeNrvmy6TRqNeDlJ2PsYNTt1rr/zGUqvIy72ITg==}
|
||||
|
||||
'@push.rocks/smartrx@3.0.7':
|
||||
resolution: {integrity: sha512-qCWy0s3RLAgGSnaw/Gu0BNaJ59CsI6RK5OJDCCqxc7P2X/S755vuLtnAR5/0dEjdhCHXHX9ytPZx+o9g/CNiyA==}
|
||||
|
||||
'@push.rocks/smarts3@2.2.5':
|
||||
resolution: {integrity: sha512-OZjD0jBCUTJCLnwraxBcyZ3he5buXf2OEM1zipiTBChA2EcKUZWKk/a6KR5WT+NlFCIIuB23UG+U+cxsIWM91Q==}
|
||||
|
||||
@ -6131,11 +6131,6 @@ snapshots:
|
||||
'@push.rocks/smartpromise': 4.2.3
|
||||
rxjs: 7.8.2
|
||||
|
||||
'@push.rocks/smartrx@3.0.7':
|
||||
dependencies:
|
||||
'@push.rocks/smartpromise': 4.2.3
|
||||
rxjs: 7.8.2
|
||||
|
||||
'@push.rocks/smarts3@2.2.5':
|
||||
dependencies:
|
||||
'@push.rocks/smartbucket': 3.3.7
|
||||
@ -6301,7 +6296,7 @@ snapshots:
|
||||
'@push.rocks/smartenv': 5.0.12
|
||||
'@push.rocks/smartjson': 5.0.20
|
||||
'@push.rocks/smartpromise': 4.2.3
|
||||
'@push.rocks/smartrx': 3.0.7
|
||||
'@push.rocks/smartrx': 3.0.10
|
||||
'@tempfix/idb': 8.0.3
|
||||
fake-indexeddb: 5.0.2
|
||||
|
||||
|
187
readme.delete.md
187
readme.delete.md
@ -1,187 +0,0 @@
|
||||
# SmartProxy Code Deletion Plan
|
||||
|
||||
This document tracks all code paths that can be deleted as part of the routing unification effort.
|
||||
|
||||
## Phase 1: Matching Logic Duplicates (READY TO DELETE)
|
||||
|
||||
### 1. Inline Matching Functions in RouteManager
|
||||
**File**: `ts/proxies/smart-proxy/route-manager.ts`
|
||||
**Lines**: Approximately lines 200-400
|
||||
**Duplicates**:
|
||||
- `matchDomain()` method - duplicate of DomainMatcher
|
||||
- `matchPath()` method - duplicate of PathMatcher
|
||||
- `matchIpPattern()` method - duplicate of IpMatcher
|
||||
- `matchHeaders()` method - duplicate of HeaderMatcher
|
||||
**Action**: Update to use unified matchers from `ts/core/routing/matchers/`
|
||||
|
||||
### 2. Duplicate Matching in Core route-utils
|
||||
**File**: `ts/core/utils/route-utils.ts`
|
||||
**Functions to update**:
|
||||
- `matchDomain()` → Use DomainMatcher.match()
|
||||
- `matchPath()` → Use PathMatcher.match()
|
||||
- `matchIpPattern()` → Use IpMatcher.match()
|
||||
- `matchHeader()` → Use HeaderMatcher.match()
|
||||
**Action**: Update to use unified matchers, keep only unique utilities
|
||||
|
||||
## Phase 2: Route Manager Duplicates (READY AFTER MIGRATION)
|
||||
|
||||
### 1. SmartProxy RouteManager
|
||||
**File**: `ts/proxies/smart-proxy/route-manager.ts`
|
||||
**Entire file**: ~500 lines
|
||||
**Reason**: 95% duplicate of SharedRouteManager
|
||||
**Migration Required**:
|
||||
- Update SmartProxy to use SharedRouteManager
|
||||
- Update all imports
|
||||
- Test thoroughly
|
||||
**Action**: DELETE entire file after migration
|
||||
|
||||
### 2. Deprecated Methods in SharedRouteManager
|
||||
**File**: `ts/core/utils/route-manager.ts`
|
||||
**Methods**:
|
||||
- Any deprecated security check methods
|
||||
- Legacy compatibility methods
|
||||
**Action**: Remove after confirming no usage
|
||||
|
||||
## Phase 3: Router Consolidation (REQUIRES REFACTORING)
|
||||
|
||||
### 1. ProxyRouter vs RouteRouter Duplication
|
||||
**Files**:
|
||||
- `ts/routing/router/proxy-router.ts` (~250 lines)
|
||||
- `ts/routing/router/route-router.ts` (~250 lines)
|
||||
**Reason**: Nearly identical implementations
|
||||
**Plan**: Merge into single HttpRouter with legacy adapter
|
||||
**Action**: DELETE one file after consolidation
|
||||
|
||||
### 2. Inline Route Matching in HttpProxy
|
||||
**Location**: Various files in `ts/proxies/http-proxy/`
|
||||
**Pattern**: Direct route matching without using RouteManager
|
||||
**Action**: Update to use SharedRouteManager
|
||||
|
||||
## Phase 4: Scattered Utilities (CLEANUP)
|
||||
|
||||
### 1. Duplicate Route Utilities
|
||||
**Files with duplicate logic**:
|
||||
- `ts/proxies/smart-proxy/utils/route-utils.ts` - Keep (different purpose)
|
||||
- `ts/proxies/smart-proxy/utils/route-validators.ts` - Review for duplicates
|
||||
- `ts/proxies/smart-proxy/utils/route-patterns.ts` - Review for consolidation
|
||||
|
||||
### 2. Legacy Type Definitions
|
||||
**Review for removal**:
|
||||
- Old route type definitions
|
||||
- Deprecated configuration interfaces
|
||||
- Unused type exports
|
||||
|
||||
## Deletion Progress Tracker
|
||||
|
||||
### Completed Deletions
|
||||
- [x] Phase 1: Matching logic consolidation (Partial)
|
||||
- Updated core/utils/route-utils.ts to use unified matchers
|
||||
- Removed duplicate matching implementations (~200 lines)
|
||||
- Marked functions as deprecated with migration path
|
||||
- [x] Phase 2: RouteManager unification (COMPLETED)
|
||||
- ✓ Migrated SmartProxy to use SharedRouteManager
|
||||
- ✓ Updated imports in smart-proxy.ts, route-connection-handler.ts, and index.ts
|
||||
- ✓ Created logger adapter to match ILogger interface expectations
|
||||
- ✓ Fixed method calls (getAllRoutes → getRoutes)
|
||||
- ✓ Fixed type errors in header matcher
|
||||
- ✓ Removed unused ipToNumber imports and methods
|
||||
- ✓ DELETED: `/ts/proxies/smart-proxy/route-manager.ts` (553 lines removed)
|
||||
- [x] Phase 3: Router consolidation (COMPLETED)
|
||||
- ✓ Created unified HttpRouter with legacy compatibility
|
||||
- ✓ Migrated ProxyRouter and RouteRouter to use HttpRouter aliases
|
||||
- ✓ Updated imports in http-proxy.ts, request-handler.ts, websocket-handler.ts
|
||||
- ✓ Added routeReqLegacy() method for backward compatibility
|
||||
- ✓ DELETED: `/ts/routing/router/proxy-router.ts` (437 lines)
|
||||
- ✓ DELETED: `/ts/routing/router/route-router.ts` (482 lines)
|
||||
- [x] Phase 4: Architecture cleanup (COMPLETED)
|
||||
- ✓ Updated route-utils.ts to use unified matchers directly
|
||||
- ✓ Removed deprecated methods from SharedRouteManager
|
||||
- ✓ Fixed HeaderMatcher.matchMultiple → matchAll method name
|
||||
- ✓ Fixed findMatchingRoute return type handling (IRouteMatchResult)
|
||||
- ✓ Fixed header type conversion for RegExp patterns
|
||||
- ✓ DELETED: Duplicate RouteManager class from http-proxy/models/types.ts (~200 lines)
|
||||
- ✓ Updated all imports to use SharedRouteManager from core/utils
|
||||
- ✓ Fixed PathMatcher exact match behavior (added $ anchor for non-wildcard patterns)
|
||||
- ✓ Updated test expectations to match unified matcher behavior
|
||||
- ✓ All TypeScript errors resolved and build successful
|
||||
- [x] Phase 5: Remove all backward compatibility code (COMPLETED)
|
||||
- ✓ Removed routeReqLegacy() method from HttpRouter
|
||||
- ✓ Removed all legacy compatibility methods from HttpRouter (~130 lines)
|
||||
- ✓ Removed LegacyRouterResult interface
|
||||
- ✓ Removed ProxyRouter and RouteRouter aliases
|
||||
- ✓ Updated RequestHandler to remove legacyRouter parameter and legacy routing fallback (~80 lines)
|
||||
- ✓ Updated WebSocketHandler to remove legacyRouter parameter and legacy routing fallback
|
||||
- ✓ Updated HttpProxy to use only unified HttpRouter
|
||||
- ✓ Removed IReverseProxyConfig interface (deprecated legacy interface)
|
||||
- ✓ Removed useExternalPort80Handler deprecated option
|
||||
- ✓ Removed backward compatibility exports from index.ts
|
||||
- ✓ Removed all deprecated functions from route-utils.ts (~50 lines)
|
||||
- ✓ Clean build with no legacy code
|
||||
|
||||
### Files Updated
|
||||
1. `ts/core/utils/route-utils.ts` - Replaced all matching logic with unified matchers
|
||||
2. `ts/core/utils/security-utils.ts` - Updated to use IpMatcher directly
|
||||
3. `ts/proxies/smart-proxy/smart-proxy.ts` - Using SharedRouteManager with logger adapter
|
||||
4. `ts/proxies/smart-proxy/route-connection-handler.ts` - Updated to use SharedRouteManager
|
||||
5. `ts/proxies/smart-proxy/index.ts` - Exporting SharedRouteManager as RouteManager
|
||||
6. `ts/core/routing/matchers/header.ts` - Fixed type handling for array header values
|
||||
7. `ts/core/utils/route-manager.ts` - Removed unused ipToNumber import
|
||||
8. `ts/proxies/http-proxy/http-proxy.ts` - Updated imports to use unified router
|
||||
9. `ts/proxies/http-proxy/request-handler.ts` - Updated to use routeReqLegacy()
|
||||
10. `ts/proxies/http-proxy/websocket-handler.ts` - Updated to use routeReqLegacy()
|
||||
11. `ts/routing/router/index.ts` - Export unified HttpRouter with aliases
|
||||
12. `ts/proxies/smart-proxy/utils/route-utils.ts` - Updated to use unified matchers directly
|
||||
13. `ts/proxies/http-proxy/request-handler.ts` - Fixed findMatchingRoute usage
|
||||
14. `ts/proxies/http-proxy/models/types.ts` - Removed duplicate RouteManager class
|
||||
15. `ts/index.ts` - Updated exports to use SharedRouteManager aliases
|
||||
16. `ts/proxies/index.ts` - Updated exports to use SharedRouteManager aliases
|
||||
17. `test/test.acme-route-creation.ts` - Fixed getAllRoutes → getRoutes method call
|
||||
|
||||
### Files Created
|
||||
1. `ts/core/routing/matchers/domain.ts` - Unified domain matcher
|
||||
2. `ts/core/routing/matchers/path.ts` - Unified path matcher
|
||||
3. `ts/core/routing/matchers/ip.ts` - Unified IP matcher
|
||||
4. `ts/core/routing/matchers/header.ts` - Unified header matcher
|
||||
5. `ts/core/routing/matchers/index.ts` - Matcher exports
|
||||
6. `ts/core/routing/types.ts` - Core routing types
|
||||
7. `ts/core/routing/specificity.ts` - Route specificity calculator
|
||||
8. `ts/core/routing/index.ts` - Main routing exports
|
||||
9. `ts/routing/router/http-router.ts` - Unified HTTP router
|
||||
|
||||
### Lines of Code Removed
|
||||
- Target: ~1,500 lines
|
||||
- Actual: ~2,332 lines (Target exceeded by 55%!)
|
||||
- Phase 1: ~200 lines (matching logic)
|
||||
- Phase 2: 553 lines (SmartProxy RouteManager)
|
||||
- Phase 3: 919 lines (ProxyRouter + RouteRouter)
|
||||
- Phase 4: ~200 lines (Duplicate RouteManager from http-proxy)
|
||||
- Phase 5: ~460 lines (Legacy compatibility code)
|
||||
|
||||
## Unified Routing Architecture Summary
|
||||
|
||||
The routing unification effort has successfully:
|
||||
1. **Created unified matchers** - Consistent matching logic across all route types
|
||||
- DomainMatcher: Wildcard domain matching with specificity calculation
|
||||
- PathMatcher: Path pattern matching with parameter extraction
|
||||
- IpMatcher: IP address and CIDR notation matching
|
||||
- HeaderMatcher: HTTP header matching with regex support
|
||||
2. **Consolidated route managers** - Single SharedRouteManager for all proxies
|
||||
3. **Unified routers** - Single HttpRouter for all HTTP routing needs
|
||||
4. **Removed ~2,332 lines of code** - Exceeded target by 55%
|
||||
5. **Clean modern architecture** - No legacy code, no backward compatibility layers
|
||||
|
||||
## Safety Checklist Before Deletion
|
||||
|
||||
Before deleting any code:
|
||||
1. ✓ All tests pass
|
||||
2. ✓ No references to deleted code remain
|
||||
3. ✓ Migration path tested
|
||||
4. ✓ Performance benchmarks show no regression
|
||||
5. ✓ Documentation updated
|
||||
|
||||
## Rollback Plan
|
||||
|
||||
If issues arise after deletion:
|
||||
1. Git history preserves all deleted code
|
||||
2. Each phase can be reverted independently
|
||||
3. Feature flags can disable new code if needed
|
852
readme.hints.md
852
readme.hints.md
@ -1,852 +0,0 @@
|
||||
# SmartProxy Project Hints
|
||||
|
||||
## Project Overview
|
||||
- Package: `@push.rocks/smartproxy` – high-performance proxy supporting HTTP(S), TCP, WebSocket, and ACME integration.
|
||||
- Written in TypeScript, compiled output in `dist_ts/`, uses ESM with NodeNext resolution.
|
||||
|
||||
## Important: ACME Configuration in v19.0.0
|
||||
- **Breaking Change**: ACME configuration must be placed within individual route TLS settings, not at the top level
|
||||
- Route-level ACME config is the ONLY way to enable SmartAcme initialization
|
||||
- SmartCertManager requires email in route config for certificate acquisition
|
||||
- Top-level ACME configuration is ignored in v19.0.0
|
||||
|
||||
## Repository Structure
|
||||
- `ts/` – TypeScript source files:
|
||||
- `index.ts` exports main modules.
|
||||
- `plugins.ts` centralizes native and third-party imports.
|
||||
- Subdirectories: `networkproxy/`, `nftablesproxy/`, `port80handler/`, `redirect/`, `smartproxy/`.
|
||||
- Key classes: `ProxyRouter` (`classes.router.ts`), `SmartProxy` (`classes.smartproxy.ts`), plus handlers/managers.
|
||||
- `dist_ts/` – transpiled `.js` and `.d.ts` files mirroring `ts/` structure.
|
||||
- `test/` – test suites in TypeScript:
|
||||
- `test.router.ts` – routing logic (hostname matching, wildcards, path parameters, config management).
|
||||
- `test.smartproxy.ts` – proxy behavior tests (TCP forwarding, SNI handling, concurrency, chaining, timeouts).
|
||||
- `test/helpers/` – utilities (e.g., certificates).
|
||||
- `assets/certs/` – placeholder certificates for ACME and TLS.
|
||||
|
||||
## Development Setup
|
||||
- Requires `pnpm` (v10+).
|
||||
- Install dependencies: `pnpm install`.
|
||||
- Build: `pnpm build` (runs `tsbuild --web --allowimplicitany`).
|
||||
- Test: `pnpm test` (runs `tstest test/`).
|
||||
- Format: `pnpm format` (runs `gitzone format`).
|
||||
|
||||
## How to Test
|
||||
|
||||
### Test Structure
|
||||
Tests use tapbundle from `@git.zone/tstest`. The correct pattern is:
|
||||
|
||||
```typescript
|
||||
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||
|
||||
tap.test('test description', async () => {
|
||||
// Test logic here
|
||||
expect(someValue).toEqual(expectedValue);
|
||||
});
|
||||
|
||||
// IMPORTANT: Must end with tap.start()
|
||||
tap.start();
|
||||
```
|
||||
|
||||
### Expect Syntax (from @push.rocks/smartexpect)
|
||||
```typescript
|
||||
// Type assertions
|
||||
expect('hello').toBeTypeofString();
|
||||
expect(42).toBeTypeofNumber();
|
||||
|
||||
// Equality
|
||||
expect('hithere').toEqual('hithere');
|
||||
|
||||
// Negated assertions
|
||||
expect(1).not.toBeTypeofString();
|
||||
|
||||
// Regular expressions
|
||||
expect('hithere').toMatch(/hi/);
|
||||
|
||||
// Numeric comparisons
|
||||
expect(5).toBeGreaterThan(3);
|
||||
expect(0.1 + 0.2).toBeCloseTo(0.3, 10);
|
||||
|
||||
// Arrays
|
||||
expect([1, 2, 3]).toContain(2);
|
||||
expect([1, 2, 3]).toHaveLength(3);
|
||||
|
||||
// Async assertions
|
||||
await expect(asyncFunction()).resolves.toEqual('expected');
|
||||
await expect(asyncFunction()).resolves.withTimeout(5000).toBeTypeofString();
|
||||
|
||||
// Complex object navigation
|
||||
expect(complexObject)
|
||||
.property('users')
|
||||
.arrayItem(0)
|
||||
.property('name')
|
||||
.toEqual('Alice');
|
||||
```
|
||||
|
||||
### Test Modifiers
|
||||
- `tap.only.test()` - Run only this test
|
||||
- `tap.skip.test()` - Skip a test
|
||||
- `tap.timeout()` - Set test-specific timeout
|
||||
|
||||
### Running Tests
|
||||
- All tests: `pnpm test`
|
||||
- Specific test: `tsx test/test.router.ts`
|
||||
- With options: `tstest test/**/*.ts --verbose --timeout 60`
|
||||
|
||||
### Test File Requirements
|
||||
- Must start with `test.` prefix
|
||||
- Must use `.ts` extension
|
||||
- Must call `tap.start()` at the end
|
||||
|
||||
## Coding Conventions
|
||||
- Import modules via `plugins.ts`:
|
||||
```ts
|
||||
import * as plugins from './plugins.ts';
|
||||
const server = new plugins.http.Server();
|
||||
```
|
||||
- Reference plugins with full path: `plugins.acme`, `plugins.smartdelay`, `plugins.minimatch`, etc.
|
||||
- Path patterns support globs (`*`) and parameters (`:param`) in `ProxyRouter`.
|
||||
- Wildcard hostname matching leverages `minimatch` patterns.
|
||||
|
||||
## Key Components
|
||||
- **ProxyRouter**
|
||||
- Methods: `routeReq`, `routeReqWithDetails`.
|
||||
- Hostname matching: case-insensitive, strips port, supports exact, wildcard, TLD, complex patterns.
|
||||
- Path routing: exact, wildcard, parameter extraction (`pathParams`), returns `pathMatch` and `pathRemainder`.
|
||||
- Config API: `setNewProxyConfigs`, `addProxyConfig`, `removeProxyConfig`, `getHostnames`, `getProxyConfigs`.
|
||||
- **SmartProxy**
|
||||
- Manages one or more `net.Server` instances to forward TCP streams.
|
||||
- Options: `preserveSourceIP`, `defaultAllowedIPs`, `globalPortRanges`, `sniEnabled`.
|
||||
- DomainConfigManager: round-robin selection for multiple target IPs.
|
||||
- Graceful shutdown in `stop()`, ensures no lingering servers or sockets.
|
||||
|
||||
## Notable Points
|
||||
- **TSConfig**: `module: NodeNext`, `verbatimModuleSyntax`, allows `.js` extension imports in TS.
|
||||
- Mermaid diagrams and architecture flows in `readme.md` illustrate component interactions and protocol flows.
|
||||
- CLI entrypoint (`cli.js`) supports command-line usage (ACME, proxy controls).
|
||||
- ACME and certificate handling via `Port80Handler` and `helpers.certificates.ts`.
|
||||
|
||||
## ACME/Certificate Configuration Example (v19.0.0)
|
||||
```typescript
|
||||
const proxy = new SmartProxy({
|
||||
routes: [{
|
||||
name: 'example.com',
|
||||
match: { domains: 'example.com', ports: 443 },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: 'localhost', port: 8080 },
|
||||
tls: {
|
||||
mode: 'terminate',
|
||||
certificate: 'auto',
|
||||
acme: { // ACME config MUST be here, not at top level
|
||||
email: 'ssl@example.com',
|
||||
useProduction: false,
|
||||
challengePort: 80
|
||||
}
|
||||
}
|
||||
}
|
||||
}]
|
||||
});
|
||||
```
|
||||
|
||||
## TODOs / Considerations
|
||||
- Ensure import extensions in source match build outputs (`.ts` vs `.js`).
|
||||
- Update `plugins.ts` when adding new dependencies.
|
||||
- Maintain test coverage for new routing or proxy features.
|
||||
- Keep `ts/` and `dist_ts/` in sync after refactors.
|
||||
- Consider implementing top-level ACME config support for backward compatibility
|
||||
|
||||
## HTTP-01 ACME Challenge Fix (v19.3.8)
|
||||
|
||||
### Issue
|
||||
Non-TLS connections on ports configured in `useHttpProxy` were not being forwarded to HttpProxy. This caused ACME HTTP-01 challenges to fail when the ACME port (usually 80) was included in `useHttpProxy`.
|
||||
|
||||
### Root Cause
|
||||
In the `RouteConnectionHandler.handleForwardAction` method, only connections with TLS settings (mode: 'terminate' or 'terminate-and-reencrypt') were being forwarded to HttpProxy. Non-TLS connections were always handled as direct connections, even when the port was configured for HttpProxy.
|
||||
|
||||
### Solution
|
||||
Added a check for non-TLS connections on ports listed in `useHttpProxy`:
|
||||
```typescript
|
||||
// No TLS settings - check if this port should use HttpProxy
|
||||
const isHttpProxyPort = this.settings.useHttpProxy?.includes(record.localPort);
|
||||
|
||||
if (isHttpProxyPort && this.httpProxyBridge.getHttpProxy()) {
|
||||
// Forward non-TLS connections to HttpProxy if configured
|
||||
this.httpProxyBridge.forwardToHttpProxy(/*...*/);
|
||||
return;
|
||||
}
|
||||
```
|
||||
|
||||
### Test Coverage
|
||||
- `test/test.http-fix-unit.ts` - Unit tests verifying the fix
|
||||
- Tests confirm that non-TLS connections on HttpProxy ports are properly forwarded
|
||||
- Tests verify that non-HttpProxy ports still use direct connections
|
||||
|
||||
### Configuration Example
|
||||
```typescript
|
||||
const proxy = new SmartProxy({
|
||||
useHttpProxy: [80], // Enable HttpProxy for port 80
|
||||
httpProxyPort: 8443,
|
||||
acme: {
|
||||
email: 'ssl@example.com',
|
||||
port: 80
|
||||
},
|
||||
routes: [
|
||||
// Your routes here
|
||||
]
|
||||
});
|
||||
```
|
||||
|
||||
## ACME Certificate Provisioning Timing Fix (v19.3.9)
|
||||
|
||||
### Issue
|
||||
Certificate provisioning would start before ports were listening, causing ACME HTTP-01 challenges to fail with connection refused errors.
|
||||
|
||||
### Root Cause
|
||||
SmartProxy initialization sequence:
|
||||
1. Certificate manager initialized → immediately starts provisioning
|
||||
2. Ports start listening (too late for ACME challenges)
|
||||
|
||||
### Solution
|
||||
Deferred certificate provisioning until after ports are ready:
|
||||
```typescript
|
||||
// SmartCertManager.initialize() now skips automatic provisioning
|
||||
// SmartProxy.start() calls provisionAllCertificates() directly after ports are listening
|
||||
```
|
||||
|
||||
### Test Coverage
|
||||
- `test/test.acme-timing-simple.ts` - Verifies proper timing sequence
|
||||
|
||||
### Migration
|
||||
Update to v19.3.9+, no configuration changes needed.
|
||||
|
||||
## Socket Handler Race Condition Fix (v19.5.0)
|
||||
|
||||
### Issue
|
||||
Initial data chunks were being emitted before async socket handlers had completed setup, causing data loss when handlers performed async operations before setting up data listeners.
|
||||
|
||||
### Root Cause
|
||||
The `handleSocketHandlerAction` method was using `process.nextTick` to emit initial chunks regardless of whether the handler was sync or async. This created a race condition where async handlers might not have their listeners ready when the initial data was emitted.
|
||||
|
||||
### Solution
|
||||
Differentiated between sync and async handlers:
|
||||
```typescript
|
||||
const result = route.action.socketHandler(socket);
|
||||
|
||||
if (result instanceof Promise) {
|
||||
// Async handler - wait for completion before emitting initial data
|
||||
result.then(() => {
|
||||
if (initialChunk && initialChunk.length > 0) {
|
||||
socket.emit('data', initialChunk);
|
||||
}
|
||||
}).catch(/*...*/);
|
||||
} else {
|
||||
// Sync handler - use process.nextTick as before
|
||||
if (initialChunk && initialChunk.length > 0) {
|
||||
process.nextTick(() => {
|
||||
socket.emit('data', initialChunk);
|
||||
});
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Test Coverage
|
||||
- `test/test.socket-handler-race.ts` - Specifically tests async handlers with delayed listener setup
|
||||
- Verifies that initial data is received even when handler sets up listeners after async work
|
||||
|
||||
### Usage Note
|
||||
Socket handlers require initial data from the client to trigger routing (not just a TLS handshake). Clients must send at least one byte of data for the handler to be invoked.
|
||||
|
||||
## Route-Specific Security Implementation (v19.5.3)
|
||||
|
||||
### Issue
|
||||
Route-specific security configurations (ipAllowList, ipBlockList, authentication) were defined in the route types but not enforced at runtime.
|
||||
|
||||
### Root Cause
|
||||
The RouteConnectionHandler only checked global IP validation but didn't enforce route-specific security rules after matching a route.
|
||||
|
||||
### Solution
|
||||
Added security checks after route matching:
|
||||
```typescript
|
||||
// Apply route-specific security checks
|
||||
const routeSecurity = route.action.security || route.security;
|
||||
if (routeSecurity) {
|
||||
// Check IP allow/block lists
|
||||
if (routeSecurity.ipAllowList || routeSecurity.ipBlockList) {
|
||||
const isIPAllowed = this.securityManager.isIPAuthorized(
|
||||
remoteIP,
|
||||
routeSecurity.ipAllowList || [],
|
||||
routeSecurity.ipBlockList || []
|
||||
);
|
||||
|
||||
if (!isIPAllowed) {
|
||||
socket.end();
|
||||
this.connectionManager.cleanupConnection(record, 'route_ip_blocked');
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Test Coverage
|
||||
- `test/test.route-security-unit.ts` - Unit tests verifying SecurityManager.isIPAuthorized logic
|
||||
- Tests confirm IP allow/block lists work correctly with glob patterns
|
||||
|
||||
### Configuration Example
|
||||
```typescript
|
||||
const routes: IRouteConfig[] = [{
|
||||
name: 'secure-api',
|
||||
match: { ports: 8443, domains: 'api.example.com' },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: 'localhost', port: 3000 },
|
||||
security: {
|
||||
ipAllowList: ['192.168.1.*', '10.0.0.0/8'], // Allow internal IPs
|
||||
ipBlockList: ['192.168.1.100'], // But block specific IP
|
||||
maxConnections: 100, // Per-route limit (TODO)
|
||||
authentication: { // HTTP-only, requires TLS termination
|
||||
type: 'basic',
|
||||
credentials: [{ username: 'api', password: 'secret' }]
|
||||
}
|
||||
}
|
||||
}
|
||||
}];
|
||||
```
|
||||
|
||||
### Notes
|
||||
- IP lists support glob patterns (via minimatch): `192.168.*`, `10.?.?.1`
|
||||
- Block lists take precedence over allow lists
|
||||
- Authentication requires TLS termination (cannot be enforced on passthrough/direct connections)
|
||||
- Per-route connection limits are not yet implemented
|
||||
- Security is defined at the route level (route.security), not in the action
|
||||
- Route matching is based solely on match criteria; security is enforced after matching
|
||||
|
||||
## Performance Issues Investigation (v19.5.3+)
|
||||
|
||||
### Critical Blocking Operations Found
|
||||
1. **Busy Wait Loop** in `ts/proxies/nftables-proxy/nftables-proxy.ts:235-238`
|
||||
- Blocks entire event loop with `while (Date.now() < waitUntil) {}`
|
||||
- Should use `await new Promise(resolve => setTimeout(resolve, delay))`
|
||||
|
||||
2. **Synchronous Filesystem Operations**
|
||||
- Certificate management uses `fs.existsSync()`, `fs.mkdirSync()`, `fs.readFileSync()`
|
||||
- NFTables proxy uses `execSync()` for system commands
|
||||
- Certificate store uses `ensureDirSync()`, `fileExistsSync()`, `removeManySync()`
|
||||
|
||||
3. **Memory Leak Risks**
|
||||
- Several `setInterval()` calls without storing references for cleanup
|
||||
- Event listeners added without proper cleanup in error paths
|
||||
- Missing `removeAllListeners()` calls in some connection cleanup scenarios
|
||||
|
||||
### Performance Recommendations
|
||||
- Replace all sync filesystem operations with async alternatives
|
||||
- Fix the busy wait loop immediately (critical event loop blocker)
|
||||
- Add proper cleanup for all timers and event listeners
|
||||
- Consider worker threads for CPU-intensive operations
|
||||
- See `readme.problems.md` for detailed analysis and recommendations
|
||||
|
||||
## Performance Optimizations Implemented (Phase 1 - v19.6.0)
|
||||
|
||||
### 1. Async Utilities Created (`ts/core/utils/async-utils.ts`)
|
||||
- **delay()**: Non-blocking alternative to busy wait loops
|
||||
- **retryWithBackoff()**: Retry operations with exponential backoff
|
||||
- **withTimeout()**: Execute operations with timeout protection
|
||||
- **parallelLimit()**: Run async operations with concurrency control
|
||||
- **debounceAsync()**: Debounce async functions
|
||||
- **AsyncMutex**: Ensure exclusive access to resources
|
||||
- **CircuitBreaker**: Protect against cascading failures
|
||||
|
||||
### 2. Filesystem Utilities Created (`ts/core/utils/fs-utils.ts`)
|
||||
- **AsyncFileSystem**: Complete async filesystem operations
|
||||
- exists(), ensureDir(), readFile(), writeFile()
|
||||
- readJSON(), writeJSON() with proper error handling
|
||||
- copyFile(), moveFile(), removeDir()
|
||||
- Stream creation and file listing utilities
|
||||
|
||||
### 3. Critical Fixes Applied
|
||||
|
||||
#### Busy Wait Loop Fixed
|
||||
- **Location**: `ts/proxies/nftables-proxy/nftables-proxy.ts:235-238`
|
||||
- **Fix**: Replaced `while (Date.now() < waitUntil) {}` with `await delay(ms)`
|
||||
- **Impact**: Unblocks event loop, massive performance improvement
|
||||
|
||||
#### Certificate Manager Migration
|
||||
- **File**: `ts/proxies/http-proxy/certificate-manager.ts`
|
||||
- Added async initialization method
|
||||
- Kept sync methods for backward compatibility with deprecation warnings
|
||||
- Added `loadDefaultCertificatesAsync()` method
|
||||
|
||||
#### Certificate Store Migration
|
||||
- **File**: `ts/proxies/smart-proxy/cert-store.ts`
|
||||
- Replaced all `fileExistsSync`, `ensureDirSync`, `removeManySync`
|
||||
- Used parallel operations with `Promise.all()` for better performance
|
||||
- Improved error handling and async JSON operations
|
||||
|
||||
#### NFTables Proxy Improvements
|
||||
- Added deprecation warnings to sync methods
|
||||
- Created `executeWithTempFile()` helper for common pattern
|
||||
- Started migration of sync filesystem operations to async
|
||||
- Added import for delay and AsyncFileSystem utilities
|
||||
|
||||
### 4. Backward Compatibility Maintained
|
||||
- All sync methods retained with deprecation warnings
|
||||
- Existing APIs unchanged, new async methods added alongside
|
||||
- Feature flags prepared for gradual rollout
|
||||
|
||||
### 5. Phase 1 Completion Status
|
||||
✅ **Phase 1 COMPLETE** - All critical performance fixes have been implemented:
|
||||
- ✅ Fixed busy wait loop in nftables-proxy.ts
|
||||
- ✅ Created async utilities (delay, retry, timeout, parallelLimit, mutex, circuit breaker)
|
||||
- ✅ Created filesystem utilities (AsyncFileSystem with full async operations)
|
||||
- ✅ Migrated all certificate management to async operations
|
||||
- ✅ Migrated nftables-proxy filesystem operations to async (except stopSync for exit handlers)
|
||||
- ✅ All tests passing for new utilities
|
||||
|
||||
### 6. Phase 2 Progress Status
|
||||
🔨 **Phase 2 IN PROGRESS** - Resource Lifecycle Management:
|
||||
- ✅ Created LifecycleComponent base class for automatic resource cleanup
|
||||
- ✅ Created BinaryHeap data structure for priority queue operations
|
||||
- ✅ Created EnhancedConnectionPool with backpressure and health checks
|
||||
- ✅ Cleaned up legacy code (removed ts/common/, event-utils.ts, event-system.ts)
|
||||
- 📋 TODO: Migrate existing components to extend LifecycleComponent
|
||||
- 📋 TODO: Add integration tests for resource management
|
||||
|
||||
### 7. Next Steps (Remaining Work)
|
||||
- **Phase 2 (cont)**: Migrate components to use LifecycleComponent
|
||||
- **Phase 3**: Add worker threads for CPU-intensive operations
|
||||
- **Phase 4**: Performance monitoring dashboard
|
||||
|
||||
## Socket Error Handling Fix (v19.5.11+)
|
||||
|
||||
### Issue
|
||||
Server crashed with unhandled 'error' event when backend connections failed (ECONNREFUSED). Also caused memory leak with rising active connection count as failed connections weren't cleaned up properly.
|
||||
|
||||
### Root Cause
|
||||
1. **Race Condition**: In forwarding handlers, sockets were created with `net.connect()` but error handlers were attached later, creating a window where errors could crash the server
|
||||
2. **Incomplete Cleanup**: When server connections failed, client sockets weren't properly cleaned up, leaving connection records in memory
|
||||
|
||||
### Solution
|
||||
Created `createSocketWithErrorHandler()` utility that attaches error handlers immediately:
|
||||
```typescript
|
||||
// Before (race condition):
|
||||
const socket = net.connect(port, host);
|
||||
// ... other code ...
|
||||
socket.on('error', handler); // Too late!
|
||||
|
||||
// After (safe):
|
||||
const socket = createSocketWithErrorHandler({
|
||||
port, host,
|
||||
onError: (error) => {
|
||||
// Handle error immediately
|
||||
clientSocket.destroy();
|
||||
},
|
||||
onConnect: () => {
|
||||
// Set up forwarding
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
### Changes Made
|
||||
1. **New Utility**: `ts/core/utils/socket-utils.ts` - Added `createSocketWithErrorHandler()`
|
||||
2. **Updated Handlers**:
|
||||
- `https-passthrough-handler.ts` - Uses safe socket creation
|
||||
- `https-terminate-to-http-handler.ts` - Uses safe socket creation
|
||||
3. **Connection Cleanup**: Client sockets destroyed immediately on server connection failure
|
||||
|
||||
### Test Coverage
|
||||
- `test/test.socket-error-handling.node.ts` - Verifies server doesn't crash on ECONNREFUSED
|
||||
- `test/test.forwarding-error-fix.node.ts` - Tests forwarding handlers handle errors gracefully
|
||||
|
||||
### Configuration
|
||||
No configuration changes needed. The fix is transparent to users.
|
||||
|
||||
### Important Note
|
||||
The fix was applied in two places:
|
||||
1. **ForwardingHandler classes** (`https-passthrough-handler.ts`, etc.) - These are standalone forwarding utilities
|
||||
2. **SmartProxy route-connection-handler** (`route-connection-handler.ts`) - This is where the actual SmartProxy connection handling happens
|
||||
|
||||
The critical fix for SmartProxy was in `setupDirectConnection()` method in route-connection-handler.ts, which now uses `createSocketWithErrorHandler()` to properly handle connection failures and clean up connection records.
|
||||
|
||||
## Connection Cleanup Improvements (v19.5.12+)
|
||||
|
||||
### Issue
|
||||
Connections were still counting up during rapid retry scenarios, especially when routing failed or backend connections were refused. This was due to:
|
||||
1. **Delayed Cleanup**: Using `initiateCleanupOnce` queued cleanup operations (batch of 100 every 100ms) instead of immediate cleanup
|
||||
2. **NFTables Memory Leak**: NFTables connections were never cleaned up, staying in memory forever
|
||||
3. **Connection Limit Bypass**: When max connections reached, connection record check happened after creation
|
||||
|
||||
### Root Cause Analysis
|
||||
1. **Queued vs Immediate Cleanup**:
|
||||
- `initiateCleanupOnce()`: Adds to cleanup queue, processes up to 100 connections every 100ms
|
||||
- `cleanupConnection()`: Immediate synchronous cleanup
|
||||
- Under rapid retries, connections were created faster than the queue could process them
|
||||
|
||||
2. **NFTables Connections**:
|
||||
- Marked with `usingNetworkProxy = true` but never cleaned up
|
||||
- Connection records stayed in memory indefinitely
|
||||
|
||||
3. **Error Path Cleanup**:
|
||||
- Many error paths used `socket.end()` (async) followed by cleanup
|
||||
- Created timing windows where connections weren't fully cleaned
|
||||
|
||||
### Solution
|
||||
1. **Immediate Cleanup**: Changed all error paths from `initiateCleanupOnce()` to `cleanupConnection()` for immediate cleanup
|
||||
2. **NFTables Cleanup**: Added socket close listener to clean up connection records when NFTables connections close
|
||||
3. **Connection Limit Fix**: Added null check after `createConnection()` to handle rejection properly
|
||||
|
||||
### Changes Made in route-connection-handler.ts
|
||||
```typescript
|
||||
// 1. NFTables cleanup (line 551-553)
|
||||
socket.once('close', () => {
|
||||
this.connectionManager.cleanupConnection(record, 'nftables_closed');
|
||||
});
|
||||
|
||||
// 2. Connection limit check (line 93-96)
|
||||
const record = this.connectionManager.createConnection(socket);
|
||||
if (!record) {
|
||||
// Connection was rejected due to limit - socket already destroyed
|
||||
return;
|
||||
}
|
||||
|
||||
// 3. Changed all error paths to use immediate cleanup
|
||||
// Before: this.connectionManager.initiateCleanupOnce(record, reason)
|
||||
// After: this.connectionManager.cleanupConnection(record, reason)
|
||||
```
|
||||
|
||||
### Test Coverage
|
||||
- `test/test.rapid-retry-cleanup.node.ts` - Verifies connection cleanup under rapid retry scenarios
|
||||
- Test shows connection count stays at 0 even with 20 rapid retries with 50ms intervals
|
||||
- Confirms both ECONNREFUSED and routing failure scenarios are handled correctly
|
||||
|
||||
### Performance Impact
|
||||
- **Positive**: No more connection accumulation under load
|
||||
- **Positive**: Immediate cleanup reduces memory usage
|
||||
- **Consideration**: More frequent cleanup operations, but prevents queue backlog
|
||||
|
||||
### Migration Notes
|
||||
No configuration changes needed. The improvements are automatic and backward compatible.
|
||||
|
||||
## Early Client Disconnect Handling (v19.5.13+)
|
||||
|
||||
### Issue
|
||||
Connections were accumulating when clients connected but disconnected before sending data or during routing. This occurred in two scenarios:
|
||||
1. **TLS Path**: Clients connecting and disconnecting before sending initial TLS handshake data
|
||||
2. **Non-TLS Immediate Routing**: Clients disconnecting while backend connection was being established
|
||||
|
||||
### Root Cause
|
||||
1. **Missing Cleanup Handlers**: During initial data wait and immediate routing, no close/end handlers were attached to catch early disconnections
|
||||
2. **Race Condition**: Backend connection attempts continued even after client disconnected, causing unhandled errors
|
||||
3. **Timing Window**: Between accepting connection and establishing full bidirectional flow, disconnections weren't properly handled
|
||||
|
||||
### Solution
|
||||
1. **TLS Path Fix**: Added close/end handlers during initial data wait (lines 224-253 in route-connection-handler.ts)
|
||||
2. **Immediate Routing Fix**: Used `setupSocketHandlers` for proper handler attachment (lines 180-205)
|
||||
3. **Backend Error Handling**: Check if connection already closed before handling backend errors (line 1144)
|
||||
|
||||
### Changes Made
|
||||
```typescript
|
||||
// 1. TLS path - handle disconnect before initial data
|
||||
socket.once('close', () => {
|
||||
if (!initialDataReceived) {
|
||||
this.connectionManager.cleanupConnection(record, 'closed_before_data');
|
||||
}
|
||||
});
|
||||
|
||||
// 2. Immediate routing path - proper handler setup
|
||||
setupSocketHandlers(socket, (reason) => {
|
||||
if (!record.outgoing || record.outgoing.readyState !== 'open') {
|
||||
if (record.outgoing && !record.outgoing.destroyed) {
|
||||
record.outgoing.destroy(); // Abort pending backend connection
|
||||
}
|
||||
this.connectionManager.cleanupConnection(record, reason);
|
||||
}
|
||||
}, undefined, 'immediate-route-client');
|
||||
|
||||
// 3. Backend connection error handling
|
||||
onError: (error) => {
|
||||
if (record.connectionClosed) {
|
||||
logger.log('debug', 'Backend connection failed but client already disconnected');
|
||||
return; // Client already gone, nothing to clean up
|
||||
}
|
||||
// ... normal error handling
|
||||
}
|
||||
```
|
||||
|
||||
### Test Coverage
|
||||
- `test/test.connect-disconnect-cleanup.node.ts` - Comprehensive test for early disconnect scenarios
|
||||
- Tests verify connection count stays at 0 even with rapid connect/disconnect patterns
|
||||
- Covers immediate disconnect, delayed disconnect, and mixed patterns
|
||||
|
||||
### Performance Impact
|
||||
- **Positive**: No more connection accumulation from early disconnects
|
||||
- **Positive**: Immediate cleanup reduces memory usage
|
||||
- **Positive**: Prevents resource exhaustion from rapid reconnection attempts
|
||||
|
||||
### Migration Notes
|
||||
No configuration changes needed. The fix is automatic and backward compatible.
|
||||
|
||||
## Proxy Chain Connection Accumulation Fix (v19.5.14+)
|
||||
|
||||
### Issue
|
||||
When chaining SmartProxies (Client → SmartProxy1 → SmartProxy2 → Backend), connections would accumulate and never be cleaned up. This was particularly severe when the backend was down or closing connections immediately.
|
||||
|
||||
### Root Cause
|
||||
The half-open connection support was preventing proper cascade cleanup in proxy chains:
|
||||
1. Backend closes → SmartProxy2's server socket closes
|
||||
2. SmartProxy2 keeps client socket open (half-open support)
|
||||
3. SmartProxy1 never gets notified that downstream is closed
|
||||
4. Connections accumulate at each proxy in the chain
|
||||
|
||||
The issue was in `createIndependentSocketHandlers()` which waited for BOTH sockets to close before cleanup.
|
||||
|
||||
### Solution
|
||||
1. **Changed default behavior**: When one socket closes, both close immediately
|
||||
2. **Made half-open support opt-in**: Only enabled when explicitly requested
|
||||
3. **Centralized socket handling**: Created `setupBidirectionalForwarding()` for consistent behavior
|
||||
4. **Applied everywhere**: Updated HttpProxyBridge and route-connection-handler to use centralized handling
|
||||
|
||||
### Changes Made
|
||||
```typescript
|
||||
// socket-utils.ts - Default behavior now closes both sockets
|
||||
export function createIndependentSocketHandlers(
|
||||
clientSocket, serverSocket, onBothClosed,
|
||||
options: { enableHalfOpen?: boolean } = {} // Half-open is opt-in
|
||||
) {
|
||||
// When server closes, immediately close client (unless half-open enabled)
|
||||
if (!clientClosed && !options.enableHalfOpen) {
|
||||
clientSocket.destroy();
|
||||
}
|
||||
}
|
||||
|
||||
// New centralized function for consistent socket pairing
|
||||
export function setupBidirectionalForwarding(
|
||||
clientSocket, serverSocket,
|
||||
handlers: {
|
||||
onClientData?: (chunk) => void;
|
||||
onServerData?: (chunk) => void;
|
||||
onCleanup: (reason) => void;
|
||||
enableHalfOpen?: boolean; // Default: false
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
### Test Coverage
|
||||
- `test/test.proxy-chain-simple.node.ts` - Verifies proxy chains don't accumulate connections
|
||||
- Tests confirm connections stay at 0 even with backend closing immediately
|
||||
- Works for any proxy chain configuration (not just localhost)
|
||||
|
||||
### Performance Impact
|
||||
- **Positive**: No more connection accumulation in proxy chains
|
||||
- **Positive**: Immediate cleanup reduces memory usage
|
||||
- **Neutral**: Half-open connections still available when needed (opt-in)
|
||||
|
||||
### Migration Notes
|
||||
No configuration changes needed. The fix applies to all proxy chains automatically.
|
||||
|
||||
## Socket Cleanup Handler Deprecation (v19.5.15+)
|
||||
|
||||
### Issue
|
||||
The deprecated `createSocketCleanupHandler()` function was still being used in forwarding handlers, despite being marked as deprecated.
|
||||
|
||||
### Solution
|
||||
Updated all forwarding handlers to use the new centralized socket utilities:
|
||||
1. **Replaced `createSocketCleanupHandler()`** with `setupBidirectionalForwarding()` in:
|
||||
- `https-terminate-to-https-handler.ts`
|
||||
- `https-terminate-to-http-handler.ts`
|
||||
2. **Removed deprecated function** from `socket-utils.ts`
|
||||
|
||||
### Benefits
|
||||
- Consistent socket handling across all handlers
|
||||
- Proper cleanup in proxy chains (no half-open connections by default)
|
||||
- Better backpressure handling with the centralized implementation
|
||||
- Reduced code duplication
|
||||
|
||||
### Migration Notes
|
||||
No user-facing changes. All forwarding handlers now use the same robust socket handling as the main SmartProxy connection handler.
|
||||
|
||||
## WrappedSocket Class Evaluation for PROXY Protocol (v19.5.19+)
|
||||
|
||||
### Current Socket Handling Architecture
|
||||
- Sockets are handled directly as `net.Socket` instances throughout the codebase
|
||||
- Socket augmentation via TypeScript module augmentation for TLS properties
|
||||
- Metadata tracked separately in `IConnectionRecord` objects
|
||||
- Socket utilities provide helper functions but don't encapsulate the socket
|
||||
- Connection records track extensive metadata (IDs, timestamps, byte counters, TLS state, etc.)
|
||||
|
||||
### Evaluation: Should We Introduce a WrappedSocket Class?
|
||||
|
||||
**Yes, a WrappedSocket class would make sense**, particularly for PROXY protocol implementation and future extensibility.
|
||||
|
||||
### Design Considerations for WrappedSocket
|
||||
|
||||
```typescript
|
||||
class WrappedSocket {
|
||||
private socket: net.Socket;
|
||||
private connectionId: string;
|
||||
private metadata: {
|
||||
realClientIP?: string; // From PROXY protocol
|
||||
realClientPort?: number; // From PROXY protocol
|
||||
proxyIP?: string; // Immediate connection IP
|
||||
proxyPort?: number; // Immediate connection port
|
||||
bytesReceived: number;
|
||||
bytesSent: number;
|
||||
lastActivity: number;
|
||||
isTLS: boolean;
|
||||
// ... other metadata
|
||||
};
|
||||
|
||||
// PROXY protocol handling
|
||||
private proxyProtocolParsed: boolean = false;
|
||||
private pendingData: Buffer[] = [];
|
||||
|
||||
constructor(socket: net.Socket) {
|
||||
this.socket = socket;
|
||||
this.setupHandlers();
|
||||
}
|
||||
|
||||
// Getters for clean access
|
||||
get remoteAddress(): string {
|
||||
return this.metadata.realClientIP || this.socket.remoteAddress || '';
|
||||
}
|
||||
|
||||
get remotePort(): number {
|
||||
return this.metadata.realClientPort || this.socket.remotePort || 0;
|
||||
}
|
||||
|
||||
get isFromTrustedProxy(): boolean {
|
||||
return !!this.metadata.realClientIP;
|
||||
}
|
||||
|
||||
// PROXY protocol parsing
|
||||
async parseProxyProtocol(trustedProxies: string[]): Promise<boolean> {
|
||||
// Implementation here
|
||||
}
|
||||
|
||||
// Delegate socket methods
|
||||
write(data: any): boolean {
|
||||
this.metadata.bytesSent += Buffer.byteLength(data);
|
||||
return this.socket.write(data);
|
||||
}
|
||||
|
||||
destroy(error?: Error): void {
|
||||
this.socket.destroy(error);
|
||||
}
|
||||
|
||||
// Event forwarding
|
||||
on(event: string, listener: Function): this {
|
||||
this.socket.on(event, listener);
|
||||
return this;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Implementation Benefits
|
||||
|
||||
1. **Encapsulation**: Bundle socket + metadata + behavior in one place
|
||||
2. **PROXY Protocol Integration**: Cleaner handling without modifying existing socket code
|
||||
3. **State Management**: Centralized socket state tracking and validation
|
||||
4. **API Consistency**: Uniform interface for all socket operations
|
||||
5. **Future Extensibility**: Easy to add new socket-level features (compression, encryption, etc.)
|
||||
6. **Type Safety**: Better TypeScript support without module augmentation
|
||||
7. **Testing**: Easier to mock and test socket behavior
|
||||
|
||||
### Implementation Drawbacks
|
||||
|
||||
1. **Major Refactoring**: Would require changes throughout the codebase
|
||||
2. **Performance Overhead**: Additional abstraction layer (minimal but present)
|
||||
3. **Compatibility**: Need to maintain event emitter compatibility
|
||||
4. **Learning Curve**: Developers need to understand the wrapper
|
||||
|
||||
### Recommended Approach: Phased Implementation
|
||||
|
||||
**Phase 1: PROXY Protocol Only** (Immediate)
|
||||
- Create minimal `ProxyProtocolSocket` wrapper for new connections from trusted proxies
|
||||
- Use in connection handler when receiving from trusted proxy IPs
|
||||
- Minimal disruption to existing code
|
||||
|
||||
```typescript
|
||||
class ProxyProtocolSocket {
|
||||
constructor(
|
||||
public socket: net.Socket,
|
||||
public realClientIP?: string,
|
||||
public realClientPort?: number
|
||||
) {}
|
||||
|
||||
get remoteAddress(): string {
|
||||
return this.realClientIP || this.socket.remoteAddress || '';
|
||||
}
|
||||
|
||||
get remotePort(): number {
|
||||
return this.realClientPort || this.socket.remotePort || 0;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Phase 2: Gradual Migration** (Future)
|
||||
- Extend wrapper with more functionality
|
||||
- Migrate critical paths to use wrapper
|
||||
- Add performance monitoring
|
||||
|
||||
**Phase 3: Full Adoption** (Long-term)
|
||||
- Complete migration to WrappedSocket
|
||||
- Remove socket augmentation
|
||||
- Standardize all socket handling
|
||||
|
||||
### Decision Summary
|
||||
|
||||
✅ **Implement minimal ProxyProtocolSocket for immediate PROXY protocol support**
|
||||
- Low risk, high value
|
||||
- Solves the immediate proxy chain connection limit issue
|
||||
- Sets foundation for future improvements
|
||||
- Can be implemented alongside existing code
|
||||
|
||||
📋 **Consider full WrappedSocket for future major version**
|
||||
- Cleaner architecture
|
||||
- Better maintainability
|
||||
- But requires significant refactoring
|
||||
|
||||
## WrappedSocket Implementation (PROXY Protocol Phase 1) - v19.5.19+
|
||||
|
||||
The WrappedSocket class has been implemented as the foundation for PROXY protocol support:
|
||||
|
||||
### Implementation Details
|
||||
|
||||
1. **Design Approach**: Uses JavaScript Proxy to delegate all Socket methods/properties to the underlying socket while allowing override of specific properties (remoteAddress, remotePort).
|
||||
|
||||
2. **Key Design Decisions**:
|
||||
- NOT a Duplex stream - Initially tried this approach but it created infinite loops
|
||||
- Simple wrapper using Proxy pattern for transparent delegation
|
||||
- All sockets are wrapped, not just those from trusted proxies
|
||||
- Trusted proxy detection happens after wrapping
|
||||
|
||||
3. **Usage Pattern**:
|
||||
```typescript
|
||||
// In RouteConnectionHandler.handleConnection()
|
||||
const wrappedSocket = new WrappedSocket(socket);
|
||||
// Pass wrappedSocket throughout the flow
|
||||
|
||||
// When calling socket-utils functions, extract underlying socket:
|
||||
const underlyingSocket = getUnderlyingSocket(socket);
|
||||
setupBidirectionalForwarding(underlyingSocket, targetSocket, {...});
|
||||
```
|
||||
|
||||
4. **Important Implementation Notes**:
|
||||
- Socket utility functions (setupBidirectionalForwarding, cleanupSocket) expect raw net.Socket
|
||||
- Always extract underlying socket before passing to these utilities using `getUnderlyingSocket()`
|
||||
- WrappedSocket preserves all Socket functionality through Proxy delegation
|
||||
- TypeScript typing handled via index signature: `[key: string]: any`
|
||||
|
||||
5. **Files Modified**:
|
||||
- `ts/core/models/wrapped-socket.ts` - The WrappedSocket implementation
|
||||
- `ts/core/models/socket-types.ts` - Helper functions and type guards
|
||||
- `ts/proxies/smart-proxy/route-connection-handler.ts` - Updated to wrap all incoming sockets
|
||||
- `ts/proxies/smart-proxy/connection-manager.ts` - Updated to accept WrappedSocket
|
||||
- `ts/proxies/smart-proxy/http-proxy-bridge.ts` - Updated to handle WrappedSocket
|
||||
|
||||
6. **Test Coverage**:
|
||||
- `test/test.wrapped-socket-forwarding.ts` - Verifies data forwarding through wrapped sockets
|
||||
|
||||
### Next Steps for PROXY Protocol
|
||||
- Phase 2: Parse PROXY protocol header from trusted proxies
|
||||
- Phase 3: Update real client IP/port after parsing
|
||||
- Phase 4: Test with HAProxy and AWS ELB
|
||||
- Phase 5: Documentation and configuration
|
858
readme.md
858
readme.md
@ -665,6 +665,661 @@ redirect: {
|
||||
}
|
||||
```
|
||||
|
||||
## Forwarding Modes Guide
|
||||
|
||||
This section provides a comprehensive reference for all forwarding modes available in SmartProxy, helping you choose the right configuration for your use case.
|
||||
|
||||
### Visual Overview
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
A[Incoming Traffic] --> B{Action Type?}
|
||||
|
||||
B -->|forward| C{TLS Mode?}
|
||||
B -->|socket-handler| D[Custom Handler]
|
||||
|
||||
C -->|terminate| E[Decrypt TLS]
|
||||
C -->|passthrough| F[Forward Encrypted]
|
||||
C -->|terminate-and-reencrypt| G[Decrypt & Re-encrypt]
|
||||
C -->|none/HTTP| H[Forward HTTP]
|
||||
|
||||
E --> I{Engine?}
|
||||
F --> I
|
||||
G --> I
|
||||
H --> I
|
||||
|
||||
I -->|node| J[Node.js Processing]
|
||||
I -->|nftables| K[Kernel NAT]
|
||||
|
||||
J --> L[Backend]
|
||||
K --> L
|
||||
D --> M[Custom Logic]
|
||||
|
||||
style B fill:#f9f,stroke:#333,stroke-width:2px
|
||||
style C fill:#bbf,stroke:#333,stroke-width:2px
|
||||
style I fill:#bfb,stroke:#333,stroke-width:2px
|
||||
```
|
||||
|
||||
### Overview
|
||||
|
||||
SmartProxy offers flexible traffic forwarding through combinations of:
|
||||
- **Action Types**: How to handle matched traffic
|
||||
- **TLS Modes**: How to handle HTTPS/TLS connections
|
||||
- **Forwarding Engines**: Where packet processing occurs
|
||||
|
||||
### Quick Reference
|
||||
|
||||
#### Modern Route-Based Configuration
|
||||
|
||||
| Use Case | Action Type | TLS Mode | Engine | Performance | Security |
|
||||
|----------|------------|----------|---------|-------------|----------|
|
||||
| HTTP web server | `forward` | N/A | `node` | Good | Basic |
|
||||
| HTTPS web server (inspect traffic) | `forward` | `terminate` | `node` | Good | Full inspection |
|
||||
| HTTPS passthrough (no inspection) | `forward` | `passthrough` | `node` | Better | End-to-end encryption |
|
||||
| HTTPS gateway (re-encrypt to backend) | `forward` | `terminate-and-reencrypt` | `node` | Moderate | Full control |
|
||||
| High-performance TCP forwarding | `forward` | `passthrough` | `nftables` | Excellent | Basic |
|
||||
| Custom protocol handling | `socket-handler` | N/A | `node` | Varies | Custom |
|
||||
|
||||
#### Legacy Forwarding Types (Deprecated)
|
||||
|
||||
| Legacy Type | Modern Equivalent |
|
||||
|------------|------------------|
|
||||
| `http-only` | `action.type: 'forward'` with port 80 |
|
||||
| `https-passthrough` | `action.type: 'forward'` + `tls.mode: 'passthrough'` |
|
||||
| `https-terminate-to-http` | `action.type: 'forward'` + `tls.mode: 'terminate'` |
|
||||
| `https-terminate-to-https` | `action.type: 'forward'` + `tls.mode: 'terminate-and-reencrypt'` |
|
||||
|
||||
### Forwarding Mode Categories
|
||||
|
||||
#### 1. Action Types
|
||||
|
||||
##### Forward Action
|
||||
Routes traffic to a backend server. This is the most common action type.
|
||||
|
||||
```typescript
|
||||
{
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: {
|
||||
host: 'backend-server',
|
||||
port: 8080
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### Socket Handler Action
|
||||
Provides custom handling for any TCP protocol. Used for specialized protocols or custom logic.
|
||||
|
||||
```typescript
|
||||
{
|
||||
action: {
|
||||
type: 'socket-handler',
|
||||
socketHandler: async (socket, context) => {
|
||||
// Custom protocol implementation
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### 2. TLS Modes (for Forward Action)
|
||||
|
||||
##### Passthrough Mode
|
||||
- **What**: Forwards encrypted TLS traffic without decryption
|
||||
- **When**: Backend handles its own TLS termination
|
||||
- **Pros**: Maximum performance, true end-to-end encryption
|
||||
- **Cons**: Cannot inspect or modify HTTPS traffic
|
||||
|
||||
```mermaid
|
||||
graph LR
|
||||
Client -->|TLS| SmartProxy
|
||||
SmartProxy -->|TLS| Backend
|
||||
style SmartProxy fill:#f9f,stroke:#333,stroke-width:2px
|
||||
```
|
||||
|
||||
##### Terminate Mode
|
||||
- **What**: Decrypts TLS, forwards as plain HTTP
|
||||
- **When**: Backend doesn't support HTTPS or you need to inspect traffic
|
||||
- **Pros**: Can modify headers, inspect content, add security headers
|
||||
- **Cons**: Backend connection is unencrypted
|
||||
|
||||
```mermaid
|
||||
graph LR
|
||||
Client -->|TLS| SmartProxy
|
||||
SmartProxy -->|HTTP| Backend
|
||||
style SmartProxy fill:#f9f,stroke:#333,stroke-width:2px
|
||||
```
|
||||
|
||||
##### Terminate-and-Reencrypt Mode
|
||||
- **What**: Decrypts TLS, then creates new TLS connection to backend
|
||||
- **When**: Need traffic inspection but backend requires HTTPS
|
||||
- **Pros**: Full control while maintaining backend security
|
||||
- **Cons**: Higher CPU usage, increased latency
|
||||
|
||||
```mermaid
|
||||
graph LR
|
||||
Client -->|TLS| SmartProxy
|
||||
SmartProxy -->|New TLS| Backend
|
||||
style SmartProxy fill:#f9f,stroke:#333,stroke-width:2px
|
||||
```
|
||||
|
||||
#### 3. Forwarding Engines
|
||||
|
||||
##### Node.js Engine (Default)
|
||||
- **Processing**: Application-level in Node.js event loop
|
||||
- **Features**: Full protocol support, header manipulation, WebSockets
|
||||
- **Performance**: Good for most use cases
|
||||
- **Use when**: You need application-layer features
|
||||
|
||||
##### NFTables Engine
|
||||
- **Processing**: Kernel-level packet forwarding
|
||||
- **Features**: Basic NAT, minimal overhead
|
||||
- **Performance**: Excellent, near wire-speed
|
||||
- **Use when**: Maximum performance is critical
|
||||
- **Requirements**: Linux, root permissions, NFTables installed
|
||||
|
||||
### Detailed Mode Explanations
|
||||
|
||||
#### HTTP Forwarding (Port 80)
|
||||
|
||||
Simple HTTP forwarding without encryption:
|
||||
|
||||
```typescript
|
||||
{
|
||||
match: { ports: 80, domains: 'example.com' },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: 'localhost', port: 8080 }
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Data Flow**: Client → SmartProxy (HTTP) → Backend (HTTP)
|
||||
|
||||
#### HTTPS with TLS Termination
|
||||
|
||||
Decrypt HTTPS and forward as HTTP:
|
||||
|
||||
```typescript
|
||||
{
|
||||
match: { ports: 443, domains: 'secure.example.com' },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: 'localhost', port: 8080 },
|
||||
tls: {
|
||||
mode: 'terminate',
|
||||
certificate: 'auto' // Use Let's Encrypt
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Data Flow**: Client → SmartProxy (HTTPS decrypt) → Backend (HTTP)
|
||||
|
||||
#### HTTPS Passthrough
|
||||
|
||||
Forward encrypted traffic without decryption:
|
||||
|
||||
```typescript
|
||||
{
|
||||
match: { ports: 443, domains: 'legacy.example.com' },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: '192.168.1.10', port: 443 },
|
||||
tls: {
|
||||
mode: 'passthrough'
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Data Flow**: Client → SmartProxy (TLS forwarding) → Backend (Original TLS)
|
||||
|
||||
#### HTTPS Gateway (Terminate and Re-encrypt)
|
||||
|
||||
Decrypt, inspect, then re-encrypt to backend:
|
||||
|
||||
```typescript
|
||||
{
|
||||
match: { ports: 443, domains: 'api.example.com' },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: 'api-backend', port: 443 },
|
||||
tls: {
|
||||
mode: 'terminate-and-reencrypt',
|
||||
certificate: 'auto'
|
||||
},
|
||||
advanced: {
|
||||
headers: {
|
||||
'X-Forwarded-Proto': 'https',
|
||||
'X-Real-IP': '{clientIp}'
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Data Flow**: Client → SmartProxy (HTTPS decrypt) → SmartProxy (New HTTPS) → Backend
|
||||
|
||||
#### High-Performance NFTables Forwarding
|
||||
|
||||
Kernel-level forwarding for maximum performance:
|
||||
|
||||
```typescript
|
||||
{
|
||||
match: { ports: 443, domains: 'fast.example.com' },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: 'backend', port: 443 },
|
||||
tls: { mode: 'passthrough' },
|
||||
forwardingEngine: 'nftables',
|
||||
nftables: {
|
||||
preserveSourceIP: true,
|
||||
maxRate: '10gbps'
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Data Flow**: Client → Kernel (NFTables NAT) → Backend
|
||||
|
||||
#### Custom Socket Handler
|
||||
|
||||
Handle custom protocols or implement specialized logic:
|
||||
|
||||
```typescript
|
||||
{
|
||||
match: { ports: 9000, domains: 'custom.example.com' },
|
||||
action: {
|
||||
type: 'socket-handler',
|
||||
socketHandler: async (socket, context) => {
|
||||
console.log(`Connection from ${context.clientIp}`);
|
||||
|
||||
socket.write('Welcome to custom protocol server\n');
|
||||
|
||||
socket.on('data', (data) => {
|
||||
// Handle custom protocol
|
||||
const response = processCustomProtocol(data);
|
||||
socket.write(response);
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Decision Guide
|
||||
|
||||
#### Choose HTTP Forwarding When:
|
||||
- Backend only supports HTTP
|
||||
- Internal services not exposed to internet
|
||||
- Development/testing environments
|
||||
|
||||
#### Choose HTTPS Termination When:
|
||||
- Need to inspect/modify HTTP traffic
|
||||
- Backend doesn't support HTTPS
|
||||
- Want to add security headers
|
||||
- Need to cache responses
|
||||
|
||||
#### Choose HTTPS Passthrough When:
|
||||
- Backend manages its own certificates
|
||||
- Need true end-to-end encryption
|
||||
- Compliance requires no MITM
|
||||
- WebSocket connections to backend
|
||||
|
||||
#### Choose HTTPS Terminate-and-Reencrypt When:
|
||||
- Need traffic inspection AND backend requires HTTPS
|
||||
- API gateway scenarios
|
||||
- Adding authentication layers
|
||||
- Different certificates for client/backend
|
||||
|
||||
#### Choose NFTables Engine When:
|
||||
- Handling 1Gbps+ traffic
|
||||
- Thousands of concurrent connections
|
||||
- Minimal latency is critical
|
||||
- Don't need application-layer features
|
||||
|
||||
#### Choose Socket Handler When:
|
||||
- Implementing custom protocols
|
||||
- Need fine-grained connection control
|
||||
- Building protocol adapters
|
||||
- Special authentication flows
|
||||
|
||||
### Complete Examples
|
||||
|
||||
#### Example 1: Complete Web Application
|
||||
|
||||
```typescript
|
||||
const proxy = new SmartProxy({
|
||||
routes: [
|
||||
// HTTP to HTTPS redirect
|
||||
{
|
||||
match: { ports: 80, domains: ['example.com', 'www.example.com'] },
|
||||
action: {
|
||||
type: 'socket-handler',
|
||||
socketHandler: SocketHandlers.httpRedirect('https://{domain}{path}')
|
||||
}
|
||||
},
|
||||
|
||||
// Main website with TLS termination
|
||||
{
|
||||
match: { ports: 443, domains: ['example.com', 'www.example.com'] },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: 'web-backend', port: 3000 },
|
||||
tls: {
|
||||
mode: 'terminate',
|
||||
certificate: 'auto'
|
||||
},
|
||||
websocket: { enabled: true }
|
||||
}
|
||||
},
|
||||
|
||||
// API with re-encryption
|
||||
{
|
||||
match: { ports: 443, domains: 'api.example.com' },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: 'api-backend', port: 443 },
|
||||
tls: {
|
||||
mode: 'terminate-and-reencrypt',
|
||||
certificate: 'auto'
|
||||
}
|
||||
},
|
||||
security: {
|
||||
ipAllowList: ['10.0.0.0/8'],
|
||||
rateLimit: {
|
||||
enabled: true,
|
||||
maxRequests: 100,
|
||||
window: 60
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
});
|
||||
```
|
||||
|
||||
#### Example 2: Multi-Mode Proxy Setup
|
||||
|
||||
```typescript
|
||||
const proxy = new SmartProxy({
|
||||
routes: [
|
||||
// Legacy app with passthrough
|
||||
{
|
||||
match: { ports: 443, domains: 'legacy.example.com' },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: 'legacy-server', port: 443 },
|
||||
tls: { mode: 'passthrough' }
|
||||
}
|
||||
},
|
||||
|
||||
// High-performance streaming with NFTables
|
||||
{
|
||||
match: { ports: 8080, domains: 'stream.example.com' },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: 'stream-backend', port: 8080 },
|
||||
forwardingEngine: 'nftables',
|
||||
nftables: {
|
||||
protocol: 'tcp',
|
||||
preserveSourceIP: true
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
// Custom protocol handler
|
||||
{
|
||||
match: { ports: 9999 },
|
||||
action: {
|
||||
type: 'socket-handler',
|
||||
socketHandler: SocketHandlers.proxy('custom-backend', 9999)
|
||||
}
|
||||
}
|
||||
]
|
||||
});
|
||||
```
|
||||
|
||||
### Performance Considerations
|
||||
|
||||
#### Node.js Engine Performance
|
||||
|
||||
| Metric | Typical Performance |
|
||||
|--------|-------------------|
|
||||
| Throughput | 1-10 Gbps |
|
||||
| Connections | 10,000-50,000 concurrent |
|
||||
| Latency | 1-5ms added |
|
||||
| CPU Usage | Moderate |
|
||||
|
||||
**Best for**: Most web applications, APIs, sites needing inspection
|
||||
|
||||
#### NFTables Engine Performance
|
||||
|
||||
| Metric | Typical Performance |
|
||||
|--------|-------------------|
|
||||
| Throughput | 10-100 Gbps |
|
||||
| Connections | 100,000+ concurrent |
|
||||
| Latency | <0.1ms added |
|
||||
| CPU Usage | Minimal |
|
||||
|
||||
**Best for**: High-traffic services, streaming, gaming, TCP forwarding
|
||||
|
||||
#### Performance Tips
|
||||
|
||||
1. **Use passthrough mode** when you don't need inspection
|
||||
2. **Enable NFTables** for high-traffic services
|
||||
3. **Terminate TLS only when necessary** - it adds CPU overhead
|
||||
4. **Use connection pooling** for terminate-and-reencrypt mode
|
||||
5. **Enable HTTP/2** for better multiplexing
|
||||
|
||||
### Security Implications
|
||||
|
||||
#### TLS Termination Security
|
||||
|
||||
**Pros:**
|
||||
- Inspect traffic for threats
|
||||
- Add security headers
|
||||
- Implement WAF rules
|
||||
- Log requests for audit
|
||||
|
||||
**Cons:**
|
||||
- Proxy has access to decrypted data
|
||||
- Requires secure certificate storage
|
||||
- Potential compliance issues
|
||||
|
||||
**Best Practices:**
|
||||
- Use auto-renewal with Let's Encrypt
|
||||
- Store certificates securely
|
||||
- Implement proper access controls
|
||||
- Use strong TLS configurations
|
||||
|
||||
#### Passthrough Security
|
||||
|
||||
**Pros:**
|
||||
- True end-to-end encryption
|
||||
- No MITM concerns
|
||||
- Backend controls security
|
||||
|
||||
**Cons:**
|
||||
- Cannot inspect traffic
|
||||
- Cannot add security headers
|
||||
- Limited DDoS protection
|
||||
|
||||
#### Socket Handler Security
|
||||
|
||||
**Risks:**
|
||||
- Custom code may have vulnerabilities
|
||||
- Resource exhaustion possible
|
||||
- Authentication bypass risks
|
||||
|
||||
**Mitigations:**
|
||||
```typescript
|
||||
{
|
||||
action: {
|
||||
type: 'socket-handler',
|
||||
socketHandler: async (socket, context) => {
|
||||
// Always validate and sanitize input
|
||||
socket.on('data', (data) => {
|
||||
if (data.length > MAX_SIZE) {
|
||||
socket.destroy();
|
||||
return;
|
||||
}
|
||||
// Process safely...
|
||||
});
|
||||
|
||||
// Set timeouts
|
||||
socket.setTimeout(30000);
|
||||
|
||||
// Rate limit connections
|
||||
if (connectionsFromIP(context.clientIp) > 10) {
|
||||
socket.destroy();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Migration from Legacy Types
|
||||
|
||||
#### From `http-only`
|
||||
|
||||
**Old:**
|
||||
```typescript
|
||||
{
|
||||
type: 'http-only',
|
||||
target: { host: 'localhost', port: 8080 }
|
||||
}
|
||||
```
|
||||
|
||||
**New:**
|
||||
```typescript
|
||||
{
|
||||
match: { ports: 80, domains: 'example.com' },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: 'localhost', port: 8080 }
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### From `https-passthrough`
|
||||
|
||||
**Old:**
|
||||
```typescript
|
||||
{
|
||||
type: 'https-passthrough',
|
||||
target: { host: 'backend', port: 443 }
|
||||
}
|
||||
```
|
||||
|
||||
**New:**
|
||||
```typescript
|
||||
{
|
||||
match: { ports: 443, domains: 'example.com' },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: 'backend', port: 443 },
|
||||
tls: { mode: 'passthrough' }
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### From `https-terminate-to-http`
|
||||
|
||||
**Old:**
|
||||
```typescript
|
||||
{
|
||||
type: 'https-terminate-to-http',
|
||||
target: { host: 'localhost', port: 8080 },
|
||||
ssl: { /* certs */ }
|
||||
}
|
||||
```
|
||||
|
||||
**New:**
|
||||
```typescript
|
||||
{
|
||||
match: { ports: 443, domains: 'example.com' },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: 'localhost', port: 8080 },
|
||||
tls: {
|
||||
mode: 'terminate',
|
||||
certificate: 'auto' // or provide cert/key
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### From `https-terminate-to-https`
|
||||
|
||||
**Old:**
|
||||
```typescript
|
||||
{
|
||||
type: 'https-terminate-to-https',
|
||||
target: { host: 'backend', port: 443 },
|
||||
ssl: { /* certs */ }
|
||||
}
|
||||
```
|
||||
|
||||
**New:**
|
||||
```typescript
|
||||
{
|
||||
match: { ports: 443, domains: 'example.com' },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: 'backend', port: 443 },
|
||||
tls: {
|
||||
mode: 'terminate-and-reencrypt',
|
||||
certificate: 'auto'
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Helper Functions Quick Reference
|
||||
|
||||
SmartProxy provides helper functions for common configurations:
|
||||
|
||||
```typescript
|
||||
// HTTP forwarding
|
||||
createHttpRoute('example.com', { host: 'localhost', port: 8080 })
|
||||
|
||||
// HTTPS with termination
|
||||
createHttpsTerminateRoute('secure.com', { host: 'localhost', port: 8080 }, {
|
||||
certificate: 'auto'
|
||||
})
|
||||
|
||||
// HTTPS passthrough
|
||||
createHttpsPassthroughRoute('legacy.com', { host: 'backend', port: 443 })
|
||||
|
||||
// Complete HTTPS setup (includes HTTP redirect)
|
||||
...createCompleteHttpsServer('example.com', { host: 'localhost', port: 8080 }, {
|
||||
certificate: 'auto'
|
||||
})
|
||||
|
||||
// NFTables high-performance
|
||||
createNfTablesRoute('fast.com', { host: 'backend', port: 8080 }, {
|
||||
ports: 80,
|
||||
preserveSourceIP: true
|
||||
})
|
||||
|
||||
// Custom socket handler
|
||||
createSocketHandlerRoute('custom.com', 9000, async (socket, context) => {
|
||||
// Handler implementation
|
||||
})
|
||||
```
|
||||
|
||||
### Summary
|
||||
|
||||
SmartProxy's forwarding modes provide flexibility for any proxy scenario:
|
||||
|
||||
- **Simple HTTP/HTTPS forwarding** for most web applications
|
||||
- **TLS passthrough** for end-to-end encryption
|
||||
- **TLS termination** for traffic inspection and modification
|
||||
- **NFTables** for extreme performance requirements
|
||||
- **Socket handlers** for custom protocols
|
||||
|
||||
Choose based on your security requirements, performance needs, and whether you need to inspect or modify traffic. The modern route-based configuration provides a consistent interface regardless of the forwarding mode you choose.
|
||||
|
||||
### Route Metadata and Prioritization
|
||||
|
||||
You can add metadata to routes to help with organization and control matching priority:
|
||||
@ -919,6 +1574,152 @@ Available helper functions:
|
||||
})
|
||||
```
|
||||
|
||||
## Metrics and Monitoring
|
||||
|
||||
SmartProxy includes a comprehensive metrics collection system that provides real-time insights into proxy performance, connection statistics, and throughput data.
|
||||
|
||||
### Getting Metrics
|
||||
|
||||
```typescript
|
||||
const proxy = new SmartProxy({ /* config */ });
|
||||
await proxy.start();
|
||||
|
||||
// Access metrics through the getStats() method
|
||||
const stats = proxy.getStats();
|
||||
|
||||
// Get current active connections
|
||||
console.log(`Active connections: ${stats.getActiveConnections()}`);
|
||||
|
||||
// Get total connections since start
|
||||
console.log(`Total connections: ${stats.getTotalConnections()}`);
|
||||
|
||||
// Get requests per second (RPS)
|
||||
console.log(`Current RPS: ${stats.getRequestsPerSecond()}`);
|
||||
|
||||
// Get throughput data
|
||||
const throughput = stats.getThroughput();
|
||||
console.log(`Bytes received: ${throughput.bytesIn}`);
|
||||
console.log(`Bytes sent: ${throughput.bytesOut}`);
|
||||
|
||||
// Get connections by route
|
||||
const routeConnections = stats.getConnectionsByRoute();
|
||||
for (const [route, count] of routeConnections) {
|
||||
console.log(`Route ${route}: ${count} connections`);
|
||||
}
|
||||
|
||||
// Get connections by IP address
|
||||
const ipConnections = stats.getConnectionsByIP();
|
||||
for (const [ip, count] of ipConnections) {
|
||||
console.log(`IP ${ip}: ${count} connections`);
|
||||
}
|
||||
```
|
||||
|
||||
### Available Metrics
|
||||
|
||||
The `IProxyStats` interface provides the following methods:
|
||||
|
||||
- `getActiveConnections()`: Current number of active connections
|
||||
- `getTotalConnections()`: Total connections handled since proxy start
|
||||
- `getRequestsPerSecond()`: Current requests per second (1-minute average)
|
||||
- `getThroughput()`: Total bytes transferred (in/out)
|
||||
- `getConnectionsByRoute()`: Connection count per route
|
||||
- `getConnectionsByIP()`: Connection count per client IP
|
||||
|
||||
Additional extended methods available:
|
||||
|
||||
- `getThroughputRate()`: Bytes per second rate for the last minute
|
||||
- `getTopIPs(limit?: number)`: Get top IPs by connection count
|
||||
- `isIPBlocked(ip: string, maxConnectionsPerIP: number)`: Check if an IP has reached the connection limit
|
||||
|
||||
### Extended Metrics Example
|
||||
|
||||
```typescript
|
||||
const stats = proxy.getStats() as any; // Extended methods are available
|
||||
|
||||
// Get throughput rate
|
||||
const rate = stats.getThroughputRate();
|
||||
console.log(`Incoming: ${rate.bytesInPerSec} bytes/sec`);
|
||||
console.log(`Outgoing: ${rate.bytesOutPerSec} bytes/sec`);
|
||||
|
||||
// Get top 10 IPs by connection count
|
||||
const topIPs = stats.getTopIPs(10);
|
||||
topIPs.forEach(({ ip, connections }) => {
|
||||
console.log(`${ip}: ${connections} connections`);
|
||||
});
|
||||
|
||||
// Check if an IP should be rate limited
|
||||
if (stats.isIPBlocked('192.168.1.100', 100)) {
|
||||
console.log('IP has too many connections');
|
||||
}
|
||||
```
|
||||
|
||||
### Monitoring Example
|
||||
|
||||
```typescript
|
||||
// Create a monitoring loop
|
||||
setInterval(() => {
|
||||
const stats = proxy.getStats();
|
||||
|
||||
// Log key metrics
|
||||
console.log({
|
||||
timestamp: new Date().toISOString(),
|
||||
activeConnections: stats.getActiveConnections(),
|
||||
rps: stats.getRequestsPerSecond(),
|
||||
throughput: stats.getThroughput()
|
||||
});
|
||||
|
||||
// Check for high connection counts from specific IPs
|
||||
const ipConnections = stats.getConnectionsByIP();
|
||||
for (const [ip, count] of ipConnections) {
|
||||
if (count > 100) {
|
||||
console.warn(`High connection count from ${ip}: ${count}`);
|
||||
}
|
||||
}
|
||||
}, 10000); // Every 10 seconds
|
||||
```
|
||||
|
||||
### Exporting Metrics
|
||||
|
||||
You can export metrics in various formats for external monitoring systems:
|
||||
|
||||
```typescript
|
||||
// Export as JSON
|
||||
app.get('/metrics.json', (req, res) => {
|
||||
const stats = proxy.getStats();
|
||||
res.json({
|
||||
activeConnections: stats.getActiveConnections(),
|
||||
totalConnections: stats.getTotalConnections(),
|
||||
requestsPerSecond: stats.getRequestsPerSecond(),
|
||||
throughput: stats.getThroughput(),
|
||||
connectionsByRoute: Object.fromEntries(stats.getConnectionsByRoute()),
|
||||
connectionsByIP: Object.fromEntries(stats.getConnectionsByIP())
|
||||
});
|
||||
});
|
||||
|
||||
// Export as Prometheus format
|
||||
app.get('/metrics', (req, res) => {
|
||||
const stats = proxy.getStats();
|
||||
res.set('Content-Type', 'text/plain');
|
||||
res.send(`
|
||||
# HELP smartproxy_active_connections Current active connections
|
||||
# TYPE smartproxy_active_connections gauge
|
||||
smartproxy_active_connections ${stats.getActiveConnections()}
|
||||
|
||||
# HELP smartproxy_requests_per_second Current requests per second
|
||||
# TYPE smartproxy_requests_per_second gauge
|
||||
smartproxy_requests_per_second ${stats.getRequestsPerSecond()}
|
||||
|
||||
# HELP smartproxy_bytes_in Total bytes received
|
||||
# TYPE smartproxy_bytes_in counter
|
||||
smartproxy_bytes_in ${stats.getThroughput().bytesIn}
|
||||
|
||||
# HELP smartproxy_bytes_out Total bytes sent
|
||||
# TYPE smartproxy_bytes_out counter
|
||||
smartproxy_bytes_out ${stats.getThroughput().bytesOut}
|
||||
`);
|
||||
});
|
||||
```
|
||||
|
||||
## Other Components
|
||||
|
||||
While SmartProxy provides a unified API for most needs, you can also use individual components:
|
||||
@ -1618,6 +2419,62 @@ createHttpToHttpsRedirect('old.example.com', 443)
|
||||
}
|
||||
```
|
||||
|
||||
## WebSocket Keep-Alive Configuration
|
||||
|
||||
If your WebSocket connections are disconnecting every 30 seconds in SNI passthrough mode, here's how to configure keep-alive settings:
|
||||
|
||||
### Extended Keep-Alive Treatment (Recommended)
|
||||
|
||||
```typescript
|
||||
const proxy = new SmartProxy({
|
||||
// Extend timeout for keep-alive connections
|
||||
keepAliveTreatment: 'extended',
|
||||
keepAliveInactivityMultiplier: 10, // 10x the base timeout
|
||||
inactivityTimeout: 14400000, // 4 hours base (40 hours with multiplier)
|
||||
|
||||
routes: [
|
||||
{
|
||||
name: 'websocket-passthrough',
|
||||
match: {
|
||||
ports: 443,
|
||||
domains: ['ws.example.com', 'wss.example.com']
|
||||
},
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: 'backend', port: 443 },
|
||||
tls: { mode: 'passthrough' }
|
||||
}
|
||||
}
|
||||
]
|
||||
});
|
||||
```
|
||||
|
||||
### Immortal Connections (Never Timeout)
|
||||
|
||||
```typescript
|
||||
const proxy = new SmartProxy({
|
||||
// Never timeout keep-alive connections
|
||||
keepAliveTreatment: 'immortal',
|
||||
|
||||
routes: [
|
||||
// ... same as above
|
||||
]
|
||||
});
|
||||
```
|
||||
|
||||
### Understanding the Issue
|
||||
|
||||
In SNI passthrough mode:
|
||||
1. **WebSocket Heartbeat**: The HTTP proxy's WebSocket handler sends ping frames every 30 seconds
|
||||
2. **SNI Passthrough**: In passthrough mode, traffic is encrypted end-to-end
|
||||
3. **Can't Inject Pings**: The proxy can't inject ping frames into encrypted traffic
|
||||
4. **Connection Terminated**: After 30 seconds, connection is marked inactive and closed
|
||||
|
||||
The solution involves:
|
||||
- Longer grace periods for encrypted connections (5 minutes vs 30 seconds)
|
||||
- Relying on OS-level TCP keep-alive instead of application-level heartbeat
|
||||
- Different timeout strategies per route type
|
||||
|
||||
## Configuration Options
|
||||
|
||||
### SmartProxy (IRoutedSmartProxyOptions)
|
||||
@ -1628,6 +2485,7 @@ createHttpToHttpsRedirect('old.example.com', 443)
|
||||
- `httpProxyPort` (number, default 8443) - Port where HttpProxy listens for forwarded connections
|
||||
- Connection timeouts: `initialDataTimeout`, `socketTimeout`, `inactivityTimeout`, etc.
|
||||
- Socket opts: `noDelay`, `keepAlive`, `enableKeepAliveProbes`
|
||||
- Keep-alive configuration: `keepAliveTreatment` ('standard'|'extended'|'immortal'), `keepAliveInactivityMultiplier`
|
||||
- `certProvisionFunction` (callback) - Custom certificate provisioning
|
||||
|
||||
#### SmartProxy Dynamic Port Management Methods
|
||||
|
621
readme.plan.md
621
readme.plan.md
@ -1,621 +0,0 @@
|
||||
# PROXY Protocol Implementation Plan
|
||||
|
||||
## ⚠️ CRITICAL: Implementation Order
|
||||
|
||||
**Phase 1 (ProxyProtocolSocket/WrappedSocket) MUST be completed first!**
|
||||
|
||||
The ProxyProtocolSocket class is the foundation that enables all PROXY protocol functionality. No protocol parsing or integration can happen until this wrapper class is fully implemented and tested.
|
||||
|
||||
1. **FIRST**: Implement ProxyProtocolSocket (the WrappedSocket)
|
||||
2. **THEN**: Add PROXY protocol parser
|
||||
3. **THEN**: Integrate with connection handlers
|
||||
4. **FINALLY**: Add security and validation
|
||||
|
||||
## Overview
|
||||
Implement PROXY protocol support in SmartProxy to preserve client IP information through proxy chains, solving the connection limit accumulation issue where inner proxies see all connections as coming from the outer proxy's IP.
|
||||
|
||||
## Problem Statement
|
||||
- In proxy chains, the inner proxy sees all connections from the outer proxy's IP
|
||||
- This causes the inner proxy to hit per-IP connection limits (default: 100)
|
||||
- Results in connection rejections while outer proxy accumulates connections
|
||||
|
||||
## Solution Design
|
||||
|
||||
### 1. Core Features
|
||||
|
||||
#### 1.1 PROXY Protocol Parsing
|
||||
- Support PROXY protocol v1 (text format) initially
|
||||
- Parse incoming PROXY headers to extract:
|
||||
- Real client IP address
|
||||
- Real client port
|
||||
- Proxy IP address
|
||||
- Proxy port
|
||||
- Protocol (TCP4/TCP6)
|
||||
|
||||
#### 1.2 PROXY Protocol Generation
|
||||
- Add ability to send PROXY protocol headers when forwarding connections
|
||||
- Configurable per route or target
|
||||
|
||||
#### 1.3 Trusted Proxy IPs
|
||||
- New `proxyIPs` array in SmartProxy options
|
||||
- Auto-enable PROXY protocol acceptance for connections from these IPs
|
||||
- Reject PROXY protocol from untrusted sources (security)
|
||||
|
||||
### 2. Configuration Schema
|
||||
|
||||
```typescript
|
||||
interface ISmartProxyOptions {
|
||||
// ... existing options
|
||||
|
||||
// List of trusted proxy IPs that can send PROXY protocol
|
||||
proxyIPs?: string[];
|
||||
|
||||
// Global option to accept PROXY protocol (defaults based on proxyIPs)
|
||||
acceptProxyProtocol?: boolean;
|
||||
|
||||
// Global option to send PROXY protocol to all targets
|
||||
sendProxyProtocol?: boolean;
|
||||
}
|
||||
|
||||
interface IRouteAction {
|
||||
// ... existing options
|
||||
|
||||
// Send PROXY protocol to this specific target
|
||||
sendProxyProtocol?: boolean;
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Implementation Steps
|
||||
|
||||
#### IMPORTANT: Phase 1 Must Be Completed First
|
||||
The `ProxyProtocolSocket` (WrappedSocket) is the foundation for all PROXY protocol functionality. This wrapper class must be implemented and integrated BEFORE any PROXY protocol parsing can begin.
|
||||
|
||||
#### Phase 1: ProxyProtocolSocket (WrappedSocket) Foundation - ✅ COMPLETED (v19.5.19)
|
||||
This phase creates the socket wrapper infrastructure that all subsequent phases depend on.
|
||||
|
||||
1. **Create WrappedSocket class** in `ts/core/models/wrapped-socket.ts` ✅
|
||||
- Used JavaScript Proxy pattern instead of EventEmitter (avoids infinite loops)
|
||||
- Properties for real client IP and port
|
||||
- Transparent getters that return real or socket IP/port
|
||||
- All socket methods/properties delegated via Proxy
|
||||
|
||||
2. **Implement core wrapper functionality** ✅
|
||||
- Constructor accepts regular socket + optional metadata
|
||||
- `remoteAddress` getter returns real IP or falls back to socket IP
|
||||
- `remotePort` getter returns real port or falls back to socket port
|
||||
- `isFromTrustedProxy` property to check if it has real client info
|
||||
- `setProxyInfo()` method to update real client details
|
||||
|
||||
3. **Update ConnectionManager to handle wrapped sockets** ✅
|
||||
- Accept either `net.Socket` or `WrappedSocket`
|
||||
- Created `getUnderlyingSocket()` helper for socket utilities
|
||||
- All socket utility functions extract underlying socket
|
||||
|
||||
4. **Integration completed** ✅
|
||||
- All incoming sockets wrapped in RouteConnectionHandler
|
||||
- Socket forwarding verified working with wrapped sockets
|
||||
- Type safety maintained with index signature
|
||||
|
||||
**Deliverables**: ✅ Working WrappedSocket that can wrap any socket and provide transparent access to client info.
|
||||
|
||||
#### Phase 2: PROXY Protocol Parser - DEPENDS ON PHASE 1
|
||||
Only after WrappedSocket is working can we add protocol parsing.
|
||||
|
||||
1. Create `ProxyProtocolParser` class in `ts/core/utils/proxy-protocol.ts`
|
||||
2. Implement v1 text format parsing
|
||||
3. Add validation and error handling
|
||||
4. Integrate parser to work WITH WrappedSocket (not into it)
|
||||
|
||||
#### Phase 3: Connection Handler Integration - DEPENDS ON PHASES 1 & 2
|
||||
1. ✅ Modify `RouteConnectionHandler` to create WrappedSocket for all connections
|
||||
2. Check if connection is from trusted proxy IP
|
||||
3. If trusted, attempt to parse PROXY protocol header
|
||||
4. Update wrapped socket with real client info
|
||||
5. Continue normal connection handling with wrapped socket
|
||||
|
||||
#### Phase 4: Outbound PROXY Protocol - DEPENDS ON PHASES 1-3
|
||||
1. Add PROXY header generation in `setupDirectConnection`
|
||||
2. Make it configurable per route
|
||||
3. Send header immediately after TCP connection
|
||||
4. Use ProxyProtocolSocket for outbound connections too
|
||||
|
||||
#### Phase 5: Security & Validation - FINAL PHASE
|
||||
1. Validate PROXY headers strictly
|
||||
2. Reject malformed headers
|
||||
3. Only accept from trusted IPs
|
||||
4. Add rate limiting for PROXY protocol parsing
|
||||
|
||||
### 4. Design Decision: Socket Wrapper Architecture
|
||||
|
||||
#### Option A: Minimal Single Socket Wrapper
|
||||
- **Scope**: Wraps individual sockets with metadata
|
||||
- **Use Case**: PROXY protocol support with minimal refactoring
|
||||
- **Pros**: Simple, low risk, easy migration
|
||||
- **Cons**: Still need separate connection management
|
||||
|
||||
#### Option B: Comprehensive Connection Wrapper
|
||||
- **Scope**: Manages socket pairs (incoming + outgoing) with all utilities
|
||||
- **Use Case**: Complete connection lifecycle management
|
||||
- **Pros**:
|
||||
- Encapsulates all socket utilities (forwarding, cleanup, backpressure)
|
||||
- Single object represents entire connection
|
||||
- Cleaner API for connection handling
|
||||
- **Cons**:
|
||||
- Major architectural change
|
||||
- Higher implementation risk
|
||||
- More complex migration
|
||||
|
||||
#### Recommendation
|
||||
Start with **Option A** (ProxyProtocolSocket) for immediate PROXY protocol support, then evaluate Option B based on:
|
||||
- Performance impact of additional abstraction
|
||||
- Code simplification benefits
|
||||
- Team comfort with architectural change
|
||||
|
||||
### 5. Code Implementation Details
|
||||
|
||||
#### 5.1 ProxyProtocolSocket (WrappedSocket) - PHASE 1 IMPLEMENTATION
|
||||
This is the foundational wrapper class that MUST be implemented first. It wraps a regular socket and provides transparent access to the real client IP/port.
|
||||
|
||||
```typescript
|
||||
// ts/core/models/proxy-protocol-socket.ts
|
||||
import { EventEmitter } from 'events';
|
||||
import * as plugins from '../../../plugins.js';
|
||||
|
||||
/**
|
||||
* ProxyProtocolSocket wraps a regular net.Socket to provide transparent access
|
||||
* to the real client IP and port when behind a proxy using PROXY protocol.
|
||||
*
|
||||
* This is the FOUNDATION for all PROXY protocol support and must be implemented
|
||||
* before any protocol parsing can occur.
|
||||
*/
|
||||
export class ProxyProtocolSocket extends EventEmitter {
|
||||
private realClientIP?: string;
|
||||
private realClientPort?: number;
|
||||
|
||||
constructor(
|
||||
public readonly socket: plugins.net.Socket,
|
||||
realClientIP?: string,
|
||||
realClientPort?: number
|
||||
) {
|
||||
super();
|
||||
this.realClientIP = realClientIP;
|
||||
this.realClientPort = realClientPort;
|
||||
|
||||
// Forward all socket events
|
||||
this.forwardSocketEvents();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the real client IP if available, otherwise the socket's remote address
|
||||
*/
|
||||
get remoteAddress(): string | undefined {
|
||||
return this.realClientIP || this.socket.remoteAddress;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the real client port if available, otherwise the socket's remote port
|
||||
*/
|
||||
get remotePort(): number | undefined {
|
||||
return this.realClientPort || this.socket.remotePort;
|
||||
}
|
||||
|
||||
/**
|
||||
* Indicates if this connection came through a trusted proxy
|
||||
*/
|
||||
get isFromTrustedProxy(): boolean {
|
||||
return !!this.realClientIP;
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates the real client information (called after parsing PROXY protocol)
|
||||
*/
|
||||
setProxyInfo(ip: string, port: number): void {
|
||||
this.realClientIP = ip;
|
||||
this.realClientPort = port;
|
||||
}
|
||||
|
||||
// Pass-through all socket methods
|
||||
write(data: any, encoding?: any, callback?: any): boolean {
|
||||
return this.socket.write(data, encoding, callback);
|
||||
}
|
||||
|
||||
end(data?: any, encoding?: any, callback?: any): this {
|
||||
this.socket.end(data, encoding, callback);
|
||||
return this;
|
||||
}
|
||||
|
||||
destroy(error?: Error): this {
|
||||
this.socket.destroy(error);
|
||||
return this;
|
||||
}
|
||||
|
||||
// ... implement all other socket methods as pass-through
|
||||
|
||||
/**
|
||||
* Forward all events from the underlying socket
|
||||
*/
|
||||
private forwardSocketEvents(): void {
|
||||
const events = ['data', 'end', 'close', 'error', 'drain', 'timeout'];
|
||||
events.forEach(event => {
|
||||
this.socket.on(event, (...args) => {
|
||||
this.emit(event, ...args);
|
||||
});
|
||||
});
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**KEY POINT**: This wrapper must be fully functional and tested BEFORE moving to Phase 2.
|
||||
|
||||
#### 4.2 ProxyProtocolParser (new file)
|
||||
```typescript
|
||||
// ts/core/utils/proxy-protocol.ts
|
||||
export class ProxyProtocolParser {
|
||||
static readonly PROXY_V1_SIGNATURE = 'PROXY ';
|
||||
|
||||
static parse(chunk: Buffer): IProxyInfo | null {
|
||||
// Implementation
|
||||
}
|
||||
|
||||
static generate(info: IProxyInfo): Buffer {
|
||||
// Implementation
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### 4.3 Connection Handler Updates
|
||||
```typescript
|
||||
// In handleConnection method
|
||||
let wrappedSocket: ProxyProtocolSocket | plugins.net.Socket = socket;
|
||||
|
||||
// Wrap socket if from trusted proxy
|
||||
if (this.settings.proxyIPs?.includes(socket.remoteAddress)) {
|
||||
wrappedSocket = new ProxyProtocolSocket(socket);
|
||||
}
|
||||
|
||||
// Create connection record with wrapped socket
|
||||
const record = this.connectionManager.createConnection(wrappedSocket);
|
||||
|
||||
// In handleInitialData method
|
||||
if (wrappedSocket instanceof ProxyProtocolSocket) {
|
||||
const proxyInfo = await this.checkForProxyProtocol(chunk);
|
||||
if (proxyInfo) {
|
||||
wrappedSocket.setProxyInfo(proxyInfo.sourceIP, proxyInfo.sourcePort);
|
||||
// Continue with remaining data after PROXY header
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### 4.4 Security Manager Updates
|
||||
- Accept socket or ProxyProtocolSocket
|
||||
- Use `socket.remoteAddress` getter for real client IP
|
||||
- Transparent handling of both socket types
|
||||
|
||||
### 5. Configuration Examples
|
||||
|
||||
#### Basic Setup
|
||||
```typescript
|
||||
// Outer proxy - sends PROXY protocol
|
||||
const outerProxy = new SmartProxy({
|
||||
ports: [443],
|
||||
routes: [{
|
||||
name: 'to-inner-proxy',
|
||||
match: { ports: 443 },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: '195.201.98.232', port: 443 },
|
||||
sendProxyProtocol: true // Enable for this route
|
||||
}
|
||||
}]
|
||||
});
|
||||
|
||||
// Inner proxy - accepts PROXY protocol from outer proxy
|
||||
const innerProxy = new SmartProxy({
|
||||
ports: [443],
|
||||
proxyIPs: ['212.95.99.130'], // Outer proxy IP
|
||||
// acceptProxyProtocol: true is automatic for proxyIPs
|
||||
routes: [{
|
||||
name: 'to-backend',
|
||||
match: { ports: 443 },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: '192.168.5.247', port: 443 }
|
||||
}
|
||||
}]
|
||||
});
|
||||
```
|
||||
|
||||
### 6. Testing Plan
|
||||
|
||||
#### Unit Tests
|
||||
- PROXY protocol v1 parsing (valid/invalid formats)
|
||||
- Header generation
|
||||
- Trusted IP validation
|
||||
- Connection record updates
|
||||
|
||||
#### Integration Tests
|
||||
- Single proxy with PROXY protocol
|
||||
- Proxy chain with PROXY protocol
|
||||
- Security: reject from untrusted IPs
|
||||
- Performance: minimal overhead
|
||||
- Compatibility: works with TLS passthrough
|
||||
|
||||
#### Test Scenarios
|
||||
1. **Connection limit test**: Verify inner proxy sees real client IPs
|
||||
2. **Security test**: Ensure PROXY protocol rejected from untrusted sources
|
||||
3. **Compatibility test**: Verify no impact on non-PROXY connections
|
||||
4. **Performance test**: Measure overhead of PROXY protocol parsing
|
||||
|
||||
### 7. Security Considerations
|
||||
|
||||
1. **IP Spoofing Prevention**
|
||||
- Only accept PROXY protocol from explicitly trusted IPs
|
||||
- Validate all header fields
|
||||
- Reject malformed headers immediately
|
||||
|
||||
2. **Resource Protection**
|
||||
- Limit PROXY header size (107 bytes for v1)
|
||||
- Timeout for incomplete headers
|
||||
- Rate limit connection attempts
|
||||
|
||||
3. **Logging**
|
||||
- Log all PROXY protocol acceptance/rejection
|
||||
- Include real client IP in all connection logs
|
||||
|
||||
### 8. Rollout Strategy
|
||||
|
||||
1. **Phase 1**: Deploy parser and acceptance (backward compatible)
|
||||
2. **Phase 2**: Enable between controlled proxy pairs
|
||||
3. **Phase 3**: Monitor for issues and performance impact
|
||||
4. **Phase 4**: Expand to all proxy chains
|
||||
|
||||
### 9. Success Metrics
|
||||
|
||||
- Inner proxy connection distribution matches outer proxy
|
||||
- No more connection limit rejections in proxy chains
|
||||
- Accurate client IP logging throughout the chain
|
||||
- No performance degradation (<1ms added latency)
|
||||
|
||||
### 10. Future Enhancements
|
||||
|
||||
- PROXY protocol v2 (binary format) support
|
||||
- TLV extensions for additional metadata
|
||||
- AWS VPC endpoint ID support
|
||||
- Custom metadata fields
|
||||
|
||||
## WrappedSocket Class Design
|
||||
|
||||
### Overview
|
||||
A WrappedSocket class has been evaluated and recommended to provide cleaner PROXY protocol integration and better socket management architecture.
|
||||
|
||||
### Rationale for WrappedSocket
|
||||
|
||||
#### Current Challenges
|
||||
- Sockets handled directly as `net.Socket` instances throughout codebase
|
||||
- Metadata tracked separately in `IConnectionRecord` objects
|
||||
- Socket augmentation via TypeScript module augmentation for TLS properties
|
||||
- PROXY protocol would require modifying socket handling in multiple places
|
||||
|
||||
#### Benefits
|
||||
1. **Clean PROXY Protocol Integration** - Parse and store real client IP/port without modifying existing socket handling
|
||||
2. **Better Encapsulation** - Bundle socket + metadata + behavior together
|
||||
3. **Type Safety** - No more module augmentation needed
|
||||
4. **Future Extensibility** - Easy to add compression, metrics, etc.
|
||||
5. **Simplified Testing** - Easier to mock and test socket behavior
|
||||
|
||||
### Implementation Strategy
|
||||
|
||||
#### Phase 1: Minimal ProxyProtocolSocket (Immediate)
|
||||
Create a minimal wrapper for PROXY protocol support:
|
||||
|
||||
```typescript
|
||||
class ProxyProtocolSocket {
|
||||
constructor(
|
||||
public socket: net.Socket,
|
||||
public realClientIP?: string,
|
||||
public realClientPort?: number
|
||||
) {}
|
||||
|
||||
get remoteAddress(): string {
|
||||
return this.realClientIP || this.socket.remoteAddress || '';
|
||||
}
|
||||
|
||||
get remotePort(): number {
|
||||
return this.realClientPort || this.socket.remotePort || 0;
|
||||
}
|
||||
|
||||
get isFromTrustedProxy(): boolean {
|
||||
return !!this.realClientIP;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Integration points:
|
||||
- Use in `RouteConnectionHandler` when receiving from trusted proxy IPs
|
||||
- Update `ConnectionManager` to accept wrapped sockets
|
||||
- Modify security checks to use `socket.remoteAddress` getter
|
||||
|
||||
#### Phase 2: Connection-Aware WrappedSocket (Alternative Design)
|
||||
A more comprehensive design that manages both sides of a connection:
|
||||
|
||||
```typescript
|
||||
// Option A: Single Socket Wrapper (simpler)
|
||||
class WrappedSocket extends EventEmitter {
|
||||
private socket: net.Socket;
|
||||
private connectionId: string;
|
||||
private metadata: ISocketMetadata;
|
||||
|
||||
constructor(socket: net.Socket, metadata?: Partial<ISocketMetadata>) {
|
||||
super();
|
||||
this.socket = socket;
|
||||
this.connectionId = this.generateId();
|
||||
this.metadata = { ...defaultMetadata, ...metadata };
|
||||
this.setupHandlers();
|
||||
}
|
||||
|
||||
// ... single socket management
|
||||
}
|
||||
|
||||
// Option B: Connection Pair Wrapper (comprehensive)
|
||||
class WrappedConnection extends EventEmitter {
|
||||
private connectionId: string;
|
||||
private incoming: WrappedSocket;
|
||||
private outgoing?: WrappedSocket;
|
||||
private forwardingActive: boolean = false;
|
||||
|
||||
constructor(incomingSocket: net.Socket) {
|
||||
super();
|
||||
this.connectionId = this.generateId();
|
||||
this.incoming = new WrappedSocket(incomingSocket);
|
||||
}
|
||||
|
||||
// Connect to backend and set up forwarding
|
||||
async connectToBackend(target: ITarget): Promise<void> {
|
||||
const outgoingSocket = await this.createOutgoingConnection(target);
|
||||
this.outgoing = new WrappedSocket(outgoingSocket);
|
||||
await this.setupBidirectionalForwarding();
|
||||
}
|
||||
|
||||
// Built-in forwarding logic from socket-utils
|
||||
private async setupBidirectionalForwarding(): Promise<void> {
|
||||
if (!this.outgoing) throw new Error('No outgoing socket');
|
||||
|
||||
// Handle data forwarding with backpressure
|
||||
this.incoming.on('data', (chunk) => {
|
||||
this.outgoing!.write(chunk, () => {
|
||||
// Handle backpressure
|
||||
});
|
||||
});
|
||||
|
||||
this.outgoing.on('data', (chunk) => {
|
||||
this.incoming.write(chunk, () => {
|
||||
// Handle backpressure
|
||||
});
|
||||
});
|
||||
|
||||
// Handle connection lifecycle
|
||||
const cleanup = (reason: string) => {
|
||||
this.forwardingActive = false;
|
||||
this.incoming.destroy();
|
||||
this.outgoing?.destroy();
|
||||
this.emit('closed', reason);
|
||||
};
|
||||
|
||||
this.incoming.once('close', () => cleanup('incoming_closed'));
|
||||
this.outgoing.once('close', () => cleanup('outgoing_closed'));
|
||||
|
||||
this.forwardingActive = true;
|
||||
}
|
||||
|
||||
// PROXY protocol support
|
||||
async handleProxyProtocol(trustedProxies: string[]): Promise<boolean> {
|
||||
if (trustedProxies.includes(this.incoming.socket.remoteAddress)) {
|
||||
const parsed = await this.incoming.parseProxyProtocol();
|
||||
if (parsed && this.outgoing) {
|
||||
// Forward PROXY protocol to backend if configured
|
||||
await this.outgoing.sendProxyProtocol(this.incoming.realClientIP);
|
||||
}
|
||||
return parsed;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Consolidated metrics
|
||||
getMetrics(): IConnectionMetrics {
|
||||
return {
|
||||
connectionId: this.connectionId,
|
||||
duration: Date.now() - this.startTime,
|
||||
incoming: this.incoming.getMetrics(),
|
||||
outgoing: this.outgoing?.getMetrics(),
|
||||
totalBytes: this.getTotalBytes(),
|
||||
state: this.getConnectionState()
|
||||
};
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Phase 3: Full Migration (Long-term)
|
||||
- Replace all `net.Socket` usage with `WrappedSocket`
|
||||
- Remove socket augmentation from `socket-augmentation.ts`
|
||||
- Update all socket utilities to work with wrapped sockets
|
||||
- Standardize socket handling across all components
|
||||
|
||||
### Integration with PROXY Protocol
|
||||
|
||||
The WrappedSocket class integrates seamlessly with PROXY protocol:
|
||||
|
||||
1. **Connection Acceptance**:
|
||||
```typescript
|
||||
const wrappedSocket = new ProxyProtocolSocket(socket);
|
||||
if (this.isFromTrustedProxy(socket.remoteAddress)) {
|
||||
await wrappedSocket.parseProxyProtocol(this.settings.proxyIPs);
|
||||
}
|
||||
```
|
||||
|
||||
2. **Security Checks**:
|
||||
```typescript
|
||||
// Automatically uses real client IP if available
|
||||
const clientIP = wrappedSocket.remoteAddress;
|
||||
if (!this.securityManager.isIPAllowed(clientIP)) {
|
||||
wrappedSocket.destroy();
|
||||
}
|
||||
```
|
||||
|
||||
3. **Connection Records**:
|
||||
```typescript
|
||||
const record = this.connectionManager.createConnection(wrappedSocket);
|
||||
// ConnectionManager uses wrappedSocket.remoteAddress transparently
|
||||
```
|
||||
|
||||
### Option B Example: How It Would Replace Current Architecture
|
||||
|
||||
Instead of current approach with separate components:
|
||||
```typescript
|
||||
// Current: Multiple separate components
|
||||
const record = connectionManager.createConnection(socket);
|
||||
const { cleanupClient, cleanupServer } = createIndependentSocketHandlers(
|
||||
clientSocket, serverSocket, onBothClosed
|
||||
);
|
||||
setupBidirectionalForwarding(clientSocket, serverSocket, handlers);
|
||||
```
|
||||
|
||||
Option B would consolidate everything:
|
||||
```typescript
|
||||
// Option B: Single connection object
|
||||
const connection = new WrappedConnection(incomingSocket);
|
||||
await connection.handleProxyProtocol(trustedProxies);
|
||||
await connection.connectToBackend({ host: 'server', port: 443 });
|
||||
// Everything is handled internally - forwarding, cleanup, metrics
|
||||
|
||||
connection.on('closed', (reason) => {
|
||||
logger.log('Connection closed', connection.getMetrics());
|
||||
});
|
||||
```
|
||||
|
||||
This would replace:
|
||||
- `IConnectionRecord` - absorbed into WrappedConnection
|
||||
- `socket-utils.ts` functions - methods on WrappedConnection
|
||||
- Separate incoming/outgoing tracking - unified in one object
|
||||
- Manual cleanup coordination - automatic lifecycle management
|
||||
|
||||
Additional benefits with Option B:
|
||||
- **Connection Pooling Integration**: WrappedConnection could integrate with EnhancedConnectionPool for backend connections
|
||||
- **Unified Metrics**: Single point for all connection statistics
|
||||
- **Protocol Negotiation**: Handle PROXY, TLS, HTTP/2 upgrade in one place
|
||||
- **Resource Management**: Automatic cleanup with LifecycleComponent pattern
|
||||
|
||||
### Migration Path
|
||||
|
||||
1. **Week 1-2**: Implement minimal ProxyProtocolSocket (Option A)
|
||||
2. **Week 3-4**: Test with PROXY protocol implementation
|
||||
3. **Month 2**: Prototype WrappedConnection (Option B) if beneficial
|
||||
4. **Month 3-6**: Gradual migration if Option B proves valuable
|
||||
5. **Future**: Complete adoption in next major version
|
||||
|
||||
### Success Criteria
|
||||
|
||||
- PROXY protocol works transparently with wrapped sockets
|
||||
- No performance regression (<0.1% overhead)
|
||||
- Simplified code in connection handlers
|
||||
- Better TypeScript type safety
|
||||
- Easier to add new socket-level features
|
@ -1,341 +0,0 @@
|
||||
# SmartProxy Routing Architecture Unification Plan
|
||||
|
||||
## Overview
|
||||
This document analyzes the current state of routing in SmartProxy, identifies redundancies and inconsistencies, and proposes a unified architecture.
|
||||
|
||||
## Current State Analysis
|
||||
|
||||
### 1. Multiple Route Manager Implementations
|
||||
|
||||
#### 1.1 Core SharedRouteManager (`ts/core/utils/route-manager.ts`)
|
||||
- **Purpose**: Designed as a shared component for SmartProxy and NetworkProxy
|
||||
- **Features**:
|
||||
- Port mapping and expansion (e.g., `[80, 443]` → individual routes)
|
||||
- Comprehensive route matching (domain, path, IP, headers, TLS)
|
||||
- Route validation and conflict detection
|
||||
- Event emitter for route changes
|
||||
- Detailed logging support
|
||||
- **Status**: Well-designed but underutilized
|
||||
|
||||
#### 1.2 SmartProxy RouteManager (`ts/proxies/smart-proxy/route-manager.ts`)
|
||||
- **Purpose**: SmartProxy-specific route management
|
||||
- **Issues**:
|
||||
- 95% duplicate code from SharedRouteManager
|
||||
- Only difference is using `ISmartProxyOptions` instead of generic interface
|
||||
- Contains deprecated security methods
|
||||
- Unnecessary code duplication
|
||||
- **Status**: Should be removed in favor of SharedRouteManager
|
||||
|
||||
#### 1.3 HttpProxy Route Management (`ts/proxies/http-proxy/`)
|
||||
- **Purpose**: HTTP-specific routing
|
||||
- **Implementation**: Minimal, inline route matching
|
||||
- **Status**: Could benefit from SharedRouteManager
|
||||
|
||||
### 2. Multiple Router Implementations
|
||||
|
||||
#### 2.1 ProxyRouter (`ts/routing/router/proxy-router.ts`)
|
||||
- **Purpose**: Legacy compatibility with `IReverseProxyConfig`
|
||||
- **Features**: Domain-based routing with path patterns
|
||||
- **Used by**: HttpProxy for backward compatibility
|
||||
|
||||
#### 2.2 RouteRouter (`ts/routing/router/route-router.ts`)
|
||||
- **Purpose**: Modern routing with `IRouteConfig`
|
||||
- **Features**: Nearly identical to ProxyRouter
|
||||
- **Issues**: Code duplication with ProxyRouter
|
||||
|
||||
### 3. Scattered Route Utilities
|
||||
|
||||
#### 3.1 Core route-utils (`ts/core/utils/route-utils.ts`)
|
||||
- **Purpose**: Shared matching functions
|
||||
- **Features**: Domain, path, IP, CIDR matching
|
||||
- **Status**: Well-implemented, should be the single source
|
||||
|
||||
#### 3.2 SmartProxy route-utils (`ts/proxies/smart-proxy/utils/route-utils.ts`)
|
||||
- **Purpose**: Route configuration utilities
|
||||
- **Features**: Different scope - config merging, not pattern matching
|
||||
- **Status**: Keep separate as it serves different purpose
|
||||
|
||||
### 4. Other Route-Related Files
|
||||
- `route-patterns.ts`: Constants for route patterns
|
||||
- `route-validators.ts`: Route configuration validation
|
||||
- `route-helpers.ts`: Additional utilities
|
||||
- `route-connection-handler.ts`: Connection routing logic
|
||||
|
||||
## Problems Identified
|
||||
|
||||
### 1. Code Duplication
|
||||
- **SharedRouteManager vs SmartProxy RouteManager**: ~1000 lines of duplicate code
|
||||
- **ProxyRouter vs RouteRouter**: ~500 lines of duplicate code
|
||||
- **Matching logic**: Implemented in 4+ different places
|
||||
|
||||
### 2. Inconsistent Implementations
|
||||
```typescript
|
||||
// Example: Domain matching appears in multiple places
|
||||
// 1. In route-utils.ts
|
||||
export function matchDomain(pattern: string, hostname: string): boolean
|
||||
|
||||
// 2. In SmartProxy RouteManager
|
||||
private matchDomain(domain: string, hostname: string): boolean
|
||||
|
||||
// 3. In ProxyRouter
|
||||
private matchesHostname(configName: string, hostname: string): boolean
|
||||
|
||||
// 4. In RouteRouter
|
||||
private matchDomain(pattern: string, hostname: string): boolean
|
||||
```
|
||||
|
||||
### 3. Unclear Separation of Concerns
|
||||
- Route Managers handle both storage AND matching
|
||||
- Routers also handle storage AND matching
|
||||
- No clear boundaries between layers
|
||||
|
||||
### 4. Maintenance Burden
|
||||
- Bug fixes need to be applied in multiple places
|
||||
- New features must be implemented multiple times
|
||||
- Testing effort multiplied
|
||||
|
||||
## Proposed Unified Architecture
|
||||
|
||||
### Layer 1: Core Routing Components
|
||||
```
|
||||
ts/core/routing/
|
||||
├── types.ts # All route-related types
|
||||
├── utils.ts # All matching logic (consolidated)
|
||||
├── route-store.ts # Route storage and indexing
|
||||
└── route-matcher.ts # Route matching engine
|
||||
```
|
||||
|
||||
### Layer 2: Route Management
|
||||
```
|
||||
ts/core/routing/
|
||||
└── route-manager.ts # Single RouteManager for all proxies
|
||||
- Uses RouteStore for storage
|
||||
- Uses RouteMatcher for matching
|
||||
- Provides high-level API
|
||||
```
|
||||
|
||||
### Layer 3: HTTP Routing
|
||||
```
|
||||
ts/routing/
|
||||
└── http-router.ts # Single HTTP router implementation
|
||||
- Uses RouteManager for route lookup
|
||||
- Handles HTTP-specific concerns
|
||||
- Legacy adapter built-in
|
||||
```
|
||||
|
||||
### Layer 4: Proxy Integration
|
||||
```
|
||||
ts/proxies/
|
||||
├── smart-proxy/
|
||||
│ └── (uses core RouteManager directly)
|
||||
├── http-proxy/
|
||||
│ └── (uses core RouteManager + HttpRouter)
|
||||
└── network-proxy/
|
||||
└── (uses core RouteManager directly)
|
||||
```
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
### Phase 1: Consolidate Matching Logic (Week 1)
|
||||
1. **Audit all matching implementations**
|
||||
- Document differences in behavior
|
||||
- Identify the most comprehensive implementation
|
||||
- Create test suite covering all edge cases
|
||||
|
||||
2. **Create unified matching module**
|
||||
```typescript
|
||||
// ts/core/routing/matchers.ts
|
||||
export class DomainMatcher {
|
||||
static match(pattern: string, hostname: string): boolean
|
||||
}
|
||||
|
||||
export class PathMatcher {
|
||||
static match(pattern: string, path: string): MatchResult
|
||||
}
|
||||
|
||||
export class IpMatcher {
|
||||
static match(pattern: string, ip: string): boolean
|
||||
static matchCidr(cidr: string, ip: string): boolean
|
||||
}
|
||||
```
|
||||
|
||||
3. **Update all components to use unified matchers**
|
||||
- Replace local implementations
|
||||
- Ensure backward compatibility
|
||||
- Run comprehensive tests
|
||||
|
||||
### Phase 2: Unify Route Managers (Week 2)
|
||||
1. **Enhance SharedRouteManager**
|
||||
- Add any missing features from SmartProxy RouteManager
|
||||
- Make it truly generic (no proxy-specific dependencies)
|
||||
- Add adapter pattern for different options types
|
||||
|
||||
2. **Migrate SmartProxy to use SharedRouteManager**
|
||||
```typescript
|
||||
// Before
|
||||
this.routeManager = new RouteManager(this.settings);
|
||||
|
||||
// After
|
||||
this.routeManager = new SharedRouteManager({
|
||||
logger: this.settings.logger,
|
||||
enableDetailedLogging: this.settings.enableDetailedLogging
|
||||
});
|
||||
```
|
||||
|
||||
3. **Remove duplicate RouteManager**
|
||||
- Delete `ts/proxies/smart-proxy/route-manager.ts`
|
||||
- Update all imports
|
||||
- Verify all tests pass
|
||||
|
||||
### Phase 3: Consolidate Routers (Week 3)
|
||||
1. **Create unified HttpRouter**
|
||||
```typescript
|
||||
export class HttpRouter {
|
||||
constructor(private routeManager: SharedRouteManager) {}
|
||||
|
||||
// Modern interface
|
||||
route(req: IncomingMessage): RouteResult
|
||||
|
||||
// Legacy adapter
|
||||
routeLegacy(config: IReverseProxyConfig): RouteResult
|
||||
}
|
||||
```
|
||||
|
||||
2. **Migrate HttpProxy**
|
||||
- Replace both ProxyRouter and RouteRouter
|
||||
- Use single HttpRouter with appropriate adapter
|
||||
- Maintain backward compatibility
|
||||
|
||||
3. **Clean up legacy code**
|
||||
- Mark old interfaces as deprecated
|
||||
- Add migration guides
|
||||
- Plan removal in next major version
|
||||
|
||||
### Phase 4: Architecture Cleanup (Week 4)
|
||||
1. **Reorganize file structure**
|
||||
```
|
||||
ts/core/
|
||||
├── routing/
|
||||
│ ├── index.ts
|
||||
│ ├── types.ts
|
||||
│ ├── matchers/
|
||||
│ │ ├── domain.ts
|
||||
│ │ ├── path.ts
|
||||
│ │ ├── ip.ts
|
||||
│ │ └── index.ts
|
||||
│ ├── route-store.ts
|
||||
│ ├── route-matcher.ts
|
||||
│ └── route-manager.ts
|
||||
└── utils/
|
||||
└── (remove route-specific utils)
|
||||
```
|
||||
|
||||
2. **Update documentation**
|
||||
- Architecture diagrams
|
||||
- Migration guides
|
||||
- API documentation
|
||||
|
||||
3. **Performance optimization**
|
||||
- Add caching where beneficial
|
||||
- Optimize hot paths
|
||||
- Benchmark before/after
|
||||
|
||||
## Migration Strategy
|
||||
|
||||
### For SmartProxy RouteManager Users
|
||||
```typescript
|
||||
// Old way
|
||||
import { RouteManager } from './route-manager.js';
|
||||
const manager = new RouteManager(options);
|
||||
|
||||
// New way
|
||||
import { SharedRouteManager as RouteManager } from '../core/utils/route-manager.js';
|
||||
const manager = new RouteManager({
|
||||
logger: options.logger,
|
||||
enableDetailedLogging: options.enableDetailedLogging
|
||||
});
|
||||
```
|
||||
|
||||
### For Router Users
|
||||
```typescript
|
||||
// Old way
|
||||
const proxyRouter = new ProxyRouter();
|
||||
const routeRouter = new RouteRouter();
|
||||
|
||||
// New way
|
||||
const router = new HttpRouter(routeManager);
|
||||
// Automatically handles both modern and legacy configs
|
||||
```
|
||||
|
||||
## Success Metrics
|
||||
|
||||
1. **Code Reduction**
|
||||
- Target: Remove ~1,500 lines of duplicate code
|
||||
- Measure: Lines of code before/after
|
||||
|
||||
2. **Performance**
|
||||
- Target: No regression in routing performance
|
||||
- Measure: Benchmark route matching operations
|
||||
|
||||
3. **Maintainability**
|
||||
- Target: Single implementation for each concept
|
||||
- Measure: Time to implement new features
|
||||
|
||||
4. **Test Coverage**
|
||||
- Target: 100% coverage of routing logic
|
||||
- Measure: Coverage reports
|
||||
|
||||
## Risks and Mitigations
|
||||
|
||||
### Risk 1: Breaking Changes
|
||||
- **Mitigation**: Extensive adapter patterns and backward compatibility layers
|
||||
- **Testing**: Run all existing tests plus new integration tests
|
||||
|
||||
### Risk 2: Performance Regression
|
||||
- **Mitigation**: Benchmark critical paths before changes
|
||||
- **Testing**: Load testing with production-like scenarios
|
||||
|
||||
### Risk 3: Hidden Dependencies
|
||||
- **Mitigation**: Careful code analysis and dependency mapping
|
||||
- **Testing**: Integration tests across all proxy types
|
||||
|
||||
## Long-term Vision
|
||||
|
||||
### Future Enhancements
|
||||
1. **Route Caching**: LRU cache for frequently accessed routes
|
||||
2. **Route Indexing**: Trie-based indexing for faster domain matching
|
||||
3. **Route Priorities**: Explicit priority system instead of specificity
|
||||
4. **Dynamic Routes**: Support for runtime route modifications
|
||||
5. **Route Templates**: Reusable route configurations
|
||||
|
||||
### API Evolution
|
||||
```typescript
|
||||
// Future unified routing API
|
||||
const routingEngine = new RoutingEngine({
|
||||
stores: [fileStore, dbStore, dynamicStore],
|
||||
matchers: [domainMatcher, pathMatcher, customMatcher],
|
||||
cache: new LRUCache({ max: 1000 }),
|
||||
indexes: {
|
||||
domain: new TrieIndex(),
|
||||
path: new RadixTree()
|
||||
}
|
||||
});
|
||||
|
||||
// Simple, powerful API
|
||||
const route = await routingEngine.findRoute({
|
||||
domain: 'example.com',
|
||||
path: '/api/v1/users',
|
||||
ip: '192.168.1.1',
|
||||
headers: { 'x-custom': 'value' }
|
||||
});
|
||||
```
|
||||
|
||||
## Conclusion
|
||||
|
||||
The current routing architecture has significant duplication and inconsistencies. By following this unification plan, we can:
|
||||
1. Reduce code by ~30%
|
||||
2. Improve maintainability
|
||||
3. Ensure consistent behavior
|
||||
4. Enable future enhancements
|
||||
|
||||
The phased approach minimizes risk while delivering incremental value. Each phase is independently valuable and can be deployed separately.
|
93
test/test.cleanup-queue-bug.node.ts
Normal file
93
test/test.cleanup-queue-bug.node.ts
Normal file
@ -0,0 +1,93 @@
|
||||
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||
import { SmartProxy } from '../ts/index.js';
|
||||
|
||||
tap.test('cleanup queue bug - verify queue processing handles more than batch size', async (tools) => {
|
||||
console.log('\n=== Cleanup Queue Bug Test ===');
|
||||
console.log('Purpose: Verify that the cleanup queue correctly processes all connections');
|
||||
console.log('even when there are more than the batch size (100)');
|
||||
|
||||
// Create proxy
|
||||
const proxy = new SmartProxy({
|
||||
routes: [{
|
||||
name: 'test-route',
|
||||
match: { ports: 8588 },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: 'localhost', port: 9996 }
|
||||
}
|
||||
}],
|
||||
enableDetailedLogging: false,
|
||||
});
|
||||
|
||||
await proxy.start();
|
||||
console.log('✓ Proxy started on port 8588');
|
||||
|
||||
// Access connection manager
|
||||
const cm = (proxy as any).connectionManager;
|
||||
|
||||
// Create mock connection records
|
||||
console.log('\n--- Creating 150 mock connections ---');
|
||||
const mockConnections: any[] = [];
|
||||
|
||||
for (let i = 0; i < 150; i++) {
|
||||
const mockRecord = {
|
||||
id: `mock-${i}`,
|
||||
incoming: { destroyed: true, remoteAddress: '127.0.0.1' },
|
||||
outgoing: { destroyed: true },
|
||||
connectionClosed: false,
|
||||
incomingStartTime: Date.now(),
|
||||
lastActivity: Date.now(),
|
||||
remoteIP: '127.0.0.1',
|
||||
remotePort: 10000 + i,
|
||||
localPort: 8588,
|
||||
bytesReceived: 100,
|
||||
bytesSent: 100,
|
||||
incomingTerminationReason: null,
|
||||
cleanupTimer: null
|
||||
};
|
||||
|
||||
// Add to connection records
|
||||
cm.connectionRecords.set(mockRecord.id, mockRecord);
|
||||
mockConnections.push(mockRecord);
|
||||
}
|
||||
|
||||
console.log(`Created ${cm.getConnectionCount()} mock connections`);
|
||||
expect(cm.getConnectionCount()).toEqual(150);
|
||||
|
||||
// Queue all connections for cleanup
|
||||
console.log('\n--- Queueing all connections for cleanup ---');
|
||||
for (const conn of mockConnections) {
|
||||
cm.initiateCleanupOnce(conn, 'test_cleanup');
|
||||
}
|
||||
|
||||
console.log(`Cleanup queue size: ${cm.cleanupQueue.size}`);
|
||||
expect(cm.cleanupQueue.size).toEqual(150);
|
||||
|
||||
// Wait for cleanup to complete
|
||||
console.log('\n--- Waiting for cleanup batches to process ---');
|
||||
|
||||
// The first batch should process immediately (100 connections)
|
||||
// Then additional batches should be scheduled
|
||||
await new Promise(resolve => setTimeout(resolve, 500));
|
||||
|
||||
// Check final state
|
||||
const finalCount = cm.getConnectionCount();
|
||||
console.log(`\nFinal connection count: ${finalCount}`);
|
||||
console.log(`Cleanup queue size: ${cm.cleanupQueue.size}`);
|
||||
|
||||
// All connections should be cleaned up
|
||||
expect(finalCount).toEqual(0);
|
||||
expect(cm.cleanupQueue.size).toEqual(0);
|
||||
|
||||
// Verify termination stats
|
||||
const stats = cm.getTerminationStats();
|
||||
console.log('Termination stats:', stats);
|
||||
expect(stats.incoming.test_cleanup).toEqual(150);
|
||||
|
||||
// Cleanup
|
||||
await proxy.stop();
|
||||
|
||||
console.log('\n✓ Test complete: Cleanup queue now correctly processes all connections');
|
||||
});
|
||||
|
||||
tap.start();
|
250
test/test.keepalive-support.node.ts
Normal file
250
test/test.keepalive-support.node.ts
Normal file
@ -0,0 +1,250 @@
|
||||
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||
import * as net from 'net';
|
||||
import { SmartProxy } from '../ts/index.js';
|
||||
import * as plugins from '../ts/plugins.js';
|
||||
|
||||
tap.test('keepalive support - verify keepalive connections are properly handled', async (tools) => {
|
||||
console.log('\n=== KeepAlive Support Test ===');
|
||||
console.log('Purpose: Verify that keepalive connections are not prematurely cleaned up');
|
||||
|
||||
// Create a simple echo backend
|
||||
const echoBackend = net.createServer((socket) => {
|
||||
socket.on('data', (data) => {
|
||||
// Echo back received data
|
||||
try {
|
||||
socket.write(data);
|
||||
} catch (err) {
|
||||
// Ignore write errors during shutdown
|
||||
}
|
||||
});
|
||||
|
||||
socket.on('error', (err) => {
|
||||
// Ignore errors from backend sockets
|
||||
console.log(`Backend socket error (expected during cleanup): ${err.code}`);
|
||||
});
|
||||
});
|
||||
|
||||
await new Promise<void>((resolve) => {
|
||||
echoBackend.listen(9998, () => {
|
||||
console.log('✓ Echo backend started on port 9998');
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
|
||||
// Test 1: Standard keepalive treatment
|
||||
console.log('\n--- Test 1: Standard KeepAlive Treatment ---');
|
||||
|
||||
const proxy1 = new SmartProxy({
|
||||
routes: [{
|
||||
name: 'keepalive-route',
|
||||
match: { ports: 8590 },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: 'localhost', port: 9998 }
|
||||
}
|
||||
}],
|
||||
keepAlive: true,
|
||||
keepAliveTreatment: 'standard',
|
||||
inactivityTimeout: 5000, // 5 seconds for faster testing
|
||||
enableDetailedLogging: false,
|
||||
});
|
||||
|
||||
await proxy1.start();
|
||||
console.log('✓ Proxy with standard keepalive started on port 8590');
|
||||
|
||||
// Create a keepalive connection
|
||||
const client1 = net.connect(8590, 'localhost');
|
||||
|
||||
// Add error handler to prevent unhandled errors
|
||||
client1.on('error', (err) => {
|
||||
console.log(`Client1 error (expected during cleanup): ${err.code}`);
|
||||
});
|
||||
|
||||
await new Promise<void>((resolve) => {
|
||||
client1.on('connect', () => {
|
||||
console.log('Client connected');
|
||||
client1.setKeepAlive(true, 1000);
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
|
||||
// Send initial data
|
||||
client1.write('Hello keepalive\n');
|
||||
|
||||
// Wait for echo
|
||||
await new Promise<void>((resolve) => {
|
||||
client1.once('data', (data) => {
|
||||
console.log(`Received echo: ${data.toString().trim()}`);
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
|
||||
// Check connection is marked as keepalive
|
||||
const cm1 = (proxy1 as any).connectionManager;
|
||||
const connections1 = cm1.getConnections();
|
||||
let keepAliveCount = 0;
|
||||
|
||||
for (const [id, record] of connections1) {
|
||||
if (record.hasKeepAlive) {
|
||||
keepAliveCount++;
|
||||
console.log(`KeepAlive connection ${id}: hasKeepAlive=${record.hasKeepAlive}`);
|
||||
}
|
||||
}
|
||||
|
||||
expect(keepAliveCount).toEqual(1);
|
||||
|
||||
// Wait to ensure it's not cleaned up prematurely
|
||||
await plugins.smartdelay.delayFor(6000);
|
||||
|
||||
const afterWaitCount1 = cm1.getConnectionCount();
|
||||
console.log(`Connections after 6s wait: ${afterWaitCount1}`);
|
||||
expect(afterWaitCount1).toEqual(1); // Should still be connected
|
||||
|
||||
// Send more data to keep it alive
|
||||
client1.write('Still alive\n');
|
||||
|
||||
// Clean up test 1
|
||||
client1.destroy();
|
||||
await proxy1.stop();
|
||||
await plugins.smartdelay.delayFor(500); // Wait for port to be released
|
||||
|
||||
// Test 2: Extended keepalive treatment
|
||||
console.log('\n--- Test 2: Extended KeepAlive Treatment ---');
|
||||
|
||||
const proxy2 = new SmartProxy({
|
||||
routes: [{
|
||||
name: 'keepalive-extended',
|
||||
match: { ports: 8591 },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: 'localhost', port: 9998 }
|
||||
}
|
||||
}],
|
||||
keepAlive: true,
|
||||
keepAliveTreatment: 'extended',
|
||||
keepAliveInactivityMultiplier: 6,
|
||||
inactivityTimeout: 2000, // 2 seconds base, 12 seconds with multiplier
|
||||
enableDetailedLogging: false,
|
||||
});
|
||||
|
||||
await proxy2.start();
|
||||
console.log('✓ Proxy with extended keepalive started on port 8591');
|
||||
|
||||
const client2 = net.connect(8591, 'localhost');
|
||||
|
||||
// Add error handler to prevent unhandled errors
|
||||
client2.on('error', (err) => {
|
||||
console.log(`Client2 error (expected during cleanup): ${err.code}`);
|
||||
});
|
||||
|
||||
await new Promise<void>((resolve) => {
|
||||
client2.on('connect', () => {
|
||||
console.log('Client connected with extended timeout');
|
||||
client2.setKeepAlive(true, 1000);
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
|
||||
// Send initial data
|
||||
client2.write('Extended keepalive\n');
|
||||
|
||||
// Check connection
|
||||
const cm2 = (proxy2 as any).connectionManager;
|
||||
await plugins.smartdelay.delayFor(1000);
|
||||
|
||||
const connections2 = cm2.getConnections();
|
||||
for (const [id, record] of connections2) {
|
||||
console.log(`Extended connection ${id}: hasKeepAlive=${record.hasKeepAlive}, treatment=extended`);
|
||||
}
|
||||
|
||||
// Wait 3 seconds (would timeout with standard treatment)
|
||||
await plugins.smartdelay.delayFor(3000);
|
||||
|
||||
const midWaitCount = cm2.getConnectionCount();
|
||||
console.log(`Connections after 3s (base timeout exceeded): ${midWaitCount}`);
|
||||
expect(midWaitCount).toEqual(1); // Should still be connected due to extended treatment
|
||||
|
||||
// Clean up test 2
|
||||
client2.destroy();
|
||||
await proxy2.stop();
|
||||
await plugins.smartdelay.delayFor(500); // Wait for port to be released
|
||||
|
||||
// Test 3: Immortal keepalive treatment
|
||||
console.log('\n--- Test 3: Immortal KeepAlive Treatment ---');
|
||||
|
||||
const proxy3 = new SmartProxy({
|
||||
routes: [{
|
||||
name: 'keepalive-immortal',
|
||||
match: { ports: 8592 },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: 'localhost', port: 9998 }
|
||||
}
|
||||
}],
|
||||
keepAlive: true,
|
||||
keepAliveTreatment: 'immortal',
|
||||
inactivityTimeout: 1000, // 1 second - should be ignored for immortal
|
||||
enableDetailedLogging: false,
|
||||
});
|
||||
|
||||
await proxy3.start();
|
||||
console.log('✓ Proxy with immortal keepalive started on port 8592');
|
||||
|
||||
const client3 = net.connect(8592, 'localhost');
|
||||
|
||||
// Add error handler to prevent unhandled errors
|
||||
client3.on('error', (err) => {
|
||||
console.log(`Client3 error (expected during cleanup): ${err.code}`);
|
||||
});
|
||||
|
||||
await new Promise<void>((resolve) => {
|
||||
client3.on('connect', () => {
|
||||
console.log('Client connected with immortal treatment');
|
||||
client3.setKeepAlive(true, 1000);
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
|
||||
// Send initial data
|
||||
client3.write('Immortal connection\n');
|
||||
|
||||
// Wait well beyond normal timeout
|
||||
await plugins.smartdelay.delayFor(5000);
|
||||
|
||||
const cm3 = (proxy3 as any).connectionManager;
|
||||
const immortalCount = cm3.getConnectionCount();
|
||||
console.log(`Immortal connections after 5s inactivity: ${immortalCount}`);
|
||||
expect(immortalCount).toEqual(1); // Should never timeout
|
||||
|
||||
// Verify zombie detection doesn't affect immortal connections
|
||||
console.log('\n--- Verifying zombie detection respects keepalive ---');
|
||||
|
||||
// Manually trigger inactivity check
|
||||
cm3.performOptimizedInactivityCheck();
|
||||
|
||||
await plugins.smartdelay.delayFor(1000);
|
||||
|
||||
const afterCheckCount = cm3.getConnectionCount();
|
||||
console.log(`Connections after manual inactivity check: ${afterCheckCount}`);
|
||||
expect(afterCheckCount).toEqual(1); // Should still be alive
|
||||
|
||||
// Clean up
|
||||
client3.destroy();
|
||||
await proxy3.stop();
|
||||
|
||||
// Close backend and wait for it to fully close
|
||||
await new Promise<void>((resolve) => {
|
||||
echoBackend.close(() => {
|
||||
console.log('Echo backend closed');
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
|
||||
console.log('\n✓ All keepalive tests passed:');
|
||||
console.log(' - Standard treatment works correctly');
|
||||
console.log(' - Extended treatment applies multiplier');
|
||||
console.log(' - Immortal treatment never times out');
|
||||
console.log(' - Zombie detection respects keepalive settings');
|
||||
});
|
||||
|
||||
tap.start();
|
150
test/test.memory-leak-check.node.ts
Normal file
150
test/test.memory-leak-check.node.ts
Normal file
@ -0,0 +1,150 @@
|
||||
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||
import { SmartProxy, createHttpRoute } from '../ts/index.js';
|
||||
import * as http from 'http';
|
||||
|
||||
tap.test('should not have memory leaks in long-running operations', async (tools) => {
|
||||
// Get initial memory usage
|
||||
const getMemoryUsage = () => {
|
||||
if (global.gc) {
|
||||
global.gc();
|
||||
}
|
||||
const usage = process.memoryUsage();
|
||||
return {
|
||||
heapUsed: Math.round(usage.heapUsed / 1024 / 1024), // MB
|
||||
external: Math.round(usage.external / 1024 / 1024), // MB
|
||||
rss: Math.round(usage.rss / 1024 / 1024) // MB
|
||||
};
|
||||
};
|
||||
|
||||
// Create a target server
|
||||
const targetServer = http.createServer((req, res) => {
|
||||
res.writeHead(200, { 'Content-Type': 'text/plain' });
|
||||
res.end('OK');
|
||||
});
|
||||
await new Promise<void>((resolve) => targetServer.listen(3100, resolve));
|
||||
|
||||
// Create the proxy - use non-privileged port
|
||||
const routes = [
|
||||
createHttpRoute(['test1.local', 'test2.local', 'test3.local'], { host: 'localhost', port: 3100 }),
|
||||
];
|
||||
// Update route to use port 8080
|
||||
routes[0].match.ports = 8080;
|
||||
|
||||
const proxy = new SmartProxy({
|
||||
ports: [8080], // Use non-privileged port
|
||||
routes: routes
|
||||
});
|
||||
await proxy.start();
|
||||
|
||||
console.log('Starting memory leak test...');
|
||||
const initialMemory = getMemoryUsage();
|
||||
console.log('Initial memory:', initialMemory);
|
||||
|
||||
// Function to make requests
|
||||
const makeRequest = (domain: string): Promise<void> => {
|
||||
return new Promise((resolve, reject) => {
|
||||
const req = http.request({
|
||||
hostname: 'localhost',
|
||||
port: 8080,
|
||||
path: '/',
|
||||
method: 'GET',
|
||||
headers: {
|
||||
'Host': domain
|
||||
}
|
||||
}, (res) => {
|
||||
res.on('data', () => {});
|
||||
res.on('end', resolve);
|
||||
});
|
||||
req.on('error', reject);
|
||||
req.end();
|
||||
});
|
||||
};
|
||||
|
||||
// Test 1: Many requests to the same routes
|
||||
console.log('Test 1: Making 1000 requests to same routes...');
|
||||
for (let i = 0; i < 1000; i++) {
|
||||
await makeRequest(`test${(i % 3) + 1}.local`);
|
||||
if (i % 100 === 0) {
|
||||
console.log(` Progress: ${i}/1000`);
|
||||
}
|
||||
}
|
||||
|
||||
const afterSameRoutesMemory = getMemoryUsage();
|
||||
console.log('Memory after same routes:', afterSameRoutesMemory);
|
||||
|
||||
// Test 2: Many requests to different routes (tests routeContextCache)
|
||||
console.log('Test 2: Making 1000 requests to different routes...');
|
||||
for (let i = 0; i < 1000; i++) {
|
||||
// Create unique domain to test cache growth
|
||||
await makeRequest(`test${i}.local`);
|
||||
if (i % 100 === 0) {
|
||||
console.log(` Progress: ${i}/1000`);
|
||||
}
|
||||
}
|
||||
|
||||
const afterDifferentRoutesMemory = getMemoryUsage();
|
||||
console.log('Memory after different routes:', afterDifferentRoutesMemory);
|
||||
|
||||
// Test 3: Check metrics collector memory
|
||||
console.log('Test 3: Checking metrics collector...');
|
||||
const stats = proxy.getStats();
|
||||
console.log(`Active connections: ${stats.getActiveConnections()}`);
|
||||
console.log(`Total connections: ${stats.getTotalConnections()}`);
|
||||
console.log(`RPS: ${stats.getRequestsPerSecond()}`);
|
||||
|
||||
// Test 4: Many rapid connections (tests requestTimestamps array)
|
||||
console.log('Test 4: Making 10000 rapid requests...');
|
||||
const rapidRequests = [];
|
||||
for (let i = 0; i < 10000; i++) {
|
||||
rapidRequests.push(makeRequest('test1.local'));
|
||||
if (i % 1000 === 0) {
|
||||
// Wait a bit to let some complete
|
||||
await Promise.all(rapidRequests);
|
||||
rapidRequests.length = 0;
|
||||
console.log(` Progress: ${i}/10000`);
|
||||
}
|
||||
}
|
||||
await Promise.all(rapidRequests);
|
||||
|
||||
const afterRapidMemory = getMemoryUsage();
|
||||
console.log('Memory after rapid requests:', afterRapidMemory);
|
||||
|
||||
// Force garbage collection and check final memory
|
||||
await new Promise(resolve => setTimeout(resolve, 1000));
|
||||
const finalMemory = getMemoryUsage();
|
||||
console.log('Final memory:', finalMemory);
|
||||
|
||||
// Memory leak checks
|
||||
const memoryGrowth = finalMemory.heapUsed - initialMemory.heapUsed;
|
||||
console.log(`Total memory growth: ${memoryGrowth} MB`);
|
||||
|
||||
// Check for excessive memory growth
|
||||
// Allow some growth but not excessive (e.g., more than 50MB for this test)
|
||||
expect(memoryGrowth).toBeLessThan(50);
|
||||
|
||||
// Check specific potential leaks
|
||||
// 1. Route context cache should not grow unbounded
|
||||
const routeHandler = proxy.routeConnectionHandler as any;
|
||||
if (routeHandler.routeContextCache) {
|
||||
console.log(`Route context cache size: ${routeHandler.routeContextCache.size}`);
|
||||
// Should not have 1000 entries from different routes test
|
||||
expect(routeHandler.routeContextCache.size).toBeLessThan(100);
|
||||
}
|
||||
|
||||
// 2. Metrics collector should clean up old timestamps
|
||||
const metricsCollector = (proxy.getStats() as any);
|
||||
if (metricsCollector.requestTimestamps) {
|
||||
console.log(`Request timestamps array length: ${metricsCollector.requestTimestamps.length}`);
|
||||
// Should not exceed 10000 (the cleanup threshold)
|
||||
expect(metricsCollector.requestTimestamps.length).toBeLessThanOrEqual(10000);
|
||||
}
|
||||
|
||||
// Cleanup
|
||||
await proxy.stop();
|
||||
await new Promise<void>((resolve) => targetServer.close(resolve));
|
||||
|
||||
console.log('Memory leak test completed successfully');
|
||||
});
|
||||
|
||||
// Run with: node --expose-gc test.memory-leak-check.node.ts
|
||||
tap.start();
|
58
test/test.memory-leak-simple.ts
Normal file
58
test/test.memory-leak-simple.ts
Normal file
@ -0,0 +1,58 @@
|
||||
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||
import { SmartProxy, createHttpRoute } from '../ts/index.js';
|
||||
import * as http from 'http';
|
||||
|
||||
tap.test('memory leak fixes verification', async () => {
|
||||
// Test 1: MetricsCollector requestTimestamps cleanup
|
||||
console.log('\n=== Test 1: MetricsCollector requestTimestamps cleanup ===');
|
||||
const proxy = new SmartProxy({
|
||||
ports: [8081],
|
||||
routes: [
|
||||
createHttpRoute('test.local', { host: 'localhost', port: 3200 }),
|
||||
]
|
||||
});
|
||||
|
||||
// Override route port
|
||||
proxy.settings.routes[0].match.ports = 8081;
|
||||
|
||||
await proxy.start();
|
||||
|
||||
const metricsCollector = (proxy.getStats() as any);
|
||||
|
||||
// Check initial state
|
||||
console.log('Initial timestamps:', metricsCollector.requestTimestamps.length);
|
||||
|
||||
// Simulate many requests to test cleanup
|
||||
for (let i = 0; i < 6000; i++) {
|
||||
metricsCollector.recordRequest();
|
||||
}
|
||||
|
||||
// Should be cleaned up to MAX_TIMESTAMPS (5000)
|
||||
console.log('After 6000 requests:', metricsCollector.requestTimestamps.length);
|
||||
expect(metricsCollector.requestTimestamps.length).toBeLessThanOrEqual(5000);
|
||||
|
||||
await proxy.stop();
|
||||
|
||||
// Test 2: Verify intervals are cleaned up
|
||||
console.log('\n=== Test 2: Verify cleanup methods exist ===');
|
||||
|
||||
// Check RequestHandler has destroy method
|
||||
const { RequestHandler } = await import('../ts/proxies/http-proxy/request-handler.js');
|
||||
const requestHandler = new RequestHandler({}, null as any);
|
||||
expect(typeof requestHandler.destroy).toEqual('function');
|
||||
console.log('✓ RequestHandler has destroy method');
|
||||
|
||||
// Check FunctionCache has destroy method
|
||||
const { FunctionCache } = await import('../ts/proxies/http-proxy/function-cache.js');
|
||||
const functionCache = new FunctionCache({ debug: () => {}, info: () => {} } as any);
|
||||
expect(typeof functionCache.destroy).toEqual('function');
|
||||
console.log('✓ FunctionCache has destroy method');
|
||||
|
||||
// Cleanup
|
||||
requestHandler.destroy();
|
||||
functionCache.destroy();
|
||||
|
||||
console.log('\n✅ All memory leak fixes verified!');
|
||||
});
|
||||
|
||||
tap.start();
|
131
test/test.memory-leak-unit.ts
Normal file
131
test/test.memory-leak-unit.ts
Normal file
@ -0,0 +1,131 @@
|
||||
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||
|
||||
tap.test('memory leak fixes - unit tests', async () => {
|
||||
console.log('\n=== Testing MetricsCollector memory management ===');
|
||||
|
||||
// Import and test MetricsCollector directly
|
||||
const { MetricsCollector } = await import('../ts/proxies/smart-proxy/metrics-collector.js');
|
||||
|
||||
// Create a mock SmartProxy with minimal required properties
|
||||
const mockProxy = {
|
||||
connectionManager: {
|
||||
getConnectionCount: () => 0,
|
||||
getConnections: () => new Map(),
|
||||
getTerminationStats: () => ({ incoming: {} })
|
||||
},
|
||||
routeConnectionHandler: {
|
||||
newConnectionSubject: {
|
||||
subscribe: () => ({ unsubscribe: () => {} })
|
||||
}
|
||||
},
|
||||
settings: {}
|
||||
};
|
||||
|
||||
const collector = new MetricsCollector(mockProxy as any);
|
||||
collector.start();
|
||||
|
||||
// Test timestamp cleanup
|
||||
console.log('Testing requestTimestamps cleanup...');
|
||||
|
||||
// Add 6000 timestamps
|
||||
for (let i = 0; i < 6000; i++) {
|
||||
collector.recordRequest();
|
||||
}
|
||||
|
||||
// Access private property for testing
|
||||
let timestamps = (collector as any).requestTimestamps;
|
||||
console.log(`Timestamps after 6000 requests: ${timestamps.length}`);
|
||||
|
||||
// Force one more request to trigger cleanup
|
||||
collector.recordRequest();
|
||||
timestamps = (collector as any).requestTimestamps;
|
||||
console.log(`Timestamps after cleanup trigger: ${timestamps.length}`);
|
||||
|
||||
// Now check the RPS window - all timestamps are within 1 minute so they won't be cleaned
|
||||
const now = Date.now();
|
||||
const oldestTimestamp = Math.min(...timestamps);
|
||||
const windowAge = now - oldestTimestamp;
|
||||
console.log(`Window age: ${windowAge}ms (should be < 60000ms for all to be kept)`);
|
||||
|
||||
// Since all timestamps are recent (within RPS window), they won't be cleaned by window
|
||||
// But the array size should still be limited
|
||||
console.log(`MAX_TIMESTAMPS: ${(collector as any).MAX_TIMESTAMPS}`);
|
||||
|
||||
// The issue is our rapid-fire test - all timestamps are within the window
|
||||
// Let's test with older timestamps
|
||||
console.log('\nTesting with mixed old/new timestamps...');
|
||||
(collector as any).requestTimestamps = [];
|
||||
|
||||
// Add some old timestamps (older than window)
|
||||
const oldTime = now - 70000; // 70 seconds ago
|
||||
for (let i = 0; i < 3000; i++) {
|
||||
(collector as any).requestTimestamps.push(oldTime);
|
||||
}
|
||||
|
||||
// Add new timestamps to exceed limit
|
||||
for (let i = 0; i < 3000; i++) {
|
||||
collector.recordRequest();
|
||||
}
|
||||
|
||||
timestamps = (collector as any).requestTimestamps;
|
||||
console.log(`After mixed timestamps: ${timestamps.length} (old ones should be cleaned)`);
|
||||
|
||||
// Old timestamps should be cleaned when we exceed MAX_TIMESTAMPS
|
||||
expect(timestamps.length).toBeLessThanOrEqual(5000);
|
||||
|
||||
// Stop the collector
|
||||
collector.stop();
|
||||
|
||||
console.log('\n=== Testing FunctionCache cleanup ===');
|
||||
|
||||
const { FunctionCache } = await import('../ts/proxies/http-proxy/function-cache.js');
|
||||
|
||||
const mockLogger = {
|
||||
debug: () => {},
|
||||
info: () => {},
|
||||
warn: () => {},
|
||||
error: () => {}
|
||||
};
|
||||
|
||||
const cache = new FunctionCache(mockLogger as any);
|
||||
|
||||
// Check that cleanup interval was set
|
||||
expect((cache as any).cleanupInterval).toBeTruthy();
|
||||
|
||||
// Test destroy method
|
||||
cache.destroy();
|
||||
|
||||
// Cleanup interval should be cleared
|
||||
expect((cache as any).cleanupInterval).toBeNull();
|
||||
|
||||
console.log('✓ FunctionCache properly cleans up interval');
|
||||
|
||||
console.log('\n=== Testing RequestHandler cleanup ===');
|
||||
|
||||
const { RequestHandler } = await import('../ts/proxies/http-proxy/request-handler.js');
|
||||
|
||||
const mockConnectionPool = {
|
||||
getConnection: () => null,
|
||||
releaseConnection: () => {}
|
||||
};
|
||||
|
||||
const handler = new RequestHandler(
|
||||
{ logLevel: 'error' },
|
||||
mockConnectionPool as any
|
||||
);
|
||||
|
||||
// Check that cleanup interval was set
|
||||
expect((handler as any).rateLimitCleanupInterval).toBeTruthy();
|
||||
|
||||
// Test destroy method
|
||||
handler.destroy();
|
||||
|
||||
// Cleanup interval should be cleared
|
||||
expect((handler as any).rateLimitCleanupInterval).toBeNull();
|
||||
|
||||
console.log('✓ RequestHandler properly cleans up interval');
|
||||
|
||||
console.log('\n✅ All memory leak fixes verified!');
|
||||
});
|
||||
|
||||
tap.start();
|
280
test/test.metrics-collector.ts
Normal file
280
test/test.metrics-collector.ts
Normal file
@ -0,0 +1,280 @@
|
||||
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||
import { SmartProxy } from '../ts/index.js';
|
||||
import * as net from 'net';
|
||||
import * as plugins from '../ts/plugins.js';
|
||||
|
||||
tap.test('MetricsCollector provides accurate metrics', async (tools) => {
|
||||
console.log('\n=== MetricsCollector Test ===');
|
||||
|
||||
// Create a simple echo server for testing
|
||||
const echoServer = net.createServer((socket) => {
|
||||
socket.on('data', (data) => {
|
||||
socket.write(data);
|
||||
});
|
||||
socket.on('error', () => {}); // Ignore errors
|
||||
});
|
||||
|
||||
await new Promise<void>((resolve) => {
|
||||
echoServer.listen(9995, () => {
|
||||
console.log('✓ Echo server started on port 9995');
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
|
||||
// Create SmartProxy with test routes
|
||||
const proxy = new SmartProxy({
|
||||
routes: [
|
||||
{
|
||||
name: 'test-route-1',
|
||||
match: { ports: 8700 },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: 'localhost', port: 9995 }
|
||||
}
|
||||
},
|
||||
{
|
||||
name: 'test-route-2',
|
||||
match: { ports: 8701 },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: 'localhost', port: 9995 }
|
||||
}
|
||||
}
|
||||
],
|
||||
enableDetailedLogging: true,
|
||||
});
|
||||
|
||||
await proxy.start();
|
||||
console.log('✓ Proxy started on ports 8700 and 8701');
|
||||
|
||||
// Get stats interface
|
||||
const stats = proxy.getStats();
|
||||
|
||||
// Test 1: Initial state
|
||||
console.log('\n--- Test 1: Initial State ---');
|
||||
expect(stats.getActiveConnections()).toEqual(0);
|
||||
expect(stats.getTotalConnections()).toEqual(0);
|
||||
expect(stats.getRequestsPerSecond()).toEqual(0);
|
||||
expect(stats.getConnectionsByRoute().size).toEqual(0);
|
||||
expect(stats.getConnectionsByIP().size).toEqual(0);
|
||||
|
||||
const throughput = stats.getThroughput();
|
||||
expect(throughput.bytesIn).toEqual(0);
|
||||
expect(throughput.bytesOut).toEqual(0);
|
||||
console.log('✓ Initial metrics are all zero');
|
||||
|
||||
// Test 2: Create connections and verify metrics
|
||||
console.log('\n--- Test 2: Active Connections ---');
|
||||
const clients: net.Socket[] = [];
|
||||
|
||||
// Create 3 connections to route 1
|
||||
for (let i = 0; i < 3; i++) {
|
||||
const client = net.connect(8700, 'localhost');
|
||||
clients.push(client);
|
||||
await new Promise<void>((resolve) => {
|
||||
client.on('connect', resolve);
|
||||
client.on('error', () => resolve());
|
||||
});
|
||||
}
|
||||
|
||||
// Create 2 connections to route 2
|
||||
for (let i = 0; i < 2; i++) {
|
||||
const client = net.connect(8701, 'localhost');
|
||||
clients.push(client);
|
||||
await new Promise<void>((resolve) => {
|
||||
client.on('connect', resolve);
|
||||
client.on('error', () => resolve());
|
||||
});
|
||||
}
|
||||
|
||||
// Wait for connections to be fully established and routed
|
||||
await plugins.smartdelay.delayFor(300);
|
||||
|
||||
// Verify connection counts
|
||||
expect(stats.getActiveConnections()).toEqual(5);
|
||||
expect(stats.getTotalConnections()).toEqual(5);
|
||||
console.log(`✓ Active connections: ${stats.getActiveConnections()}`);
|
||||
console.log(`✓ Total connections: ${stats.getTotalConnections()}`);
|
||||
|
||||
// Test 3: Connections by route
|
||||
console.log('\n--- Test 3: Connections by Route ---');
|
||||
const routeConnections = stats.getConnectionsByRoute();
|
||||
console.log('Route connections:', Array.from(routeConnections.entries()));
|
||||
|
||||
// Check if we have the expected counts
|
||||
let route1Count = 0;
|
||||
let route2Count = 0;
|
||||
for (const [routeName, count] of routeConnections) {
|
||||
if (routeName === 'test-route-1') route1Count = count;
|
||||
if (routeName === 'test-route-2') route2Count = count;
|
||||
}
|
||||
|
||||
expect(route1Count).toEqual(3);
|
||||
expect(route2Count).toEqual(2);
|
||||
console.log('✓ Route test-route-1 has 3 connections');
|
||||
console.log('✓ Route test-route-2 has 2 connections');
|
||||
|
||||
// Test 4: Connections by IP
|
||||
console.log('\n--- Test 4: Connections by IP ---');
|
||||
const ipConnections = stats.getConnectionsByIP();
|
||||
// All connections are from localhost (127.0.0.1 or ::1)
|
||||
let totalIPConnections = 0;
|
||||
for (const [ip, count] of ipConnections) {
|
||||
console.log(` IP ${ip}: ${count} connections`);
|
||||
totalIPConnections += count;
|
||||
}
|
||||
expect(totalIPConnections).toEqual(5);
|
||||
console.log('✓ Total connections by IP matches active connections');
|
||||
|
||||
// Test 5: RPS calculation
|
||||
console.log('\n--- Test 5: Requests Per Second ---');
|
||||
const rps = stats.getRequestsPerSecond();
|
||||
console.log(` Current RPS: ${rps.toFixed(2)}`);
|
||||
// We created 5 connections, so RPS should be > 0
|
||||
expect(rps).toBeGreaterThan(0);
|
||||
console.log('✓ RPS is greater than 0');
|
||||
|
||||
// Test 6: Throughput
|
||||
console.log('\n--- Test 6: Throughput ---');
|
||||
// Send some data through connections
|
||||
for (const client of clients) {
|
||||
if (!client.destroyed) {
|
||||
client.write('Hello metrics!\n');
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for data to be transmitted
|
||||
await plugins.smartdelay.delayFor(100);
|
||||
|
||||
const throughputAfter = stats.getThroughput();
|
||||
console.log(` Bytes in: ${throughputAfter.bytesIn}`);
|
||||
console.log(` Bytes out: ${throughputAfter.bytesOut}`);
|
||||
expect(throughputAfter.bytesIn).toBeGreaterThan(0);
|
||||
expect(throughputAfter.bytesOut).toBeGreaterThan(0);
|
||||
console.log('✓ Throughput shows bytes transferred');
|
||||
|
||||
// Test 7: Close some connections
|
||||
console.log('\n--- Test 7: Connection Cleanup ---');
|
||||
// Close first 2 clients
|
||||
clients[0].destroy();
|
||||
clients[1].destroy();
|
||||
|
||||
await plugins.smartdelay.delayFor(100);
|
||||
|
||||
expect(stats.getActiveConnections()).toEqual(3);
|
||||
expect(stats.getTotalConnections()).toEqual(5); // Total should remain the same
|
||||
console.log(`✓ Active connections reduced to ${stats.getActiveConnections()}`);
|
||||
console.log(`✓ Total connections still ${stats.getTotalConnections()}`);
|
||||
|
||||
// Test 8: Helper methods
|
||||
console.log('\n--- Test 8: Helper Methods ---');
|
||||
|
||||
// Test getTopIPs
|
||||
const topIPs = (stats as any).getTopIPs(5);
|
||||
expect(topIPs.length).toBeGreaterThan(0);
|
||||
console.log('✓ getTopIPs returns IP list');
|
||||
|
||||
// Test isIPBlocked
|
||||
const isBlocked = (stats as any).isIPBlocked('127.0.0.1', 10);
|
||||
expect(isBlocked).toEqual(false); // Should not be blocked with limit of 10
|
||||
console.log('✓ isIPBlocked works correctly');
|
||||
|
||||
// Test throughput rate
|
||||
const throughputRate = (stats as any).getThroughputRate();
|
||||
console.log(` Throughput rate: ${throughputRate.bytesInPerSec} bytes/sec in, ${throughputRate.bytesOutPerSec} bytes/sec out`);
|
||||
console.log('✓ getThroughputRate calculates rates');
|
||||
|
||||
// Cleanup
|
||||
console.log('\n--- Cleanup ---');
|
||||
for (const client of clients) {
|
||||
if (!client.destroyed) {
|
||||
client.destroy();
|
||||
}
|
||||
}
|
||||
|
||||
await proxy.stop();
|
||||
echoServer.close();
|
||||
|
||||
console.log('\n✓ All MetricsCollector tests passed');
|
||||
});
|
||||
|
||||
// Test with mock data for unit testing
|
||||
tap.test('MetricsCollector unit test with mock data', async () => {
|
||||
console.log('\n=== MetricsCollector Unit Test ===');
|
||||
|
||||
// Create a mock SmartProxy with mock ConnectionManager
|
||||
const mockConnections = new Map([
|
||||
['conn1', {
|
||||
remoteIP: '192.168.1.1',
|
||||
routeName: 'api',
|
||||
bytesReceived: 1000,
|
||||
bytesSent: 500,
|
||||
incomingStartTime: Date.now() - 5000
|
||||
}],
|
||||
['conn2', {
|
||||
remoteIP: '192.168.1.1',
|
||||
routeName: 'web',
|
||||
bytesReceived: 2000,
|
||||
bytesSent: 1500,
|
||||
incomingStartTime: Date.now() - 10000
|
||||
}],
|
||||
['conn3', {
|
||||
remoteIP: '192.168.1.2',
|
||||
routeName: 'api',
|
||||
bytesReceived: 500,
|
||||
bytesSent: 250,
|
||||
incomingStartTime: Date.now() - 3000
|
||||
}]
|
||||
]);
|
||||
|
||||
const mockSmartProxy = {
|
||||
connectionManager: {
|
||||
getConnectionCount: () => mockConnections.size,
|
||||
getConnections: () => mockConnections,
|
||||
getTerminationStats: () => ({
|
||||
incoming: { normal: 10, timeout: 2, error: 1 }
|
||||
})
|
||||
}
|
||||
};
|
||||
|
||||
// Import MetricsCollector directly
|
||||
const { MetricsCollector } = await import('../ts/proxies/smart-proxy/metrics-collector.js');
|
||||
const metrics = new MetricsCollector(mockSmartProxy as any);
|
||||
|
||||
// Test metrics calculation
|
||||
console.log('\n--- Testing with Mock Data ---');
|
||||
|
||||
expect(metrics.getActiveConnections()).toEqual(3);
|
||||
console.log(`✓ Active connections: ${metrics.getActiveConnections()}`);
|
||||
|
||||
expect(metrics.getTotalConnections()).toEqual(16); // 3 active + 13 terminated
|
||||
console.log(`✓ Total connections: ${metrics.getTotalConnections()}`);
|
||||
|
||||
const routeConns = metrics.getConnectionsByRoute();
|
||||
expect(routeConns.get('api')).toEqual(2);
|
||||
expect(routeConns.get('web')).toEqual(1);
|
||||
console.log('✓ Connections by route calculated correctly');
|
||||
|
||||
const ipConns = metrics.getConnectionsByIP();
|
||||
expect(ipConns.get('192.168.1.1')).toEqual(2);
|
||||
expect(ipConns.get('192.168.1.2')).toEqual(1);
|
||||
console.log('✓ Connections by IP calculated correctly');
|
||||
|
||||
const throughput = metrics.getThroughput();
|
||||
expect(throughput.bytesIn).toEqual(3500);
|
||||
expect(throughput.bytesOut).toEqual(2250);
|
||||
console.log(`✓ Throughput: ${throughput.bytesIn} bytes in, ${throughput.bytesOut} bytes out`);
|
||||
|
||||
// Test RPS tracking
|
||||
metrics.recordRequest();
|
||||
metrics.recordRequest();
|
||||
metrics.recordRequest();
|
||||
|
||||
const rps = metrics.getRequestsPerSecond();
|
||||
expect(rps).toBeGreaterThan(0);
|
||||
console.log(`✓ RPS tracking works: ${rps.toFixed(2)} req/sec`);
|
||||
|
||||
console.log('\n✓ All unit tests passed');
|
||||
});
|
||||
|
||||
export default tap.start();
|
182
test/test.proxy-chain-cleanup.node.ts
Normal file
182
test/test.proxy-chain-cleanup.node.ts
Normal file
@ -0,0 +1,182 @@
|
||||
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||
import * as plugins from '../ts/plugins.js';
|
||||
import { SmartProxy } from '../ts/index.js';
|
||||
|
||||
let outerProxy: SmartProxy;
|
||||
let innerProxy: SmartProxy;
|
||||
|
||||
tap.test('setup two smartproxies in a chain configuration', async () => {
|
||||
// Setup inner proxy (backend proxy)
|
||||
innerProxy = new SmartProxy({
|
||||
routes: [
|
||||
{
|
||||
match: {
|
||||
ports: 8002
|
||||
},
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: {
|
||||
host: 'httpbin.org',
|
||||
port: 443
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
defaults: {
|
||||
target: {
|
||||
host: 'httpbin.org',
|
||||
port: 443
|
||||
}
|
||||
},
|
||||
acceptProxyProtocol: true,
|
||||
sendProxyProtocol: false,
|
||||
enableDetailedLogging: true,
|
||||
connectionCleanupInterval: 5000, // More frequent cleanup for testing
|
||||
inactivityTimeout: 10000 // Shorter timeout for testing
|
||||
});
|
||||
await innerProxy.start();
|
||||
|
||||
// Setup outer proxy (frontend proxy)
|
||||
outerProxy = new SmartProxy({
|
||||
routes: [
|
||||
{
|
||||
match: {
|
||||
ports: 8001
|
||||
},
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: {
|
||||
host: 'localhost',
|
||||
port: 8002
|
||||
},
|
||||
sendProxyProtocol: true
|
||||
}
|
||||
}
|
||||
],
|
||||
defaults: {
|
||||
target: {
|
||||
host: 'localhost',
|
||||
port: 8002
|
||||
}
|
||||
},
|
||||
sendProxyProtocol: true,
|
||||
enableDetailedLogging: true,
|
||||
connectionCleanupInterval: 5000, // More frequent cleanup for testing
|
||||
inactivityTimeout: 10000 // Shorter timeout for testing
|
||||
});
|
||||
await outerProxy.start();
|
||||
});
|
||||
|
||||
tap.test('should properly cleanup connections in proxy chain', async (tools) => {
|
||||
const testDuration = 30000; // 30 seconds
|
||||
const connectionInterval = 500; // Create new connection every 500ms
|
||||
const connectionDuration = 2000; // Each connection lasts 2 seconds
|
||||
|
||||
let connectionsCreated = 0;
|
||||
let connectionsCompleted = 0;
|
||||
|
||||
// Function to create a test connection
|
||||
const createTestConnection = async () => {
|
||||
connectionsCreated++;
|
||||
const connectionId = connectionsCreated;
|
||||
|
||||
try {
|
||||
const socket = plugins.net.connect({
|
||||
port: 8001,
|
||||
host: 'localhost'
|
||||
});
|
||||
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
socket.on('connect', () => {
|
||||
console.log(`Connection ${connectionId} established`);
|
||||
|
||||
// Send TLS Client Hello for httpbin.org
|
||||
const clientHello = Buffer.from([
|
||||
0x16, 0x03, 0x01, 0x00, 0xc8, // TLS handshake header
|
||||
0x01, 0x00, 0x00, 0xc4, // Client Hello
|
||||
0x03, 0x03, // TLS 1.2
|
||||
...Array(32).fill(0), // Random bytes
|
||||
0x00, // Session ID length
|
||||
0x00, 0x02, 0x13, 0x01, // Cipher suites
|
||||
0x01, 0x00, // Compression methods
|
||||
0x00, 0x97, // Extensions length
|
||||
0x00, 0x00, 0x00, 0x0f, 0x00, 0x0d, // SNI extension
|
||||
0x00, 0x00, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x62, 0x69, 0x6e, 0x2e, 0x6f, 0x72, 0x67 // "httpbin.org"
|
||||
]);
|
||||
|
||||
socket.write(clientHello);
|
||||
|
||||
// Keep connection alive for specified duration
|
||||
setTimeout(() => {
|
||||
socket.destroy();
|
||||
connectionsCompleted++;
|
||||
console.log(`Connection ${connectionId} closed (completed: ${connectionsCompleted}/${connectionsCreated})`);
|
||||
resolve();
|
||||
}, connectionDuration);
|
||||
});
|
||||
|
||||
socket.on('error', (err) => {
|
||||
console.log(`Connection ${connectionId} error: ${err.message}`);
|
||||
connectionsCompleted++;
|
||||
reject(err);
|
||||
});
|
||||
});
|
||||
} catch (err) {
|
||||
console.log(`Failed to create connection ${connectionId}: ${err.message}`);
|
||||
connectionsCompleted++;
|
||||
}
|
||||
};
|
||||
|
||||
// Start creating connections
|
||||
const startTime = Date.now();
|
||||
const connectionTimer = setInterval(() => {
|
||||
if (Date.now() - startTime < testDuration) {
|
||||
createTestConnection().catch(() => {});
|
||||
} else {
|
||||
clearInterval(connectionTimer);
|
||||
}
|
||||
}, connectionInterval);
|
||||
|
||||
// Monitor connection counts
|
||||
const monitorInterval = setInterval(() => {
|
||||
const outerConnections = (outerProxy as any).connectionManager.getConnectionCount();
|
||||
const innerConnections = (innerProxy as any).connectionManager.getConnectionCount();
|
||||
|
||||
console.log(`Active connections - Outer: ${outerConnections}, Inner: ${innerConnections}, Created: ${connectionsCreated}, Completed: ${connectionsCompleted}`);
|
||||
}, 2000);
|
||||
|
||||
// Wait for test duration + cleanup time
|
||||
await tools.delayFor(testDuration + 10000);
|
||||
|
||||
clearInterval(connectionTimer);
|
||||
clearInterval(monitorInterval);
|
||||
|
||||
// Wait for all connections to complete
|
||||
while (connectionsCompleted < connectionsCreated) {
|
||||
await tools.delayFor(100);
|
||||
}
|
||||
|
||||
// Give some time for cleanup
|
||||
await tools.delayFor(5000);
|
||||
|
||||
// Check final connection counts
|
||||
const finalOuterConnections = (outerProxy as any).connectionManager.getConnectionCount();
|
||||
const finalInnerConnections = (innerProxy as any).connectionManager.getConnectionCount();
|
||||
|
||||
console.log(`\nFinal connection counts:`);
|
||||
console.log(`Outer proxy: ${finalOuterConnections}`);
|
||||
console.log(`Inner proxy: ${finalInnerConnections}`);
|
||||
console.log(`Total created: ${connectionsCreated}`);
|
||||
console.log(`Total completed: ${connectionsCompleted}`);
|
||||
|
||||
// Both proxies should have cleaned up all connections
|
||||
expect(finalOuterConnections).toEqual(0);
|
||||
expect(finalInnerConnections).toEqual(0);
|
||||
});
|
||||
|
||||
tap.test('cleanup proxies', async () => {
|
||||
await outerProxy.stop();
|
||||
await innerProxy.stop();
|
||||
});
|
||||
|
||||
export default tap.start();
|
133
test/test.proxy-protocol.ts
Normal file
133
test/test.proxy-protocol.ts
Normal file
@ -0,0 +1,133 @@
|
||||
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||
import * as smartproxy from '../ts/index.js';
|
||||
import { ProxyProtocolParser } from '../ts/core/utils/proxy-protocol.js';
|
||||
|
||||
tap.test('PROXY protocol v1 parser - valid headers', async () => {
|
||||
// Test TCP4 format
|
||||
const tcp4Header = Buffer.from('PROXY TCP4 192.168.1.1 10.0.0.1 56324 443\r\n', 'ascii');
|
||||
const tcp4Result = ProxyProtocolParser.parse(tcp4Header);
|
||||
|
||||
expect(tcp4Result.proxyInfo).property('protocol').toEqual('TCP4');
|
||||
expect(tcp4Result.proxyInfo).property('sourceIP').toEqual('192.168.1.1');
|
||||
expect(tcp4Result.proxyInfo).property('sourcePort').toEqual(56324);
|
||||
expect(tcp4Result.proxyInfo).property('destinationIP').toEqual('10.0.0.1');
|
||||
expect(tcp4Result.proxyInfo).property('destinationPort').toEqual(443);
|
||||
expect(tcp4Result.remainingData.length).toEqual(0);
|
||||
|
||||
// Test TCP6 format
|
||||
const tcp6Header = Buffer.from('PROXY TCP6 2001:db8::1 2001:db8::2 56324 443\r\n', 'ascii');
|
||||
const tcp6Result = ProxyProtocolParser.parse(tcp6Header);
|
||||
|
||||
expect(tcp6Result.proxyInfo).property('protocol').toEqual('TCP6');
|
||||
expect(tcp6Result.proxyInfo).property('sourceIP').toEqual('2001:db8::1');
|
||||
expect(tcp6Result.proxyInfo).property('sourcePort').toEqual(56324);
|
||||
expect(tcp6Result.proxyInfo).property('destinationIP').toEqual('2001:db8::2');
|
||||
expect(tcp6Result.proxyInfo).property('destinationPort').toEqual(443);
|
||||
|
||||
// Test UNKNOWN protocol
|
||||
const unknownHeader = Buffer.from('PROXY UNKNOWN\r\n', 'ascii');
|
||||
const unknownResult = ProxyProtocolParser.parse(unknownHeader);
|
||||
|
||||
expect(unknownResult.proxyInfo).property('protocol').toEqual('UNKNOWN');
|
||||
expect(unknownResult.proxyInfo).property('sourceIP').toEqual('');
|
||||
expect(unknownResult.proxyInfo).property('sourcePort').toEqual(0);
|
||||
});
|
||||
|
||||
tap.test('PROXY protocol v1 parser - with remaining data', async () => {
|
||||
const headerWithData = Buffer.concat([
|
||||
Buffer.from('PROXY TCP4 192.168.1.1 10.0.0.1 56324 443\r\n', 'ascii'),
|
||||
Buffer.from('GET / HTTP/1.1\r\n', 'ascii')
|
||||
]);
|
||||
|
||||
const result = ProxyProtocolParser.parse(headerWithData);
|
||||
|
||||
expect(result.proxyInfo).property('protocol').toEqual('TCP4');
|
||||
expect(result.proxyInfo).property('sourceIP').toEqual('192.168.1.1');
|
||||
expect(result.remainingData.toString()).toEqual('GET / HTTP/1.1\r\n');
|
||||
});
|
||||
|
||||
tap.test('PROXY protocol v1 parser - invalid headers', async () => {
|
||||
// Not a PROXY protocol header
|
||||
const notProxy = Buffer.from('GET / HTTP/1.1\r\n', 'ascii');
|
||||
const notProxyResult = ProxyProtocolParser.parse(notProxy);
|
||||
expect(notProxyResult.proxyInfo).toBeNull();
|
||||
expect(notProxyResult.remainingData).toEqual(notProxy);
|
||||
|
||||
// Invalid protocol
|
||||
expect(() => {
|
||||
ProxyProtocolParser.parse(Buffer.from('PROXY INVALID 1.1.1.1 2.2.2.2 80 443\r\n', 'ascii'));
|
||||
}).toThrow();
|
||||
|
||||
// Wrong number of fields
|
||||
expect(() => {
|
||||
ProxyProtocolParser.parse(Buffer.from('PROXY TCP4 192.168.1.1 10.0.0.1 56324\r\n', 'ascii'));
|
||||
}).toThrow();
|
||||
|
||||
// Invalid port
|
||||
expect(() => {
|
||||
ProxyProtocolParser.parse(Buffer.from('PROXY TCP4 192.168.1.1 10.0.0.1 99999 443\r\n', 'ascii'));
|
||||
}).toThrow();
|
||||
|
||||
// Invalid IP for protocol
|
||||
expect(() => {
|
||||
ProxyProtocolParser.parse(Buffer.from('PROXY TCP4 2001:db8::1 10.0.0.1 56324 443\r\n', 'ascii'));
|
||||
}).toThrow();
|
||||
});
|
||||
|
||||
tap.test('PROXY protocol v1 parser - incomplete headers', async () => {
|
||||
// Header without terminator
|
||||
const incomplete = Buffer.from('PROXY TCP4 192.168.1.1 10.0.0.1 56324 443', 'ascii');
|
||||
const result = ProxyProtocolParser.parse(incomplete);
|
||||
|
||||
expect(result.proxyInfo).toBeNull();
|
||||
expect(result.remainingData).toEqual(incomplete);
|
||||
|
||||
// Header exceeding max length - create a buffer that actually starts with PROXY
|
||||
const longHeader = Buffer.from('PROXY TCP4 ' + '1'.repeat(100), 'ascii');
|
||||
expect(() => {
|
||||
ProxyProtocolParser.parse(longHeader);
|
||||
}).toThrow();
|
||||
});
|
||||
|
||||
tap.test('PROXY protocol v1 generator', async () => {
|
||||
// Generate TCP4 header
|
||||
const tcp4Info = {
|
||||
protocol: 'TCP4' as const,
|
||||
sourceIP: '192.168.1.1',
|
||||
sourcePort: 56324,
|
||||
destinationIP: '10.0.0.1',
|
||||
destinationPort: 443
|
||||
};
|
||||
|
||||
const tcp4Header = ProxyProtocolParser.generate(tcp4Info);
|
||||
expect(tcp4Header.toString('ascii')).toEqual('PROXY TCP4 192.168.1.1 10.0.0.1 56324 443\r\n');
|
||||
|
||||
// Generate TCP6 header
|
||||
const tcp6Info = {
|
||||
protocol: 'TCP6' as const,
|
||||
sourceIP: '2001:db8::1',
|
||||
sourcePort: 56324,
|
||||
destinationIP: '2001:db8::2',
|
||||
destinationPort: 443
|
||||
};
|
||||
|
||||
const tcp6Header = ProxyProtocolParser.generate(tcp6Info);
|
||||
expect(tcp6Header.toString('ascii')).toEqual('PROXY TCP6 2001:db8::1 2001:db8::2 56324 443\r\n');
|
||||
|
||||
// Generate UNKNOWN header
|
||||
const unknownInfo = {
|
||||
protocol: 'UNKNOWN' as const,
|
||||
sourceIP: '',
|
||||
sourcePort: 0,
|
||||
destinationIP: '',
|
||||
destinationPort: 0
|
||||
};
|
||||
|
||||
const unknownHeader = ProxyProtocolParser.generate(unknownInfo);
|
||||
expect(unknownHeader.toString('ascii')).toEqual('PROXY UNKNOWN\r\n');
|
||||
});
|
||||
|
||||
// Skipping integration tests for now - focus on unit tests
|
||||
// Integration tests would require more complex setup and teardown
|
||||
|
||||
tap.start();
|
144
test/test.stuck-connection-cleanup.node.ts
Normal file
144
test/test.stuck-connection-cleanup.node.ts
Normal file
@ -0,0 +1,144 @@
|
||||
import { expect, tap } from '@git.zone/tstest/tapbundle';
|
||||
import * as net from 'net';
|
||||
import { SmartProxy } from '../ts/index.js';
|
||||
import * as plugins from '../ts/plugins.js';
|
||||
|
||||
tap.test('stuck connection cleanup - verify connections to hanging backends are cleaned up', async (tools) => {
|
||||
console.log('\n=== Stuck Connection Cleanup Test ===');
|
||||
console.log('Purpose: Verify that connections to backends that accept but never respond are cleaned up');
|
||||
|
||||
// Create a hanging backend that accepts connections but never responds
|
||||
let backendConnections = 0;
|
||||
const hangingBackend = net.createServer((socket) => {
|
||||
backendConnections++;
|
||||
console.log(`Hanging backend: Connection ${backendConnections} received`);
|
||||
// Accept the connection but never send any data back
|
||||
// This simulates a hung backend service
|
||||
});
|
||||
|
||||
await new Promise<void>((resolve) => {
|
||||
hangingBackend.listen(9997, () => {
|
||||
console.log('✓ Hanging backend started on port 9997');
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
|
||||
// Create proxy that forwards to hanging backend
|
||||
const proxy = new SmartProxy({
|
||||
routes: [{
|
||||
name: 'to-hanging-backend',
|
||||
match: { ports: 8589 },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: 'localhost', port: 9997 }
|
||||
}
|
||||
}],
|
||||
keepAlive: true,
|
||||
enableDetailedLogging: false,
|
||||
inactivityTimeout: 5000, // 5 second inactivity check interval for faster testing
|
||||
});
|
||||
|
||||
await proxy.start();
|
||||
console.log('✓ Proxy started on port 8589');
|
||||
|
||||
// Create connections that will get stuck
|
||||
console.log('\n--- Creating connections to hanging backend ---');
|
||||
const clients: net.Socket[] = [];
|
||||
|
||||
for (let i = 0; i < 5; i++) {
|
||||
const client = net.connect(8589, 'localhost');
|
||||
clients.push(client);
|
||||
|
||||
await new Promise<void>((resolve) => {
|
||||
client.on('connect', () => {
|
||||
console.log(`Client ${i} connected`);
|
||||
// Send data that will never get a response
|
||||
client.write(`GET / HTTP/1.1\r\nHost: localhost\r\n\r\n`);
|
||||
resolve();
|
||||
});
|
||||
|
||||
client.on('error', (err) => {
|
||||
console.log(`Client ${i} error: ${err.message}`);
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
// Wait a moment for connections to establish
|
||||
await plugins.smartdelay.delayFor(1000);
|
||||
|
||||
// Check initial connection count
|
||||
const initialCount = (proxy as any).connectionManager.getConnectionCount();
|
||||
console.log(`\nInitial connection count: ${initialCount}`);
|
||||
expect(initialCount).toEqual(5);
|
||||
|
||||
// Get connection details
|
||||
const connections = (proxy as any).connectionManager.getConnections();
|
||||
let stuckCount = 0;
|
||||
|
||||
for (const [id, record] of connections) {
|
||||
if (record.bytesReceived > 0 && record.bytesSent === 0) {
|
||||
stuckCount++;
|
||||
console.log(`Stuck connection ${id}: received=${record.bytesReceived}, sent=${record.bytesSent}`);
|
||||
}
|
||||
}
|
||||
|
||||
console.log(`Stuck connections found: ${stuckCount}`);
|
||||
expect(stuckCount).toEqual(5);
|
||||
|
||||
// Wait for inactivity check to run (it checks every 30s by default, but we set it to 5s)
|
||||
console.log('\n--- Waiting for stuck connection detection (65 seconds) ---');
|
||||
console.log('Note: Stuck connections are cleaned up after 60 seconds with no response');
|
||||
|
||||
// Speed up time by manually triggering inactivity check after simulating time passage
|
||||
// First, age the connections by updating their timestamps
|
||||
const now = Date.now();
|
||||
for (const [id, record] of connections) {
|
||||
// Simulate that these connections are 61 seconds old
|
||||
record.incomingStartTime = now - 61000;
|
||||
record.lastActivity = now - 61000;
|
||||
}
|
||||
|
||||
// Manually trigger inactivity check
|
||||
console.log('Manually triggering inactivity check...');
|
||||
(proxy as any).connectionManager.performOptimizedInactivityCheck();
|
||||
|
||||
// Wait for cleanup to complete
|
||||
await plugins.smartdelay.delayFor(1000);
|
||||
|
||||
// Check connection count after cleanup
|
||||
const afterCleanupCount = (proxy as any).connectionManager.getConnectionCount();
|
||||
console.log(`\nConnection count after cleanup: ${afterCleanupCount}`);
|
||||
|
||||
// Verify termination stats
|
||||
const stats = (proxy as any).connectionManager.getTerminationStats();
|
||||
console.log('\nTermination stats:', stats);
|
||||
|
||||
// All connections should be cleaned up as "stuck_no_response"
|
||||
expect(afterCleanupCount).toEqual(0);
|
||||
|
||||
// The termination reason might be under incoming or general stats
|
||||
const stuckCleanups = (stats.incoming.stuck_no_response || 0) +
|
||||
(stats.outgoing?.stuck_no_response || 0);
|
||||
console.log(`Stuck cleanups detected: ${stuckCleanups}`);
|
||||
expect(stuckCleanups).toBeGreaterThan(0);
|
||||
|
||||
// Verify clients were disconnected
|
||||
let closedClients = 0;
|
||||
for (const client of clients) {
|
||||
if (client.destroyed) {
|
||||
closedClients++;
|
||||
}
|
||||
}
|
||||
console.log(`Closed clients: ${closedClients}/5`);
|
||||
expect(closedClients).toEqual(5);
|
||||
|
||||
// Cleanup
|
||||
console.log('\n--- Cleanup ---');
|
||||
await proxy.stop();
|
||||
hangingBackend.close();
|
||||
|
||||
console.log('✓ Test complete: Stuck connections are properly detected and cleaned up');
|
||||
});
|
||||
|
||||
tap.start();
|
158
test/test.websocket-keepalive.node.ts
Normal file
158
test/test.websocket-keepalive.node.ts
Normal file
@ -0,0 +1,158 @@
|
||||
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||
import { SmartProxy } from '../ts/index.js';
|
||||
import * as net from 'net';
|
||||
|
||||
tap.test('websocket keep-alive settings for SNI passthrough', async (tools) => {
|
||||
// Test 1: Verify grace periods for TLS connections
|
||||
console.log('\n=== Test 1: Grace periods for encrypted connections ===');
|
||||
|
||||
const proxy = new SmartProxy({
|
||||
ports: [8443],
|
||||
keepAliveTreatment: 'extended',
|
||||
keepAliveInactivityMultiplier: 10,
|
||||
inactivityTimeout: 60000, // 1 minute for testing
|
||||
routes: [
|
||||
{
|
||||
name: 'test-passthrough',
|
||||
match: { ports: 8443, domains: 'test.local' },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: 'localhost', port: 9443 },
|
||||
tls: { mode: 'passthrough' }
|
||||
}
|
||||
}
|
||||
]
|
||||
});
|
||||
|
||||
// Override route port
|
||||
proxy.settings.routes[0].match.ports = 8443;
|
||||
|
||||
await proxy.start();
|
||||
|
||||
// Access connection manager
|
||||
const connectionManager = proxy.connectionManager;
|
||||
|
||||
// Test 2: Verify longer grace periods are applied
|
||||
console.log('\n=== Test 2: Checking grace period configuration ===');
|
||||
|
||||
// Create a mock connection record
|
||||
const mockRecord = {
|
||||
id: 'test-conn-1',
|
||||
remoteIP: '127.0.0.1',
|
||||
incomingStartTime: Date.now() - 120000, // 2 minutes old
|
||||
isTLS: true,
|
||||
incoming: { destroyed: false } as any,
|
||||
outgoing: { destroyed: true } as any, // Half-zombie state
|
||||
connectionClosed: false,
|
||||
hasKeepAlive: true,
|
||||
lastActivity: Date.now() - 60000
|
||||
};
|
||||
|
||||
// The grace period should be 5 minutes for TLS connections
|
||||
const gracePeriod = mockRecord.isTLS ? 300000 : 30000;
|
||||
console.log(`Grace period for TLS connection: ${gracePeriod}ms (${gracePeriod / 1000} seconds)`);
|
||||
expect(gracePeriod).toEqual(300000); // 5 minutes
|
||||
|
||||
// Test 3: Verify keep-alive treatment
|
||||
console.log('\n=== Test 3: Keep-alive treatment configuration ===');
|
||||
|
||||
const settings = proxy.settings;
|
||||
console.log(`Keep-alive treatment: ${settings.keepAliveTreatment}`);
|
||||
console.log(`Keep-alive multiplier: ${settings.keepAliveInactivityMultiplier}`);
|
||||
console.log(`Base inactivity timeout: ${settings.inactivityTimeout}ms`);
|
||||
|
||||
// Calculate effective timeout
|
||||
const effectiveTimeout = settings.inactivityTimeout! * (settings.keepAliveInactivityMultiplier || 6);
|
||||
console.log(`Effective timeout for keep-alive connections: ${effectiveTimeout}ms (${effectiveTimeout / 1000} seconds)`);
|
||||
|
||||
expect(settings.keepAliveTreatment).toEqual('extended');
|
||||
expect(effectiveTimeout).toEqual(600000); // 10 minutes with our test config
|
||||
|
||||
// Test 4: Verify SNI passthrough doesn't get WebSocket heartbeat
|
||||
console.log('\n=== Test 4: SNI passthrough handling ===');
|
||||
|
||||
// Check route configuration
|
||||
const route = proxy.settings.routes[0];
|
||||
expect(route.action.tls?.mode).toEqual('passthrough');
|
||||
|
||||
// In passthrough mode, WebSocket-specific handling should be skipped
|
||||
// The connection should be treated as a raw TCP connection
|
||||
console.log('✓ SNI passthrough routes bypass WebSocket heartbeat checks');
|
||||
|
||||
await proxy.stop();
|
||||
|
||||
console.log('\n✅ WebSocket keep-alive configuration test completed!');
|
||||
});
|
||||
|
||||
// Test actual long-lived connection behavior
|
||||
tap.test('long-lived connection survival test', async (tools) => {
|
||||
console.log('\n=== Testing long-lived connection survival ===');
|
||||
|
||||
// Create a simple echo server
|
||||
const echoServer = net.createServer((socket) => {
|
||||
console.log('Echo server: client connected');
|
||||
socket.on('data', (data) => {
|
||||
socket.write(data); // Echo back
|
||||
});
|
||||
});
|
||||
|
||||
await new Promise<void>((resolve) => echoServer.listen(9444, resolve));
|
||||
|
||||
// Create proxy with immortal keep-alive
|
||||
const proxy = new SmartProxy({
|
||||
ports: [8444],
|
||||
keepAliveTreatment: 'immortal', // Never timeout
|
||||
routes: [
|
||||
{
|
||||
name: 'echo-passthrough',
|
||||
match: { ports: 8444 },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: { host: 'localhost', port: 9444 }
|
||||
}
|
||||
}
|
||||
]
|
||||
});
|
||||
|
||||
// Override route port
|
||||
proxy.settings.routes[0].match.ports = 8444;
|
||||
|
||||
await proxy.start();
|
||||
|
||||
// Create a client connection
|
||||
const client = new net.Socket();
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
client.connect(8444, 'localhost', () => {
|
||||
console.log('Client connected to proxy');
|
||||
resolve();
|
||||
});
|
||||
client.on('error', reject);
|
||||
});
|
||||
|
||||
// Keep connection alive with periodic data
|
||||
let pingCount = 0;
|
||||
const pingInterval = setInterval(() => {
|
||||
if (client.writable) {
|
||||
client.write(`ping ${++pingCount}\n`);
|
||||
console.log(`Sent ping ${pingCount}`);
|
||||
}
|
||||
}, 20000); // Every 20 seconds
|
||||
|
||||
// Wait 65 seconds to ensure it survives past old 30s and 60s timeouts
|
||||
await new Promise(resolve => setTimeout(resolve, 65000));
|
||||
|
||||
// Check if connection is still alive
|
||||
const isAlive = client.writable && !client.destroyed;
|
||||
console.log(`Connection alive after 65 seconds: ${isAlive}`);
|
||||
expect(isAlive).toBeTrue();
|
||||
|
||||
// Clean up
|
||||
clearInterval(pingInterval);
|
||||
client.destroy();
|
||||
await proxy.stop();
|
||||
await new Promise<void>((resolve) => echoServer.close(resolve));
|
||||
|
||||
console.log('✅ Long-lived connection survived past 30-second timeout!');
|
||||
});
|
||||
|
||||
tap.start();
|
306
test/test.zombie-connection-cleanup.node.ts
Normal file
306
test/test.zombie-connection-cleanup.node.ts
Normal file
@ -0,0 +1,306 @@
|
||||
import { tap, expect } from '@git.zone/tstest/tapbundle';
|
||||
import * as net from 'net';
|
||||
import * as plugins from '../ts/plugins.js';
|
||||
|
||||
// Import SmartProxy
|
||||
import { SmartProxy } from '../ts/index.js';
|
||||
|
||||
// Import types through type-only imports
|
||||
import type { ConnectionManager } from '../ts/proxies/smart-proxy/connection-manager.js';
|
||||
import type { IConnectionRecord } from '../ts/proxies/smart-proxy/models/interfaces.js';
|
||||
|
||||
tap.test('zombie connection cleanup - verify inactivity check detects and cleans destroyed sockets', async () => {
|
||||
console.log('\n=== Zombie Connection Cleanup Test ===');
|
||||
console.log('Purpose: Verify that connections with destroyed sockets are detected and cleaned up');
|
||||
console.log('Setup: Client → OuterProxy (8590) → InnerProxy (8591) → Backend (9998)');
|
||||
|
||||
// Create backend server that can be controlled
|
||||
let acceptConnections = true;
|
||||
let destroyImmediately = false;
|
||||
const backendConnections: net.Socket[] = [];
|
||||
|
||||
const backend = net.createServer((socket) => {
|
||||
console.log('Backend: Connection received');
|
||||
backendConnections.push(socket);
|
||||
|
||||
if (destroyImmediately) {
|
||||
console.log('Backend: Destroying connection immediately');
|
||||
socket.destroy();
|
||||
} else {
|
||||
socket.on('data', (data) => {
|
||||
console.log('Backend: Received data, echoing back');
|
||||
socket.write(data);
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
await new Promise<void>((resolve) => {
|
||||
backend.listen(9998, () => {
|
||||
console.log('✓ Backend server started on port 9998');
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
|
||||
// Create InnerProxy with faster inactivity check for testing
|
||||
const innerProxy = new SmartProxy({
|
||||
ports: [8591],
|
||||
enableDetailedLogging: true,
|
||||
inactivityTimeout: 5000, // 5 seconds for faster testing
|
||||
inactivityCheckInterval: 1000, // Check every second
|
||||
routes: [{
|
||||
name: 'to-backend',
|
||||
match: { ports: 8591 },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: {
|
||||
host: 'localhost',
|
||||
port: 9998
|
||||
}
|
||||
}
|
||||
}]
|
||||
});
|
||||
|
||||
// Create OuterProxy with faster inactivity check
|
||||
const outerProxy = new SmartProxy({
|
||||
ports: [8590],
|
||||
enableDetailedLogging: true,
|
||||
inactivityTimeout: 5000, // 5 seconds for faster testing
|
||||
inactivityCheckInterval: 1000, // Check every second
|
||||
routes: [{
|
||||
name: 'to-inner',
|
||||
match: { ports: 8590 },
|
||||
action: {
|
||||
type: 'forward',
|
||||
target: {
|
||||
host: 'localhost',
|
||||
port: 8591
|
||||
}
|
||||
}
|
||||
}]
|
||||
});
|
||||
|
||||
await innerProxy.start();
|
||||
console.log('✓ InnerProxy started on port 8591');
|
||||
|
||||
await outerProxy.start();
|
||||
console.log('✓ OuterProxy started on port 8590');
|
||||
|
||||
// Helper to get connection details
|
||||
const getConnectionDetails = () => {
|
||||
const outerConnMgr = (outerProxy as any).connectionManager as ConnectionManager;
|
||||
const innerConnMgr = (innerProxy as any).connectionManager as ConnectionManager;
|
||||
|
||||
const outerRecords = Array.from((outerConnMgr as any).connectionRecords.values()) as IConnectionRecord[];
|
||||
const innerRecords = Array.from((innerConnMgr as any).connectionRecords.values()) as IConnectionRecord[];
|
||||
|
||||
return {
|
||||
outer: {
|
||||
count: outerConnMgr.getConnectionCount(),
|
||||
records: outerRecords,
|
||||
zombies: outerRecords.filter(r =>
|
||||
!r.connectionClosed &&
|
||||
r.incoming?.destroyed &&
|
||||
(r.outgoing?.destroyed ?? true)
|
||||
),
|
||||
halfZombies: outerRecords.filter(r =>
|
||||
!r.connectionClosed &&
|
||||
(r.incoming?.destroyed || r.outgoing?.destroyed) &&
|
||||
!(r.incoming?.destroyed && (r.outgoing?.destroyed ?? true))
|
||||
)
|
||||
},
|
||||
inner: {
|
||||
count: innerConnMgr.getConnectionCount(),
|
||||
records: innerRecords,
|
||||
zombies: innerRecords.filter(r =>
|
||||
!r.connectionClosed &&
|
||||
r.incoming?.destroyed &&
|
||||
(r.outgoing?.destroyed ?? true)
|
||||
),
|
||||
halfZombies: innerRecords.filter(r =>
|
||||
!r.connectionClosed &&
|
||||
(r.incoming?.destroyed || r.outgoing?.destroyed) &&
|
||||
!(r.incoming?.destroyed && (r.outgoing?.destroyed ?? true))
|
||||
)
|
||||
}
|
||||
};
|
||||
};
|
||||
|
||||
console.log('\n--- Test 1: Create zombie by destroying sockets without events ---');
|
||||
|
||||
// Create a connection and forcefully destroy sockets to create zombies
|
||||
const client1 = new net.Socket();
|
||||
await new Promise<void>((resolve) => {
|
||||
client1.connect(8590, 'localhost', () => {
|
||||
console.log('Client1 connected to OuterProxy');
|
||||
client1.write('GET / HTTP/1.1\r\nHost: test.com\r\n\r\n');
|
||||
|
||||
// Wait for connection to be established through the chain
|
||||
setTimeout(() => {
|
||||
console.log('Forcefully destroying backend connections to create zombies');
|
||||
|
||||
// Get connection details before destruction
|
||||
const beforeDetails = getConnectionDetails();
|
||||
console.log(`Before destruction: Outer=${beforeDetails.outer.count}, Inner=${beforeDetails.inner.count}`);
|
||||
|
||||
// Destroy all backend connections without proper close events
|
||||
backendConnections.forEach(conn => {
|
||||
if (!conn.destroyed) {
|
||||
// Remove all listeners to prevent proper cleanup
|
||||
conn.removeAllListeners();
|
||||
conn.destroy();
|
||||
}
|
||||
});
|
||||
|
||||
// Also destroy the client socket abruptly
|
||||
client1.removeAllListeners();
|
||||
client1.destroy();
|
||||
|
||||
resolve();
|
||||
}, 500);
|
||||
});
|
||||
});
|
||||
|
||||
// Check immediately after destruction
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
let details = getConnectionDetails();
|
||||
console.log(`\nAfter destruction:`);
|
||||
console.log(` Outer: ${details.outer.count} connections, ${details.outer.zombies.length} zombies, ${details.outer.halfZombies.length} half-zombies`);
|
||||
console.log(` Inner: ${details.inner.count} connections, ${details.inner.zombies.length} zombies, ${details.inner.halfZombies.length} half-zombies`);
|
||||
|
||||
// Wait for inactivity check to run (should detect zombies)
|
||||
console.log('\nWaiting for inactivity check to detect zombies...');
|
||||
await new Promise(resolve => setTimeout(resolve, 2000));
|
||||
|
||||
details = getConnectionDetails();
|
||||
console.log(`\nAfter first inactivity check:`);
|
||||
console.log(` Outer: ${details.outer.count} connections, ${details.outer.zombies.length} zombies, ${details.outer.halfZombies.length} half-zombies`);
|
||||
console.log(` Inner: ${details.inner.count} connections, ${details.inner.zombies.length} zombies, ${details.inner.halfZombies.length} half-zombies`);
|
||||
|
||||
console.log('\n--- Test 2: Create half-zombie by destroying only one socket ---');
|
||||
|
||||
// Clear backend connections array
|
||||
backendConnections.length = 0;
|
||||
|
||||
const client2 = new net.Socket();
|
||||
await new Promise<void>((resolve) => {
|
||||
client2.connect(8590, 'localhost', () => {
|
||||
console.log('Client2 connected to OuterProxy');
|
||||
client2.write('GET / HTTP/1.1\r\nHost: test.com\r\n\r\n');
|
||||
|
||||
setTimeout(() => {
|
||||
console.log('Creating half-zombie by destroying only outgoing socket on outer proxy');
|
||||
|
||||
// Access the connection records directly
|
||||
const outerConnMgr = (outerProxy as any).connectionManager as ConnectionManager;
|
||||
const outerRecords = Array.from((outerConnMgr as any).connectionRecords.values()) as IConnectionRecord[];
|
||||
|
||||
// Find the active connection and destroy only its outgoing socket
|
||||
const activeRecord = outerRecords.find(r => !r.connectionClosed && r.outgoing && !r.outgoing.destroyed);
|
||||
if (activeRecord && activeRecord.outgoing) {
|
||||
console.log('Found active connection, destroying outgoing socket');
|
||||
activeRecord.outgoing.removeAllListeners();
|
||||
activeRecord.outgoing.destroy();
|
||||
}
|
||||
|
||||
resolve();
|
||||
}, 500);
|
||||
});
|
||||
});
|
||||
|
||||
// Check half-zombie state
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
details = getConnectionDetails();
|
||||
console.log(`\nAfter creating half-zombie:`);
|
||||
console.log(` Outer: ${details.outer.count} connections, ${details.outer.zombies.length} zombies, ${details.outer.halfZombies.length} half-zombies`);
|
||||
console.log(` Inner: ${details.inner.count} connections, ${details.inner.zombies.length} zombies, ${details.inner.halfZombies.length} half-zombies`);
|
||||
|
||||
// Wait for 30-second grace period (simulated by multiple checks)
|
||||
console.log('\nWaiting for half-zombie grace period (30 seconds simulated)...');
|
||||
|
||||
// Manually age the connection to trigger half-zombie cleanup
|
||||
const outerConnMgr = (outerProxy as any).connectionManager as ConnectionManager;
|
||||
const records = Array.from((outerConnMgr as any).connectionRecords.values()) as IConnectionRecord[];
|
||||
records.forEach(record => {
|
||||
if (!record.connectionClosed) {
|
||||
// Age the connection by 35 seconds
|
||||
record.incomingStartTime -= 35000;
|
||||
}
|
||||
});
|
||||
|
||||
// Trigger inactivity check
|
||||
await new Promise(resolve => setTimeout(resolve, 2000));
|
||||
|
||||
details = getConnectionDetails();
|
||||
console.log(`\nAfter half-zombie cleanup:`);
|
||||
console.log(` Outer: ${details.outer.count} connections, ${details.outer.zombies.length} zombies, ${details.outer.halfZombies.length} half-zombies`);
|
||||
console.log(` Inner: ${details.inner.count} connections, ${details.inner.zombies.length} zombies, ${details.inner.halfZombies.length} half-zombies`);
|
||||
|
||||
// Clean up client2 properly
|
||||
if (!client2.destroyed) {
|
||||
client2.destroy();
|
||||
}
|
||||
|
||||
console.log('\n--- Test 3: Rapid zombie creation under load ---');
|
||||
|
||||
// Create multiple connections rapidly and destroy them
|
||||
const rapidClients: net.Socket[] = [];
|
||||
|
||||
for (let i = 0; i < 5; i++) {
|
||||
const client = new net.Socket();
|
||||
rapidClients.push(client);
|
||||
|
||||
client.connect(8590, 'localhost', () => {
|
||||
console.log(`Rapid client ${i} connected`);
|
||||
client.write('GET / HTTP/1.1\r\nHost: test.com\r\n\r\n');
|
||||
|
||||
// Destroy after random delay
|
||||
setTimeout(() => {
|
||||
client.removeAllListeners();
|
||||
client.destroy();
|
||||
}, Math.random() * 500);
|
||||
});
|
||||
|
||||
// Small delay between connections
|
||||
await new Promise(resolve => setTimeout(resolve, 50));
|
||||
}
|
||||
|
||||
// Wait a bit
|
||||
await new Promise(resolve => setTimeout(resolve, 1000));
|
||||
|
||||
details = getConnectionDetails();
|
||||
console.log(`\nAfter rapid connections:`);
|
||||
console.log(` Outer: ${details.outer.count} connections, ${details.outer.zombies.length} zombies, ${details.outer.halfZombies.length} half-zombies`);
|
||||
console.log(` Inner: ${details.inner.count} connections, ${details.inner.zombies.length} zombies, ${details.inner.halfZombies.length} half-zombies`);
|
||||
|
||||
// Wait for cleanup
|
||||
console.log('\nWaiting for final cleanup...');
|
||||
await new Promise(resolve => setTimeout(resolve, 3000));
|
||||
|
||||
details = getConnectionDetails();
|
||||
console.log(`\nFinal state:`);
|
||||
console.log(` Outer: ${details.outer.count} connections, ${details.outer.zombies.length} zombies, ${details.outer.halfZombies.length} half-zombies`);
|
||||
console.log(` Inner: ${details.inner.count} connections, ${details.inner.zombies.length} zombies, ${details.inner.halfZombies.length} half-zombies`);
|
||||
|
||||
// Cleanup
|
||||
await outerProxy.stop();
|
||||
await innerProxy.stop();
|
||||
backend.close();
|
||||
|
||||
// Verify all connections are cleaned up
|
||||
console.log('\n--- Verification ---');
|
||||
|
||||
if (details.outer.count === 0 && details.inner.count === 0) {
|
||||
console.log('✅ PASS: All zombie connections were cleaned up');
|
||||
} else {
|
||||
console.log('❌ FAIL: Some connections remain');
|
||||
}
|
||||
|
||||
expect(details.outer.count).toEqual(0);
|
||||
expect(details.inner.count).toEqual(0);
|
||||
expect(details.outer.zombies.length).toEqual(0);
|
||||
expect(details.inner.zombies.length).toEqual(0);
|
||||
expect(details.outer.halfZombies.length).toEqual(0);
|
||||
expect(details.inner.halfZombies.length).toEqual(0);
|
||||
});
|
||||
|
||||
tap.start();
|
@ -15,3 +15,4 @@ export * from './lifecycle-component.js';
|
||||
export * from './binary-heap.js';
|
||||
export * from './enhanced-connection-pool.js';
|
||||
export * from './socket-utils.js';
|
||||
export * from './proxy-protocol.js';
|
||||
|
246
ts/core/utils/proxy-protocol.ts
Normal file
246
ts/core/utils/proxy-protocol.ts
Normal file
@ -0,0 +1,246 @@
|
||||
import * as plugins from '../../plugins.js';
|
||||
import { logger } from './logger.js';
|
||||
|
||||
/**
|
||||
* Interface representing parsed PROXY protocol information
|
||||
*/
|
||||
export interface IProxyInfo {
|
||||
protocol: 'TCP4' | 'TCP6' | 'UNKNOWN';
|
||||
sourceIP: string;
|
||||
sourcePort: number;
|
||||
destinationIP: string;
|
||||
destinationPort: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* Interface for parse result including remaining data
|
||||
*/
|
||||
export interface IProxyParseResult {
|
||||
proxyInfo: IProxyInfo | null;
|
||||
remainingData: Buffer;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parser for PROXY protocol v1 (text format)
|
||||
* Spec: https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt
|
||||
*/
|
||||
export class ProxyProtocolParser {
|
||||
static readonly PROXY_V1_SIGNATURE = 'PROXY ';
|
||||
static readonly MAX_HEADER_LENGTH = 107; // Max length for v1 header
|
||||
static readonly HEADER_TERMINATOR = '\r\n';
|
||||
|
||||
/**
|
||||
* Parse PROXY protocol v1 header from buffer
|
||||
* Returns proxy info and remaining data after header
|
||||
*/
|
||||
static parse(data: Buffer): IProxyParseResult {
|
||||
// Check if buffer starts with PROXY signature
|
||||
if (!data.toString('ascii', 0, 6).startsWith(this.PROXY_V1_SIGNATURE)) {
|
||||
return {
|
||||
proxyInfo: null,
|
||||
remainingData: data
|
||||
};
|
||||
}
|
||||
|
||||
// Find header terminator
|
||||
const headerEndIndex = data.indexOf(this.HEADER_TERMINATOR);
|
||||
if (headerEndIndex === -1) {
|
||||
// Header incomplete, need more data
|
||||
if (data.length > this.MAX_HEADER_LENGTH) {
|
||||
// Header too long, invalid
|
||||
throw new Error('PROXY protocol header exceeds maximum length');
|
||||
}
|
||||
return {
|
||||
proxyInfo: null,
|
||||
remainingData: data
|
||||
};
|
||||
}
|
||||
|
||||
// Extract header line
|
||||
const headerLine = data.toString('ascii', 0, headerEndIndex);
|
||||
const remainingData = data.slice(headerEndIndex + 2); // Skip \r\n
|
||||
|
||||
// Parse header
|
||||
const parts = headerLine.split(' ');
|
||||
|
||||
if (parts.length < 2) {
|
||||
throw new Error(`Invalid PROXY protocol header format: ${headerLine}`);
|
||||
}
|
||||
|
||||
const [signature, protocol] = parts;
|
||||
|
||||
// Validate protocol
|
||||
if (!['TCP4', 'TCP6', 'UNKNOWN'].includes(protocol)) {
|
||||
throw new Error(`Invalid PROXY protocol: ${protocol}`);
|
||||
}
|
||||
|
||||
// For UNKNOWN protocol, ignore addresses
|
||||
if (protocol === 'UNKNOWN') {
|
||||
return {
|
||||
proxyInfo: {
|
||||
protocol: 'UNKNOWN',
|
||||
sourceIP: '',
|
||||
sourcePort: 0,
|
||||
destinationIP: '',
|
||||
destinationPort: 0
|
||||
},
|
||||
remainingData
|
||||
};
|
||||
}
|
||||
|
||||
// For TCP4/TCP6, we need all 6 parts
|
||||
if (parts.length !== 6) {
|
||||
throw new Error(`Invalid PROXY protocol header format: ${headerLine}`);
|
||||
}
|
||||
|
||||
const [, , srcIP, dstIP, srcPort, dstPort] = parts;
|
||||
|
||||
// Validate and parse ports
|
||||
const sourcePort = parseInt(srcPort, 10);
|
||||
const destinationPort = parseInt(dstPort, 10);
|
||||
|
||||
if (isNaN(sourcePort) || sourcePort < 0 || sourcePort > 65535) {
|
||||
throw new Error(`Invalid source port: ${srcPort}`);
|
||||
}
|
||||
|
||||
if (isNaN(destinationPort) || destinationPort < 0 || destinationPort > 65535) {
|
||||
throw new Error(`Invalid destination port: ${dstPort}`);
|
||||
}
|
||||
|
||||
// Validate IP addresses
|
||||
const protocolType = protocol as 'TCP4' | 'TCP6' | 'UNKNOWN';
|
||||
if (!this.isValidIP(srcIP, protocolType)) {
|
||||
throw new Error(`Invalid source IP for ${protocol}: ${srcIP}`);
|
||||
}
|
||||
|
||||
if (!this.isValidIP(dstIP, protocolType)) {
|
||||
throw new Error(`Invalid destination IP for ${protocol}: ${dstIP}`);
|
||||
}
|
||||
|
||||
return {
|
||||
proxyInfo: {
|
||||
protocol: protocol as 'TCP4' | 'TCP6',
|
||||
sourceIP: srcIP,
|
||||
sourcePort,
|
||||
destinationIP: dstIP,
|
||||
destinationPort
|
||||
},
|
||||
remainingData
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate PROXY protocol v1 header
|
||||
*/
|
||||
static generate(info: IProxyInfo): Buffer {
|
||||
if (info.protocol === 'UNKNOWN') {
|
||||
return Buffer.from(`PROXY UNKNOWN\r\n`, 'ascii');
|
||||
}
|
||||
|
||||
const header = `PROXY ${info.protocol} ${info.sourceIP} ${info.destinationIP} ${info.sourcePort} ${info.destinationPort}\r\n`;
|
||||
|
||||
if (header.length > this.MAX_HEADER_LENGTH) {
|
||||
throw new Error('Generated PROXY protocol header exceeds maximum length');
|
||||
}
|
||||
|
||||
return Buffer.from(header, 'ascii');
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate IP address format
|
||||
*/
|
||||
private static isValidIP(ip: string, protocol: 'TCP4' | 'TCP6' | 'UNKNOWN'): boolean {
|
||||
if (protocol === 'TCP4') {
|
||||
return plugins.net.isIPv4(ip);
|
||||
} else if (protocol === 'TCP6') {
|
||||
return plugins.net.isIPv6(ip);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Attempt to read a complete PROXY protocol header from a socket
|
||||
* Returns null if no PROXY protocol detected or incomplete
|
||||
*/
|
||||
static async readFromSocket(socket: plugins.net.Socket, timeout: number = 5000): Promise<IProxyParseResult | null> {
|
||||
return new Promise((resolve) => {
|
||||
let buffer = Buffer.alloc(0);
|
||||
let resolved = false;
|
||||
|
||||
const cleanup = () => {
|
||||
socket.removeListener('data', onData);
|
||||
socket.removeListener('error', onError);
|
||||
clearTimeout(timer);
|
||||
};
|
||||
|
||||
const timer = setTimeout(() => {
|
||||
if (!resolved) {
|
||||
resolved = true;
|
||||
cleanup();
|
||||
resolve({
|
||||
proxyInfo: null,
|
||||
remainingData: buffer
|
||||
});
|
||||
}
|
||||
}, timeout);
|
||||
|
||||
const onData = (chunk: Buffer) => {
|
||||
buffer = Buffer.concat([buffer, chunk]);
|
||||
|
||||
// Check if we have enough data
|
||||
if (!buffer.toString('ascii', 0, Math.min(6, buffer.length)).startsWith(this.PROXY_V1_SIGNATURE)) {
|
||||
// Not PROXY protocol
|
||||
resolved = true;
|
||||
cleanup();
|
||||
resolve({
|
||||
proxyInfo: null,
|
||||
remainingData: buffer
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Try to parse
|
||||
try {
|
||||
const result = this.parse(buffer);
|
||||
if (result.proxyInfo) {
|
||||
// Successfully parsed
|
||||
resolved = true;
|
||||
cleanup();
|
||||
resolve(result);
|
||||
} else if (buffer.length > this.MAX_HEADER_LENGTH) {
|
||||
// Header too long
|
||||
resolved = true;
|
||||
cleanup();
|
||||
resolve({
|
||||
proxyInfo: null,
|
||||
remainingData: buffer
|
||||
});
|
||||
}
|
||||
// Otherwise continue reading
|
||||
} catch (error) {
|
||||
// Parse error
|
||||
logger.log('error', `PROXY protocol parse error: ${error.message}`);
|
||||
resolved = true;
|
||||
cleanup();
|
||||
resolve({
|
||||
proxyInfo: null,
|
||||
remainingData: buffer
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
const onError = (error: Error) => {
|
||||
logger.log('error', `Socket error while reading PROXY protocol: ${error.message}`);
|
||||
resolved = true;
|
||||
cleanup();
|
||||
resolve({
|
||||
proxyInfo: null,
|
||||
remainingData: buffer
|
||||
});
|
||||
};
|
||||
|
||||
socket.on('data', onData);
|
||||
socket.on('error', onError);
|
||||
});
|
||||
}
|
||||
}
|
@ -258,22 +258,61 @@ export function createSocketWithErrorHandler(options: SafeSocketOptions): plugin
|
||||
// Create socket with immediate error handler attachment
|
||||
const socket = new plugins.net.Socket();
|
||||
|
||||
// Track if connected
|
||||
let connected = false;
|
||||
let connectionTimeout: NodeJS.Timeout | null = null;
|
||||
|
||||
// Attach error handler BEFORE connecting to catch immediate errors
|
||||
socket.on('error', (error) => {
|
||||
console.error(`Socket connection error to ${host}:${port}: ${error.message}`);
|
||||
// Clear the connection timeout if it exists
|
||||
if (connectionTimeout) {
|
||||
clearTimeout(connectionTimeout);
|
||||
connectionTimeout = null;
|
||||
}
|
||||
if (onError) {
|
||||
onError(error);
|
||||
}
|
||||
});
|
||||
|
||||
// Attach connect handler if provided
|
||||
if (onConnect) {
|
||||
socket.on('connect', onConnect);
|
||||
}
|
||||
// Attach connect handler
|
||||
const handleConnect = () => {
|
||||
connected = true;
|
||||
// Clear the connection timeout
|
||||
if (connectionTimeout) {
|
||||
clearTimeout(connectionTimeout);
|
||||
connectionTimeout = null;
|
||||
}
|
||||
// Set inactivity timeout if provided (after connection is established)
|
||||
if (timeout) {
|
||||
socket.setTimeout(timeout);
|
||||
}
|
||||
if (onConnect) {
|
||||
onConnect();
|
||||
}
|
||||
};
|
||||
|
||||
// Set timeout if provided
|
||||
socket.on('connect', handleConnect);
|
||||
|
||||
// Implement connection establishment timeout
|
||||
if (timeout) {
|
||||
socket.setTimeout(timeout);
|
||||
connectionTimeout = setTimeout(() => {
|
||||
if (!connected && !socket.destroyed) {
|
||||
// Connection timed out - destroy the socket
|
||||
const error = new Error(`Connection timeout after ${timeout}ms to ${host}:${port}`);
|
||||
(error as any).code = 'ETIMEDOUT';
|
||||
|
||||
console.error(`Socket connection timeout to ${host}:${port} after ${timeout}ms`);
|
||||
|
||||
// Destroy the socket
|
||||
socket.destroy();
|
||||
|
||||
// Call error handler
|
||||
if (onError) {
|
||||
onError(error);
|
||||
}
|
||||
}
|
||||
}, timeout);
|
||||
}
|
||||
|
||||
// Now attempt to connect - any immediate errors will be caught
|
||||
|
@ -30,6 +30,7 @@ import * as smartacmeHandlers from '@push.rocks/smartacme/dist_ts/handlers/index
|
||||
import * as smartlog from '@push.rocks/smartlog';
|
||||
import * as smartlogDestinationLocal from '@push.rocks/smartlog/destination-local';
|
||||
import * as taskbuffer from '@push.rocks/taskbuffer';
|
||||
import * as smartrx from '@push.rocks/smartrx';
|
||||
|
||||
export {
|
||||
lik,
|
||||
@ -45,6 +46,7 @@ export {
|
||||
smartlog,
|
||||
smartlogDestinationLocal,
|
||||
taskbuffer,
|
||||
smartrx,
|
||||
};
|
||||
|
||||
// third party scope
|
||||
|
@ -30,6 +30,9 @@ export class FunctionCache {
|
||||
// Logger
|
||||
private logger: ILogger;
|
||||
|
||||
// Cleanup interval timer
|
||||
private cleanupInterval: NodeJS.Timeout | null = null;
|
||||
|
||||
/**
|
||||
* Creates a new function cache
|
||||
*
|
||||
@ -48,7 +51,12 @@ export class FunctionCache {
|
||||
this.defaultTtl = options.defaultTtl || 5000; // 5 seconds default
|
||||
|
||||
// Start the cache cleanup timer
|
||||
setInterval(() => this.cleanupCache(), 30000); // Cleanup every 30 seconds
|
||||
this.cleanupInterval = setInterval(() => this.cleanupCache(), 30000); // Cleanup every 30 seconds
|
||||
|
||||
// Make sure the interval doesn't keep the process alive
|
||||
if (this.cleanupInterval.unref) {
|
||||
this.cleanupInterval.unref();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -256,4 +264,16 @@ export class FunctionCache {
|
||||
this.portCache.clear();
|
||||
this.logger.info('Function cache cleared');
|
||||
}
|
||||
|
||||
/**
|
||||
* Destroy the cache and cleanup resources
|
||||
*/
|
||||
public destroy(): void {
|
||||
if (this.cleanupInterval) {
|
||||
clearInterval(this.cleanupInterval);
|
||||
this.cleanupInterval = null;
|
||||
}
|
||||
this.clearCache();
|
||||
this.logger.debug('Function cache destroyed');
|
||||
}
|
||||
}
|
@ -464,6 +464,11 @@ export class HttpProxy implements IMetricsTracker {
|
||||
// Stop WebSocket handler
|
||||
this.webSocketHandler.shutdown();
|
||||
|
||||
// Destroy request handler (cleans up intervals and caches)
|
||||
if (this.requestHandler && typeof this.requestHandler.destroy === 'function') {
|
||||
this.requestHandler.destroy();
|
||||
}
|
||||
|
||||
// Close all tracked sockets
|
||||
const socketCleanupPromises = this.socketMap.getArray().map(socket =>
|
||||
cleanupSocket(socket, 'http-proxy-stop', { immediate: true })
|
||||
|
@ -42,6 +42,9 @@ export class RequestHandler {
|
||||
|
||||
// Security manager for IP filtering, rate limiting, etc.
|
||||
public securityManager: SecurityManager;
|
||||
|
||||
// Rate limit cleanup interval
|
||||
private rateLimitCleanupInterval: NodeJS.Timeout | null = null;
|
||||
|
||||
constructor(
|
||||
private options: IHttpProxyOptions,
|
||||
@ -54,9 +57,14 @@ export class RequestHandler {
|
||||
this.securityManager = new SecurityManager(this.logger);
|
||||
|
||||
// Schedule rate limit cleanup every minute
|
||||
setInterval(() => {
|
||||
this.rateLimitCleanupInterval = setInterval(() => {
|
||||
this.securityManager.cleanupExpiredRateLimits();
|
||||
}, 60000);
|
||||
|
||||
// Make sure the interval doesn't keep the process alive
|
||||
if (this.rateLimitCleanupInterval.unref) {
|
||||
this.rateLimitCleanupInterval.unref();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -741,4 +749,27 @@ export class RequestHandler {
|
||||
stream.end('Not Found: No route configuration for this request');
|
||||
if (this.metricsTracker) this.metricsTracker.incrementFailedRequests();
|
||||
}
|
||||
|
||||
/**
|
||||
* Cleanup resources and stop intervals
|
||||
*/
|
||||
public destroy(): void {
|
||||
if (this.rateLimitCleanupInterval) {
|
||||
clearInterval(this.rateLimitCleanupInterval);
|
||||
this.rateLimitCleanupInterval = null;
|
||||
}
|
||||
|
||||
// Close all HTTP/2 sessions
|
||||
for (const [key, session] of this.h2Sessions) {
|
||||
session.close();
|
||||
}
|
||||
this.h2Sessions.clear();
|
||||
|
||||
// Clear function cache if it has a destroy method
|
||||
if (this.functionCache && typeof this.functionCache.destroy === 'function') {
|
||||
this.functionCache.destroy();
|
||||
}
|
||||
|
||||
this.logger.debug('RequestHandler destroyed');
|
||||
}
|
||||
}
|
@ -70,6 +70,7 @@ export class ConnectionManager extends LifecycleComponent {
|
||||
|
||||
const connectionId = this.generateConnectionId();
|
||||
const remoteIP = socket.remoteAddress || '';
|
||||
const remotePort = socket.remotePort || 0;
|
||||
const localPort = socket.localPort || 0;
|
||||
const now = Date.now();
|
||||
|
||||
@ -85,6 +86,7 @@ export class ConnectionManager extends LifecycleComponent {
|
||||
bytesReceived: 0,
|
||||
bytesSent: 0,
|
||||
remoteIP,
|
||||
remotePort,
|
||||
localPort,
|
||||
isTLS: false,
|
||||
tlsHandshakeComplete: false,
|
||||
@ -138,10 +140,10 @@ export class ConnectionManager extends LifecycleComponent {
|
||||
* Start the inactivity check timer
|
||||
*/
|
||||
private startInactivityCheckTimer(): void {
|
||||
// Check every 30 seconds for connections that need inactivity check
|
||||
// Check more frequently (every 10 seconds) to catch zombies and stuck connections faster
|
||||
this.setInterval(() => {
|
||||
this.performOptimizedInactivityCheck();
|
||||
}, 30000);
|
||||
}, 10000);
|
||||
// Note: LifecycleComponent's setInterval already calls unref()
|
||||
}
|
||||
|
||||
@ -192,6 +194,13 @@ export class ConnectionManager extends LifecycleComponent {
|
||||
* Queue a connection for cleanup
|
||||
*/
|
||||
private queueCleanup(connectionId: string): void {
|
||||
// Check if connection is already being processed
|
||||
const record = this.connectionRecords.get(connectionId);
|
||||
if (!record || record.connectionClosed) {
|
||||
// Already cleaned up or doesn't exist, skip
|
||||
return;
|
||||
}
|
||||
|
||||
this.cleanupQueue.add(connectionId);
|
||||
|
||||
// Process immediately if queue is getting large
|
||||
@ -215,9 +224,10 @@ export class ConnectionManager extends LifecycleComponent {
|
||||
}
|
||||
|
||||
const toCleanup = Array.from(this.cleanupQueue).slice(0, this.cleanupBatchSize);
|
||||
this.cleanupQueue.clear();
|
||||
|
||||
// Remove only the items we're processing, not the entire queue!
|
||||
for (const connectionId of toCleanup) {
|
||||
this.cleanupQueue.delete(connectionId);
|
||||
const record = this.connectionRecords.get(connectionId);
|
||||
if (record) {
|
||||
this.cleanupConnection(record, record.incomingTerminationReason || 'normal');
|
||||
@ -454,6 +464,84 @@ export class ConnectionManager extends LifecycleComponent {
|
||||
}
|
||||
}
|
||||
|
||||
// Also check ALL connections for zombie state (destroyed sockets but not cleaned up)
|
||||
// This is critical for proxy chains where sockets can be destroyed without events
|
||||
for (const [connectionId, record] of this.connectionRecords) {
|
||||
if (!record.connectionClosed) {
|
||||
const incomingDestroyed = record.incoming?.destroyed || false;
|
||||
const outgoingDestroyed = record.outgoing?.destroyed || false;
|
||||
|
||||
// Check for zombie connections: both sockets destroyed but connection not cleaned up
|
||||
if (incomingDestroyed && outgoingDestroyed) {
|
||||
logger.log('warn', `Zombie connection detected: ${connectionId} - both sockets destroyed but not cleaned up`, {
|
||||
connectionId,
|
||||
remoteIP: record.remoteIP,
|
||||
age: plugins.prettyMs(now - record.incomingStartTime),
|
||||
component: 'connection-manager'
|
||||
});
|
||||
|
||||
// Clean up immediately
|
||||
this.cleanupConnection(record, 'zombie_cleanup');
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check for half-zombie: one socket destroyed
|
||||
if (incomingDestroyed || outgoingDestroyed) {
|
||||
const age = now - record.incomingStartTime;
|
||||
// Use longer grace period for encrypted connections (5 minutes vs 30 seconds)
|
||||
const gracePeriod = record.isTLS ? 300000 : 30000;
|
||||
|
||||
// Also ensure connection is old enough to avoid premature cleanup
|
||||
if (age > gracePeriod && age > 10000) {
|
||||
logger.log('warn', `Half-zombie connection detected: ${connectionId} - ${incomingDestroyed ? 'incoming' : 'outgoing'} destroyed`, {
|
||||
connectionId,
|
||||
remoteIP: record.remoteIP,
|
||||
age: plugins.prettyMs(age),
|
||||
incomingDestroyed,
|
||||
outgoingDestroyed,
|
||||
isTLS: record.isTLS,
|
||||
gracePeriod: plugins.prettyMs(gracePeriod),
|
||||
component: 'connection-manager'
|
||||
});
|
||||
|
||||
// Clean up
|
||||
this.cleanupConnection(record, 'half_zombie_cleanup');
|
||||
}
|
||||
}
|
||||
|
||||
// Check for stuck connections: no data sent back to client
|
||||
if (!record.connectionClosed && record.outgoing && record.bytesReceived > 0 && record.bytesSent === 0) {
|
||||
const age = now - record.incomingStartTime;
|
||||
// Use longer grace period for encrypted connections (5 minutes vs 60 seconds)
|
||||
const stuckThreshold = record.isTLS ? 300000 : 60000;
|
||||
|
||||
// If connection is older than threshold and no data sent back, likely stuck
|
||||
if (age > stuckThreshold) {
|
||||
logger.log('warn', `Stuck connection detected: ${connectionId} - received ${record.bytesReceived} bytes but sent 0 bytes`, {
|
||||
connectionId,
|
||||
remoteIP: record.remoteIP,
|
||||
age: plugins.prettyMs(age),
|
||||
bytesReceived: record.bytesReceived,
|
||||
targetHost: record.targetHost,
|
||||
targetPort: record.targetPort,
|
||||
isTLS: record.isTLS,
|
||||
threshold: plugins.prettyMs(stuckThreshold),
|
||||
component: 'connection-manager'
|
||||
});
|
||||
|
||||
// Set termination reason and increment stats
|
||||
if (record.incomingTerminationReason == null) {
|
||||
record.incomingTerminationReason = 'stuck_no_response';
|
||||
this.incrementTerminationStat('incoming', 'stuck_no_response');
|
||||
}
|
||||
|
||||
// Clean up
|
||||
this.cleanupConnection(record, 'stuck_no_response');
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Process only connections that need checking
|
||||
for (const connectionId of connectionsToCheck) {
|
||||
const record = this.connectionRecords.get(connectionId);
|
||||
|
289
ts/proxies/smart-proxy/metrics-collector.ts
Normal file
289
ts/proxies/smart-proxy/metrics-collector.ts
Normal file
@ -0,0 +1,289 @@
|
||||
import * as plugins from '../../plugins.js';
|
||||
import type { SmartProxy } from './smart-proxy.js';
|
||||
import type { IProxyStats, IProxyStatsExtended } from './models/metrics-types.js';
|
||||
import { logger } from '../../core/utils/logger.js';
|
||||
|
||||
/**
|
||||
* Collects and computes metrics for SmartProxy on-demand
|
||||
*/
|
||||
export class MetricsCollector implements IProxyStatsExtended {
|
||||
// RPS tracking (the only state we need to maintain)
|
||||
private requestTimestamps: number[] = [];
|
||||
private readonly RPS_WINDOW_SIZE = 60000; // 1 minute window
|
||||
private readonly MAX_TIMESTAMPS = 5000; // Maximum timestamps to keep
|
||||
|
||||
// Optional caching for performance
|
||||
private cachedMetrics: {
|
||||
timestamp: number;
|
||||
connectionsByRoute?: Map<string, number>;
|
||||
connectionsByIP?: Map<string, number>;
|
||||
} = { timestamp: 0 };
|
||||
|
||||
private readonly CACHE_TTL = 1000; // 1 second cache
|
||||
|
||||
// RxJS subscription for connection events
|
||||
private connectionSubscription?: plugins.smartrx.rxjs.Subscription;
|
||||
|
||||
constructor(
|
||||
private smartProxy: SmartProxy
|
||||
) {
|
||||
// Subscription will be set up in start() method
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the current number of active connections
|
||||
*/
|
||||
public getActiveConnections(): number {
|
||||
return this.smartProxy.connectionManager.getConnectionCount();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get connection counts grouped by route name
|
||||
*/
|
||||
public getConnectionsByRoute(): Map<string, number> {
|
||||
const now = Date.now();
|
||||
|
||||
// Return cached value if fresh
|
||||
if (this.cachedMetrics.connectionsByRoute &&
|
||||
now - this.cachedMetrics.timestamp < this.CACHE_TTL) {
|
||||
return new Map(this.cachedMetrics.connectionsByRoute);
|
||||
}
|
||||
|
||||
// Compute fresh value
|
||||
const routeCounts = new Map<string, number>();
|
||||
const connections = this.smartProxy.connectionManager.getConnections();
|
||||
|
||||
if (this.smartProxy.settings?.enableDetailedLogging) {
|
||||
logger.log('debug', `MetricsCollector: Computing route connections`, {
|
||||
totalConnections: connections.size,
|
||||
component: 'metrics'
|
||||
});
|
||||
}
|
||||
|
||||
for (const [_, record] of connections) {
|
||||
// Try different ways to get the route name
|
||||
const routeName = (record as any).routeName ||
|
||||
record.routeConfig?.name ||
|
||||
(record.routeConfig as any)?.routeName ||
|
||||
'unknown';
|
||||
|
||||
if (this.smartProxy.settings?.enableDetailedLogging) {
|
||||
logger.log('debug', `MetricsCollector: Connection route info`, {
|
||||
connectionId: record.id,
|
||||
routeName,
|
||||
hasRouteConfig: !!record.routeConfig,
|
||||
routeConfigName: record.routeConfig?.name,
|
||||
routeConfigKeys: record.routeConfig ? Object.keys(record.routeConfig) : [],
|
||||
component: 'metrics'
|
||||
});
|
||||
}
|
||||
|
||||
const current = routeCounts.get(routeName) || 0;
|
||||
routeCounts.set(routeName, current + 1);
|
||||
}
|
||||
|
||||
// Cache and return
|
||||
this.cachedMetrics.connectionsByRoute = routeCounts;
|
||||
this.cachedMetrics.timestamp = now;
|
||||
return new Map(routeCounts);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get connection counts grouped by IP address
|
||||
*/
|
||||
public getConnectionsByIP(): Map<string, number> {
|
||||
const now = Date.now();
|
||||
|
||||
// Return cached value if fresh
|
||||
if (this.cachedMetrics.connectionsByIP &&
|
||||
now - this.cachedMetrics.timestamp < this.CACHE_TTL) {
|
||||
return new Map(this.cachedMetrics.connectionsByIP);
|
||||
}
|
||||
|
||||
// Compute fresh value
|
||||
const ipCounts = new Map<string, number>();
|
||||
for (const [_, record] of this.smartProxy.connectionManager.getConnections()) {
|
||||
const ip = record.remoteIP;
|
||||
const current = ipCounts.get(ip) || 0;
|
||||
ipCounts.set(ip, current + 1);
|
||||
}
|
||||
|
||||
// Cache and return
|
||||
this.cachedMetrics.connectionsByIP = ipCounts;
|
||||
this.cachedMetrics.timestamp = now;
|
||||
return new Map(ipCounts);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the total number of connections since proxy start
|
||||
*/
|
||||
public getTotalConnections(): number {
|
||||
// Get from termination stats
|
||||
const stats = this.smartProxy.connectionManager.getTerminationStats();
|
||||
let total = this.smartProxy.connectionManager.getConnectionCount(); // Add active connections
|
||||
|
||||
// Add all terminated connections
|
||||
for (const reason in stats.incoming) {
|
||||
total += stats.incoming[reason];
|
||||
}
|
||||
|
||||
return total;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the current requests per second rate
|
||||
*/
|
||||
public getRequestsPerSecond(): number {
|
||||
const now = Date.now();
|
||||
const windowStart = now - this.RPS_WINDOW_SIZE;
|
||||
|
||||
// Clean old timestamps
|
||||
this.requestTimestamps = this.requestTimestamps.filter(ts => ts > windowStart);
|
||||
|
||||
// Calculate RPS based on window
|
||||
const requestsInWindow = this.requestTimestamps.length;
|
||||
return requestsInWindow / (this.RPS_WINDOW_SIZE / 1000);
|
||||
}
|
||||
|
||||
/**
|
||||
* Record a new request for RPS tracking
|
||||
*/
|
||||
public recordRequest(): void {
|
||||
const now = Date.now();
|
||||
this.requestTimestamps.push(now);
|
||||
|
||||
// Prevent unbounded growth - clean up more aggressively
|
||||
if (this.requestTimestamps.length > this.MAX_TIMESTAMPS) {
|
||||
// Keep only timestamps within the window
|
||||
const cutoff = now - this.RPS_WINDOW_SIZE;
|
||||
this.requestTimestamps = this.requestTimestamps.filter(ts => ts > cutoff);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get total throughput (bytes transferred)
|
||||
*/
|
||||
public getThroughput(): { bytesIn: number; bytesOut: number } {
|
||||
let bytesIn = 0;
|
||||
let bytesOut = 0;
|
||||
|
||||
// Sum bytes from all active connections
|
||||
for (const [_, record] of this.smartProxy.connectionManager.getConnections()) {
|
||||
bytesIn += record.bytesReceived;
|
||||
bytesOut += record.bytesSent;
|
||||
}
|
||||
|
||||
return { bytesIn, bytesOut };
|
||||
}
|
||||
|
||||
/**
|
||||
* Get throughput rate (bytes per second) for last minute
|
||||
*/
|
||||
public getThroughputRate(): { bytesInPerSec: number; bytesOutPerSec: number } {
|
||||
const now = Date.now();
|
||||
let recentBytesIn = 0;
|
||||
let recentBytesOut = 0;
|
||||
|
||||
// Calculate bytes transferred in last minute from active connections
|
||||
for (const [_, record] of this.smartProxy.connectionManager.getConnections()) {
|
||||
const connectionAge = now - record.incomingStartTime;
|
||||
if (connectionAge < 60000) { // Connection started within last minute
|
||||
recentBytesIn += record.bytesReceived;
|
||||
recentBytesOut += record.bytesSent;
|
||||
} else {
|
||||
// For older connections, estimate rate based on average
|
||||
const rate = connectionAge / 60000;
|
||||
recentBytesIn += record.bytesReceived / rate;
|
||||
recentBytesOut += record.bytesSent / rate;
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
bytesInPerSec: Math.round(recentBytesIn / 60),
|
||||
bytesOutPerSec: Math.round(recentBytesOut / 60)
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get top IPs by connection count
|
||||
*/
|
||||
public getTopIPs(limit: number = 10): Array<{ ip: string; connections: number }> {
|
||||
const ipCounts = this.getConnectionsByIP();
|
||||
const sorted = Array.from(ipCounts.entries())
|
||||
.sort((a, b) => b[1] - a[1])
|
||||
.slice(0, limit)
|
||||
.map(([ip, connections]) => ({ ip, connections }));
|
||||
|
||||
return sorted;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if an IP has reached the connection limit
|
||||
*/
|
||||
public isIPBlocked(ip: string, maxConnectionsPerIP: number): boolean {
|
||||
const ipCounts = this.getConnectionsByIP();
|
||||
const currentConnections = ipCounts.get(ip) || 0;
|
||||
return currentConnections >= maxConnectionsPerIP;
|
||||
}
|
||||
|
||||
/**
|
||||
* Clean up old request timestamps
|
||||
*/
|
||||
private cleanupOldRequests(): void {
|
||||
const cutoff = Date.now() - this.RPS_WINDOW_SIZE;
|
||||
this.requestTimestamps = this.requestTimestamps.filter(ts => ts > cutoff);
|
||||
}
|
||||
|
||||
/**
|
||||
* Start the metrics collector and set up subscriptions
|
||||
*/
|
||||
public start(): void {
|
||||
if (!this.smartProxy.routeConnectionHandler) {
|
||||
throw new Error('MetricsCollector: RouteConnectionHandler not available');
|
||||
}
|
||||
|
||||
// Subscribe to the newConnectionSubject from RouteConnectionHandler
|
||||
this.connectionSubscription = this.smartProxy.routeConnectionHandler.newConnectionSubject.subscribe({
|
||||
next: (record) => {
|
||||
this.recordRequest();
|
||||
|
||||
// Optional: Log connection details
|
||||
if (this.smartProxy.settings?.enableDetailedLogging) {
|
||||
logger.log('debug', `MetricsCollector: New connection recorded`, {
|
||||
connectionId: record.id,
|
||||
remoteIP: record.remoteIP,
|
||||
routeName: record.routeConfig?.name || 'unknown',
|
||||
component: 'metrics'
|
||||
});
|
||||
}
|
||||
},
|
||||
error: (err) => {
|
||||
logger.log('error', `MetricsCollector: Error in connection subscription`, {
|
||||
error: err.message,
|
||||
component: 'metrics'
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
logger.log('debug', 'MetricsCollector started', { component: 'metrics' });
|
||||
}
|
||||
|
||||
/**
|
||||
* Stop the metrics collector and clean up resources
|
||||
*/
|
||||
public stop(): void {
|
||||
if (this.connectionSubscription) {
|
||||
this.connectionSubscription.unsubscribe();
|
||||
this.connectionSubscription = undefined;
|
||||
}
|
||||
|
||||
logger.log('debug', 'MetricsCollector stopped', { component: 'metrics' });
|
||||
}
|
||||
|
||||
/**
|
||||
* Alias for stop() for backward compatibility
|
||||
*/
|
||||
public destroy(): void {
|
||||
this.stop();
|
||||
}
|
||||
}
|
@ -4,3 +4,4 @@
|
||||
// Export everything except IAcmeOptions from interfaces
|
||||
export type { ISmartProxyOptions, IConnectionRecord, TSmartProxyCertProvisionObject } from './interfaces.js';
|
||||
export * from './route-types.js';
|
||||
export * from './metrics-types.js';
|
||||
|
@ -69,6 +69,7 @@ export interface ISmartProxyOptions {
|
||||
maxVersion?: string;
|
||||
|
||||
// Timeout settings
|
||||
connectionTimeout?: number; // Timeout for establishing connection to backend (ms), default: 30000 (30s)
|
||||
initialDataTimeout?: number; // Timeout for initial data/SNI (ms), default: 60000 (60s)
|
||||
socketTimeout?: number; // Socket inactivity timeout (ms), default: 3600000 (1h)
|
||||
inactivityCheckInterval?: number; // How often to check for inactive connections (ms), default: 60000 (60s)
|
||||
@ -151,6 +152,7 @@ export interface IConnectionRecord {
|
||||
bytesReceived: number; // Total bytes received
|
||||
bytesSent: number; // Total bytes sent
|
||||
remoteIP: string; // Remote IP (cached for logging after socket close)
|
||||
remotePort: number; // Remote port (cached for logging after socket close)
|
||||
localPort: number; // Local port (cached for logging)
|
||||
isTLS: boolean; // Whether this connection is a TLS connection
|
||||
tlsHandshakeComplete: boolean; // Whether the TLS handshake is complete
|
||||
|
54
ts/proxies/smart-proxy/models/metrics-types.ts
Normal file
54
ts/proxies/smart-proxy/models/metrics-types.ts
Normal file
@ -0,0 +1,54 @@
|
||||
/**
|
||||
* Interface for proxy statistics and metrics
|
||||
*/
|
||||
export interface IProxyStats {
|
||||
/**
|
||||
* Get the current number of active connections
|
||||
*/
|
||||
getActiveConnections(): number;
|
||||
|
||||
/**
|
||||
* Get connection counts grouped by route name
|
||||
*/
|
||||
getConnectionsByRoute(): Map<string, number>;
|
||||
|
||||
/**
|
||||
* Get connection counts grouped by IP address
|
||||
*/
|
||||
getConnectionsByIP(): Map<string, number>;
|
||||
|
||||
/**
|
||||
* Get the total number of connections since proxy start
|
||||
*/
|
||||
getTotalConnections(): number;
|
||||
|
||||
/**
|
||||
* Get the current requests per second rate
|
||||
*/
|
||||
getRequestsPerSecond(): number;
|
||||
|
||||
/**
|
||||
* Get total throughput (bytes transferred)
|
||||
*/
|
||||
getThroughput(): { bytesIn: number; bytesOut: number };
|
||||
}
|
||||
|
||||
/**
|
||||
* Extended interface for additional metrics helpers
|
||||
*/
|
||||
export interface IProxyStatsExtended extends IProxyStats {
|
||||
/**
|
||||
* Get throughput rate (bytes per second) for last minute
|
||||
*/
|
||||
getThroughputRate(): { bytesInPerSec: number; bytesOutPerSec: number };
|
||||
|
||||
/**
|
||||
* Get top IPs by connection count
|
||||
*/
|
||||
getTopIPs(limit?: number): Array<{ ip: string; connections: number }>;
|
||||
|
||||
/**
|
||||
* Check if an IP has reached the connection limit
|
||||
*/
|
||||
isIPBlocked(ip: string, maxConnectionsPerIP: number): boolean;
|
||||
}
|
@ -250,6 +250,9 @@ export interface IRouteAction {
|
||||
|
||||
// Socket handler function (when type is 'socket-handler')
|
||||
socketHandler?: TSocketHandler;
|
||||
|
||||
// PROXY protocol support
|
||||
sendProxyProtocol?: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -10,9 +10,10 @@ import { TlsManager } from './tls-manager.js';
|
||||
import { HttpProxyBridge } from './http-proxy-bridge.js';
|
||||
import { TimeoutManager } from './timeout-manager.js';
|
||||
import { SharedRouteManager as RouteManager } from '../../core/routing/route-manager.js';
|
||||
import { cleanupSocket, createIndependentSocketHandlers, setupSocketHandlers, createSocketWithErrorHandler, setupBidirectionalForwarding } from '../../core/utils/socket-utils.js';
|
||||
import { cleanupSocket, setupSocketHandlers, createSocketWithErrorHandler, setupBidirectionalForwarding } from '../../core/utils/socket-utils.js';
|
||||
import { WrappedSocket } from '../../core/models/wrapped-socket.js';
|
||||
import { getUnderlyingSocket } from '../../core/models/socket-types.js';
|
||||
import { ProxyProtocolParser } from '../../core/utils/proxy-protocol.js';
|
||||
|
||||
/**
|
||||
* Handles new connection processing and setup logic with support for route-based configuration
|
||||
@ -20,8 +21,12 @@ import { getUnderlyingSocket } from '../../core/models/socket-types.js';
|
||||
export class RouteConnectionHandler {
|
||||
private settings: ISmartProxyOptions;
|
||||
|
||||
// Cache for route contexts to avoid recreation
|
||||
private routeContextCache: Map<string, IRouteContext> = new Map();
|
||||
// Note: Route context caching was considered but not implemented
|
||||
// as route contexts are lightweight and should be created fresh
|
||||
// for each connection to ensure accurate context data
|
||||
|
||||
// RxJS Subject for new connections
|
||||
public newConnectionSubject = new plugins.smartrx.rxjs.Subject<IConnectionRecord>();
|
||||
|
||||
constructor(
|
||||
settings: ISmartProxyOptions,
|
||||
@ -34,6 +39,7 @@ export class RouteConnectionHandler {
|
||||
) {
|
||||
this.settings = settings;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Create a route context object for port and host mapping functions
|
||||
@ -109,6 +115,9 @@ export class RouteConnectionHandler {
|
||||
// Connection was rejected due to limit - socket already destroyed by connection manager
|
||||
return;
|
||||
}
|
||||
|
||||
// Emit new connection event
|
||||
this.newConnectionSubject.next(record);
|
||||
const connectionId = record.id;
|
||||
|
||||
// Apply socket optimizations (apply to underlying socket)
|
||||
@ -198,25 +207,29 @@ export class RouteConnectionHandler {
|
||||
setupSocketHandlers(
|
||||
underlyingSocket,
|
||||
(reason) => {
|
||||
// Only cleanup if connection hasn't been fully established
|
||||
// Check if outgoing connection exists and is connected
|
||||
if (!record.outgoing || record.outgoing.readyState !== 'open') {
|
||||
logger.log('debug', `Connection ${connectionId} closed during immediate routing: ${reason}`, {
|
||||
// Always cleanup when incoming socket closes
|
||||
// This prevents connection accumulation in proxy chains
|
||||
logger.log('debug', `Connection ${connectionId} closed during immediate routing: ${reason}`, {
|
||||
connectionId,
|
||||
remoteIP: record.remoteIP,
|
||||
reason,
|
||||
hasOutgoing: !!record.outgoing,
|
||||
outgoingState: record.outgoing?.readyState,
|
||||
component: 'route-handler'
|
||||
});
|
||||
|
||||
// If there's a pending or established outgoing connection, destroy it
|
||||
if (record.outgoing && !record.outgoing.destroyed) {
|
||||
logger.log('debug', `Destroying outgoing connection for ${connectionId}`, {
|
||||
connectionId,
|
||||
remoteIP: record.remoteIP,
|
||||
reason,
|
||||
hasOutgoing: !!record.outgoing,
|
||||
outgoingState: record.outgoing?.readyState,
|
||||
outgoingState: record.outgoing.readyState,
|
||||
component: 'route-handler'
|
||||
});
|
||||
|
||||
// If there's a pending outgoing connection, destroy it
|
||||
if (record.outgoing && !record.outgoing.destroyed) {
|
||||
record.outgoing.destroy();
|
||||
}
|
||||
|
||||
this.connectionManager.cleanupConnection(record, reason);
|
||||
record.outgoing.destroy();
|
||||
}
|
||||
|
||||
// Always cleanup the connection record
|
||||
this.connectionManager.cleanupConnection(record, reason);
|
||||
},
|
||||
undefined, // Use default timeout handler
|
||||
'immediate-route-client'
|
||||
@ -295,17 +308,8 @@ export class RouteConnectionHandler {
|
||||
}
|
||||
});
|
||||
|
||||
// First data handler to capture initial TLS handshake
|
||||
socket.once('data', (chunk: Buffer) => {
|
||||
// Clear the initial timeout since we've received data
|
||||
if (initialTimeout) {
|
||||
clearTimeout(initialTimeout);
|
||||
initialTimeout = null;
|
||||
}
|
||||
|
||||
initialDataReceived = true;
|
||||
record.hasReceivedInitialData = true;
|
||||
|
||||
// Handler for processing initial data (after potential PROXY protocol)
|
||||
const processInitialData = (chunk: Buffer) => {
|
||||
// Block non-TLS connections on port 443
|
||||
if (!this.tlsManager.isTlsHandshake(chunk) && localPort === 443) {
|
||||
logger.log('warn', `Non-TLS connection ${connectionId} detected on port 443. Terminating connection - only TLS traffic is allowed on standard HTTPS port.`, {
|
||||
@ -381,6 +385,67 @@ export class RouteConnectionHandler {
|
||||
|
||||
// Find the appropriate route for this connection
|
||||
this.routeConnection(socket, record, serverName, chunk);
|
||||
};
|
||||
|
||||
// First data handler to capture initial TLS handshake or PROXY protocol
|
||||
socket.once('data', async (chunk: Buffer) => {
|
||||
// Clear the initial timeout since we've received data
|
||||
if (initialTimeout) {
|
||||
clearTimeout(initialTimeout);
|
||||
initialTimeout = null;
|
||||
}
|
||||
|
||||
initialDataReceived = true;
|
||||
record.hasReceivedInitialData = true;
|
||||
|
||||
// Check if this is from a trusted proxy and might have PROXY protocol
|
||||
if (this.settings.proxyIPs?.includes(socket.remoteAddress || '') && this.settings.acceptProxyProtocol !== false) {
|
||||
// Check if this starts with PROXY protocol
|
||||
if (chunk.toString('ascii', 0, Math.min(6, chunk.length)).startsWith('PROXY ')) {
|
||||
try {
|
||||
const parseResult = ProxyProtocolParser.parse(chunk);
|
||||
|
||||
if (parseResult.proxyInfo) {
|
||||
// Update the wrapped socket with real client info (if it's a WrappedSocket)
|
||||
if (socket instanceof WrappedSocket) {
|
||||
socket.setProxyInfo(parseResult.proxyInfo.sourceIP, parseResult.proxyInfo.sourcePort);
|
||||
}
|
||||
|
||||
// Update connection record with real client info
|
||||
record.remoteIP = parseResult.proxyInfo.sourceIP;
|
||||
record.remotePort = parseResult.proxyInfo.sourcePort;
|
||||
|
||||
logger.log('info', `PROXY protocol parsed successfully`, {
|
||||
connectionId,
|
||||
realClientIP: parseResult.proxyInfo.sourceIP,
|
||||
realClientPort: parseResult.proxyInfo.sourcePort,
|
||||
proxyIP: socket.remoteAddress,
|
||||
component: 'route-handler'
|
||||
});
|
||||
|
||||
// Process remaining data if any
|
||||
if (parseResult.remainingData.length > 0) {
|
||||
processInitialData(parseResult.remainingData);
|
||||
} else {
|
||||
// Wait for more data
|
||||
socket.once('data', processInitialData);
|
||||
}
|
||||
return;
|
||||
}
|
||||
} catch (error) {
|
||||
logger.log('error', `Failed to parse PROXY protocol from trusted proxy`, {
|
||||
connectionId,
|
||||
error: error.message,
|
||||
proxyIP: socket.remoteAddress,
|
||||
component: 'route-handler'
|
||||
});
|
||||
// Continue processing as normal data
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Process as normal data (no PROXY protocol)
|
||||
processInitialData(chunk);
|
||||
});
|
||||
}
|
||||
|
||||
@ -583,6 +648,9 @@ export class RouteConnectionHandler {
|
||||
): void {
|
||||
const connectionId = record.id;
|
||||
const action = route.action as IRouteAction;
|
||||
|
||||
// Store the route config in the connection record for metrics and other uses
|
||||
record.routeConfig = route;
|
||||
|
||||
// Check if this route uses NFTables for forwarding
|
||||
if (action.forwardingEngine === 'nftables') {
|
||||
@ -663,8 +731,7 @@ export class RouteConnectionHandler {
|
||||
routeId: route.id,
|
||||
});
|
||||
|
||||
// Cache the context for potential reuse
|
||||
this.routeContextCache.set(connectionId, routeContext);
|
||||
// Note: Route contexts are not cached to ensure fresh data for each connection
|
||||
|
||||
// Determine host using function or static value
|
||||
let targetHost: string | string[];
|
||||
@ -900,6 +967,9 @@ export class RouteConnectionHandler {
|
||||
): Promise<void> {
|
||||
const connectionId = record.id;
|
||||
|
||||
// Store the route config in the connection record for metrics and other uses
|
||||
record.routeConfig = route;
|
||||
|
||||
if (!route.action.socketHandler) {
|
||||
logger.log('error', 'socket-handler action missing socketHandler function', {
|
||||
connectionId,
|
||||
@ -1068,6 +1138,7 @@ export class RouteConnectionHandler {
|
||||
const targetSocket = createSocketWithErrorHandler({
|
||||
port: finalTargetPort,
|
||||
host: finalTargetHost,
|
||||
timeout: this.settings.connectionTimeout || 30000, // Connection timeout (default: 30s)
|
||||
onError: (error) => {
|
||||
// Connection failed - clean up everything immediately
|
||||
// Check if connection record is still valid (client might have disconnected)
|
||||
@ -1119,7 +1190,7 @@ export class RouteConnectionHandler {
|
||||
// Clean up the connection record - this is critical!
|
||||
this.connectionManager.cleanupConnection(record, `connection_failed_${(error as any).code || 'unknown'}`);
|
||||
},
|
||||
onConnect: () => {
|
||||
onConnect: async () => {
|
||||
if (this.settings.enableDetailedLogging) {
|
||||
logger.log('info', `Connection ${connectionId} established to target ${finalTargetHost}:${finalTargetPort}`, {
|
||||
connectionId,
|
||||
@ -1135,6 +1206,56 @@ export class RouteConnectionHandler {
|
||||
// Add the normal error handler for established connections
|
||||
targetSocket.on('error', this.connectionManager.handleError('outgoing', record));
|
||||
|
||||
// Check if we should send PROXY protocol header
|
||||
const shouldSendProxyProtocol = record.routeConfig?.action?.sendProxyProtocol ||
|
||||
this.settings.sendProxyProtocol;
|
||||
|
||||
if (shouldSendProxyProtocol) {
|
||||
try {
|
||||
// Generate PROXY protocol header
|
||||
const proxyInfo = {
|
||||
protocol: (record.remoteIP.includes(':') ? 'TCP6' : 'TCP4') as 'TCP4' | 'TCP6',
|
||||
sourceIP: record.remoteIP,
|
||||
sourcePort: record.remotePort || socket.remotePort || 0,
|
||||
destinationIP: socket.localAddress || '',
|
||||
destinationPort: socket.localPort || 0
|
||||
};
|
||||
|
||||
const proxyHeader = ProxyProtocolParser.generate(proxyInfo);
|
||||
|
||||
// Send PROXY protocol header first
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
targetSocket.write(proxyHeader, (err) => {
|
||||
if (err) {
|
||||
logger.log('error', `Failed to send PROXY protocol header`, {
|
||||
connectionId,
|
||||
error: err.message,
|
||||
component: 'route-handler'
|
||||
});
|
||||
reject(err);
|
||||
} else {
|
||||
logger.log('info', `PROXY protocol header sent to backend`, {
|
||||
connectionId,
|
||||
targetHost: finalTargetHost,
|
||||
targetPort: finalTargetPort,
|
||||
sourceIP: proxyInfo.sourceIP,
|
||||
sourcePort: proxyInfo.sourcePort,
|
||||
component: 'route-handler'
|
||||
});
|
||||
resolve();
|
||||
}
|
||||
});
|
||||
});
|
||||
} catch (error) {
|
||||
logger.log('error', `Error sending PROXY protocol header`, {
|
||||
connectionId,
|
||||
error: error.message,
|
||||
component: 'route-handler'
|
||||
});
|
||||
// Continue anyway - don't break the connection
|
||||
}
|
||||
}
|
||||
|
||||
// Flush any pending data to target
|
||||
if (record.pendingData.length > 0) {
|
||||
const combinedData = Buffer.concat(record.pendingData);
|
||||
|
@ -27,6 +27,10 @@ import { Mutex } from './utils/mutex.js';
|
||||
// Import ACME state manager
|
||||
import { AcmeStateManager } from './acme-state-manager.js';
|
||||
|
||||
// Import metrics collector
|
||||
import { MetricsCollector } from './metrics-collector.js';
|
||||
import type { IProxyStats } from './models/metrics-types.js';
|
||||
|
||||
/**
|
||||
* SmartProxy - Pure route-based API
|
||||
*
|
||||
@ -47,13 +51,13 @@ export class SmartProxy extends plugins.EventEmitter {
|
||||
private isShuttingDown: boolean = false;
|
||||
|
||||
// Component managers
|
||||
private connectionManager: ConnectionManager;
|
||||
public connectionManager: ConnectionManager;
|
||||
private securityManager: SecurityManager;
|
||||
private tlsManager: TlsManager;
|
||||
private httpProxyBridge: HttpProxyBridge;
|
||||
private timeoutManager: TimeoutManager;
|
||||
public routeManager: RouteManager; // Made public for route management
|
||||
private routeConnectionHandler: RouteConnectionHandler;
|
||||
public routeConnectionHandler: RouteConnectionHandler; // Made public for metrics
|
||||
private nftablesManager: NFTablesManager;
|
||||
|
||||
// Certificate manager for ACME and static certificates
|
||||
@ -64,6 +68,9 @@ export class SmartProxy extends plugins.EventEmitter {
|
||||
private routeUpdateLock: any = null; // Will be initialized as AsyncMutex
|
||||
private acmeStateManager: AcmeStateManager;
|
||||
|
||||
// Metrics collector
|
||||
private metricsCollector: MetricsCollector;
|
||||
|
||||
// Track port usage across route updates
|
||||
private portUsageMap: Map<number, Set<string>> = new Map();
|
||||
|
||||
@ -204,6 +211,9 @@ export class SmartProxy extends plugins.EventEmitter {
|
||||
|
||||
// Initialize ACME state manager
|
||||
this.acmeStateManager = new AcmeStateManager();
|
||||
|
||||
// Initialize metrics collector with reference to this SmartProxy instance
|
||||
this.metricsCollector = new MetricsCollector(this);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -383,6 +393,9 @@ export class SmartProxy extends plugins.EventEmitter {
|
||||
logger.log('info', 'Starting certificate provisioning now that ports are ready', { component: 'certificate-manager' });
|
||||
await this.certManager.provisionAllCertificates();
|
||||
}
|
||||
|
||||
// Start the metrics collector now that all components are initialized
|
||||
this.metricsCollector.start();
|
||||
|
||||
// Set up periodic connection logging and inactivity checks
|
||||
this.connectionLogger = setInterval(() => {
|
||||
@ -508,6 +521,9 @@ export class SmartProxy extends plugins.EventEmitter {
|
||||
|
||||
// Clear ACME state manager
|
||||
this.acmeStateManager.clear();
|
||||
|
||||
// Stop metrics collector
|
||||
this.metricsCollector.stop();
|
||||
|
||||
logger.log('info', 'SmartProxy shutdown complete.');
|
||||
}
|
||||
@ -905,6 +921,15 @@ export class SmartProxy extends plugins.EventEmitter {
|
||||
return this.certManager.getCertificateStatus(routeName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get proxy statistics and metrics
|
||||
*
|
||||
* @returns IProxyStats interface with various metrics methods
|
||||
*/
|
||||
public getStats(): IProxyStats {
|
||||
return this.metricsCollector;
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates if a domain name is valid for certificate issuance
|
||||
*/
|
||||
|
Reference in New Issue
Block a user