Compare commits

...

10 Commits

SHA1 Message Date
3485392979 1.11.2
Some checks failed
Default (tags) / security (push) Failing after 0s
Default (tags) / test (push) Failing after 0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-12-15 15:22:45 +00:00
89adae2cff update 2025-12-15 15:22:35 +00:00
3451ab7456 update 2025-12-15 15:14:16 +00:00
bcded1eafa update 2025-12-15 14:34:02 +00:00
9cae46e2fe update 2025-12-15 14:33:58 +00:00
65c1df30da 1.11.1
Some checks failed
Default (tags) / security (push) Failing after 0s
Default (tags) / test (push) Failing after 0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-12-15 12:02:16 +00:00
e8f2add812 fix(dependencies): update 2025-12-15 12:02:13 +00:00
8fcc304ee3 v1.11.0
Some checks failed
Default (tags) / security (push) Failing after 0s
Default (tags) / test (push) Failing after 0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-12-15 11:36:03 +00:00
69802b46b6 feat(commit): Integrate DualAgentOrchestrator for commit message generation and improve diff/context handling 2025-12-15 11:36:03 +00:00
e500455557 1.10.2
Some checks failed
Default (tags) / security (push) Failing after 0s
Default (tags) / test (push) Failing after 0s
Default (tags) / release (push) Has been skipped
Default (tags) / metadata (push) Has been skipped
2025-12-13 22:50:26 +00:00
28 changed files with 509 additions and 4599 deletions

changelog.md

@@ -1,5 +1,16 @@
 # Changelog
+
+## 2025-12-15 - 1.11.0 - feat(commit)
+Integrate DualAgentOrchestrator for commit message generation and improve diff/context handling
+
+- Add @push.rocks/smartagent dependency and export it from plugins
+- Use DualAgentOrchestrator to generate and guardian-validate commit messages
+- Use DualAgentOrchestrator for changelog generation with guardian validation
+- Switch commit flow to TaskContextFactory and DiffProcessor for token-efficient context
+- Expose getOpenaiToken() and wire orchestrator with the project OpenAI token
+- Enhance iterative context builder and context components to better manage token budgets and sampling
+- Update npmextra.json with release config for @git.zone/cli and reference local smartagent package in package.json
+
 ## 2025-12-02 - 1.10.0 - feat(diff-processor)
 Improve diff sampling and file prioritization: increase inclusion thresholds, expand sampled context, and boost priority for interface/type and entry-point files
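Aside: the DualAgentOrchestrator mentioned in the 1.11.0 entry above pairs a worker agent with a guardian agent that reviews the worker's output against a policy prompt before it is accepted. Below is a minimal sketch of that flow. The constructor options and the start/run/stop calls mirror what the commit.ts diff further down actually uses; the SmartAi setup and the task prompt are illustrative assumptions, not code from this repository.

```typescript
import * as smartagent from '@push.rocks/smartagent';
import * as smartai from '@push.rocks/smartai';

// Assumption: a SmartAi instance configured with an OpenAI token.
const smartAiInstance = new smartai.SmartAi({ openaiToken: process.env.OPENAI_TOKEN });
await smartAiInstance.start();

const orchestrator = new smartagent.DualAgentOrchestrator({
  smartAiInstance,
  defaultProvider: 'openai',
  logPrefix: '[Example]',
  onProgress: (event) => console.log(event.logLevel, event.logMessage),
  // The guardian validates the worker's answer against this policy and
  // either approves it or rejects it with feedback for another attempt.
  guardianPolicyPrompt: `
APPROVE if the answer is a single valid JSON object with a commit message field.
REJECT with specific feedback otherwise.
`,
});

await orchestrator.start();
const result = await orchestrator.run('Generate a commit message for this diff: ...');
await orchestrator.stop();

if (!result.success) {
  throw new Error(`Generation failed: ${result.status}`);
}
console.log(result.result);
```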

npmextra.json

@@ -31,5 +31,14 @@
   },
   "tsdoc": {
     "legal": "\n## License and Legal Information\n\nThis repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository. \n\n**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.\n\n### Trademarks\n\nThis project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH and are not included within the scope of the MIT license granted herein. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines, and any usage must be approved in writing by Task Venture Capital GmbH.\n\n### Company Information\n\nTask Venture Capital GmbH \nRegistered at District court Bremen HRB 35230 HB, Germany\n\nFor any legal inquiries or if you require further information, please contact us via email at hello@task.vc.\n\nBy using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.\n"
-  }
+  },
+  "@git.zone/cli": {
+    "release": {
+      "registries": [
+        "https://verdaccio.lossless.digital",
+        "https://registry.npmjs.org"
+      ],
+      "accessLevel": "public"
+    }
+  }
 }

package.json

@@ -1,6 +1,6 @@
 {
   "name": "@git.zone/tsdoc",
-  "version": "1.10.1",
+  "version": "1.11.2",
   "private": false,
   "description": "A comprehensive TypeScript documentation tool that leverages AI to generate and enhance project documentation, including dynamic README creation, API docs via TypeDoc, and smart commit message generation.",
   "type": "module",
@@ -19,20 +19,21 @@
     "buildDocs": "tsdoc"
   },
   "devDependencies": {
-    "@git.zone/tsbuild": "^3.1.2",
+    "@git.zone/tsbuild": "^4.0.2",
     "@git.zone/tsrun": "^2.0.1",
     "@git.zone/tstest": "^3.1.3",
-    "@types/node": "^25.0.1"
+    "@types/node": "^25.0.2"
   },
   "dependencies": {
     "@git.zone/tspublish": "^1.10.3",
     "@push.rocks/early": "^4.0.4",
     "@push.rocks/npmextra": "^5.3.3",
     "@push.rocks/qenv": "^6.1.3",
+    "@push.rocks/smartagent": "file:../../push.rocks/smartagent",
     "@push.rocks/smartai": "^0.8.0",
     "@push.rocks/smartcli": "^4.0.19",
     "@push.rocks/smartdelay": "^3.0.5",
-    "@push.rocks/smartfile": "^13.1.0",
+    "@push.rocks/smartfile": "^13.1.2",
     "@push.rocks/smartfs": "^1.2.0",
     "@push.rocks/smartgit": "^3.3.1",
     "@push.rocks/smartinteract": "^2.0.16",
@@ -41,7 +42,6 @@
     "@push.rocks/smartpath": "^6.0.0",
     "@push.rocks/smartshell": "^3.3.0",
     "@push.rocks/smarttime": "^4.1.1",
-    "gpt-tokenizer": "^3.4.0",
     "typedoc": "^0.28.15",
     "typescript": "^5.9.3"
   },

pnpm-lock.yaml (generated, 150 changed lines)

@@ -20,6 +20,9 @@ importers:
       '@push.rocks/qenv':
         specifier: ^6.1.3
         version: 6.1.3
+      '@push.rocks/smartagent':
+        specifier: file:../../push.rocks/smartagent
+        version: file:../../push.rocks/smartagent(typescript@5.9.3)(ws@8.18.3)(zod@3.25.76)
       '@push.rocks/smartai':
         specifier: ^0.8.0
         version: 0.8.0(typescript@5.9.3)(ws@8.18.3)(zod@3.25.76)
@@ -30,8 +33,8 @@ importers:
         specifier: ^3.0.5
         version: 3.0.5
       '@push.rocks/smartfile':
-        specifier: ^13.1.0
-        version: 13.1.0
+        specifier: ^13.1.2
+        version: 13.1.2
       '@push.rocks/smartfs':
         specifier: ^1.2.0
         version: 1.2.0
@@ -56,9 +59,6 @@ importers:
       '@push.rocks/smarttime':
         specifier: ^4.1.1
         version: 4.1.1
-      gpt-tokenizer:
-        specifier: ^3.4.0
-        version: 3.4.0
       typedoc:
         specifier: ^0.28.15
         version: 0.28.15(typescript@5.9.3)
@@ -67,8 +67,8 @@ importers:
         version: 5.9.3
     devDependencies:
       '@git.zone/tsbuild':
-        specifier: ^3.1.2
-        version: 3.1.2
+        specifier: ^4.0.2
+        version: 4.0.2
       '@git.zone/tsrun':
         specifier: ^2.0.1
         version: 2.0.1
@@ -76,8 +76,8 @@ importers:
         specifier: ^3.1.3
         version: 3.1.3(@aws-sdk/credential-providers@3.808.0)(socks@2.8.7)(typescript@5.9.3)
       '@types/node':
-        specifier: ^25.0.1
-        version: 25.0.1
+        specifier: ^25.0.2
+        version: 25.0.2
 
 packages:
@@ -697,8 +697,8 @@ packages:
   '@gerrit0/mini-shiki@3.20.0':
     resolution: {integrity: sha512-Wa57i+bMpK6PGJZ1f2myxo3iO+K/kZikcyvH8NIqNNZhQUbDav7V9LQmWOXhf946mz5c1NZ19WMsGYiDKTryzQ==}
 
-  '@git.zone/tsbuild@3.1.2':
-    resolution: {integrity: sha512-K0u840Qo0WEhvcpAtktvdBX6KEXjelU32o820WzcK7dMA7dd2YV+mPOEYfbmWLcdtFJkrjkigQq5fpLhTN4oKQ==}
+  '@git.zone/tsbuild@4.0.2':
+    resolution: {integrity: sha512-LcRlFnDbcUe53Pdoob585iXq9TAT90TyEaYl/wml/etFoPeBX+oQLm6GryejUPXrUP7i1opyTonadkQN1OyXOA==}
     hasBin: true
 
   '@git.zone/tsbundle@2.6.3':
@@ -1155,12 +1155,18 @@ packages:
   '@push.rocks/qenv@6.1.3':
     resolution: {integrity: sha512-+z2hsAU/7CIgpYLFqvda8cn9rUBMHqLdQLjsFfRn5jPoD7dJ5rFlpkbhfM4Ws8mHMniwWaxGKo+q/YBhtzRBLg==}
 
+  '@push.rocks/smartagent@file:../../push.rocks/smartagent':
+    resolution: {directory: ../../push.rocks/smartagent, type: directory}
+
   '@push.rocks/smartai@0.8.0':
     resolution: {integrity: sha512-guzi28meUDc3mydC8kpoA+4pzExRQqygXYFDD4qQSWPpIRHQ7qhpeNqJzrrGezT1yOH5Gb9taPEGwT56hI+nwQ==}
 
   '@push.rocks/smartarchive@4.2.2':
     resolution: {integrity: sha512-6EpqbKU32D6Gcqsc9+Tn1dOCU5HoTlrqqs/7IdUr9Tirp9Ngtptkapca1Fw/D0kVJ7SSw3kG/miAYnuPMZLEoA==}
 
+  '@push.rocks/smartarchive@5.0.1':
+    resolution: {integrity: sha512-x4bie9IIdL9BZqBZLc8Pemp8xZOJGa6mXSVgKJRL4/Rw+E5N4rVHjQOYGRV75nC2mAMJh9GIbixuxLnWjj77ag==}
+
   '@push.rocks/smartarray@1.1.0':
     resolution: {integrity: sha512-b5YgBmUdglOJH8zeUf2ZWdPCoqySgwvkycRi2BhA9zVZHkpASh39Ej0q0fxFJetlUVyYqGfVoMVjbVrLFfFV7g==}
@@ -1198,6 +1204,9 @@ packages:
   '@push.rocks/smartdelay@3.0.5':
     resolution: {integrity: sha512-mUuI7kj2f7ztjpic96FvRIlf2RsKBa5arw81AHNsndbxO6asRcxuWL8dTVxouEIK8YsBUlj0AsrCkHhMbLQdHw==}
 
+  '@push.rocks/smartdeno@1.2.0':
+    resolution: {integrity: sha512-6S1plCaMUVOZiRSflfoz9Fqk9phACCuKmc7Z6SfTvfl+p9VcPUmewKgaa/0QiLOpiI6ksfxdfmkS5Rw5HpYeIA==}
+
   '@push.rocks/smartdns@7.6.1':
     resolution: {integrity: sha512-nnP5+A2GOt0WsHrYhtKERmjdEHUchc+QbCCBEqlyeQTn+mNfx2WZvKVI1DFRJt8lamvzxP6Hr/BSe3WHdh4Snw==}
@@ -1228,8 +1237,8 @@ packages:
   '@push.rocks/smartfile@11.2.7':
     resolution: {integrity: sha512-8Yp7/sAgPpWJBHohV92ogHWKzRomI5MEbSG6b5W2n18tqwfAmjMed0rQvsvGrSBlnEWCKgoOrYIIZbLO61+J0Q==}
 
-  '@push.rocks/smartfile@13.1.0':
-    resolution: {integrity: sha512-bSjH9vHl6l1nbe/gcSi4PcutFcTHUCVkMuQGGTVtn1cOgCuOXIHV04uhOXrZoKvlcSxxoiq8THolFt65lqn7cg==}
+  '@push.rocks/smartfile@13.1.2':
+    resolution: {integrity: sha512-DaEhwmnGEpX4coeeToaw4cZe3pNBhH7CY1iGr+d3pIXihozREvzzAR9/0i2r7bUXXL5+Lgy8YYIk5ZS+fwxMKA==}
 
   '@push.rocks/smartfs@1.2.0':
     resolution: {integrity: sha512-1R47jJZwX869z7DYgKeAZKTU1SbGnM7W/ZmgsI7AkQQhiascNqY3/gF4V5kIprmuf1WhpRbCbZyum8s7J1LDdg==}
@@ -2158,11 +2167,11 @@ packages:
   '@types/node@16.9.1':
     resolution: {integrity: sha512-QpLcX9ZSsq3YYUUnD3nFDY8H7wctAhQj/TFKL8Ya8v5fMm3CFXxo8zStsLAl780ltoYoo1WvKUVGBQK+1ifr7g==}
 
-  '@types/node@22.19.2':
-    resolution: {integrity: sha512-LPM2G3Syo1GLzXLGJAKdqoU35XvrWzGJ21/7sgZTUpbkBaOasTj8tjwn6w+hCkqaa1TfJ/w67rJSwYItlJ2mYw==}
+  '@types/node@22.19.3':
+    resolution: {integrity: sha512-1N9SBnWYOJTrNZCdh/yJE+t910Y128BoyY+zBLWhL3r0TYzlTmFdXrPwHL9DyFZmlEXNQQolTZh3KHV31QDhyA==}
 
-  '@types/node@25.0.1':
-    resolution: {integrity: sha512-czWPzKIAXucn9PtsttxmumiQ9N0ok9FrBwgRWrwmVLlp86BrMExzvXRLFYRJ+Ex3g6yqj+KuaxfX1JTgV2lpfg==}
+  '@types/node@25.0.2':
+    resolution: {integrity: sha512-gWEkeiyYE4vqjON/+Obqcoeffmk0NF15WSBwSs7zwVA2bAbTaE0SJ7P0WNGoJn8uE7fiaV5a7dKYIJriEqOrmA==}
 
   '@types/ping@0.4.4':
     resolution: {integrity: sha512-ifvo6w2f5eJYlXm+HiVx67iJe8WZp87sfa683nlqED5Vnt9Z93onkokNoWqOG21EaE8fMxyKPobE+mkPEyxsdw==}
@@ -3020,9 +3029,6 @@ packages:
     resolution: {integrity: sha512-mThBblvlAF1d4O5oqyvN+ZxLAYwIJK7bpMxgYqPD9okW0C3qm5FFn7k811QrcuEBwaogR3ngOFoCfs6mRv7teQ==}
     engines: {node: '>=14.16'}
 
-  gpt-tokenizer@3.4.0:
-    resolution: {integrity: sha512-wxFLnhIXTDjYebd9A9pGl3e31ZpSypbpIJSOswbgop5jLte/AsZVDvjlbEuVFlsqZixVKqbcoNmRlFDf6pz/UQ==}
-
   graceful-fs@4.2.10:
     resolution: {integrity: sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==}
@@ -6150,13 +6156,14 @@ snapshots:
     '@shikijs/types': 3.20.0
     '@shikijs/vscode-textmate': 10.0.2
 
-  '@git.zone/tsbuild@3.1.2':
+  '@git.zone/tsbuild@4.0.2':
     dependencies:
       '@git.zone/tspublish': 1.10.3
       '@push.rocks/early': 4.0.4
       '@push.rocks/smartcli': 4.0.19
       '@push.rocks/smartdelay': 3.0.5
-      '@push.rocks/smartfile': 11.2.7
+      '@push.rocks/smartfile': 13.1.2
+      '@push.rocks/smartfs': 1.2.0
       '@push.rocks/smartlog': 3.1.10
       '@push.rocks/smartpath': 6.0.0
       '@push.rocks/smartpromise': 4.2.3
@@ -6203,7 +6210,7 @@ snapshots:
   '@git.zone/tsrun@2.0.1':
     dependencies:
-      '@push.rocks/smartfile': 13.1.0
+      '@push.rocks/smartfile': 13.1.2
       '@push.rocks/smartshell': 3.3.0
       tsx: 4.21.0
@@ -6365,7 +6372,7 @@ snapshots:
       '@inquirer/figures': 1.0.10
       '@inquirer/type': 2.0.0
       '@types/mute-stream': 0.0.4
-      '@types/node': 22.19.2
+      '@types/node': 22.19.3
       '@types/wrap-ansi': 3.0.0
       ansi-escapes: 4.3.2
       cli-width: 4.1.0
@@ -6913,6 +6920,25 @@ snapshots:
       '@push.rocks/smartlog': 3.1.10
       '@push.rocks/smartpath': 6.0.0
 
+  '@push.rocks/smartagent@file:../../push.rocks/smartagent(typescript@5.9.3)(ws@8.18.3)(zod@3.25.76)':
+    dependencies:
+      '@push.rocks/smartai': 0.8.0(typescript@5.9.3)(ws@8.18.3)(zod@3.25.76)
+      '@push.rocks/smartbrowser': 2.0.8(typescript@5.9.3)
+      '@push.rocks/smartdeno': 1.2.0
+      '@push.rocks/smartfs': 1.2.0
+      '@push.rocks/smartrequest': 5.0.1
+      '@push.rocks/smartshell': 3.3.0
+    transitivePeerDependencies:
+      - aws-crt
+      - bare-abort-controller
+      - bare-buffer
+      - bufferutil
+      - supports-color
+      - typescript
+      - utf-8-validate
+      - ws
+      - zod
+
   '@push.rocks/smartai@0.8.0(typescript@5.9.3)(ws@8.18.3)(zod@3.25.76)':
     dependencies:
       '@anthropic-ai/sdk': 0.65.0(zod@3.25.76)
@@ -6954,6 +6980,24 @@ snapshots:
     transitivePeerDependencies:
       - supports-color
 
+  '@push.rocks/smartarchive@5.0.1':
+    dependencies:
+      '@push.rocks/smartdelay': 3.0.5
+      '@push.rocks/smartfile': 13.1.2
+      '@push.rocks/smartpath': 6.0.0
+      '@push.rocks/smartpromise': 4.2.3
+      '@push.rocks/smartrequest': 4.4.2
+      '@push.rocks/smartrx': 3.0.10
+      '@push.rocks/smartstream': 3.2.5
+      '@push.rocks/smartunique': 3.0.9
+      '@push.rocks/smarturl': 3.1.0
+      '@types/tar-stream': 3.1.4
+      fflate: 0.8.2
+      file-type: 21.0.0
+      tar-stream: 3.1.7
+    transitivePeerDependencies:
+      - supports-color
+
   '@push.rocks/smartarray@1.1.0': {}
 
   '@push.rocks/smartbrowser@2.0.8(typescript@5.9.3)':
@@ -7074,6 +7118,16 @@ snapshots:
     dependencies:
       '@push.rocks/smartpromise': 4.2.3
 
+  '@push.rocks/smartdeno@1.2.0':
+    dependencies:
+      '@push.rocks/smartarchive': 5.0.1
+      '@push.rocks/smartfs': 1.2.0
+      '@push.rocks/smartpath': 6.0.0
+      '@push.rocks/smartshell': 3.3.0
+      '@push.rocks/smartunique': 3.0.9
+    transitivePeerDependencies:
+      - supports-color
+
   '@push.rocks/smartdns@7.6.1':
     dependencies:
       '@push.rocks/smartdelay': 3.0.5
@@ -7145,7 +7199,7 @@ snapshots:
       glob: 11.1.0
       js-yaml: 4.1.1
 
-  '@push.rocks/smartfile@13.1.0':
+  '@push.rocks/smartfile@13.1.2':
     dependencies:
       '@push.rocks/lik': 6.2.2
       '@push.rocks/smartdelay': 3.0.5
@@ -8549,27 +8603,27 @@ snapshots:
   '@types/bn.js@5.2.0':
     dependencies:
-      '@types/node': 25.0.1
+      '@types/node': 25.0.2
 
   '@types/body-parser@1.19.6':
     dependencies:
       '@types/connect': 3.4.38
-      '@types/node': 25.0.1
+      '@types/node': 25.0.2
 
   '@types/buffer-json@2.0.3': {}
 
   '@types/clean-css@4.2.11':
     dependencies:
-      '@types/node': 25.0.1
+      '@types/node': 25.0.2
       source-map: 0.6.1
 
   '@types/connect@3.4.38':
     dependencies:
-      '@types/node': 25.0.1
+      '@types/node': 25.0.2
 
   '@types/cors@2.8.19':
     dependencies:
-      '@types/node': 25.0.1
+      '@types/node': 25.0.2
 
   '@types/debug@4.1.12':
     dependencies:
@@ -8581,7 +8635,7 @@ snapshots:
   '@types/dns-packet@5.6.5':
     dependencies:
-      '@types/node': 25.0.1
+      '@types/node': 25.0.2
 
   '@types/elliptic@6.4.18':
     dependencies:
@@ -8589,7 +8643,7 @@ snapshots:
   '@types/express-serve-static-core@5.1.0':
     dependencies:
-      '@types/node': 25.0.1
+      '@types/node': 25.0.2
       '@types/qs': 6.14.0
       '@types/range-parser': 1.2.7
       '@types/send': 1.2.1
@@ -8609,7 +8663,7 @@ snapshots:
   '@types/fs-extra@11.0.4':
     dependencies:
       '@types/jsonfile': 6.1.4
-      '@types/node': 25.0.1
+      '@types/node': 25.0.2
 
   '@types/hast@3.0.4':
     dependencies:
@@ -8631,7 +8685,7 @@ snapshots:
   '@types/jsonfile@6.1.4':
     dependencies:
-      '@types/node': 25.0.1
+      '@types/node': 25.0.2
 
   '@types/mdast@4.0.4':
     dependencies:
@@ -8647,19 +8701,19 @@ snapshots:
   '@types/mute-stream@0.0.4':
     dependencies:
-      '@types/node': 25.0.1
+      '@types/node': 25.0.2
 
   '@types/node-forge@1.3.14':
     dependencies:
-      '@types/node': 25.0.1
+      '@types/node': 25.0.2
 
   '@types/node@16.9.1': {}
 
-  '@types/node@22.19.2':
+  '@types/node@22.19.3':
     dependencies:
       undici-types: 6.21.0
 
-  '@types/node@25.0.1':
+  '@types/node@25.0.2':
     dependencies:
       undici-types: 7.16.0
@@ -8678,32 +8732,32 @@ snapshots:
   '@types/send@0.17.6':
     dependencies:
       '@types/mime': 1.3.5
-      '@types/node': 25.0.1
+      '@types/node': 25.0.2
 
   '@types/send@1.2.1':
     dependencies:
-      '@types/node': 25.0.1
+      '@types/node': 25.0.2
 
   '@types/serve-static@1.15.10':
     dependencies:
       '@types/http-errors': 2.0.5
-      '@types/node': 25.0.1
+      '@types/node': 25.0.2
       '@types/send': 0.17.6
 
   '@types/serve-static@2.2.0':
     dependencies:
       '@types/http-errors': 2.0.5
-      '@types/node': 25.0.1
+      '@types/node': 25.0.2
 
   '@types/symbol-tree@3.2.5': {}
 
   '@types/tar-stream@3.1.4':
     dependencies:
-      '@types/node': 25.0.1
+      '@types/node': 25.0.2
 
   '@types/through2@2.0.41':
     dependencies:
-      '@types/node': 25.0.1
+      '@types/node': 25.0.2
 
   '@types/trusted-types@2.0.7': {}
@@ -8729,11 +8783,11 @@ snapshots:
   '@types/ws@8.18.1':
     dependencies:
-      '@types/node': 25.0.1
+      '@types/node': 25.0.2
 
   '@types/yauzl@2.10.3':
     dependencies:
-      '@types/node': 25.0.1
+      '@types/node': 25.0.2
     optional: true
 
   '@ungap/structured-clone@1.3.0': {}
@@ -9203,7 +9257,7 @@ snapshots:
   engine.io@6.6.4:
     dependencies:
       '@types/cors': 2.8.19
-      '@types/node': 25.0.1
+      '@types/node': 25.0.2
       accepts: 1.3.8
       base64id: 2.0.0
       cookie: 0.7.2
@@ -9627,8 +9681,6 @@ snapshots:
       p-cancelable: 3.0.0
       responselike: 3.0.0
 
-  gpt-tokenizer@3.4.0: {}
-
   graceful-fs@4.2.10: {}
 
   graceful-fs@4.2.11: {}

Deleted test file (ContextAnalyzer tests)

@@ -1,465 +0,0 @@
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as path from 'path';
import { ContextAnalyzer } from '../ts/context/context-analyzer.js';
import type { IFileMetadata } from '../ts/context/types.js';
const testProjectRoot = process.cwd();
tap.test('ContextAnalyzer should create instance with default weights', async () => {
const analyzer = new ContextAnalyzer(testProjectRoot);
expect(analyzer).toBeInstanceOf(ContextAnalyzer);
});
tap.test('ContextAnalyzer should create instance with custom weights', async () => {
const analyzer = new ContextAnalyzer(
testProjectRoot,
{
dependencyWeight: 0.5,
relevanceWeight: 0.3,
efficiencyWeight: 0.1,
recencyWeight: 0.1
}
);
expect(analyzer).toBeInstanceOf(ContextAnalyzer);
});
tap.test('ContextAnalyzer.analyze should return analysis result with files', async () => {
const analyzer = new ContextAnalyzer(testProjectRoot);
const metadata: IFileMetadata[] = [
{
path: path.join(testProjectRoot, 'ts/context/types.ts'),
relativePath: 'ts/context/types.ts',
size: 5000,
mtime: Date.now(),
estimatedTokens: 1250
},
{
path: path.join(testProjectRoot, 'ts/context/enhanced-context.ts'),
relativePath: 'ts/context/enhanced-context.ts',
size: 10000,
mtime: Date.now(),
estimatedTokens: 2500
}
];
const result = await analyzer.analyze(metadata, 'readme');
expect(result.taskType).toEqual('readme');
expect(result.files.length).toEqual(2);
expect(result.totalFiles).toEqual(2);
expect(result.analysisDuration).toBeGreaterThan(0);
expect(result.dependencyGraph).toBeDefined();
});
tap.test('ContextAnalyzer.analyze should assign importance scores to files', async () => {
const analyzer = new ContextAnalyzer(testProjectRoot);
const metadata: IFileMetadata[] = [
{
path: path.join(testProjectRoot, 'ts/context/types.ts'),
relativePath: 'ts/context/types.ts',
size: 3000,
mtime: Date.now(),
estimatedTokens: 750
}
];
const result = await analyzer.analyze(metadata, 'readme');
expect(result.files[0].importanceScore).toBeGreaterThanOrEqual(0);
expect(result.files[0].importanceScore).toBeLessThanOrEqual(1);
});
tap.test('ContextAnalyzer.analyze should sort files by importance score', async () => {
const analyzer = new ContextAnalyzer(testProjectRoot);
const metadata: IFileMetadata[] = [
{
path: path.join(testProjectRoot, 'ts/context/types.ts'),
relativePath: 'ts/context/types.ts',
size: 3000,
mtime: Date.now(),
estimatedTokens: 750
},
{
path: path.join(testProjectRoot, 'test/test.basic.node.ts'),
relativePath: 'test/test.basic.node.ts',
size: 2000,
mtime: Date.now(),
estimatedTokens: 500
}
];
const result = await analyzer.analyze(metadata, 'readme');
// Files should be sorted by importance (highest first)
for (let i = 0; i < result.files.length - 1; i++) {
expect(result.files[i].importanceScore).toBeGreaterThanOrEqual(
result.files[i + 1].importanceScore
);
}
});
tap.test('ContextAnalyzer.analyze should assign tiers based on scores', async () => {
const analyzer = new ContextAnalyzer(testProjectRoot);
const metadata: IFileMetadata[] = [
{
path: path.join(testProjectRoot, 'ts/index.ts'),
relativePath: 'ts/index.ts',
size: 3000,
mtime: Date.now(),
estimatedTokens: 750
}
];
const result = await analyzer.analyze(metadata, 'readme');
const file = result.files[0];
expect(['essential', 'important', 'optional', 'excluded']).toContain(file.tier);
});
tap.test('ContextAnalyzer should prioritize index.ts files for README task', async () => {
const analyzer = new ContextAnalyzer(testProjectRoot);
const metadata: IFileMetadata[] = [
{
path: path.join(testProjectRoot, 'ts/index.ts'),
relativePath: 'ts/index.ts',
size: 3000,
mtime: Date.now(),
estimatedTokens: 750
},
{
path: path.join(testProjectRoot, 'ts/some-helper.ts'),
relativePath: 'ts/some-helper.ts',
size: 3000,
mtime: Date.now(),
estimatedTokens: 750
}
];
const result = await analyzer.analyze(metadata, 'readme');
// index.ts should have higher relevance score
const indexFile = result.files.find(f => f.path.includes('index.ts'));
const helperFile = result.files.find(f => f.path.includes('some-helper.ts'));
if (indexFile && helperFile) {
expect(indexFile.relevanceScore).toBeGreaterThan(helperFile.relevanceScore);
}
});
tap.test('ContextAnalyzer should deprioritize test files for README task', async () => {
const analyzer = new ContextAnalyzer(testProjectRoot);
const metadata: IFileMetadata[] = [
{
path: path.join(testProjectRoot, 'ts/context/types.ts'),
relativePath: 'ts/context/types.ts',
size: 3000,
mtime: Date.now(),
estimatedTokens: 750
},
{
path: path.join(testProjectRoot, 'test/test.basic.node.ts'),
relativePath: 'test/test.basic.node.ts',
size: 3000,
mtime: Date.now(),
estimatedTokens: 750
}
];
const result = await analyzer.analyze(metadata, 'readme');
// Source file should have higher relevance than test file
const sourceFile = result.files.find(f => f.path.includes('ts/context/types.ts'));
const testFile = result.files.find(f => f.path.includes('test/test.basic.node.ts'));
if (sourceFile && testFile) {
expect(sourceFile.relevanceScore).toBeGreaterThan(testFile.relevanceScore);
}
});
tap.test('ContextAnalyzer should prioritize changed files for commit task', async () => {
const analyzer = new ContextAnalyzer(testProjectRoot);
const changedFile = path.join(testProjectRoot, 'ts/context/types.ts');
const unchangedFile = path.join(testProjectRoot, 'ts/index.ts');
const metadata: IFileMetadata[] = [
{
path: changedFile,
relativePath: 'ts/context/types.ts',
size: 3000,
mtime: Date.now(),
estimatedTokens: 750
},
{
path: unchangedFile,
relativePath: 'ts/index.ts',
size: 3000,
mtime: Date.now(),
estimatedTokens: 750
}
];
const result = await analyzer.analyze(metadata, 'commit', [changedFile]);
const changed = result.files.find(f => f.path === changedFile);
const unchanged = result.files.find(f => f.path === unchangedFile);
if (changed && unchanged) {
// Changed file should have recency score of 1.0
expect(changed.recencyScore).toEqual(1.0);
// Unchanged file should have recency score of 0
expect(unchanged.recencyScore).toEqual(0);
}
});
tap.test('ContextAnalyzer should calculate efficiency scores', async () => {
const analyzer = new ContextAnalyzer(testProjectRoot);
const metadata: IFileMetadata[] = [
{
path: path.join(testProjectRoot, 'ts/context/types.ts'),
relativePath: 'ts/context/types.ts',
size: 5000, // Optimal size
mtime: Date.now(),
estimatedTokens: 1250
},
{
path: path.join(testProjectRoot, 'ts/very-large-file.ts'),
relativePath: 'ts/very-large-file.ts',
size: 50000, // Too large
mtime: Date.now(),
estimatedTokens: 12500
}
];
const result = await analyzer.analyze(metadata, 'readme');
// Optimal size file should have better efficiency score
const optimalFile = result.files.find(f => f.path.includes('types.ts'));
const largeFile = result.files.find(f => f.path.includes('very-large-file.ts'));
if (optimalFile && largeFile) {
expect(optimalFile.efficiencyScore).toBeGreaterThan(largeFile.efficiencyScore);
}
});
tap.test('ContextAnalyzer should build dependency graph', async () => {
const analyzer = new ContextAnalyzer(testProjectRoot);
const metadata: IFileMetadata[] = [
{
path: path.join(testProjectRoot, 'ts/context/enhanced-context.ts'),
relativePath: 'ts/context/enhanced-context.ts',
size: 10000,
mtime: Date.now(),
estimatedTokens: 2500
},
{
path: path.join(testProjectRoot, 'ts/context/types.ts'),
relativePath: 'ts/context/types.ts',
size: 5000,
mtime: Date.now(),
estimatedTokens: 1250
}
];
const result = await analyzer.analyze(metadata, 'readme');
expect(result.dependencyGraph.size).toBeGreaterThan(0);
// Check that each file has dependency info
for (const meta of metadata) {
const deps = result.dependencyGraph.get(meta.path);
expect(deps).toBeDefined();
expect(deps!.path).toEqual(meta.path);
expect(deps!.imports).toBeDefined();
expect(deps!.importedBy).toBeDefined();
expect(deps!.centrality).toBeGreaterThanOrEqual(0);
}
});
tap.test('ContextAnalyzer should calculate centrality scores', async () => {
const analyzer = new ContextAnalyzer(testProjectRoot);
const metadata: IFileMetadata[] = [
{
path: path.join(testProjectRoot, 'ts/context/types.ts'),
relativePath: 'ts/context/types.ts',
size: 5000,
mtime: Date.now(),
estimatedTokens: 1250
},
{
path: path.join(testProjectRoot, 'ts/context/enhanced-context.ts'),
relativePath: 'ts/context/enhanced-context.ts',
size: 10000,
mtime: Date.now(),
estimatedTokens: 2500
}
];
const result = await analyzer.analyze(metadata, 'readme');
// All centrality scores should be between 0 and 1
for (const [, deps] of result.dependencyGraph) {
expect(deps.centrality).toBeGreaterThanOrEqual(0);
expect(deps.centrality).toBeLessThanOrEqual(1);
}
});
tap.test('ContextAnalyzer should assign higher centrality to highly imported files', async () => {
const analyzer = new ContextAnalyzer(testProjectRoot);
// types.ts is likely imported by many files
const typesPath = path.join(testProjectRoot, 'ts/context/types.ts');
// A test file is likely imported by fewer files
const testPath = path.join(testProjectRoot, 'test/test.basic.node.ts');
const metadata: IFileMetadata[] = [
{
path: typesPath,
relativePath: 'ts/context/types.ts',
size: 5000,
mtime: Date.now(),
estimatedTokens: 1250
},
{
path: testPath,
relativePath: 'test/test.basic.node.ts',
size: 3000,
mtime: Date.now(),
estimatedTokens: 750
}
];
const result = await analyzer.analyze(metadata, 'readme');
const typesDeps = result.dependencyGraph.get(typesPath);
const testDeps = result.dependencyGraph.get(testPath);
if (typesDeps && testDeps) {
// types.ts should generally have higher centrality due to being imported more
expect(typesDeps.centrality).toBeGreaterThanOrEqual(0);
expect(testDeps.centrality).toBeGreaterThanOrEqual(0);
}
});
tap.test('ContextAnalyzer should provide reason for scoring', async () => {
const analyzer = new ContextAnalyzer(testProjectRoot);
const metadata: IFileMetadata[] = [
{
path: path.join(testProjectRoot, 'ts/index.ts'),
relativePath: 'ts/index.ts',
size: 3000,
mtime: Date.now(),
estimatedTokens: 750
}
];
const result = await analyzer.analyze(metadata, 'readme');
expect(result.files[0].reason).toBeDefined();
expect(result.files[0].reason!.length).toBeGreaterThan(0);
});
tap.test('ContextAnalyzer should handle empty metadata array', async () => {
const analyzer = new ContextAnalyzer(testProjectRoot);
const result = await analyzer.analyze([], 'readme');
expect(result.files.length).toEqual(0);
expect(result.totalFiles).toEqual(0);
expect(result.dependencyGraph.size).toEqual(0);
});
tap.test('ContextAnalyzer should respect custom tier configuration', async () => {
const analyzer = new ContextAnalyzer(
testProjectRoot,
{},
{
essential: { minScore: 0.9, trimLevel: 'none' },
important: { minScore: 0.7, trimLevel: 'light' },
optional: { minScore: 0.5, trimLevel: 'aggressive' }
}
);
const metadata: IFileMetadata[] = [
{
path: path.join(testProjectRoot, 'ts/context/types.ts'),
relativePath: 'ts/context/types.ts',
size: 3000,
mtime: Date.now(),
estimatedTokens: 750
}
];
const result = await analyzer.analyze(metadata, 'readme');
// Should use custom tier thresholds
const file = result.files[0];
expect(['essential', 'important', 'optional', 'excluded']).toContain(file.tier);
});
tap.test('ContextAnalyzer should calculate combined importance score from all factors', async () => {
const analyzer = new ContextAnalyzer(testProjectRoot, {
dependencyWeight: 0.25,
relevanceWeight: 0.25,
efficiencyWeight: 0.25,
recencyWeight: 0.25
});
const metadata: IFileMetadata[] = [
{
path: path.join(testProjectRoot, 'ts/context/types.ts'),
relativePath: 'ts/context/types.ts',
size: 5000,
mtime: Date.now(),
estimatedTokens: 1250
}
];
const result = await analyzer.analyze(metadata, 'readme');
const file = result.files[0];
// Importance score should be weighted sum of all factors
// With equal weights (0.25 each), importance should be average of all scores
const expectedImportance =
(file.relevanceScore * 0.25) +
(file.centralityScore * 0.25) +
(file.efficiencyScore * 0.25) +
(file.recencyScore * 0.25);
expect(file.importanceScore).toBeCloseTo(expectedImportance, 2);
});
tap.test('ContextAnalyzer should complete analysis within reasonable time', async () => {
const analyzer = new ContextAnalyzer(testProjectRoot);
const metadata: IFileMetadata[] = Array.from({ length: 10 }, (_, i) => ({
path: path.join(testProjectRoot, `ts/file${i}.ts`),
relativePath: `ts/file${i}.ts`,
size: 3000,
mtime: Date.now(),
estimatedTokens: 750
}));
const startTime = Date.now();
const result = await analyzer.analyze(metadata, 'readme');
const endTime = Date.now();
const duration = endTime - startTime;
// Analysis duration should be recorded (can be 0 for fast operations)
expect(result.analysisDuration).toBeGreaterThanOrEqual(0);
expect(duration).toBeLessThan(10000); // Should complete within 10 seconds
});
export default tap.start();

Deleted test file (ContextCache tests)

@@ -1,465 +0,0 @@
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as path from 'path';
import * as fs from 'fs';
import { ContextCache } from '../ts/context/context-cache.js';
import type { ICacheEntry } from '../ts/context/types.js';
const testProjectRoot = process.cwd();
const testCacheDir = path.join(testProjectRoot, '.nogit', 'test-cache');
// Helper to clean up test cache directory
async function cleanupTestCache() {
try {
await fs.promises.rm(testCacheDir, { recursive: true, force: true });
} catch (error) {
// Ignore if directory doesn't exist
}
}
tap.test('ContextCache should create instance with default config', async () => {
await cleanupTestCache();
const cache = new ContextCache(testProjectRoot, {
directory: testCacheDir,
enabled: true
});
expect(cache).toBeInstanceOf(ContextCache);
await cleanupTestCache();
});
tap.test('ContextCache.init should create cache directory', async () => {
await cleanupTestCache();
const cache = new ContextCache(testProjectRoot, {
directory: testCacheDir,
enabled: true
});
await cache.init();
// Check that cache directory was created
const exists = await fs.promises.access(testCacheDir).then(() => true).catch(() => false);
expect(exists).toEqual(true);
await cleanupTestCache();
});
tap.test('ContextCache.set should store cache entry', async () => {
await cleanupTestCache();
const cache = new ContextCache(testProjectRoot, {
directory: testCacheDir,
enabled: true
});
await cache.init();
const testPath = path.join(testProjectRoot, 'package.json');
// Get actual file mtime for validation to work
const stats = await fs.promises.stat(testPath);
const fileMtime = Math.floor(stats.mtimeMs);
const entry: ICacheEntry = {
path: testPath,
contents: 'test content',
tokenCount: 100,
mtime: fileMtime,
cachedAt: Date.now()
};
await cache.set(entry);
const retrieved = await cache.get(testPath);
expect(retrieved).toBeDefined();
expect(retrieved!.contents).toEqual('test content');
expect(retrieved!.tokenCount).toEqual(100);
await cleanupTestCache();
});
tap.test('ContextCache.get should return null for non-existent entry', async () => {
await cleanupTestCache();
const cache = new ContextCache(testProjectRoot, {
directory: testCacheDir,
enabled: true
});
await cache.init();
const retrieved = await cache.get('/non/existent/path.ts');
expect(retrieved).toBeNull();
await cleanupTestCache();
});
tap.test('ContextCache.get should invalidate expired entries', async () => {
await cleanupTestCache();
const cache = new ContextCache(testProjectRoot, {
directory: testCacheDir,
enabled: true,
ttl: 1 // 1 second TTL
});
await cache.init();
const testPath = path.join(testProjectRoot, 'test-file.ts');
const entry: ICacheEntry = {
path: testPath,
contents: 'test content',
tokenCount: 100,
mtime: Date.now(),
cachedAt: Date.now() - 2000 // Cached 2 seconds ago (expired)
};
await cache.set(entry);
// Wait a bit to ensure expiration logic runs
await new Promise(resolve => setTimeout(resolve, 100));
const retrieved = await cache.get(testPath);
expect(retrieved).toBeNull(); // Should be expired
await cleanupTestCache();
});
tap.test('ContextCache.get should invalidate entries when file mtime changes', async () => {
await cleanupTestCache();
const cache = new ContextCache(testProjectRoot, {
directory: testCacheDir,
enabled: true
});
await cache.init();
const testPath = path.join(testProjectRoot, 'package.json');
const stats = await fs.promises.stat(testPath);
const oldMtime = Math.floor(stats.mtimeMs);
const entry: ICacheEntry = {
path: testPath,
contents: 'test content',
tokenCount: 100,
mtime: oldMtime - 1000, // Old mtime (file has changed)
cachedAt: Date.now()
};
await cache.set(entry);
const retrieved = await cache.get(testPath);
expect(retrieved).toBeNull(); // Should be invalidated due to mtime mismatch
await cleanupTestCache();
});
tap.test('ContextCache.has should check if file is cached and valid', async () => {
await cleanupTestCache();
const cache = new ContextCache(testProjectRoot, {
directory: testCacheDir,
enabled: true
});
await cache.init();
const testPath = path.join(testProjectRoot, 'package.json');
const stats = await fs.promises.stat(testPath);
const entry: ICacheEntry = {
path: testPath,
contents: 'test content',
tokenCount: 100,
mtime: Math.floor(stats.mtimeMs),
cachedAt: Date.now()
};
await cache.set(entry);
const hasIt = await cache.has(testPath);
expect(hasIt).toEqual(true);
const doesNotHaveIt = await cache.has('/non/existent/path.ts');
expect(doesNotHaveIt).toEqual(false);
await cleanupTestCache();
});
tap.test('ContextCache.setMany should store multiple entries', async () => {
await cleanupTestCache();
const cache = new ContextCache(testProjectRoot, {
directory: testCacheDir,
enabled: true
});
await cache.init();
const entries: ICacheEntry[] = [
{
path: '/test/file1.ts',
contents: 'content 1',
tokenCount: 100,
mtime: Date.now(),
cachedAt: Date.now()
},
{
path: '/test/file2.ts',
contents: 'content 2',
tokenCount: 200,
mtime: Date.now(),
cachedAt: Date.now()
}
];
await cache.setMany(entries);
const stats = cache.getStats();
expect(stats.entries).toBeGreaterThanOrEqual(2);
await cleanupTestCache();
});
tap.test('ContextCache.getStats should return cache statistics', async () => {
await cleanupTestCache();
const cache = new ContextCache(testProjectRoot, {
directory: testCacheDir,
enabled: true
});
await cache.init();
const entry: ICacheEntry = {
path: '/test/file.ts',
contents: 'test content with some length',
tokenCount: 100,
mtime: Date.now(),
cachedAt: Date.now()
};
await cache.set(entry);
const stats = cache.getStats();
expect(stats.entries).toEqual(1);
expect(stats.totalSize).toBeGreaterThan(0);
expect(stats.oldestEntry).toBeDefined();
expect(stats.newestEntry).toBeDefined();
await cleanupTestCache();
});
tap.test('ContextCache.clear should clear all entries', async () => {
await cleanupTestCache();
const cache = new ContextCache(testProjectRoot, {
directory: testCacheDir,
enabled: true
});
await cache.init();
const entry: ICacheEntry = {
path: '/test/file.ts',
contents: 'test content',
tokenCount: 100,
mtime: Date.now(),
cachedAt: Date.now()
};
await cache.set(entry);
expect(cache.getStats().entries).toEqual(1);
await cache.clear();
expect(cache.getStats().entries).toEqual(0);
await cleanupTestCache();
});
tap.test('ContextCache.clearPaths should clear specific entries', async () => {
await cleanupTestCache();
const cache = new ContextCache(testProjectRoot, {
directory: testCacheDir,
enabled: true
});
await cache.init();
const entries: ICacheEntry[] = [
{
path: '/test/file1.ts',
contents: 'content 1',
tokenCount: 100,
mtime: Date.now(),
cachedAt: Date.now()
},
{
path: '/test/file2.ts',
contents: 'content 2',
tokenCount: 200,
mtime: Date.now(),
cachedAt: Date.now()
}
];
await cache.setMany(entries);
expect(cache.getStats().entries).toEqual(2);
await cache.clearPaths(['/test/file1.ts']);
expect(cache.getStats().entries).toEqual(1);
await cleanupTestCache();
});
tap.test('ContextCache should enforce max size by evicting oldest entries', async () => {
await cleanupTestCache();
const cache = new ContextCache(testProjectRoot, {
directory: testCacheDir,
enabled: true,
maxSize: 0.001 // Very small: 0.001 MB = 1KB
});
await cache.init();
// Add entries that exceed the max size
const largeContent = 'x'.repeat(500); // 500 bytes
const entries: ICacheEntry[] = [
{
path: '/test/file1.ts',
contents: largeContent,
tokenCount: 100,
mtime: Date.now(),
cachedAt: Date.now() - 3000 // Oldest
},
{
path: '/test/file2.ts',
contents: largeContent,
tokenCount: 100,
mtime: Date.now(),
cachedAt: Date.now() - 2000
},
{
path: '/test/file3.ts',
contents: largeContent,
tokenCount: 100,
mtime: Date.now(),
cachedAt: Date.now() - 1000 // Newest
}
];
await cache.setMany(entries);
const stats = cache.getStats();
// Should have evicted oldest entries to stay under size limit
expect(stats.totalSize).toBeLessThanOrEqual(1024); // 1KB
await cleanupTestCache();
});
tap.test('ContextCache should not cache when disabled', async () => {
await cleanupTestCache();
const cache = new ContextCache(testProjectRoot, {
directory: testCacheDir,
enabled: false
});
await cache.init();
const entry: ICacheEntry = {
path: '/test/file.ts',
contents: 'test content',
tokenCount: 100,
mtime: Date.now(),
cachedAt: Date.now()
};
await cache.set(entry);
const retrieved = await cache.get('/test/file.ts');
expect(retrieved).toBeNull();
await cleanupTestCache();
});
tap.test('ContextCache should persist to disk and reload', async () => {
await cleanupTestCache();
// Create first cache instance and add entry
const cache1 = new ContextCache(testProjectRoot, {
directory: testCacheDir,
enabled: true
});
await cache1.init();
// Use a real file that exists so validation passes
const testPath = path.join(testProjectRoot, 'package.json');
const stats = await fs.promises.stat(testPath);
const fileMtime = Math.floor(stats.mtimeMs);
const entry: ICacheEntry = {
path: testPath,
contents: 'persistent content',
tokenCount: 150,
mtime: fileMtime,
cachedAt: Date.now()
};
await cache1.set(entry);
// Wait for persist
await new Promise(resolve => setTimeout(resolve, 500));
// Create second cache instance (should reload from disk)
const cache2 = new ContextCache(testProjectRoot, {
directory: testCacheDir,
enabled: true
});
await cache2.init();
const cacheStats = cache2.getStats();
expect(cacheStats.entries).toBeGreaterThan(0);
await cleanupTestCache();
});
tap.test('ContextCache should handle invalid cache index gracefully', async () => {
await cleanupTestCache();
const cache = new ContextCache(testProjectRoot, {
directory: testCacheDir,
enabled: true
});
// Create cache dir manually
await fs.promises.mkdir(testCacheDir, { recursive: true });
// Write invalid JSON to cache index
const cacheIndexPath = path.join(testCacheDir, 'index.json');
await fs.promises.writeFile(cacheIndexPath, 'invalid json {', 'utf-8');
// Should not throw, should just start with empty cache
await cache.init();
const stats = cache.getStats();
expect(stats.entries).toEqual(0);
await cleanupTestCache();
});
tap.test('ContextCache should return proper stats for empty cache', async () => {
await cleanupTestCache();
const cache = new ContextCache(testProjectRoot, {
directory: testCacheDir,
enabled: true
});
await cache.init();
const stats = cache.getStats();
expect(stats.entries).toEqual(0);
expect(stats.totalSize).toEqual(0);
expect(stats.oldestEntry).toBeNull();
expect(stats.newestEntry).toBeNull();
await cleanupTestCache();
});
export default tap.start();

Test file (DiffProcessor tests)

@@ -1,5 +1,5 @@
 import { tap, expect } from '@git.zone/tstest/tapbundle';
-import { DiffProcessor } from '../ts/context/diff-processor.js';
+import { DiffProcessor } from '../ts/classes.diffprocessor.js';
 
 // Sample diff strings for testing
 const createSmallDiff = (filepath: string, addedLines = 5, removedLines = 3): string => {

Deleted test file (IterativeContextBuilder tests)

@@ -1,147 +0,0 @@
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as path from 'path';
import { IterativeContextBuilder } from '../ts/context/iterative-context-builder.js';
import type { IIterativeConfig, TaskType } from '../ts/context/types.js';
import * as qenv from '@push.rocks/qenv';
// Test project directory
const testProjectRoot = path.join(process.cwd());
// Helper to check if OPENAI_TOKEN is available
async function hasOpenAIToken(): Promise<boolean> {
try {
const qenvInstance = new qenv.Qenv();
const token = await qenvInstance.getEnvVarOnDemand('OPENAI_TOKEN');
return !!token;
} catch (error) {
return false;
}
}
tap.test('IterativeContextBuilder should create instance with default config', async () => {
const builder = new IterativeContextBuilder(testProjectRoot);
expect(builder).toBeInstanceOf(IterativeContextBuilder);
});
tap.test('IterativeContextBuilder should create instance with custom config', async () => {
const customConfig: Partial<IIterativeConfig> = {
maxIterations: 3,
firstPassFileLimit: 5,
subsequentPassFileLimit: 3,
temperature: 0.5,
model: 'gpt-4',
};
const builder = new IterativeContextBuilder(testProjectRoot, customConfig);
expect(builder).toBeInstanceOf(IterativeContextBuilder);
});
tap.test('IterativeContextBuilder should initialize successfully', async () => {
if (!(await hasOpenAIToken())) {
console.log('⚠️ Skipping initialization test - OPENAI_TOKEN not available');
return;
}
const builder = new IterativeContextBuilder(testProjectRoot);
await builder.initialize();
// If we get here without error, initialization succeeded
expect(true).toEqual(true);
});
tap.test('IterativeContextBuilder should build context iteratively for readme task', async () => {
if (!(await hasOpenAIToken())) {
console.log('⚠️ Skipping iterative build test - OPENAI_TOKEN not available');
return;
}
const builder = new IterativeContextBuilder(testProjectRoot, {
maxIterations: 2, // Limit iterations for testing
firstPassFileLimit: 3,
subsequentPassFileLimit: 2,
});
await builder.initialize();
const result = await builder.buildContextIteratively('readme');
// Verify result structure
expect(result).toBeTypeOf('object');
expect(result.context).toBeTypeOf('string');
expect(result.context.length).toBeGreaterThan(0);
expect(result.tokenCount).toBeTypeOf('number');
expect(result.tokenCount).toBeGreaterThan(0);
expect(result.includedFiles).toBeInstanceOf(Array);
expect(result.includedFiles.length).toBeGreaterThan(0);
expect(result.iterationCount).toBeTypeOf('number');
expect(result.iterationCount).toBeGreaterThan(0);
expect(result.iterationCount).toBeLessThanOrEqual(2);
expect(result.iterations).toBeInstanceOf(Array);
expect(result.iterations.length).toEqual(result.iterationCount);
expect(result.apiCallCount).toBeTypeOf('number');
expect(result.apiCallCount).toBeGreaterThan(0);
expect(result.totalDuration).toBeTypeOf('number');
expect(result.totalDuration).toBeGreaterThan(0);
// Verify iteration structure
for (const iteration of result.iterations) {
expect(iteration.iteration).toBeTypeOf('number');
expect(iteration.filesLoaded).toBeInstanceOf(Array);
expect(iteration.tokensUsed).toBeTypeOf('number');
expect(iteration.totalTokensUsed).toBeTypeOf('number');
expect(iteration.decision).toBeTypeOf('object');
expect(iteration.duration).toBeTypeOf('number');
}
console.log(`✅ Iterative context build completed:`);
console.log(` Iterations: ${result.iterationCount}`);
console.log(` Files: ${result.includedFiles.length}`);
console.log(` Tokens: ${result.tokenCount}`);
console.log(` API calls: ${result.apiCallCount}`);
console.log(` Duration: ${(result.totalDuration / 1000).toFixed(2)}s`);
});
tap.test('IterativeContextBuilder should respect token budget', async () => {
if (!(await hasOpenAIToken())) {
console.log('⚠️ Skipping token budget test - OPENAI_TOKEN not available');
return;
}
const builder = new IterativeContextBuilder(testProjectRoot, {
maxIterations: 5,
});
await builder.initialize();
const result = await builder.buildContextIteratively('description');
// Token count should not exceed budget significantly (allow 5% margin for safety)
const configManager = (await import('../ts/context/config-manager.js')).ConfigManager.getInstance();
const maxTokens = configManager.getMaxTokens();
expect(result.tokenCount).toBeLessThanOrEqual(maxTokens * 1.05);
console.log(`✅ Token budget respected: ${result.tokenCount}/${maxTokens}`);
});
tap.test('IterativeContextBuilder should work with different task types', async () => {
if (!(await hasOpenAIToken())) {
console.log('⚠️ Skipping task types test - OPENAI_TOKEN not available');
return;
}
const taskTypes: TaskType[] = ['readme', 'description', 'commit'];
for (const taskType of taskTypes) {
const builder = new IterativeContextBuilder(testProjectRoot, {
maxIterations: 2,
firstPassFileLimit: 2,
});
await builder.initialize();
const result = await builder.buildContextIteratively(taskType);
expect(result.includedFiles.length).toBeGreaterThan(0);
console.log(`${taskType}: ${result.includedFiles.length} files, ${result.tokenCount} tokens`);
}
});
export default tap.start();

Deleted test file (LazyFileLoader tests)

@@ -1,243 +0,0 @@
import { tap, expect } from '@git.zone/tstest/tapbundle';
import * as path from 'path';
import { LazyFileLoader } from '../ts/context/lazy-file-loader.js';
import type { IFileMetadata } from '../ts/context/types.js';
const testProjectRoot = process.cwd();
tap.test('LazyFileLoader should create instance with project root', async () => {
const loader = new LazyFileLoader(testProjectRoot);
expect(loader).toBeInstanceOf(LazyFileLoader);
});
tap.test('LazyFileLoader.getMetadata should return file metadata without loading contents', async () => {
const loader = new LazyFileLoader(testProjectRoot);
const packageJsonPath = path.join(testProjectRoot, 'package.json');
const metadata = await loader.getMetadata(packageJsonPath);
expect(metadata.path).toEqual(packageJsonPath);
expect(metadata.relativePath).toEqual('package.json');
expect(metadata.size).toBeGreaterThan(0);
expect(metadata.mtime).toBeGreaterThan(0);
expect(metadata.estimatedTokens).toBeGreaterThan(0);
// Rough estimate: size / 4 (with reasonable tolerance)
expect(metadata.estimatedTokens).toBeGreaterThan(metadata.size / 5);
expect(metadata.estimatedTokens).toBeLessThan(metadata.size / 3);
});
tap.test('LazyFileLoader.getMetadata should cache metadata for same file', async () => {
const loader = new LazyFileLoader(testProjectRoot);
const packageJsonPath = path.join(testProjectRoot, 'package.json');
const metadata1 = await loader.getMetadata(packageJsonPath);
const metadata2 = await loader.getMetadata(packageJsonPath);
// Should return identical metadata from cache
expect(metadata1.mtime).toEqual(metadata2.mtime);
expect(metadata1.size).toEqual(metadata2.size);
expect(metadata1.estimatedTokens).toEqual(metadata2.estimatedTokens);
});
tap.test('LazyFileLoader.scanFiles should scan TypeScript files', async () => {
const loader = new LazyFileLoader(testProjectRoot);
const metadata = await loader.scanFiles(['ts/context/types.ts']);
expect(metadata.length).toBeGreaterThan(0);
const typesFile = metadata.find(m => m.relativePath.includes('types.ts'));
expect(typesFile).toBeDefined();
expect(typesFile!.size).toBeGreaterThan(0);
expect(typesFile!.estimatedTokens).toBeGreaterThan(0);
});
tap.test('LazyFileLoader.scanFiles should handle multiple globs', async () => {
const loader = new LazyFileLoader(testProjectRoot);
const metadata = await loader.scanFiles([
'package.json',
'readme.md'
]);
expect(metadata.length).toBeGreaterThanOrEqual(2);
const hasPackageJson = metadata.some(m => m.relativePath === 'package.json');
const hasReadme = metadata.some(m => m.relativePath.toLowerCase() === 'readme.md');
expect(hasPackageJson).toEqual(true);
expect(hasReadme).toEqual(true);
});
tap.test('LazyFileLoader.loadFile should load file with actual token count', async () => {
const loader = new LazyFileLoader(testProjectRoot);
const packageJsonPath = path.join(testProjectRoot, 'package.json');
const tokenizer = (content: string) => Math.ceil(content.length / 4);
const fileInfo = await loader.loadFile(packageJsonPath, tokenizer);
expect(fileInfo.path).toEqual(packageJsonPath);
expect(fileInfo.contents).toBeDefined();
expect(fileInfo.contents.length).toBeGreaterThan(0);
expect(fileInfo.tokenCount).toBeGreaterThan(0);
expect(fileInfo.relativePath).toEqual('package.json');
});
tap.test('LazyFileLoader.loadFiles should load multiple files in parallel', async () => {
const loader = new LazyFileLoader(testProjectRoot);
const metadata: IFileMetadata[] = [
{
path: path.join(testProjectRoot, 'package.json'),
relativePath: 'package.json',
size: 100,
mtime: Date.now(),
estimatedTokens: 25
},
{
path: path.join(testProjectRoot, 'readme.md'),
relativePath: 'readme.md',
size: 200,
mtime: Date.now(),
estimatedTokens: 50
}
];
const tokenizer = (content: string) => Math.ceil(content.length / 4);
const startTime = Date.now();
const files = await loader.loadFiles(metadata, tokenizer);
const endTime = Date.now();
expect(files.length).toEqual(2);
expect(files[0].contents).toBeDefined();
expect(files[1].contents).toBeDefined();
// Should be fast (parallel loading)
expect(endTime - startTime).toBeLessThan(5000); // 5 seconds max
});
tap.test('LazyFileLoader.updateImportanceScores should update cached metadata', async () => {
const loader = new LazyFileLoader(testProjectRoot);
const packageJsonPath = path.join(testProjectRoot, 'package.json');
// Get initial metadata
await loader.getMetadata(packageJsonPath);
// Update importance scores
const scores = new Map<string, number>();
scores.set(packageJsonPath, 0.95);
loader.updateImportanceScores(scores);
// Check cached metadata has updated score
const cached = loader.getCachedMetadata();
const packageJsonMeta = cached.find(m => m.path === packageJsonPath);
expect(packageJsonMeta).toBeDefined();
expect(packageJsonMeta!.importanceScore).toEqual(0.95);
});
tap.test('LazyFileLoader.getTotalEstimatedTokens should sum all cached metadata tokens', async () => {
const loader = new LazyFileLoader(testProjectRoot);
// Scan some files
await loader.scanFiles(['package.json', 'readme.md']);
const totalTokens = loader.getTotalEstimatedTokens();
expect(totalTokens).toBeGreaterThan(0);
});
tap.test('LazyFileLoader.clearCache should clear metadata cache', async () => {
const loader = new LazyFileLoader(testProjectRoot);
// Scan files to populate cache
await loader.scanFiles(['package.json']);
expect(loader.getCachedMetadata().length).toBeGreaterThan(0);
// Clear cache
loader.clearCache();
expect(loader.getCachedMetadata().length).toEqual(0);
});
tap.test('LazyFileLoader.getCachedMetadata should return all cached entries', async () => {
const loader = new LazyFileLoader(testProjectRoot);
// Scan files
await loader.scanFiles(['package.json', 'readme.md']);
const cached = loader.getCachedMetadata();
expect(cached.length).toBeGreaterThanOrEqual(2);
expect(cached.every(m => m.path && m.size && m.estimatedTokens)).toEqual(true);
});
tap.test('LazyFileLoader should handle non-existent files gracefully', async () => {
const loader = new LazyFileLoader(testProjectRoot);
const nonExistentPath = path.join(testProjectRoot, 'this-file-does-not-exist.ts');
try {
await loader.getMetadata(nonExistentPath);
expect(false).toEqual(true); // Should not reach here
} catch (error) {
expect(error).toBeDefined();
}
});
tap.test('LazyFileLoader.loadFiles should filter out failed file loads', async () => {
const loader = new LazyFileLoader(testProjectRoot);
const metadata: IFileMetadata[] = [
{
path: path.join(testProjectRoot, 'package.json'),
relativePath: 'package.json',
size: 100,
mtime: Date.now(),
estimatedTokens: 25
},
{
path: path.join(testProjectRoot, 'non-existent-file.txt'),
relativePath: 'non-existent-file.txt',
size: 100,
mtime: Date.now(),
estimatedTokens: 25
}
];
const tokenizer = (content: string) => Math.ceil(content.length / 4);
const files = await loader.loadFiles(metadata, tokenizer);
// Should only include the successfully loaded file
expect(files.length).toEqual(1);
expect(files[0].relativePath).toEqual('package.json');
});
tap.test('LazyFileLoader should handle glob patterns for TypeScript source files', async () => {
const loader = new LazyFileLoader(testProjectRoot);
const metadata = await loader.scanFiles(['ts/context/*.ts']);
expect(metadata.length).toBeGreaterThan(0);
// Should find multiple context files
const hasEnhancedContext = metadata.some(m => m.relativePath.includes('enhanced-context.ts'));
const hasTypes = metadata.some(m => m.relativePath.includes('types.ts'));
expect(hasEnhancedContext).toEqual(true);
expect(hasTypes).toEqual(true);
});
tap.test('LazyFileLoader should estimate tokens reasonably accurately', async () => {
const loader = new LazyFileLoader(testProjectRoot);
const packageJsonPath = path.join(testProjectRoot, 'package.json');
const metadata = await loader.getMetadata(packageJsonPath);
const tokenizer = (content: string) => Math.ceil(content.length / 4);
const fileInfo = await loader.loadFile(packageJsonPath, tokenizer);
// Estimated tokens should be close to actual (within reasonable range)
const difference = Math.abs(metadata.estimatedTokens - fileInfo.tokenCount);
const percentDiff = (difference / fileInfo.tokenCount) * 100;
// Should be within 20% accuracy (since it's just an estimate)
expect(percentDiff).toBeLessThan(20);
});
export default tap.start();

ts/00_commitinfo_data.ts

@@ -3,6 +3,6 @@
  */
 export const commitinfo = {
   name: '@git.zone/tsdoc',
-  version: '1.10.0',
+  version: '1.11.0',
   description: 'A comprehensive TypeScript documentation tool that leverages AI to generate and enhance project documentation, including dynamic README creation, API docs via TypeDoc, and smart commit message generation.'
 }

ts/aidoc_classes/commit.ts

@@ -1,7 +1,8 @@
 import * as plugins from '../plugins.js';
 import { AiDoc } from '../classes.aidoc.js';
 import { ProjectContext } from './projectcontext.js';
-import { DiffProcessor } from '../context/diff-processor.js';
+import { DiffProcessor } from '../classes.diffprocessor.js';
 import { logger } from '../logging.js';
 
 export interface INextCommitObject {
   recommendedNextVersionLevel: 'fix' | 'feat' | 'BREAKING CHANGE'; // the recommended next version level of the project
@@ -114,35 +115,40 @@ export class Commit {
       processedDiffString = 'No changes.';
     }
 
-    // Use the new TaskContextFactory for optimized context
-    const taskContextFactory = new (await import('../context/index.js')).TaskContextFactory(
-      this.projectDir,
-      this.aiDocsRef.openaiInstance
-    );
-    await taskContextFactory.initialize();
-
-    // Generate context specifically for commit task
-    const contextResult = await taskContextFactory.createContextForCommit(processedDiffString);
-
-    // Get the optimized context string
-    let contextString = contextResult.context;
-
-    // Log token usage statistics
-    console.log(`Token usage - Context: ${contextResult.tokenCount}, Files: ${contextResult.includedFiles.length + contextResult.trimmedFiles.length}, Savings: ${contextResult.tokenSavings}`);
-
-    // Check for token overflow against model limits
-    const MODEL_TOKEN_LIMIT = 200000; // o4-mini
-    if (contextResult.tokenCount > MODEL_TOKEN_LIMIT * 0.9) {
-      console.log(`⚠️ Warning: Context size (${contextResult.tokenCount} tokens) is close to or exceeds model limit (${MODEL_TOKEN_LIMIT} tokens).`);
-      console.log(`The model may not be able to process all information effectively.`);
-    }
-
-    let result = await this.aiDocsRef.openaiInstance.chat({
-      systemMessage: `
+    // Use DualAgentOrchestrator for commit message generation
+    // Note: No filesystem tool needed - the diff already contains all change information
+    const commitOrchestrator = new plugins.smartagent.DualAgentOrchestrator({
+      smartAiInstance: this.aiDocsRef.smartAiInstance,
+      defaultProvider: 'openai',
+      logPrefix: '[Commit]',
+      onProgress: (event) => logger.log(event.logLevel, event.logMessage),
+      guardianPolicyPrompt: `
+You validate commit messages for semantic versioning compliance.
+
+APPROVE if:
+- Version level (fix/feat/BREAKING CHANGE) matches the scope of changes in the diff
+- Commit message is clear, professional, and follows conventional commit conventions
+- No personal information, licensing details, or AI mentions (Claude/Codex) included
+- JSON structure is valid with all required fields
+- Scope accurately reflects the changed modules/files
+
+REJECT with specific feedback if:
+- Version level doesn't match the scope of changes (e.g., "feat" for a typo fix should be "fix")
+- Message is vague, unprofessional, or contains sensitive information
+- JSON is malformed or missing required fields
+`,
+    });
+    await commitOrchestrator.start();
+
+    const commitTaskPrompt = `
 You create a commit message for a git commit.
-The commit message should be based on the files in the project.
-You should not include any licensing information.
-You should not include any personal information.
+Project directory: ${this.projectDir}
+
+Analyze the git diff below to understand what changed and generate a commit message.
+You should not include any licensing information or personal information.
+Never mention CLAUDE code, or codex.
 
 Important: Answer only in valid JSON.
@@ -151,26 +157,31 @@ Your answer should be parseable with JSON.parse() without modifying anything.
 Here is the structure of the JSON you should return:
 
 interface {
-  recommendedNextVersionLevel: 'fix' | 'feat' | 'BREAKING CHANGE'; // the recommended next version level of the project
-  recommendedNextVersionScope: string; // the recommended scope name of the next version, like "core" or "cli", or specific class names.
-  recommendedNextVersionMessage: string; // the commit message. Don't put fix() feat() or BREAKING CHANGE in the message. Please just the message itself.
+  recommendedNextVersionLevel: 'fix' | 'feat' | 'BREAKING CHANGE'; // the recommended next version level
+  recommendedNextVersionScope: string; // scope name like "core", "cli", or specific class names
+  recommendedNextVersionMessage: string; // the commit message (don't include fix/feat prefix)
   recommendedNextVersionDetails: string[]; // detailed bullet points for the changelog
-  recommendedNextVersion: string; // the recommended next version of the project, x.x.x
+  recommendedNextVersion: string; // the recommended next version x.x.x
 }
 
-For the recommendedNextVersionDetails, please only add a detail entries to the array if it has an obvious value to the reader.
-You are being given the files of the project. You should use them to create the commit message.
-Also you are given a diff.
-
-Never mention CLAUDE code, or codex.
-`,
-      messageHistory: [],
-      userMessage: contextString,
-    });
+For recommendedNextVersionDetails, only add entries that have obvious value to the reader.
+
+Here is the git diff showing what changed:
+${processedDiffString}
+
+Generate the commit message based on these changes.
+`;
+    const commitResult = await commitOrchestrator.run(commitTaskPrompt);
+    await commitOrchestrator.stop();
+
+    if (!commitResult.success) {
+      throw new Error(`Commit message generation failed: ${commitResult.status}`);
+    }
 
     // console.log(result.message);
     const resultObject: INextCommitObject = JSON.parse(
-      result.message.replace('```json', '').replace('```', '')
+      commitResult.result.replace('```json', '').replace('```', '')
     );
 
     const previousChangelogPath = plugins.path.join(this.projectDir, 'changelog.md');
@@ -183,9 +194,33 @@ Never mention CLAUDE code, or codex.
     // lets build the changelog based on that
    const commitMessages = await gitRepo.getAllCommitMessages();
     console.log(JSON.stringify(commitMessages, null, 2));
-    let result2 = await this.aiDocsRef.openaiInstance.chat({
-      messageHistory: [],
-      systemMessage: `
+    // Use DualAgentOrchestrator for changelog generation with Guardian validation
+    const changelogOrchestrator = new plugins.smartagent.DualAgentOrchestrator({
+      smartAiInstance: this.aiDocsRef.smartAiInstance,
+      defaultProvider: 'openai',
+      logPrefix: '[Changelog]',
+      onProgress: (event) => logger.log(event.logLevel, event.logMessage),
+      guardianPolicyPrompt: `
+You validate changelog generation.
+
+APPROVE if:
+- Changelog follows proper markdown format with ## headers for each version
+- Entries are chronologically ordered (newest first)
+- Version ranges for trivial commits are properly summarized
+- No duplicate or empty entries
+- Format matches: ## yyyy-mm-dd - x.x.x - scope
+
+REJECT with feedback if:
+- Markdown formatting is incorrect
+- Entries are not meaningful or helpful
+- Dates or versions are malformed
+`,
+    });
+    await changelogOrchestrator.start();
+
+    const changelogTaskPrompt = `
 You are building a changelog.md file for the project.
 Omit commits and versions that lack relevant changes, but make sure to mention them as a range with a summarizing message instead.
@@ -199,17 +234,23 @@ A changelog entry should look like this:
You are given:
* the commit messages of the project
Only return the changelog file, so it can be written directly to changelog.md`,
userMessage: `
Only return the changelog file content, so it can be written directly to changelog.md.
Here are the commit messages:
${JSON.stringify(commitMessages, null, 2)}
`,
});
`;
const changelogResult = await changelogOrchestrator.run(changelogTaskPrompt);
await changelogOrchestrator.stop();
if (!changelogResult.success) {
throw new Error(`Changelog generation failed: ${changelogResult.status}`);
}
previousChangelog = plugins.smartfileFactory.fromString(
previousChangelogPath,
result2.message.replaceAll('```markdown', '').replaceAll('```', ''),
changelogResult.result.replaceAll('```markdown', '').replaceAll('```', ''),
'utf8'
);
}

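The commit flow above reduces to a start/run/stop lifecycle around `DualAgentOrchestrator`, with the guardian validating the final answer. A minimal sketch of that lifecycle, assuming only the `@push.rocks/smartagent` surface visible in this diff (the constructor options, `start()`, `run()`, `stop()`, and a result carrying `success`, `status`, and `result`); the `generateValidatedJson` helper is hypothetical:

```typescript
import * as smartagent from '@push.rocks/smartagent';
import * as smartai from '@push.rocks/smartai';

// Hypothetical helper mirroring the commit flow above: produce a
// guardian-validated JSON answer for an arbitrary diff string.
async function generateValidatedJson(
  smartAiInstance: smartai.SmartAi,
  diffString: string,
): Promise<unknown> {
  const orchestrator = new smartagent.DualAgentOrchestrator({
    smartAiInstance,
    defaultProvider: 'openai',
    logPrefix: '[Sketch]',
    guardianPolicyPrompt: 'APPROVE valid, well-formed JSON answers. REJECT anything else with feedback.',
  });
  await orchestrator.start();
  try {
    const result = await orchestrator.run(`
Answer only in valid JSON.
Here is the git diff showing what changed:
${diffString}
`);
    if (!result.success) {
      throw new Error(`Generation failed: ${result.status}`);
    }
    // Strip an optional markdown fence before parsing, as the flow above does.
    return JSON.parse(result.result.replace('```json', '').replace('```', ''));
  } finally {
    // stop() runs even if run() throws, so no orchestrator is leaked.
    await orchestrator.stop();
  }
}
```

Wrapping `stop()` in a `finally` block is a small hardening over the flow above, which would leave the orchestrator running if `run()` threw.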
View File

@@ -1,6 +1,7 @@
import type { AiDoc } from '../classes.aidoc.js';
import * as plugins from '../plugins.js';
import { ProjectContext } from './projectcontext.js';
import { logger } from '../logging.js';
interface IDescriptionInterface {
description: string;
@@ -18,50 +19,87 @@ export class Description {
}
public async build() {
// Use the new TaskContextFactory for optimized context
const taskContextFactory = new (await import('../context/index.js')).TaskContextFactory(
this.projectDir,
this.aiDocsRef.openaiInstance
);
await taskContextFactory.initialize();
// Generate context specifically for description task
const contextResult = await taskContextFactory.createContextForDescription();
const contextString = contextResult.context;
// Log token usage statistics
console.log(`Token usage - Context: ${contextResult.tokenCount}, Files: ${contextResult.includedFiles.length + contextResult.trimmedFiles.length}, Savings: ${contextResult.tokenSavings}`);
// Use DualAgentOrchestrator with filesystem tool for agent-driven exploration
const descriptionOrchestrator = new plugins.smartagent.DualAgentOrchestrator({
smartAiInstance: this.aiDocsRef.smartAiInstance,
defaultProvider: 'openai',
maxIterations: 15,
maxResultChars: 10000, // Limit tool output to prevent token explosion
maxHistoryMessages: 15, // Limit history window
logPrefix: '[Description]',
onProgress: (event) => logger.log(event.logLevel, event.logMessage),
guardianPolicyPrompt: `
You validate description generation tool calls and outputs.
let result = await this.aiDocsRef.openaiInstance.chat({
systemMessage: `
You create a json adhering the following interface:
{
description: string; // a sensible short, one sentence description of the project
keywords: string[]; // an array of tags that describe the project
}
APPROVE tool calls for:
- Reading package.json, npmextra.json, or source files in the ts/ directory
- Listing directory contents to understand project structure
- Using tree to see project structure
The description should be based on what you understand from the project's files.
The keywords should be based on use cases you see from the files.
Don't be cheap about the way you think.
REJECT tool calls for:
- Reading files outside the project directory
- Writing, deleting, or modifying any files
- Any destructive operations
Important: Answer only in valid JSON.
You answer should be parseable with JSON.parse() without modifying anything.
For final output, APPROVE if:
- JSON is valid and parseable
- Description is a clear, concise one-sentence summary
- Keywords are relevant to the project's use cases
- Both description and keywords fields are present
Don't wrap the JSON in three ticks json!!!
`,
messageHistory: [],
userMessage: contextString,
REJECT final output if:
- JSON is malformed or wrapped in markdown code blocks
- Description is too long or vague
- Keywords are irrelevant or generic
`,
});
console.log(result.message);
// Register scoped filesystem tool for agent exploration
descriptionOrchestrator.registerScopedFilesystemTool(this.projectDir);
await descriptionOrchestrator.start();
const descriptionTaskPrompt = `
You create a project description and keywords for an npm package.
PROJECT DIRECTORY: ${this.projectDir}
Use the filesystem tool to explore the project and understand what it does:
1. First, use tree to see the project structure
2. Read package.json to understand the package name and current description
3. Read npmextra.json if it exists for additional metadata
4. Read key source files in ts/ directory to understand the implementation
Then generate a description and keywords based on your exploration.
Your FINAL response must be valid JSON adhering to this interface:
{
description: string; // a sensible short, one sentence description of the project
keywords: string[]; // an array of tags that describe the project based on use cases
}
Important: Answer only in valid JSON.
Your answer should be parseable with JSON.parse() without modifying anything.
Don't wrap the JSON in \`\`\`json\`\`\` - just return the raw JSON object.
`;
const descriptionResult = await descriptionOrchestrator.run(descriptionTaskPrompt);
await descriptionOrchestrator.stop();
if (!descriptionResult.success) {
throw new Error(`Description generation failed: ${descriptionResult.status}`);
}
console.log(descriptionResult.result);
const resultObject: IDescriptionInterface = JSON.parse(
result.message.replace('```json', '').replace('```', ''),
descriptionResult.result.replace('```json', '').replace('```', ''),
);
// Create a standard ProjectContext instance for file operations
// Use ProjectContext to get file handles for writing
const projectContext = new ProjectContext(this.projectDir);
const files = await projectContext.gatherFiles();
// Update npmextra.json
const npmextraJson = files.smartfilesNpmextraJSON;
const npmextraJsonContent = JSON.parse(npmextraJson.contents.toString());
@@ -71,7 +109,7 @@ Don't wrap the JSON in three ticks json!!!
npmextraJson.contents = Buffer.from(JSON.stringify(npmextraJsonContent, null, 2));
await npmextraJson.write();
// do the same with packageJson
// Update package.json
const packageJson = files.smartfilePackageJSON;
const packageJsonContent = JSON.parse(packageJson.contents.toString());
packageJsonContent.description = resultObject.description;
@@ -82,6 +120,6 @@ Don't wrap the JSON in three ticks json!!!
console.log(`\n======================\n`);
console.log(JSON.stringify(resultObject, null, 2));
console.log(`\n======================\n`);
return result.message;
return descriptionResult.result;
}
}

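Both the commit and description flows trust `JSON.parse` on the agent's final answer. A runtime type guard is a cheap way to surface contract violations early; a minimal sketch, reusing the `IDescriptionInterface` shape from this file (the `isDescriptionResult` helper is hypothetical):

```typescript
interface IDescriptionInterface {
  description: string;
  keywords: string[];
}

// Hypothetical type guard: verifies the parsed agent output actually
// matches the interface the prompt asked for.
function isDescriptionResult(value: unknown): value is IDescriptionInterface {
  if (typeof value !== 'object' || value === null) return false;
  const candidate = value as Record<string, unknown>;
  return (
    typeof candidate.description === 'string' &&
    Array.isArray(candidate.keywords) &&
    candidate.keywords.every((keyword) => typeof keyword === 'string')
  );
}

const parsed: unknown = JSON.parse('{"description":"a demo","keywords":["demo"]}');
if (!isDescriptionResult(parsed)) {
  throw new Error('Agent returned JSON that does not match IDescriptionInterface');
}
console.log(parsed.description, parsed.keywords);
```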
View File

@@ -64,21 +64,14 @@ ${smartfile.contents.toString()}
}
/**
* Calculate the token count for a string using the GPT tokenizer
* @param text The text to count tokens for
* @param model The model to use for token counting (default: gpt-3.5-turbo)
* @returns The number of tokens in the text
* Estimate token count for a string
* Uses a rough estimate of 4 characters per token
* @param text The text to estimate tokens for
* @returns Estimated number of tokens
*/
public countTokens(text: string, model: string = 'gpt-3.5-turbo'): number {
try {
// Use the gpt-tokenizer library to count tokens
const tokens = plugins.gptTokenizer.encode(text);
return tokens.length;
} catch (error) {
console.error('Error counting tokens:', error);
// Provide a rough estimate (4 chars per token) if tokenization fails
return Math.ceil(text.length / 4);
}
public countTokens(text: string): number {
// Rough estimate: ~4 characters per token for English text
return Math.ceil(text.length / 4);
}
private async buildContext(dirArg: string) {

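The estimator above trades the gpt-tokenizer dependency for a fixed ~4-characters-per-token heuristic, so counts are now approximate rather than exact. A short worked example of the new behaviour:

```typescript
// Rough estimate: ~4 characters per token for English text.
const countTokens = (text: string): number => Math.ceil(text.length / 4);

console.log(countTokens('Hello, world!'));  // 13 chars -> 4 estimated tokens
console.log(countTokens('a'.repeat(4000))); // 4000 chars -> 1000 estimated tokens
```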
View File

@@ -17,21 +17,7 @@ export class Readme {
public async build() {
let finalReadmeString = ``;
// Use the new TaskContextFactory for optimized context
const taskContextFactory = new (await import('../context/index.js')).TaskContextFactory(
this.projectDir,
this.aiDocsRef.openaiInstance
);
await taskContextFactory.initialize();
// Generate context specifically for readme task
const contextResult = await taskContextFactory.createContextForReadme();
const contextString = contextResult.context;
// Log token usage statistics
console.log(`Token usage - Context: ${contextResult.tokenCount}, Files: ${contextResult.includedFiles.length + contextResult.trimmedFiles.length}, Savings: ${contextResult.tokenSavings}`);
// lets first check legal before introducung any cost
// First check legal info before introducing any cost
const projectContext = new ProjectContext(this.projectDir);
const npmExtraJson = JSON.parse(
(await projectContext.gatherFiles()).smartfilesNpmextraJSON.contents.toString()
@@ -42,50 +28,100 @@ export class Readme {
console.log(error);
}
let result = await this.aiDocsRef.openaiInstance.chat({
systemMessage: `
You create markdown readmes for npm projects. You only output the markdown readme.
// Use DualAgentOrchestrator with filesystem tool for agent-driven exploration
const readmeOrchestrator = new plugins.smartagent.DualAgentOrchestrator({
smartAiInstance: this.aiDocsRef.smartAiInstance,
defaultProvider: 'openai',
maxIterations: 25,
maxResultChars: 15000, // Limit tool output to prevent token explosion
maxHistoryMessages: 20, // Limit history window
logPrefix: '[README]',
onProgress: (event) => logger.log(event.logLevel, event.logMessage),
guardianPolicyPrompt: `
You validate README generation tool calls and outputs.
The Readme should follow the following template:
APPROVE tool calls for:
- Reading any files within the project directory (package.json, ts/*.ts, readme.md, etc.)
- Using tree to see project structure
- Using glob to find source files
- Listing directory contents
REJECT tool calls for:
- Reading files outside the project directory
- Writing, deleting, or modifying any files
- Any destructive operations
For final README output, APPROVE if:
- README follows proper markdown format
- Contains Install and Usage sections
- Code examples are correct TypeScript/ESM syntax
- Documentation is comprehensive and helpful
REJECT final output if:
- README is incomplete or poorly formatted
- Contains licensing information (added separately)
- Uses CommonJS syntax instead of ESM
- Contains "in conclusion" or similar filler
`,
});
// Register scoped filesystem tool for agent exploration
readmeOrchestrator.registerScopedFilesystemTool(this.projectDir);
await readmeOrchestrator.start();
const readmeTaskPrompt = `
You create markdown READMEs for npm projects. You only output the markdown readme.
PROJECT DIRECTORY: ${this.projectDir}
Use the filesystem tool to explore the project and understand what it does:
1. First, use tree to see the project structure (maxDepth: 3)
2. Read package.json to understand the package name, description, and dependencies
3. Read the existing readme.md if it exists (use it as a base, improve and expand)
4. Read readme.hints.md if it exists (contains hints for documentation)
5. Read key source files in ts/ directory to understand the API and implementation
6. Focus on exported classes, interfaces, and functions
Then generate a comprehensive README following this template:
# Project Name
[
The name is the module name of package.json
The description is in the description field of package.json
]
[The name from package.json and description]
## Install
[
Write a short text on how to install the project
]
[Short text on how to install the project]
## Usage
[
[
Give code examples here.
Construct sensible scenarios for the user.
Make sure to show a complete set of features of the module.
Don't omit use cases.
It does not matter how much time you need.
ALWAYS USE ESM SYNTAX AND TYPESCRIPT.
DON'T CHICKEN OUT. Write at least 4000 words. More if necessary.
If there is already a readme, take the Usage section as base. Remove outdated content, and expand and improve upon the valid parts.
Super important: Check for completenes.
Don't include any licensing information. This will be added in a later step.
Avoid "in conclusions".
Good to know:
* npmextra.json contains overall module information.
* readme.hints.md provides valuable hints about module ideas.
Write at least 4000 words. More if necessary.
If there is already a readme, take the Usage section as base. Remove outdated content, expand and improve.
Check for completeness.
Don't include any licensing information. This will be added later.
Avoid "in conclusion" statements.
]
`,
messageHistory: [],
userMessage: contextString,
});
`;
finalReadmeString += result.message + '\n' + legalInfo;
const readmeResult = await readmeOrchestrator.run(readmeTaskPrompt);
await readmeOrchestrator.stop();
if (!readmeResult.success) {
throw new Error(`README generation failed: ${readmeResult.status}`);
}
// Clean up markdown formatting if wrapped in code blocks
let resultMessage = readmeResult.result
.replace(/^```markdown\n?/i, '')
.replace(/\n?```$/i, '');
finalReadmeString += resultMessage + '\n' + legalInfo;
console.log(`\n======================\n`);
console.log(result.message);
console.log(resultMessage);
console.log(`\n======================\n`);
const readme = (await projectContext.gatherFiles()).smartfilesReadme;
@@ -96,60 +132,99 @@ The Readme should follow the following template:
const tsPublishInstance = new plugins.tspublish.TsPublish();
const subModules = await tsPublishInstance.getModuleSubDirs(paths.cwd);
logger.log('info', `Found ${Object.keys(subModules).length} sub modules`);
for (const subModule of Object.keys(subModules)) {
logger.log('info', `Building readme for ${subModule}`);
const subModuleContextString = await projectContext.update();
let result = await this.aiDocsRef.openaiInstance.chat({
systemMessage: `
You create markdown readmes for npm projects. You only output the markdown readme.
IMPORTANT: YOU ARE NOW CREATING THE README FOR THE FOLLOWING SUB MODULE: ${subModule} !!!!!!!!!!!
The Sub Module will be published with the following data:
${JSON.stringify(await plugins.fsInstance.file(plugins.path.join(paths.cwd, subModule, 'tspublish.json')).encoding('utf8').read(), null, 2)}
const subModulePath = plugins.path.join(paths.cwd, subModule);
const tspublishData = await plugins.fsInstance
.file(plugins.path.join(subModulePath, 'tspublish.json'))
.encoding('utf8')
.read();
The Readme should follow the following template:
# Project Name
[
The name is the module name of package.json
The description is in the description field of package.json
]
## Install
[
Write a short text on how to install the project
]
## Usage
[
Give code examples here.
Construct sensible scenarios for the user.
Make sure to show a complete set of features of the module.
Don't omit use cases.
It does not matter how much time you need.
ALWAYS USE ESM SYNTAX AND TYPESCRIPT.
DON'T CHICKEN OUT. Write at least 4000 words. More if necessary.
If there is already a readme, take the Usage section as base. Remove outdated content, and expand and improve upon the valid parts.
Super important: Check for completeness.
Don't include any licensing information. This will be added in a later step.
Avoid "in conclusions".
Good to know:
* npmextra.json contains overall module information.
* readme.hints.md provides valuable hints about module ideas.
* Your output lands directly in the readme.md file.
* Don't use \`\`\` at the beginning or the end. It'll cause problems. Only use it for codeblocks. You are directly writing markdown. No need to introduce it weirdly.
]
`,
messageHistory: [],
userMessage: subModuleContextString,
// Create a new orchestrator with filesystem tool for each submodule
const subModuleOrchestrator = new plugins.smartagent.DualAgentOrchestrator({
smartAiInstance: this.aiDocsRef.smartAiInstance,
defaultProvider: 'openai',
maxIterations: 20,
maxResultChars: 12000,
maxHistoryMessages: 15,
logPrefix: `[README:${subModule}]`,
onProgress: (event) => logger.log(event.logLevel, event.logMessage),
guardianPolicyPrompt: `
You validate README generation for submodules.
APPROVE tool calls for:
- Reading any files within the submodule directory
- Using tree to see structure
- Using glob to find source files
REJECT tool calls for:
- Reading files outside the submodule directory
- Writing, deleting, or modifying any files
- Any destructive operations
APPROVE final README if comprehensive, well-formatted markdown with ESM TypeScript examples.
REJECT incomplete READMEs or those with licensing info.
`,
});
const subModuleReadmeString = result.message + '\n' + legalInfo;
await plugins.fsInstance.file(plugins.path.join(paths.cwd, subModule, 'readme.md')).encoding('utf8').write(subModuleReadmeString);
logger.log('success', `Built readme for ${subModule}`);
// Register scoped filesystem tool for the submodule directory
subModuleOrchestrator.registerScopedFilesystemTool(subModulePath);
await subModuleOrchestrator.start();
const subModulePrompt = `
You create markdown READMEs for npm projects. You only output the markdown readme.
SUB MODULE: ${subModule}
SUB MODULE DIRECTORY: ${subModulePath}
IMPORTANT: YOU ARE CREATING THE README FOR THIS SUB MODULE: ${subModule}
The Sub Module will be published with:
${JSON.stringify(tspublishData, null, 2)}
Use the filesystem tool to explore the submodule:
1. Use tree to see the submodule structure
2. Read package.json to understand the submodule
3. Read source files in ts/ directory to understand the implementation
Generate a README following the template:
# Project Name
[name and description from package.json]
## Install
[installation instructions]
## Usage
[
Code examples with complete features.
ESM TypeScript syntax only.
Write at least 4000 words.
No licensing information.
No "in conclusion".
]
Don't use \`\`\` at the beginning or end. Only for code blocks.
`;
const subModuleResult = await subModuleOrchestrator.run(subModulePrompt);
await subModuleOrchestrator.stop();
if (subModuleResult.success) {
const subModuleReadmeString = subModuleResult.result
.replace(/^```markdown\n?/i, '')
.replace(/\n?```$/i, '') + '\n' + legalInfo;
await plugins.fsInstance
.file(plugins.path.join(subModulePath, 'readme.md'))
.encoding('utf8')
.write(subModuleReadmeString);
logger.log('success', `Built readme for ${subModule}`);
} else {
logger.log('error', `Failed to build readme for ${subModule}: ${subModuleResult.status}`);
}
}
return result.message;
return resultMessage;
}
}

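The README flow now defensively unwraps answers that arrive inside a markdown code fence before appending the legal info. A minimal sketch of that cleanup, using the same regexes as the hunk above:

```typescript
// Strips a leading ```markdown fence and a trailing ``` fence, if present.
function stripMarkdownFence(raw: string): string {
  return raw.replace(/^```markdown\n?/i, '').replace(/\n?```$/i, '');
}

const wrapped = '```markdown\n# Demo\n\nSome readme text.\n```';
console.log(stripMarkdownFence(wrapped)); // "# Demo\n\nSome readme text."
```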
View File

@@ -8,7 +8,7 @@ export class AiDoc {
public npmextraKV: plugins.npmextra.KeyValueStore;
public qenvInstance: plugins.qenv.Qenv;
public aidocInteract: plugins.smartinteract.SmartInteract;
public openaiInstance: plugins.smartai.OpenAiProvider;
public smartAiInstance: plugins.smartai.SmartAi;
argvArg: any;
@@ -85,20 +85,32 @@ export class AiDoc {
}
// lets assume we have an OPENAI_Token now
this.openaiInstance = new plugins.smartai.OpenAiProvider({
this.smartAiInstance = new plugins.smartai.SmartAi({
openaiToken: this.openaiToken,
});
await this.openaiInstance.start();
await this.smartAiInstance.start();
}
public async stop() {
if (this.openaiInstance) {
await this.openaiInstance.stop();
if (this.smartAiInstance) {
await this.smartAiInstance.stop();
}
// No explicit cleanup needed for npmextraKV or aidocInteract
// They don't keep event loop alive
}
/**
* Get the OpenAI provider for direct chat calls
* This is a convenience getter to access the provider from SmartAi
*/
public get openaiProvider(): plugins.smartai.OpenAiProvider {
return this.smartAiInstance.openaiProvider;
}
public getOpenaiToken(): string {
return this.openaiToken;
}
public async buildReadme(projectDirArg: string) {
const readmeInstance = new aiDocsClasses.Readme(this, projectDirArg);
return await readmeInstance.build();
@@ -142,13 +154,12 @@ export class AiDoc {
}
/**
* Count tokens in a text string using GPT tokenizer
* @param text The text to count tokens for
* @param model The model to use for tokenization (default: gpt-3.5-turbo)
* @returns The number of tokens in the text
* Estimate token count in a text string
* @param text The text to estimate tokens for
* @returns Estimated number of tokens
*/
public countTokens(text: string, model: string = 'gpt-3.5-turbo'): number {
public countTokens(text: string): number {
const projectContextInstance = new aiDocsClasses.ProjectContext('');
return projectContextInstance.countTokens(text, model);
return projectContextInstance.countTokens(text);
}
}

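After this change, callers hold a single `SmartAi` instance and reach OpenAI through the convenience getter rather than constructing a bare provider. A minimal lifecycle sketch, assuming the `@push.rocks/smartai` surface used above (`SmartAi`, `start()`, `stop()`, and the `openaiProvider` property):

```typescript
import * as smartai from '@push.rocks/smartai';

async function withSmartAi(openaiToken: string): Promise<void> {
  const smartAiInstance = new smartai.SmartAi({ openaiToken });
  await smartAiInstance.start();
  try {
    // Direct chat calls still go through the OpenAI provider,
    // now reached via the SmartAi wrapper instead of a bare provider.
    const provider = smartAiInstance.openaiProvider;
    console.log('provider ready:', Boolean(provider));
  } finally {
    await smartAiInstance.stop();
  }
}
```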
ts/cli.ts (120 lines changed)
View File

@@ -4,7 +4,6 @@ import { logger } from './logging.js';
import { TypeDoc } from './classes.typedoc.js';
import { AiDoc } from './classes.aidoc.js';
import * as context from './context/index.js';
export const run = async () => {
const tsdocCli = new plugins.smartcli.Smartcli();
@@ -32,17 +31,6 @@ export const run = async () => {
const aidocInstance = new AiDoc();
await aidocInstance.start();
// Get context token count if requested
if (argvArg.tokens || argvArg.showTokens) {
logger.log('info', `Calculating context token count...`);
const tokenCount = await aidocInstance.getProjectContextTokenCount(paths.cwd);
logger.log('ok', `Total context token count: ${tokenCount}`);
if (argvArg.tokensOnly) {
return; // Exit early if we only want token count
}
}
logger.log('info', `Generating new readme...`);
logger.log('info', `This may take some time...`);
await aidocInstance.buildReadme(paths.cwd);
@@ -51,102 +39,34 @@ export const run = async () => {
await aidocInstance.buildDescription(paths.cwd);
});
tsdocCli.addCommand('tokens').subscribe(async (argvArg) => {
tsdocCli.addCommand('readme').subscribe(async (argvArg) => {
const aidocInstance = new AiDoc();
await aidocInstance.start();
logger.log('info', `Calculating context token count...`);
logger.log('info', `Generating new readme...`);
logger.log('info', `This may take some time...`);
await aidocInstance.buildReadme(paths.cwd);
});
// Get task type if specified
let taskType: context.TaskType | undefined = undefined;
if (argvArg.task) {
if (['readme', 'commit', 'description'].includes(argvArg.task)) {
taskType = argvArg.task as context.TaskType;
} else {
logger.log('warn', `Unknown task type: ${argvArg.task}. Using default (readme).`);
taskType = 'readme';
}
} else {
// Default to readme if no task specified
taskType = 'readme';
}
tsdocCli.addCommand('description').subscribe(async (argvArg) => {
const aidocInstance = new AiDoc();
await aidocInstance.start();
// Use iterative context building
const taskFactory = new context.TaskContextFactory(paths.cwd);
await taskFactory.initialize();
logger.log('info', `Generating new description and keywords...`);
logger.log('info', `This may take some time...`);
await aidocInstance.buildDescription(paths.cwd);
});
let contextResult: context.IIterativeContextResult;
tsdocCli.addCommand('commit').subscribe(async (argvArg) => {
const aidocInstance = new AiDoc();
await aidocInstance.start();
if (argvArg.all) {
// Show stats for all task types
const stats = await taskFactory.getTokenStats();
logger.log('info', `Generating commit message...`);
logger.log('info', `This may take some time...`);
const commitObject = await aidocInstance.buildNextCommitObject(paths.cwd);
logger.log('ok', 'Token statistics by task:');
for (const [task, data] of Object.entries(stats)) {
logger.log('info', `\n${task.toUpperCase()}:`);
logger.log('info', ` Tokens: ${data.tokenCount}`);
logger.log('info', ` Token savings: ${data.savings}`);
logger.log('info', ` Files: ${data.includedFiles} included, ${data.trimmedFiles} trimmed, ${data.excludedFiles} excluded`);
// Calculate percentage of model context
const o4MiniPercentage = (data.tokenCount / 200000 * 100).toFixed(2);
logger.log('info', ` Context usage: ${o4MiniPercentage}% of o4-mini (200K tokens)`);
}
return;
}
// Get context for specific task
contextResult = await taskFactory.createContextForTask(taskType);
// Display results
logger.log('ok', `Total context token count: ${contextResult.tokenCount}`);
logger.log('info', `Files included: ${contextResult.includedFiles.length}`);
logger.log('info', `Files trimmed: ${contextResult.trimmedFiles.length}`);
logger.log('info', `Files excluded: ${contextResult.excludedFiles.length}`);
logger.log('info', `Token savings: ${contextResult.tokenSavings}`);
if (argvArg.detailed) {
// Show more detailed info about the context and token usage
const o4MiniPercentage = (contextResult.tokenCount / 200000 * 100).toFixed(2);
logger.log('info', `Token usage: ${o4MiniPercentage}% of o4-mini 200K token context window`);
if (argvArg.model) {
// Show percentages for different models
if (argvArg.model === 'gpt4') {
const gpt4Percentage = (contextResult.tokenCount / 8192 * 100).toFixed(2);
logger.log('info', `Token usage (GPT-4): ${gpt4Percentage}% of 8192 token context window`);
} else if (argvArg.model === 'gpt35') {
const gpt35Percentage = (contextResult.tokenCount / 4096 * 100).toFixed(2);
logger.log('info', `Token usage (GPT-3.5): ${gpt35Percentage}% of 4096 token context window`);
}
}
// Estimate cost (approximate values)
const o4MiniInputCost = 0.00005; // per 1K tokens for o4-mini
const estimatedCost = (contextResult.tokenCount / 1000 * o4MiniInputCost).toFixed(6);
logger.log('info', `Estimated input cost: $${estimatedCost} (o4-mini)`);
if (argvArg.listFiles) {
// List files included in context
logger.log('info', '\nIncluded files:');
contextResult.includedFiles.forEach(file => {
logger.log('info', ` ${file.relativePath} (${file.tokenCount} tokens)`);
});
logger.log('info', '\nTrimmed files:');
contextResult.trimmedFiles.forEach(file => {
logger.log('info', ` ${file.relativePath} (${file.tokenCount} tokens)`);
});
if (contextResult.excludedFiles.length > 0) {
logger.log('info', '\nExcluded files:');
contextResult.excludedFiles.forEach(file => {
logger.log('info', ` ${file.relativePath} (${file.tokenCount} tokens)`);
});
}
}
}
logger.log('ok', `Commit message generated:`);
console.log(JSON.stringify(commitObject, null, 2));
});
tsdocCli.addCommand('test').subscribe((argvArg) => {

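The reworked CLI maps each task to its own command with an identical start/log/build shape. A minimal sketch of that pattern, assuming the `@push.rocks/smartcli` surface used above; `startParse()` is an assumption, since this hunk ends before the parser is started:

```typescript
import * as smartcli from '@push.rocks/smartcli';

const cli = new smartcli.Smartcli();

// One command per task, mirroring the readme/description/commit commands above.
cli.addCommand('hello').subscribe(async (argvArg) => {
  console.log('running hello with args:', argvArg);
});

// Assumption: startParse() kicks off argument parsing in smartcli.
cli.startParse();
```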
View File

@@ -1,369 +0,0 @@
import * as plugins from '../plugins.js';
import * as fs from 'fs';
import type {
IContextConfig,
ITrimConfig,
ITaskConfig,
TaskType,
ContextMode,
ICacheConfig,
IAnalyzerConfig,
IPrioritizationWeights,
ITierConfig,
IIterativeConfig
} from './types.js';
/**
* Manages configuration for context building
*/
export class ConfigManager {
private static instance: ConfigManager;
private config: IContextConfig;
private projectDir: string = '';
private configCache: { mtime: number; config: IContextConfig } | null = null;
/**
* Get the singleton instance of ConfigManager
*/
public static getInstance(): ConfigManager {
if (!ConfigManager.instance) {
ConfigManager.instance = new ConfigManager();
}
return ConfigManager.instance;
}
/**
* Private constructor for singleton pattern
*/
private constructor() {
this.config = this.getDefaultConfig();
}
/**
* Initialize the config manager with a project directory
* @param projectDir The project directory
*/
public async initialize(projectDir: string): Promise<void> {
this.projectDir = projectDir;
await this.loadConfig();
}
/**
* Get the default configuration
*/
private getDefaultConfig(): IContextConfig {
return {
maxTokens: 190000, // Default for o4-mini with some buffer
defaultMode: 'trimmed',
taskSpecificSettings: {
readme: {
mode: 'trimmed',
includePaths: ['ts/', 'src/'],
excludePaths: ['test/', 'node_modules/']
},
commit: {
mode: 'trimmed',
focusOnChangedFiles: true
},
description: {
mode: 'trimmed',
includePackageInfo: true
}
},
trimming: {
removeImplementations: true,
preserveInterfaces: true,
preserveTypeDefs: true,
preserveJSDoc: true,
maxFunctionLines: 5,
removeComments: true,
removeBlankLines: true
},
cache: {
enabled: true,
ttl: 3600, // 1 hour
maxSize: 100, // 100MB
directory: undefined // Will be set to .nogit/context-cache by ContextCache
},
analyzer: {
useAIRefinement: false, // Disabled by default for now
aiModel: 'haiku'
},
prioritization: {
dependencyWeight: 0.3,
relevanceWeight: 0.4,
efficiencyWeight: 0.2,
recencyWeight: 0.1
},
tiers: {
essential: { minScore: 0.8, trimLevel: 'none' },
important: { minScore: 0.5, trimLevel: 'light' },
optional: { minScore: 0.2, trimLevel: 'aggressive' }
},
iterative: {
maxIterations: 5,
firstPassFileLimit: 10,
subsequentPassFileLimit: 5,
temperature: 0.3,
model: 'gpt-4-turbo-preview'
}
};
}
/**
* Load configuration from npmextra.json
*/
private async loadConfig(): Promise<void> {
try {
if (!this.projectDir) {
return;
}
const npmextraJsonPath = plugins.path.join(this.projectDir, 'npmextra.json');
// Check if file exists
const fileExists = await plugins.fsInstance.file(npmextraJsonPath).exists();
if (!fileExists) {
return;
}
// Check cache
const stats = await fs.promises.stat(npmextraJsonPath);
const currentMtime = Math.floor(stats.mtimeMs);
if (this.configCache && this.configCache.mtime === currentMtime) {
// Use cached config
this.config = this.configCache.config;
return;
}
// Read the npmextra.json file
const npmextraJsonFile = await plugins.smartfileFactory.fromFilePath(npmextraJsonPath);
const npmextraContent = JSON.parse(npmextraJsonFile.contents.toString());
// Check for tsdoc context configuration
if (npmextraContent?.['@git.zone/tsdoc']?.context) {
// Merge with default config
this.config = this.mergeConfigs(this.config, npmextraContent['@git.zone/tsdoc'].context);
}
// Cache the config
this.configCache = {
mtime: currentMtime,
config: { ...this.config }
};
} catch (error) {
console.error('Error loading context configuration:', error);
}
}
/**
* Merge configurations, with userConfig taking precedence
* @param defaultConfig The default configuration
* @param userConfig The user configuration
*/
private mergeConfigs(defaultConfig: IContextConfig, userConfig: Partial<IContextConfig>): IContextConfig {
const result: IContextConfig = { ...defaultConfig };
// Merge top-level properties
if (userConfig.maxTokens !== undefined) result.maxTokens = userConfig.maxTokens;
if (userConfig.defaultMode !== undefined) result.defaultMode = userConfig.defaultMode;
// Merge task-specific settings
if (userConfig.taskSpecificSettings) {
result.taskSpecificSettings = result.taskSpecificSettings || {};
// For each task type, merge settings
(['readme', 'commit', 'description'] as TaskType[]).forEach(taskType => {
if (userConfig.taskSpecificSettings?.[taskType]) {
result.taskSpecificSettings![taskType] = {
...result.taskSpecificSettings![taskType],
...userConfig.taskSpecificSettings[taskType]
};
}
});
}
// Merge trimming configuration
if (userConfig.trimming) {
result.trimming = {
...result.trimming,
...userConfig.trimming
};
}
// Merge cache configuration
if (userConfig.cache) {
result.cache = {
...result.cache,
...userConfig.cache
};
}
// Merge analyzer configuration
if (userConfig.analyzer) {
result.analyzer = {
...result.analyzer,
...userConfig.analyzer
};
}
// Merge prioritization weights
if (userConfig.prioritization) {
result.prioritization = {
...result.prioritization,
...userConfig.prioritization
};
}
// Merge tier configuration
if (userConfig.tiers) {
result.tiers = {
...result.tiers,
...userConfig.tiers
};
}
// Merge iterative configuration
if (userConfig.iterative) {
result.iterative = {
...result.iterative,
...userConfig.iterative
};
}
return result;
}
/**
* Get the complete configuration
*/
public getConfig(): IContextConfig {
return this.config;
}
/**
* Get the trimming configuration
*/
public getTrimConfig(): ITrimConfig {
return this.config.trimming || {};
}
/**
* Get configuration for a specific task
* @param taskType The type of task
*/
public getTaskConfig(taskType: TaskType): ITaskConfig {
// Get task-specific config or empty object
const taskConfig = this.config.taskSpecificSettings?.[taskType] || {};
// If mode is not specified, use default mode
if (!taskConfig.mode) {
taskConfig.mode = this.config.defaultMode;
}
return taskConfig;
}
/**
* Get the maximum tokens allowed for context
*/
public getMaxTokens(): number {
return this.config.maxTokens || 190000;
}
/**
* Update the configuration
* @param config The new configuration
*/
public async updateConfig(config: Partial<IContextConfig>): Promise<void> {
// Merge with existing config
this.config = this.mergeConfigs(this.config, config);
// Invalidate cache
this.configCache = null;
try {
if (!this.projectDir) {
return;
}
// Read the existing npmextra.json file
const npmextraJsonPath = plugins.path.join(this.projectDir, 'npmextra.json');
let npmextraContent = {};
if (await plugins.fsInstance.file(npmextraJsonPath).exists()) {
const npmextraJsonFile = await plugins.smartfileFactory.fromFilePath(npmextraJsonPath);
npmextraContent = JSON.parse(npmextraJsonFile.contents.toString()) || {};
}
// Update the tsdoc context configuration
const typedContent = npmextraContent as any;
if (!typedContent.tsdoc) typedContent.tsdoc = {};
typedContent.tsdoc.context = this.config;
// Write back to npmextra.json
const updatedContent = JSON.stringify(npmextraContent, null, 2);
await plugins.fsInstance.file(npmextraJsonPath).encoding('utf8').write(updatedContent);
} catch (error) {
console.error('Error updating context configuration:', error);
}
}
/**
* Get cache configuration
*/
public getCacheConfig(): ICacheConfig {
return this.config.cache || { enabled: true, ttl: 3600, maxSize: 100 };
}
/**
* Get analyzer configuration
*/
public getAnalyzerConfig(): IAnalyzerConfig {
return this.config.analyzer || { useAIRefinement: false, aiModel: 'haiku' };
}
/**
* Get prioritization weights
*/
public getPrioritizationWeights(): IPrioritizationWeights {
return this.config.prioritization || {
dependencyWeight: 0.3,
relevanceWeight: 0.4,
efficiencyWeight: 0.2,
recencyWeight: 0.1
};
}
/**
* Get tier configuration
*/
public getTierConfig(): ITierConfig {
return this.config.tiers || {
essential: { minScore: 0.8, trimLevel: 'none' },
important: { minScore: 0.5, trimLevel: 'light' },
optional: { minScore: 0.2, trimLevel: 'aggressive' }
};
}
/**
* Get iterative configuration
*/
public getIterativeConfig(): IIterativeConfig {
return this.config.iterative || {
maxIterations: 5,
firstPassFileLimit: 10,
subsequentPassFileLimit: 5,
temperature: 0.3,
model: 'gpt-4-turbo-preview'
};
}
/**
* Clear the config cache (force reload on next access)
*/
public clearCache(): void {
this.configCache = null;
}
}

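Although this diff removes the ConfigManager, the file documents the configuration surface that lived under the `@git.zone/tsdoc` → `context` key of npmextra.json. A minimal sketch of overriding a few of those defaults through the class API shown above; the import path is hypothetical:

```typescript
// Hypothetical import path for the removed module.
import { ConfigManager } from './context/configmanager.js';

async function overrideContextConfig(projectDir: string): Promise<void> {
  const configManager = ConfigManager.getInstance();
  await configManager.initialize(projectDir);

  // Override a subset of the defaults; mergeConfigs fills in the rest.
  await configManager.updateConfig({
    maxTokens: 150000,
    trimming: {
      removeImplementations: false,
      maxFunctionLines: 10,
    },
  });

  console.log(configManager.getMaxTokens()); // 150000
}
```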
View File

@@ -1,391 +0,0 @@
import * as plugins from '../plugins.js';
import type {
IFileMetadata,
IFileDependencies,
IFileAnalysis,
IAnalysisResult,
TaskType,
IPrioritizationWeights,
ITierConfig,
} from './types.js';
/**
* ContextAnalyzer provides intelligent file selection and prioritization
* based on dependency analysis, task relevance, and configurable weights
*/
export class ContextAnalyzer {
private projectRoot: string;
private weights: Required<IPrioritizationWeights>;
private tiers: Required<ITierConfig>;
/**
* Creates a new ContextAnalyzer
* @param projectRoot - Root directory of the project
* @param weights - Prioritization weights
* @param tiers - Tier configuration
*/
constructor(
projectRoot: string,
weights: Partial<IPrioritizationWeights> = {},
tiers: Partial<ITierConfig> = {}
) {
this.projectRoot = projectRoot;
// Default weights
this.weights = {
dependencyWeight: weights.dependencyWeight ?? 0.3,
relevanceWeight: weights.relevanceWeight ?? 0.4,
efficiencyWeight: weights.efficiencyWeight ?? 0.2,
recencyWeight: weights.recencyWeight ?? 0.1,
};
// Default tiers
this.tiers = {
essential: tiers.essential ?? { minScore: 0.8, trimLevel: 'none' },
important: tiers.important ?? { minScore: 0.5, trimLevel: 'light' },
optional: tiers.optional ?? { minScore: 0.2, trimLevel: 'aggressive' },
};
}
/**
* Analyzes files for a specific task type
* @param metadata - Array of file metadata to analyze
* @param taskType - Type of task being performed
* @param changedFiles - Optional list of recently changed files (for commits)
* @returns Analysis result with scored files
*/
public async analyze(
metadata: IFileMetadata[],
taskType: TaskType,
changedFiles: string[] = []
): Promise<IAnalysisResult> {
const startTime = Date.now();
// Build dependency graph
const dependencyGraph = await this.buildDependencyGraph(metadata);
// Calculate centrality scores
this.calculateCentrality(dependencyGraph);
// Analyze each file
const files: IFileAnalysis[] = [];
for (const meta of metadata) {
const analysis = await this.analyzeFile(
meta,
taskType,
dependencyGraph,
changedFiles
);
files.push(analysis);
}
// Sort by importance score (highest first)
files.sort((a, b) => b.importanceScore - a.importanceScore);
const analysisDuration = Date.now() - startTime;
return {
taskType,
files,
dependencyGraph,
totalFiles: metadata.length,
analysisDuration,
};
}
/**
* Builds a dependency graph from file metadata
* @param metadata - Array of file metadata
* @returns Dependency graph as a map
*/
private async buildDependencyGraph(
metadata: IFileMetadata[]
): Promise<Map<string, IFileDependencies>> {
const graph = new Map<string, IFileDependencies>();
// Initialize graph entries
for (const meta of metadata) {
graph.set(meta.path, {
path: meta.path,
imports: [],
importedBy: [],
centrality: 0,
});
}
// Parse imports from each file
for (const meta of metadata) {
try {
const contents = await plugins.fsInstance.file(meta.path).encoding('utf8').read() as string;
const imports = this.extractImports(contents, meta.path);
const deps = graph.get(meta.path)!;
deps.imports = imports;
// Update importedBy for imported files
for (const importPath of imports) {
const importedDeps = graph.get(importPath);
if (importedDeps) {
importedDeps.importedBy.push(meta.path);
}
}
} catch (error) {
console.warn(`Failed to parse imports from ${meta.path}:`, error.message);
}
}
return graph;
}
/**
* Extracts import statements from file contents
* @param contents - File contents
* @param filePath - Path of the file being analyzed
* @returns Array of absolute paths to imported files
*/
private extractImports(contents: string, filePath: string): string[] {
const imports: string[] = [];
const fileDir = plugins.path.dirname(filePath);
// Match various import patterns
const importRegex = /(?:import|export).*?from\s+['"](.+?)['"]/g;
let match;
while ((match = importRegex.exec(contents)) !== null) {
const importPath = match[1];
// Skip external modules
if (!importPath.startsWith('.')) {
continue;
}
// Resolve relative import to absolute path
let resolvedPath = plugins.path.resolve(fileDir, importPath);
// Handle various file extensions
const extensions = ['.ts', '.js', '.tsx', '.jsx', '/index.ts', '/index.js'];
let found = false;
for (const ext of extensions) {
const testPath = resolvedPath.endsWith(ext) ? resolvedPath : resolvedPath + ext;
try {
// Use synchronous file check to avoid async in this context
const fs = require('fs');
const exists = fs.existsSync(testPath);
if (exists) {
imports.push(testPath);
found = true;
break;
}
} catch (error) {
// Continue trying other extensions
}
}
if (!found && !resolvedPath.includes('.')) {
// Try with .ts extension as default
imports.push(resolvedPath + '.ts');
}
}
return imports;
}
/**
* Calculates centrality scores for all nodes in the dependency graph
* Uses a simplified PageRank-like algorithm
* @param graph - Dependency graph
*/
private calculateCentrality(graph: Map<string, IFileDependencies>): void {
const damping = 0.85;
const iterations = 10;
const nodeCount = graph.size;
// Initialize scores
const scores = new Map<string, number>();
for (const path of graph.keys()) {
scores.set(path, 1.0 / nodeCount);
}
// Iterative calculation
for (let i = 0; i < iterations; i++) {
const newScores = new Map<string, number>();
for (const [path, deps] of graph.entries()) {
let score = (1 - damping) / nodeCount;
// Add contributions from nodes that import this file
for (const importerPath of deps.importedBy) {
const importerDeps = graph.get(importerPath);
if (importerDeps) {
const importerScore = scores.get(importerPath) ?? 0;
const outgoingCount = importerDeps.imports.length || 1;
score += damping * (importerScore / outgoingCount);
}
}
newScores.set(path, score);
}
// Update scores
for (const [path, score] of newScores) {
scores.set(path, score);
}
}
// Normalize scores to 0-1 range
const maxScore = Math.max(...scores.values());
if (maxScore > 0) {
for (const deps of graph.values()) {
const score = scores.get(deps.path) ?? 0;
deps.centrality = score / maxScore;
}
}
}
/**
* Analyzes a single file
* @param meta - File metadata
* @param taskType - Task being performed
* @param graph - Dependency graph
* @param changedFiles - Recently changed files
* @returns File analysis
*/
private async analyzeFile(
meta: IFileMetadata,
taskType: TaskType,
graph: Map<string, IFileDependencies>,
changedFiles: string[]
): Promise<IFileAnalysis> {
const deps = graph.get(meta.path);
const centralityScore = deps?.centrality ?? 0;
// Calculate task-specific relevance
const relevanceScore = this.calculateRelevance(meta, taskType);
// Calculate efficiency (information per token)
const efficiencyScore = this.calculateEfficiency(meta);
// Calculate recency (for commit tasks)
const recencyScore = this.calculateRecency(meta, changedFiles);
// Calculate combined importance score
const importanceScore =
relevanceScore * this.weights.relevanceWeight +
centralityScore * this.weights.dependencyWeight +
efficiencyScore * this.weights.efficiencyWeight +
recencyScore * this.weights.recencyWeight;
// Assign tier
const tier = this.assignTier(importanceScore);
return {
path: meta.path,
relevanceScore,
centralityScore,
efficiencyScore,
recencyScore,
importanceScore,
tier,
reason: this.generateReason(meta, taskType, importanceScore, tier),
};
}
/**
* Calculates task-specific relevance score
*/
private calculateRelevance(meta: IFileMetadata, taskType: TaskType): number {
const relativePath = meta.relativePath.toLowerCase();
let score = 0.5; // Base score
// README generation - prioritize public APIs and main exports
if (taskType === 'readme') {
if (relativePath.includes('index.ts')) score += 0.3;
if (relativePath.match(/^ts\/[^\/]+\.ts$/)) score += 0.2; // Root level exports
if (relativePath.includes('test/')) score -= 0.3;
if (relativePath.includes('classes/')) score += 0.1;
if (relativePath.includes('interfaces/')) score += 0.1;
}
// Commit messages - prioritize changed files and their dependencies
if (taskType === 'commit') {
if (relativePath.includes('test/')) score -= 0.2;
// Recency will handle changed files
}
// Description generation - prioritize main exports and core interfaces
if (taskType === 'description') {
if (relativePath.includes('index.ts')) score += 0.4;
if (relativePath.match(/^ts\/[^\/]+\.ts$/)) score += 0.3;
if (relativePath.includes('test/')) score -= 0.4;
if (relativePath.includes('interfaces/')) score += 0.2;
}
return Math.max(0, Math.min(1, score));
}
/**
* Calculates efficiency score (information density)
*/
private calculateEfficiency(meta: IFileMetadata): number {
// Prefer files that are not too large (good signal-to-noise ratio)
const optimalSize = 5000; // ~1250 tokens
const distance = Math.abs(meta.estimatedTokens - optimalSize);
const normalized = Math.max(0, 1 - distance / optimalSize);
return normalized;
}
/**
* Calculates recency score for changed files
*/
private calculateRecency(meta: IFileMetadata, changedFiles: string[]): number {
if (changedFiles.length === 0) {
return 0;
}
// Check if this file was changed
const isChanged = changedFiles.some((changed) => changed === meta.path);
return isChanged ? 1.0 : 0.0;
}
/**
* Assigns a tier based on importance score
*/
private assignTier(score: number): 'essential' | 'important' | 'optional' | 'excluded' {
if (score >= this.tiers.essential.minScore) return 'essential';
if (score >= this.tiers.important.minScore) return 'important';
if (score >= this.tiers.optional.minScore) return 'optional';
return 'excluded';
}
/**
* Generates a human-readable reason for the score
*/
private generateReason(
meta: IFileMetadata,
taskType: TaskType,
score: number,
tier: string
): string {
const reasons: string[] = [];
if (meta.relativePath.includes('index.ts')) {
reasons.push('main export file');
}
if (meta.relativePath.includes('test/')) {
reasons.push('test file (lower priority)');
}
if (taskType === 'readme' && meta.relativePath.match(/^ts\/[^\/]+\.ts$/)) {
reasons.push('root-level module');
}
reasons.push(`score: ${score.toFixed(2)}`);
reasons.push(`tier: ${tier}`);
return reasons.join(', ');
}
}

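The analyzer's importance score is a plain weighted sum, so a tier assignment can be checked by hand. A worked example using the default weights and tiers defined above:

```typescript
// Default weights: relevance 0.4, dependency 0.3, efficiency 0.2, recency 0.1.
const relevanceScore = 0.9;  // e.g. ts/index.ts for a readme task
const centralityScore = 0.7; // imported by many files
const efficiencyScore = 0.8; // close to the ~5000-char sweet spot
const recencyScore = 0.0;    // not a commit task, so no changed files

const importanceScore =
  relevanceScore * 0.4 +
  centralityScore * 0.3 +
  efficiencyScore * 0.2 +
  recencyScore * 0.1; // 0.36 + 0.21 + 0.16 + 0 = 0.73

// With the default tiers (essential >= 0.8, important >= 0.5, optional >= 0.2),
// a score of 0.73 lands in the "important" tier -> light trimming.
console.log(importanceScore.toFixed(2)); // "0.73"
```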
View File

@@ -1,286 +0,0 @@
import * as plugins from '../plugins.js';
import * as fs from 'fs';
import type { ICacheEntry, ICacheConfig } from './types.js';
import { logger } from '../logging.js';
/**
* ContextCache provides persistent caching of file contents and token counts
* with automatic invalidation on file changes
*/
export class ContextCache {
private cacheDir: string;
private cache: Map<string, ICacheEntry> = new Map();
private config: Required<ICacheConfig>;
private cacheIndexPath: string;
/**
* Creates a new ContextCache
* @param projectRoot - Root directory of the project
* @param config - Cache configuration
*/
constructor(projectRoot: string, config: Partial<ICacheConfig> = {}) {
this.config = {
enabled: config.enabled ?? true,
ttl: config.ttl ?? 3600, // 1 hour default
maxSize: config.maxSize ?? 100, // 100MB default
directory: config.directory ?? plugins.path.join(projectRoot, '.nogit', 'context-cache'),
};
this.cacheDir = this.config.directory;
this.cacheIndexPath = plugins.path.join(this.cacheDir, 'index.json');
}
/**
* Initializes the cache by loading from disk
*/
public async init(): Promise<void> {
if (!this.config.enabled) {
return;
}
// Ensure cache directory exists
await plugins.fsInstance.directory(this.cacheDir).recursive().create();
// Load cache index if it exists
try {
const indexExists = await plugins.fsInstance.file(this.cacheIndexPath).exists();
if (indexExists) {
const indexContent = await plugins.fsInstance.file(this.cacheIndexPath).encoding('utf8').read() as string;
const indexData = JSON.parse(indexContent) as ICacheEntry[];
if (Array.isArray(indexData)) {
for (const entry of indexData) {
this.cache.set(entry.path, entry);
}
}
}
} catch (error) {
console.warn('Failed to load cache index:', error.message);
// Start with empty cache if loading fails
}
// Clean up expired and invalid entries
await this.cleanup();
}
/**
* Gets a cached entry if it's still valid
* @param filePath - Absolute path to the file
* @returns Cache entry if valid, null otherwise
*/
public async get(filePath: string): Promise<ICacheEntry | null> {
if (!this.config.enabled) {
return null;
}
const entry = this.cache.get(filePath);
if (!entry) {
return null;
}
// Check if entry is expired
const now = Date.now();
if (now - entry.cachedAt > this.config.ttl * 1000) {
this.cache.delete(filePath);
return null;
}
// Check if file has been modified
try {
const stats = await fs.promises.stat(filePath);
const currentMtime = Math.floor(stats.mtimeMs);
if (currentMtime !== entry.mtime) {
// File has changed, invalidate cache
this.cache.delete(filePath);
return null;
}
return entry;
} catch (error) {
// File doesn't exist anymore
this.cache.delete(filePath);
return null;
}
}
/**
* Stores a cache entry
* @param entry - Cache entry to store
*/
public async set(entry: ICacheEntry): Promise<void> {
if (!this.config.enabled) {
return;
}
this.cache.set(entry.path, entry);
// Check cache size and evict old entries if needed
await this.enforceMaxSize();
// Persist to disk (async, don't await)
this.persist().catch((error) => {
console.warn('Failed to persist cache:', error.message);
});
}
/**
* Stores multiple cache entries
* @param entries - Array of cache entries
*/
public async setMany(entries: ICacheEntry[]): Promise<void> {
if (!this.config.enabled) {
return;
}
for (const entry of entries) {
this.cache.set(entry.path, entry);
}
await this.enforceMaxSize();
await this.persist();
}
/**
* Checks if a file is cached and valid
* @param filePath - Absolute path to the file
* @returns True if cached and valid
*/
public async has(filePath: string): Promise<boolean> {
const entry = await this.get(filePath);
return entry !== null;
}
/**
* Gets cache statistics
*/
public getStats(): {
entries: number;
totalSize: number;
oldestEntry: number | null;
newestEntry: number | null;
} {
let totalSize = 0;
let oldestEntry: number | null = null;
let newestEntry: number | null = null;
for (const entry of this.cache.values()) {
totalSize += entry.contents.length;
if (oldestEntry === null || entry.cachedAt < oldestEntry) {
oldestEntry = entry.cachedAt;
}
if (newestEntry === null || entry.cachedAt > newestEntry) {
newestEntry = entry.cachedAt;
}
}
return {
entries: this.cache.size,
totalSize,
oldestEntry,
newestEntry,
};
}
/**
* Clears all cache entries
*/
public async clear(): Promise<void> {
this.cache.clear();
await this.persist();
}
/**
* Clears specific cache entries
* @param filePaths - Array of file paths to clear
*/
public async clearPaths(filePaths: string[]): Promise<void> {
for (const path of filePaths) {
this.cache.delete(path);
}
await this.persist();
}
/**
* Cleans up expired and invalid cache entries
*/
private async cleanup(): Promise<void> {
const now = Date.now();
const toDelete: string[] = [];
for (const [path, entry] of this.cache.entries()) {
// Check expiration
if (now - entry.cachedAt > this.config.ttl * 1000) {
toDelete.push(path);
continue;
}
// Check if file still exists and hasn't changed
try {
const stats = await fs.promises.stat(path);
const currentMtime = Math.floor(stats.mtimeMs);
if (currentMtime !== entry.mtime) {
toDelete.push(path);
}
} catch (error) {
// File doesn't exist
toDelete.push(path);
}
}
for (const path of toDelete) {
this.cache.delete(path);
}
if (toDelete.length > 0) {
await this.persist();
}
}
/**
* Enforces maximum cache size by evicting oldest entries
*/
private async enforceMaxSize(): Promise<void> {
const stats = this.getStats();
const maxSizeBytes = this.config.maxSize * 1024 * 1024; // Convert MB to bytes
if (stats.totalSize <= maxSizeBytes) {
return;
}
// Sort entries by age (oldest first)
const entries = Array.from(this.cache.entries()).sort(
(a, b) => a[1].cachedAt - b[1].cachedAt
);
// Remove oldest entries until we're under the limit
let currentSize = stats.totalSize;
for (const [path, entry] of entries) {
if (currentSize <= maxSizeBytes) {
break;
}
currentSize -= entry.contents.length;
this.cache.delete(path);
}
}
/**
* Persists cache index to disk
*/
private async persist(): Promise<void> {
if (!this.config.enabled) {
return;
}
try {
const entries = Array.from(this.cache.values());
const content = JSON.stringify(entries, null, 2);
await plugins.fsInstance.file(this.cacheIndexPath).encoding('utf8').write(content);
} catch (error) {
console.warn('Failed to persist cache index:', error.message);
}
}
}

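Cache entries are invalidated on TTL expiry and on file modification, the latter by comparing stored and current mtimes. A minimal sketch of that mtime check, using only Node's fs module:

```typescript
import * as fs from 'fs';

// Returns true if the file at filePath still matches the cached mtime.
// Mirrors the invalidation check in ContextCache.get() above.
async function isCacheEntryFresh(
  filePath: string,
  cachedMtime: number,
): Promise<boolean> {
  try {
    const stats = await fs.promises.stat(filePath);
    return Math.floor(stats.mtimeMs) === cachedMtime;
  } catch {
    // File no longer exists -> entry is stale.
    return false;
  }
}
```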
View File

@@ -1,310 +0,0 @@
import * as plugins from '../plugins.js';
import type { ITrimConfig, ContextMode } from './types.js';
/**
* Class responsible for trimming file contents to reduce token usage
* while preserving important information for context
*/
export class ContextTrimmer {
private config: ITrimConfig;
/**
* Create a new ContextTrimmer with the given configuration
* @param config The trimming configuration
*/
constructor(config?: ITrimConfig) {
this.config = {
removeImplementations: true,
preserveInterfaces: true,
preserveTypeDefs: true,
preserveJSDoc: true,
maxFunctionLines: 5,
removeComments: true,
removeBlankLines: true,
...config
};
}
/**
* Trim a file's contents based on the configuration
* @param filePath The path to the file
* @param content The file's contents
* @param mode The context mode to use
* @returns The trimmed file contents
*/
public trimFile(filePath: string, content: string, mode: ContextMode = 'trimmed'): string {
// If mode is 'full', return the original content
if (mode === 'full') {
return content;
}
// Process based on file type
if (filePath.endsWith('.ts') || filePath.endsWith('.tsx')) {
return this.trimTypeScriptFile(content);
} else if (filePath.endsWith('.md')) {
return this.trimMarkdownFile(content);
} else if (filePath.endsWith('.json')) {
return this.trimJsonFile(content);
}
// Default to returning the original content for unknown file types
return content;
}
/**
* Trim a TypeScript file to reduce token usage
* @param content The TypeScript file contents
* @returns The trimmed file contents
*/
private trimTypeScriptFile(content: string): string {
let result = content;
// Step 1: Preserve JSDoc comments if configured
const jsDocComments: string[] = [];
if (this.config.preserveJSDoc) {
const jsDocRegex = /\/\*\*[\s\S]*?\*\//g;
const matches = result.match(jsDocRegex) || [];
jsDocComments.push(...matches);
}
// Step 2: Remove comments if configured
if (this.config.removeComments) {
// Remove single-line comments
result = result.replace(/\/\/.*$/gm, '');
// Remove multi-line comments (except JSDoc if preserveJSDoc is true)
if (!this.config.preserveJSDoc) {
result = result.replace(/\/\*[\s\S]*?\*\//g, '');
} else {
// Only remove non-JSDoc comments
result = result.replace(/\/\*(?!\*)[\s\S]*?\*\//g, '');
}
}
// Step 3: Remove function implementations if configured
if (this.config.removeImplementations) {
// Match function and method bodies
result = result.replace(
/(\b(function|constructor|async function)\s+[\w$]*\s*\([^)]*\)\s*{)([\s\S]*?)(})/g,
(match, start, funcType, body, end) => {
// Keep function signature and opening brace, replace body with comment
return `${start} /* implementation removed */ ${end}`;
}
);
// Match arrow function bodies
result = result.replace(
/(\([^)]*\)\s*=>\s*{)([\s\S]*?)(})/g,
(match, start, body, end) => {
return `${start} /* implementation removed */ ${end}`;
}
);
// Match method declarations
result = result.replace(
/(^\s*[\w$]*\s*\([^)]*\)\s*{)([\s\S]*?)(})/gm,
(match, start, body, end) => {
return `${start} /* implementation removed */ ${end}`;
}
);
// Match class methods
result = result.replace(
/(\b(public|private|protected|static|async)?\s+[\w$]+\s*\([^)]*\)\s*{)([\s\S]*?)(})/g,
(match, start, modifier, body, end) => {
return `${start} /* implementation removed */ ${end}`;
}
);
} else if (this.config.maxFunctionLines && this.config.maxFunctionLines > 0) {
// If not removing implementations completely, limit the number of lines
// Match function and method bodies
result = result.replace(
/(\b(function|constructor|async function)\s+[\w$]*\s*\([^)]*\)\s*{)([\s\S]*?)(})/g,
(match, start, funcType, body, end) => {
return this.limitFunctionBody(start, body, end);
}
);
// Match arrow function bodies
result = result.replace(
/(\([^)]*\)\s*=>\s*{)([\s\S]*?)(})/g,
(match, start, body, end) => {
return this.limitFunctionBody(start, body, end);
}
);
// Match method declarations
result = result.replace(
/(^\s*[\w$]*\s*\([^)]*\)\s*{)([\s\S]*?)(})/gm,
(match, start, body, end) => {
return this.limitFunctionBody(start, body, end);
}
);
// Match class methods
result = result.replace(
/(\b(public|private|protected|static|async)?\s+[\w$]+\s*\([^)]*\)\s*{)([\s\S]*?)(})/g,
(match, start, modifier, body, end) => {
return this.limitFunctionBody(start, body, end);
}
);
}
// Step 4: Remove blank lines if configured
if (this.config.removeBlankLines) {
result = result.replace(/^\s*[\r\n]/gm, '');
}
// Step 5: JSDoc comments need no restoration here: the removal regex in
// Step 2 deliberately skips JSDoc blocks, so they survive in place.
if (this.config.preserveJSDoc && jsDocComments.length > 0) {
// Intentionally a no-op; jsDocComments only records what was preserved.
}
return result;
}
/**
* Limit a function body to a maximum number of lines
* @param start The function signature and opening brace
* @param body The function body
* @param end The closing brace
* @returns The limited function body
*/
private limitFunctionBody(start: string, body: string, end: string): string {
const lines = body.split('\n');
if (lines.length > this.config.maxFunctionLines!) {
const limitedBody = lines.slice(0, this.config.maxFunctionLines!).join('\n');
return `${start}${limitedBody}\n // ... (${lines.length - this.config.maxFunctionLines!} lines trimmed)\n${end}`;
}
return `${start}${body}${end}`;
}
/**
* Trim a Markdown file to reduce token usage
* @param content The Markdown file contents
* @returns The trimmed file contents
*/
private trimMarkdownFile(content: string): string {
// For markdown files, we generally want to keep most content
// but we can remove lengthy code blocks if needed
return content;
}
/**
* Trim a JSON file to reduce token usage
* @param content The JSON file contents
* @returns The trimmed file contents
*/
private trimJsonFile(content: string): string {
try {
// Parse the JSON
const json = JSON.parse(content);
// For package.json, keep only essential information
if ('name' in json && 'version' in json && 'dependencies' in json) {
const essentialKeys = [
'name', 'version', 'description', 'author', 'license',
'main', 'types', 'exports', 'type'
];
const trimmedJson: any = {};
essentialKeys.forEach(key => {
if (key in json) {
trimmedJson[key] = json[key];
}
});
// Add dependency information without versions
if ('dependencies' in json) {
trimmedJson.dependencies = Object.keys(json.dependencies).reduce((acc, dep) => {
acc[dep] = '*'; // Replace version with wildcard
return acc;
}, {} as Record<string, string>);
}
// Return the trimmed JSON
return JSON.stringify(trimmedJson, null, 2);
}
// For other JSON files, leave as is
return content;
} catch (error) {
// If there's an error parsing the JSON, return the original content
return content;
}
}
/**
* Update the trimmer configuration
* @param config The new configuration to apply
*/
public updateConfig(config: ITrimConfig): void {
this.config = {
...this.config,
...config
};
}
/**
* Trim a file based on its importance tier
* @param filePath The path to the file
* @param content The file's contents
* @param level The trimming level to apply ('none', 'light', 'aggressive')
* @returns The trimmed file contents
*/
public trimFileWithLevel(
filePath: string,
content: string,
level: 'none' | 'light' | 'aggressive'
): string {
// No trimming for essential files
if (level === 'none') {
return content;
}
// Create a temporary config based on level
const originalConfig = { ...this.config };
try {
if (level === 'light') {
// Light trimming: preserve signatures, remove only complex implementations
this.config = {
...this.config,
removeImplementations: false,
preserveInterfaces: true,
preserveTypeDefs: true,
preserveJSDoc: true,
maxFunctionLines: 10,
removeComments: false,
removeBlankLines: true
};
} else if (level === 'aggressive') {
// Aggressive trimming: remove all implementations, keep only signatures
this.config = {
...this.config,
removeImplementations: true,
preserveInterfaces: true,
preserveTypeDefs: true,
preserveJSDoc: true,
maxFunctionLines: 3,
removeComments: true,
removeBlankLines: true
};
}
// Process based on file type
let result = content;
if (filePath.endsWith('.ts') || filePath.endsWith('.tsx')) {
result = this.trimTypeScriptFile(content);
} else if (filePath.endsWith('.md')) {
result = this.trimMarkdownFile(content);
} else if (filePath.endsWith('.json')) {
result = this.trimJsonFile(content);
}
return result;
} finally {
// Restore original config
this.config = originalConfig;
}
}
}
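
A short sketch of how the trimmer might be driven through the public trimFile and trimFileWithLevel methods shown above; the sample source string is illustrative.

import { ContextTrimmer } from './context-trimmer.js';

const trimmer = new ContextTrimmer({ maxFunctionLines: 5 });
const source = [
  '/** Adds two numbers */',
  'export function add(a: number, b: number) {',
  '  return a + b;',
  '}',
].join('\n');

// Mode-based trimming: 'full' would return the input untouched
const trimmed = trimmer.trimFile('ts/math.ts', source, 'trimmed');
// Tier-based trimming: 'aggressive' strips bodies, keeping signatures and JSDoc
const aggressive = trimmer.trimFileWithLevel('ts/math.ts', source, 'aggressive');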

View File

@@ -1,332 +0,0 @@
import * as plugins from '../plugins.js';
import type { ContextMode, IContextResult, IFileInfo, TaskType, IFileMetadata } from './types.js';
import { ContextTrimmer } from './context-trimmer.js';
import { ConfigManager } from './config-manager.js';
import { LazyFileLoader } from './lazy-file-loader.js';
import { ContextCache } from './context-cache.js';
import { ContextAnalyzer } from './context-analyzer.js';
/**
* Enhanced ProjectContext that supports context optimization strategies
*/
export class EnhancedContext {
private projectDir: string;
private trimmer: ContextTrimmer;
private configManager: ConfigManager;
private lazyLoader: LazyFileLoader;
private cache: ContextCache;
private analyzer: ContextAnalyzer;
private contextMode: ContextMode = 'trimmed';
private tokenBudget: number = 190000; // Default for o4-mini
private contextResult: IContextResult = {
context: '',
tokenCount: 0,
includedFiles: [],
trimmedFiles: [],
excludedFiles: [],
tokenSavings: 0
};
/**
* Create a new EnhancedContext
* @param projectDirArg The project directory
*/
constructor(projectDirArg: string) {
this.projectDir = projectDirArg;
this.configManager = ConfigManager.getInstance();
this.trimmer = new ContextTrimmer(this.configManager.getTrimConfig());
this.lazyLoader = new LazyFileLoader(projectDirArg);
this.cache = new ContextCache(projectDirArg, this.configManager.getCacheConfig());
this.analyzer = new ContextAnalyzer(
projectDirArg,
this.configManager.getPrioritizationWeights(),
this.configManager.getTierConfig()
);
}
/**
* Initialize the context builder
*/
public async initialize(): Promise<void> {
await this.configManager.initialize(this.projectDir);
this.tokenBudget = this.configManager.getMaxTokens();
this.trimmer.updateConfig(this.configManager.getTrimConfig());
await this.cache.init();
}
/**
* Set the context mode
* @param mode The context mode to use
*/
public setContextMode(mode: ContextMode): void {
this.contextMode = mode;
}
/**
* Set the token budget
* @param maxTokens The maximum tokens to use
*/
public setTokenBudget(maxTokens: number): void {
this.tokenBudget = maxTokens;
}
/**
* Convert files to context with smart analysis and prioritization
* @param metadata - File metadata to analyze
* @param taskType - Task type for context-aware prioritization
* @param mode - Context mode to use
* @returns Context string
*/
public async convertFilesToContextWithAnalysis(
metadata: IFileMetadata[],
taskType: TaskType,
mode: ContextMode = this.contextMode
): Promise<string> {
// Reset context result
this.contextResult = {
context: '',
tokenCount: 0,
includedFiles: [],
trimmedFiles: [],
excludedFiles: [],
tokenSavings: 0
};
// Analyze files for smart prioritization
const analysis = await this.analyzer.analyze(metadata, taskType, []);
// Sort files by importance score (highest first)
const sortedAnalysis = [...analysis.files].sort(
(a, b) => b.importanceScore - a.importanceScore
);
// Filter out excluded tier
const relevantFiles = sortedAnalysis.filter(f => f.tier !== 'excluded');
let totalTokenCount = 0;
let totalOriginalTokens = 0;
const processedFiles: string[] = [];
// Load files with cache support
for (const fileAnalysis of relevantFiles) {
try {
// Check cache first
let contents: string;
let originalTokenCount: number;
const cached = await this.cache.get(fileAnalysis.path);
if (cached) {
contents = cached.contents;
originalTokenCount = cached.tokenCount;
} else {
// Load file
const fileData = await plugins.fsInstance.file(fileAnalysis.path).encoding('utf8').read() as string;
contents = fileData;
originalTokenCount = this.countTokens(contents);
// Cache it
await this.cache.set({
path: fileAnalysis.path,
contents,
tokenCount: originalTokenCount,
mtime: Date.now(),
cachedAt: Date.now()
});
}
totalOriginalTokens += originalTokenCount;
// Apply tier-based trimming
let processedContent = contents;
let trimLevel: 'none' | 'light' | 'aggressive' = 'light';
if (fileAnalysis.tier === 'essential') {
trimLevel = 'none';
} else if (fileAnalysis.tier === 'important') {
trimLevel = 'light';
} else if (fileAnalysis.tier === 'optional') {
trimLevel = 'aggressive';
}
// Apply trimming based on mode and tier
if (mode !== 'full' && trimLevel !== 'none') {
const relativePath = plugins.path.relative(this.projectDir, fileAnalysis.path);
processedContent = this.trimmer.trimFileWithLevel(
relativePath,
contents,
trimLevel
);
}
// Calculate token count
const processedTokenCount = this.countTokens(processedContent);
// Check token budget
if (totalTokenCount + processedTokenCount > this.tokenBudget) {
// We don't have budget for this file
const relativePath = plugins.path.relative(this.projectDir, fileAnalysis.path);
this.contextResult.excludedFiles.push({
path: fileAnalysis.path,
contents,
relativePath,
tokenCount: originalTokenCount,
importanceScore: fileAnalysis.importanceScore
});
continue;
}
// Format the file for context
const relativePath = plugins.path.relative(this.projectDir, fileAnalysis.path);
const formattedContent = `
====== START OF FILE ${relativePath} ======
${processedContent}
====== END OF FILE ${relativePath} ======
`;
processedFiles.push(formattedContent);
totalTokenCount += processedTokenCount;
// Track file in appropriate list
const fileInfo: IFileInfo = {
path: fileAnalysis.path,
contents: processedContent,
relativePath,
tokenCount: processedTokenCount,
importanceScore: fileAnalysis.importanceScore
};
if (trimLevel === 'none' || processedContent === contents) {
this.contextResult.includedFiles.push(fileInfo);
} else {
this.contextResult.trimmedFiles.push(fileInfo);
this.contextResult.tokenSavings += (originalTokenCount - processedTokenCount);
}
} catch (error) {
console.warn(`Failed to process file ${fileAnalysis.path}:`, error.message);
}
}
// Join all processed files
const context = processedFiles.join('\n');
// Update context result
this.contextResult.context = context;
this.contextResult.tokenCount = totalTokenCount;
return context;
}
/**
* Build context for the project using smart analysis
* @param taskType Task type for context-aware prioritization (defaults to 'description')
*/
public async buildContext(taskType?: TaskType): Promise<IContextResult> {
// Initialize if needed
if (this.tokenBudget === 0) {
await this.initialize();
}
// Smart context building always requires a task type for optimal prioritization
// Default to 'description' if not provided
const effectiveTaskType = taskType || 'description';
// Get task-specific configuration
const taskConfig = this.configManager.getTaskConfig(effectiveTaskType);
if (taskConfig.mode) {
this.setContextMode(taskConfig.mode);
}
// Build globs for scanning
const includeGlobs = taskConfig?.includePaths?.map(p => `${p}/**/*.ts`) || [
'ts/**/*.ts',
'ts*/**/*.ts'
];
// Add config files
const configGlobs = [
'package.json',
'readme.md',
'readme.hints.md',
'npmextra.json'
];
// Scan files for metadata (fast, doesn't load contents)
const metadata = await this.lazyLoader.scanFiles([...configGlobs, ...includeGlobs]);
// Use smart analyzer to build context with intelligent prioritization
await this.convertFilesToContextWithAnalysis(metadata, effectiveTaskType, this.contextMode);
return this.contextResult;
}
/**
* Update the context with git diff information for commit tasks
* @param gitDiff The git diff to include
*/
public updateWithGitDiff(gitDiff: string): IContextResult {
// If we don't have a context yet, return empty result
if (!this.contextResult.context) {
return this.contextResult;
}
// Add git diff to context
const diffSection = `
====== GIT DIFF ======
${gitDiff}
====== END GIT DIFF ======
`;
const diffTokenCount = this.countTokens(diffSection);
// Update context and token count
this.contextResult.context += diffSection;
this.contextResult.tokenCount += diffTokenCount;
return this.contextResult;
}
/**
* Count tokens in a string
* @param text The text to count tokens for
* @param model The model to use for token counting
*/
public countTokens(text: string, model: string = 'gpt-3.5-turbo'): number {
try {
// Use the gpt-tokenizer library to count tokens
const tokens = plugins.gptTokenizer.encode(text);
return tokens.length;
} catch (error) {
console.error('Error counting tokens:', error);
// Provide a rough estimate if tokenization fails
return Math.ceil(text.length / 4);
}
}
/**
* Get the context result
*/
public getContextResult(): IContextResult {
return this.contextResult;
}
/**
* Get the token count for the current context
*/
public getTokenCount(): number {
return this.contextResult.tokenCount;
}
/**
* Get both the context string and its token count
*/
public getContextWithTokenCount(): { context: string; tokenCount: number } {
return {
context: this.contextResult.context,
tokenCount: this.contextResult.tokenCount
};
}
}
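
A minimal sketch of the builder's happy path, assuming the public API shown above; the function name, project path, and budget are illustrative.

import { EnhancedContext } from './enhanced-context.js';

async function buildReadmeContext(projectDir: string) {
  const ctx = new EnhancedContext(projectDir);
  await ctx.initialize();
  ctx.setContextMode('trimmed');
  ctx.setTokenBudget(100_000);

  const result = await ctx.buildContext('readme');
  console.log(`tokens: ${result.tokenCount}, saved by trimming: ${result.tokenSavings}`);
  console.log(`included: ${result.includedFiles.length}, trimmed: ${result.trimmedFiles.length}, excluded: ${result.excludedFiles.length}`);
  return result;
}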

View File

@@ -1,70 +0,0 @@
import { EnhancedContext } from './enhanced-context.js';
import { TaskContextFactory } from './task-context-factory.js';
import { ConfigManager } from './config-manager.js';
import { ContextTrimmer } from './context-trimmer.js';
import { LazyFileLoader } from './lazy-file-loader.js';
import { ContextCache } from './context-cache.js';
import { ContextAnalyzer } from './context-analyzer.js';
import { DiffProcessor } from './diff-processor.js';
import type {
ContextMode,
IContextConfig,
IContextResult,
IFileInfo,
ITrimConfig,
ITaskConfig,
TaskType,
ICacheConfig,
IAnalyzerConfig,
IPrioritizationWeights,
ITierConfig,
ITierSettings,
IFileMetadata,
ICacheEntry,
IFileDependencies,
IFileAnalysis,
IAnalysisResult,
IIterativeConfig,
IIterativeContextResult,
IDiffFileInfo,
IProcessedDiff,
IDiffProcessorOptions
} from './types.js';
export {
// Classes
EnhancedContext,
TaskContextFactory,
ConfigManager,
ContextTrimmer,
LazyFileLoader,
ContextCache,
ContextAnalyzer,
DiffProcessor,
};
// Types
export type {
ContextMode,
IContextConfig,
IContextResult,
IFileInfo,
ITrimConfig,
ITaskConfig,
TaskType,
ICacheConfig,
IAnalyzerConfig,
IPrioritizationWeights,
ITierConfig,
ITierSettings,
IFileMetadata,
ICacheEntry,
IFileDependencies,
IFileAnalysis,
IAnalysisResult,
IIterativeConfig,
IIterativeContextResult,
IDiffFileInfo,
IProcessedDiff,
IDiffProcessorOptions
};
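
Consumers would pull everything through this barrel; a one-line sketch (the relative path is an assumption about the consumer's location):

import { TaskContextFactory, DiffProcessor, type IIterativeContextResult } from './context/index.js';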

View File

@@ -1,512 +0,0 @@
import * as plugins from '../plugins.js';
import * as fs from 'fs';
import { logger } from '../logging.js';
import type {
TaskType,
IFileMetadata,
IFileInfo,
IIterativeContextResult,
IIterationState,
IFileSelectionDecision,
IContextSufficiencyDecision,
IIterativeConfig,
} from './types.js';
import { LazyFileLoader } from './lazy-file-loader.js';
import { ContextCache } from './context-cache.js';
import { ContextAnalyzer } from './context-analyzer.js';
import { ConfigManager } from './config-manager.js';
/**
* Iterative context builder that uses AI to intelligently select files
* across multiple iterations until sufficient context is gathered
*/
export class IterativeContextBuilder {
private projectRoot: string;
private lazyLoader: LazyFileLoader;
private cache: ContextCache;
private analyzer: ContextAnalyzer;
private config: Required<IIterativeConfig>;
private tokenBudget: number = 190000;
private openaiInstance: plugins.smartai.OpenAiProvider;
private externalOpenaiInstance?: plugins.smartai.OpenAiProvider;
/**
* Creates a new IterativeContextBuilder
* @param projectRoot - Root directory of the project
* @param config - Iterative configuration
* @param openaiInstance - Optional pre-configured OpenAI provider instance
*/
constructor(
projectRoot: string,
config?: Partial<IIterativeConfig>,
openaiInstance?: plugins.smartai.OpenAiProvider
) {
this.projectRoot = projectRoot;
this.lazyLoader = new LazyFileLoader(projectRoot);
this.cache = new ContextCache(projectRoot);
this.analyzer = new ContextAnalyzer(projectRoot);
this.externalOpenaiInstance = openaiInstance;
// Default configuration
this.config = {
maxIterations: config?.maxIterations ?? 5,
firstPassFileLimit: config?.firstPassFileLimit ?? 10,
subsequentPassFileLimit: config?.subsequentPassFileLimit ?? 5,
temperature: config?.temperature ?? 0.3,
model: config?.model ?? 'gpt-4-turbo-preview',
};
}
/**
* Initialize the builder
*/
public async initialize(): Promise<void> {
await this.cache.init();
const configManager = ConfigManager.getInstance();
await configManager.initialize(this.projectRoot);
this.tokenBudget = configManager.getMaxTokens();
// Use external OpenAI instance if provided, otherwise create a new one
if (this.externalOpenaiInstance) {
this.openaiInstance = this.externalOpenaiInstance;
} else {
// Initialize OpenAI instance from environment
const qenvInstance = new plugins.qenv.Qenv();
const openaiToken = await qenvInstance.getEnvVarOnDemand('OPENAI_TOKEN');
if (!openaiToken) {
throw new Error('OPENAI_TOKEN environment variable is required for iterative context building');
}
this.openaiInstance = new plugins.smartai.OpenAiProvider({
openaiToken,
});
await this.openaiInstance.start();
}
}
/**
* Build context iteratively using AI decision making
* @param taskType - Type of task being performed
* @param additionalContext - Optional additional context (e.g., git diff for commit tasks)
* @returns Complete iterative context result
*/
public async buildContextIteratively(taskType: TaskType, additionalContext?: string): Promise<IIterativeContextResult> {
const startTime = Date.now();
logger.log('info', '🤖 Starting iterative context building...');
logger.log('info', ` Task: ${taskType}, Budget: ${this.tokenBudget} tokens, Max iterations: ${this.config.maxIterations}`);
// Phase 1: Scan project files for metadata
logger.log('info', '📋 Scanning project files...');
const metadata = await this.scanProjectFiles(taskType);
const totalEstimatedTokens = metadata.reduce((sum, m) => sum + m.estimatedTokens, 0);
logger.log('info', ` Found ${metadata.length} files (~${totalEstimatedTokens} estimated tokens)`);
// Phase 2: Analyze files for initial prioritization
logger.log('info', '🔍 Analyzing file dependencies and importance...');
const analysis = await this.analyzer.analyze(metadata, taskType, []);
logger.log('info', ` Analysis complete in ${analysis.analysisDuration}ms`);
// Track state across iterations
const iterations: IIterationState[] = [];
let totalTokensUsed = 0;
let apiCallCount = 0;
let loadedContent = '';
const includedFiles: IFileInfo[] = [];
// If additional context (e.g., git diff) is provided, prepend it
if (additionalContext) {
// NOTE: additionalContext is expected to be pre-processed by DiffProcessor
// which intelligently samples large diffs to stay within token budget (100k default)
const MAX_DIFF_TOKENS = 200000; // Safety net for edge cases (DiffProcessor uses 100k budget)
const diffSection = `
====== GIT DIFF ======
${additionalContext}
====== END OF GIT DIFF ======
`;
// Validate token count (should already be under budget from DiffProcessor)
const diffTokens = this.countTokens(diffSection);
if (diffTokens > MAX_DIFF_TOKENS) {
logger.log('error', `❌ Pre-processed git diff exceeds safety limit (${diffTokens.toLocaleString()} tokens > ${MAX_DIFF_TOKENS.toLocaleString()} limit)`);
logger.log('error', ` This should not happen - DiffProcessor should have limited to ~100k tokens.`);
logger.log('error', ` Please check DiffProcessor configuration and output.`);
throw new Error(
`Pre-processed git diff size (${diffTokens.toLocaleString()} tokens) exceeds safety limit (${MAX_DIFF_TOKENS.toLocaleString()} tokens). ` +
`This indicates a bug in DiffProcessor or misconfiguration.`
);
}
loadedContent = diffSection;
totalTokensUsed += diffTokens;
logger.log('info', `📝 Added pre-processed git diff to context (${diffTokens.toLocaleString()} tokens)`);
}
// Phase 3: Iterative file selection and loading
for (let iteration = 1; iteration <= this.config.maxIterations; iteration++) {
const iterationStart = Date.now();
logger.log('info', `\n🤔 Iteration ${iteration}/${this.config.maxIterations}: Asking AI which files to examine...`);
const remainingBudget = this.tokenBudget - totalTokensUsed;
logger.log('info', ` Token budget remaining: ${remainingBudget}/${this.tokenBudget} (${Math.round((remainingBudget / this.tokenBudget) * 100)}%)`);
// Get AI decision on which files to load
const decision = await this.getFileSelectionDecision(
metadata,
analysis.files.slice(0, 30), // Top 30 files by importance
taskType,
iteration,
totalTokensUsed,
remainingBudget,
loadedContent
);
apiCallCount++;
logger.log('info', ` AI reasoning: ${decision.reasoning}`);
logger.log('info', ` AI requested ${decision.filesToLoad.length} files`);
// Load requested files
const iterationFiles: IFileInfo[] = [];
let iterationTokens = 0;
if (decision.filesToLoad.length > 0) {
logger.log('info', '📥 Loading requested files...');
for (const filePath of decision.filesToLoad) {
try {
const fileInfo = await this.loadFile(filePath);
if (totalTokensUsed + fileInfo.tokenCount! <= this.tokenBudget) {
const formattedFile = this.formatFileForContext(fileInfo);
loadedContent += formattedFile;
includedFiles.push(fileInfo);
iterationFiles.push(fileInfo);
iterationTokens += fileInfo.tokenCount!;
totalTokensUsed += fileInfo.tokenCount!;
logger.log('info', `${fileInfo.relativePath} (${fileInfo.tokenCount} tokens)`);
} else {
logger.log('warn', `${fileInfo.relativePath} - would exceed budget, skipping`);
}
} catch (error) {
logger.log('warn', ` ✗ Failed to load ${filePath}: ${error.message}`);
}
}
}
// Record iteration state
const iterationDuration = Date.now() - iterationStart;
iterations.push({
iteration,
filesLoaded: iterationFiles,
tokensUsed: iterationTokens,
totalTokensUsed,
decision,
duration: iterationDuration,
});
logger.log('info', ` Iteration ${iteration} complete: ${iterationFiles.length} files loaded, ${iterationTokens} tokens used`);
// Check if we should continue
if (totalTokensUsed >= this.tokenBudget * 0.95) {
logger.log('warn', '⚠️ Approaching token budget limit, stopping iterations');
break;
}
// Ask AI if context is sufficient
if (iteration < this.config.maxIterations) {
logger.log('info', '🤔 Asking AI if context is sufficient...');
const sufficiencyDecision = await this.evaluateContextSufficiency(
loadedContent,
taskType,
iteration,
totalTokensUsed,
remainingBudget - iterationTokens
);
apiCallCount++;
logger.log('info', ` AI decision: ${sufficiencyDecision.sufficient ? '✅ SUFFICIENT' : '⏭️ NEEDS MORE'}`);
logger.log('info', ` Reasoning: ${sufficiencyDecision.reasoning}`);
if (sufficiencyDecision.sufficient) {
logger.log('ok', '✅ Context building complete - AI determined context is sufficient');
break;
}
}
}
const totalDuration = Date.now() - startTime;
logger.log('ok', `\n✅ Iterative context building complete!`);
logger.log('info', ` Files included: ${includedFiles.length}`);
logger.log('info', ` Token usage: ${totalTokensUsed}/${this.tokenBudget} (${Math.round((totalTokensUsed / this.tokenBudget) * 100)}%)`);
logger.log('info', ` Iterations: ${iterations.length}, API calls: ${apiCallCount}`);
logger.log('info', ` Total duration: ${(totalDuration / 1000).toFixed(2)}s`);
return {
context: loadedContent,
tokenCount: totalTokensUsed,
includedFiles,
trimmedFiles: [],
excludedFiles: [],
tokenSavings: 0,
iterationCount: iterations.length,
iterations,
apiCallCount,
totalDuration,
};
}
/**
* Scan project files based on task type
*/
private async scanProjectFiles(taskType: TaskType): Promise<IFileMetadata[]> {
const configManager = ConfigManager.getInstance();
const taskConfig = configManager.getTaskConfig(taskType);
const includeGlobs = taskConfig?.includePaths?.map(p => `${p}/**/*.ts`) || [
'ts/**/*.ts',
'ts*/**/*.ts'
];
const configGlobs = [
'package.json',
'readme.md',
'readme.hints.md',
'npmextra.json'
];
return await this.lazyLoader.scanFiles([...configGlobs, ...includeGlobs]);
}
/**
* Get AI decision on which files to load
*/
private async getFileSelectionDecision(
allMetadata: IFileMetadata[],
analyzedFiles: any[],
taskType: TaskType,
iteration: number,
tokensUsed: number,
remainingBudget: number,
loadedContent: string
): Promise<IFileSelectionDecision> {
const isFirstIteration = iteration === 1;
const fileLimit = isFirstIteration
? this.config.firstPassFileLimit
: this.config.subsequentPassFileLimit;
const systemPrompt = this.buildFileSelectionPrompt(
allMetadata,
analyzedFiles,
taskType,
iteration,
tokensUsed,
remainingBudget,
loadedContent,
fileLimit
);
const response = await this.openaiInstance.chat({
systemMessage: `You are an AI assistant that helps select the most relevant files for code analysis.
You must respond ONLY with valid JSON that can be parsed with JSON.parse().
Do not wrap the JSON in markdown code blocks or add any other text.`,
userMessage: systemPrompt,
messageHistory: [],
});
// Parse JSON response, handling potential markdown formatting
const content = response.message.replace('```json', '').replace('```', '').trim();
const parsed = JSON.parse(content);
return {
reasoning: parsed.reasoning || 'No reasoning provided',
filesToLoad: parsed.files_to_load || [],
estimatedTokensNeeded: parsed.estimated_tokens_needed,
};
}
/**
* Build prompt for file selection
*/
private buildFileSelectionPrompt(
metadata: IFileMetadata[],
analyzedFiles: any[],
taskType: TaskType,
iteration: number,
tokensUsed: number,
remainingBudget: number,
loadedContent: string,
fileLimit: number
): string {
const taskDescriptions = {
readme: 'generating a comprehensive README that explains the project\'s purpose, features, and API',
commit: 'analyzing code changes to generate an intelligent commit message',
description: 'generating a concise project description for package.json',
};
const alreadyLoadedFiles = loadedContent
? loadedContent.split('\n======').slice(1).map(section => {
const match = section.match(/START OF FILE (.+?) ======/);
return match ? match[1] : '';
}).filter(Boolean)
: [];
const availableFiles = metadata
.filter(m => !alreadyLoadedFiles.includes(m.relativePath))
.map(m => {
const analysis = analyzedFiles.find(a => a.path === m.path);
return `- ${m.relativePath} (${m.size} bytes, ~${m.estimatedTokens} tokens${analysis ? `, importance: ${analysis.importanceScore.toFixed(2)}` : ''})`;
})
.join('\n');
return `You are building context for ${taskDescriptions[taskType]} in a TypeScript project.
ITERATION: ${iteration}
TOKENS USED: ${tokensUsed}/${tokensUsed + remainingBudget} (${Math.round((tokensUsed / (tokensUsed + remainingBudget)) * 100)}%)
REMAINING BUDGET: ${remainingBudget} tokens
${alreadyLoadedFiles.length > 0 ? `FILES ALREADY LOADED:\n${alreadyLoadedFiles.map(f => `- ${f}`).join('\n')}\n\n` : ''}AVAILABLE FILES (not yet loaded):
${availableFiles}
Your task: Select up to ${fileLimit} files that will give you the MOST understanding for this ${taskType} task.
${iteration === 1 ? `This is the FIRST iteration. Focus on:
- Main entry points (index.ts, main exports)
- Core classes and interfaces
- Package configuration
` : `This is iteration ${iteration}. You've already seen some files. Now focus on:
- Files that complement what you've already loaded
- Dependencies of already-loaded files
- Missing pieces for complete understanding
`}
Consider:
1. File importance scores (if provided)
2. File paths (ts/index.ts is likely more important than ts/internal/utils.ts)
3. Token efficiency (prefer smaller files if they provide good information)
4. Remaining budget (${remainingBudget} tokens)
Respond in JSON format:
{
"reasoning": "Brief explanation of why you're selecting these files",
"files_to_load": ["path/to/file1.ts", "path/to/file2.ts"],
"estimated_tokens_needed": 15000
}`;
}
/**
* Evaluate if current context is sufficient
*/
private async evaluateContextSufficiency(
loadedContent: string,
taskType: TaskType,
iteration: number,
tokensUsed: number,
remainingBudget: number
): Promise<IContextSufficiencyDecision> {
const prompt = `You have been building context for a ${taskType} task across ${iteration} iterations.
CURRENT STATE:
- Tokens used: ${tokensUsed}
- Remaining budget: ${remainingBudget}
- Files loaded: ${loadedContent.split('====== START OF FILE').length - 1}
CONTEXT SO FAR:
${loadedContent.substring(0, 3000)}... (truncated for brevity)
Question: Do you have SUFFICIENT context to successfully complete the ${taskType} task?
Consider:
- For README: Do you understand the project's purpose, main features, API surface, and usage patterns?
- For commit: Do you understand what changed and why?
- For description: Do you understand the project's core value proposition?
Respond in JSON format:
{
"sufficient": true or false,
"reasoning": "Detailed explanation of your decision"
}`;
const response = await this.openaiInstance.chat({
systemMessage: `You are an AI assistant that evaluates whether gathered context is sufficient for a task.
You must respond ONLY with valid JSON that can be parsed with JSON.parse().
Do not wrap the JSON in markdown code blocks or add any other text.`,
userMessage: prompt,
messageHistory: [],
});
// Parse JSON response, handling potential markdown formatting
const content = response.message.replace('```json', '').replace('```', '').trim();
const parsed = JSON.parse(content);
return {
sufficient: parsed.sufficient || false,
reasoning: parsed.reasoning || 'No reasoning provided',
};
}
/**
* Load a single file with caching
*/
private async loadFile(filePath: string): Promise<IFileInfo> {
// Try cache first
const cached = await this.cache.get(filePath);
if (cached) {
return {
path: filePath,
relativePath: plugins.path.relative(this.projectRoot, filePath),
contents: cached.contents,
tokenCount: cached.tokenCount,
};
}
// Load from disk
const contents = await plugins.fsInstance.file(filePath).encoding('utf8').read() as string;
const tokenCount = this.countTokens(contents);
const relativePath = plugins.path.relative(this.projectRoot, filePath);
// Cache it
const stats = await fs.promises.stat(filePath);
await this.cache.set({
path: filePath,
contents,
tokenCount,
mtime: Math.floor(stats.mtimeMs),
cachedAt: Date.now(),
});
return {
path: filePath,
relativePath,
contents,
tokenCount,
};
}
/**
* Format a file for inclusion in context
*/
private formatFileForContext(file: IFileInfo): string {
return `
====== START OF FILE ${file.relativePath} ======
${file.contents}
====== END OF FILE ${file.relativePath} ======
`;
}
/**
* Count tokens in text
*/
private countTokens(text: string): number {
try {
const tokens = plugins.gptTokenizer.encode(text);
return tokens.length;
} catch (error) {
return Math.ceil(text.length / 4);
}
}
}
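
A usage sketch under the stated assumptions: either an OpenAiProvider is injected or OPENAI_TOKEN is resolvable via qenv. The function name and config values are illustrative.

import { IterativeContextBuilder } from './iterative-context-builder.js';

async function buildIteratively(projectRoot: string) {
  const builder = new IterativeContextBuilder(projectRoot, {
    maxIterations: 3,
    firstPassFileLimit: 8,
  });
  await builder.initialize(); // throws if no provider is injected and OPENAI_TOKEN is missing
  const result = await builder.buildContextIteratively('readme');
  console.log(`${result.iterationCount} iterations, ${result.apiCallCount} API calls, ${result.tokenCount} tokens`);
  return result;
}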

View File

@@ -1,207 +0,0 @@
import * as plugins from '../plugins.js';
import * as fs from 'fs';
import type { IFileMetadata, IFileInfo } from './types.js';
/**
* LazyFileLoader handles efficient file loading by:
* - Scanning files for metadata without loading contents
* - Providing fast file size and token estimates
* - Loading contents only when requested
* - Parallel loading of selected files
*/
export class LazyFileLoader {
private projectRoot: string;
private metadataCache: Map<string, IFileMetadata> = new Map();
/**
* Creates a new LazyFileLoader
* @param projectRoot - Root directory of the project
*/
constructor(projectRoot: string) {
this.projectRoot = projectRoot;
}
/**
* Scans files in given globs and creates metadata without loading contents
* @param globs - File patterns to scan (e.g., ['ts/**\/*.ts', 'test/**\/*.ts'])
* @returns Array of file metadata
*/
public async scanFiles(globs: string[]): Promise<IFileMetadata[]> {
const metadata: IFileMetadata[] = [];
for (const globPattern of globs) {
try {
const virtualDir = await plugins.smartfileFactory.virtualDirectoryFromPath(this.projectRoot);
// Filter files based on glob pattern using simple pattern matching
const smartFiles = virtualDir.filter(file => {
// Simple glob matching
const relativePath = file.relative;
if (globPattern.includes('**')) {
// Handle ** patterns - match any path
const pattern = globPattern.replace(/\*\*/g, '.*').replace(/\*/g, '[^/]*');
return new RegExp(`^${pattern}$`).test(relativePath);
} else if (globPattern.includes('*')) {
// Handle single * patterns
const pattern = globPattern.replace(/\*/g, '[^/]*');
return new RegExp(`^${pattern}$`).test(relativePath);
} else {
// Exact match
return relativePath === globPattern;
}
}).listFiles();
for (const smartFile of smartFiles) {
try {
const meta = await this.getMetadata(smartFile.absolutePath);
metadata.push(meta);
} catch (error) {
// Skip files that can't be read
console.warn(`Failed to get metadata for ${smartFile.absolutePath}:`, error.message);
}
}
} catch (error) {
// Skip patterns that don't match any files
console.warn(`No files found for pattern ${globPattern}`);
}
}
return metadata;
}
/**
* Gets metadata for a single file without loading contents
* @param filePath - Absolute path to the file
* @returns File metadata
*/
public async getMetadata(filePath: string): Promise<IFileMetadata> {
// Check cache first
if (this.metadataCache.has(filePath)) {
const cached = this.metadataCache.get(filePath)!;
const currentStats = await fs.promises.stat(filePath);
// Return cached if file hasn't changed
if (cached.mtime === Math.floor(currentStats.mtimeMs)) {
return cached;
}
}
// Get file stats
const stats = await fs.promises.stat(filePath);
const relativePath = plugins.path.relative(this.projectRoot, filePath);
// Estimate tokens: rough estimate of ~4 characters per token
// This is faster than reading and tokenizing the entire file
const estimatedTokens = Math.ceil(stats.size / 4);
const metadata: IFileMetadata = {
path: filePath,
relativePath,
size: stats.size,
mtime: Math.floor(stats.mtimeMs),
estimatedTokens,
};
// Cache the metadata
this.metadataCache.set(filePath, metadata);
return metadata;
}
/**
* Loads file contents for selected files in parallel
* @param metadata - Array of file metadata to load
* @param tokenizer - Function to calculate accurate token count
* @returns Array of complete file info with contents
*/
public async loadFiles(
metadata: IFileMetadata[],
tokenizer: (content: string) => number
): Promise<IFileInfo[]> {
// Load files in parallel
const loadPromises = metadata.map(async (meta) => {
try {
const contents = await plugins.fsInstance.file(meta.path).encoding('utf8').read() as string;
const tokenCount = tokenizer(contents);
const fileInfo: IFileInfo = {
path: meta.path,
relativePath: meta.relativePath,
contents,
tokenCount,
importanceScore: meta.importanceScore,
};
return fileInfo;
} catch (error) {
console.warn(`Failed to load file ${meta.path}:`, error.message);
return null;
}
});
// Wait for all loads to complete and filter out failures
const results = await Promise.all(loadPromises);
return results.filter((r): r is IFileInfo => r !== null);
}
/**
* Loads a single file with contents
* @param filePath - Absolute path to the file
* @param tokenizer - Function to calculate accurate token count
* @returns Complete file info with contents
*/
public async loadFile(
filePath: string,
tokenizer: (content: string) => number
): Promise<IFileInfo> {
const meta = await this.getMetadata(filePath);
const contents = await plugins.fsInstance.file(filePath).encoding('utf8').read() as string;
const tokenCount = tokenizer(contents);
const relativePath = plugins.path.relative(this.projectRoot, filePath);
return {
path: filePath,
relativePath,
contents,
tokenCount,
importanceScore: meta.importanceScore,
};
}
/**
* Updates importance scores for metadata entries
* @param scores - Map of file paths to importance scores
*/
public updateImportanceScores(scores: Map<string, number>): void {
for (const [path, score] of scores) {
const meta = this.metadataCache.get(path);
if (meta) {
meta.importanceScore = score;
}
}
}
/**
* Clears the metadata cache
*/
public clearCache(): void {
this.metadataCache.clear();
}
/**
* Gets total estimated tokens for all cached metadata
*/
public getTotalEstimatedTokens(): number {
let total = 0;
for (const meta of this.metadataCache.values()) {
total += meta.estimatedTokens;
}
return total;
}
/**
* Gets cached metadata entries
*/
public getCachedMetadata(): IFileMetadata[] {
return Array.from(this.metadataCache.values());
}
}
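
A sketch of the two-phase flow the loader enables (cheap metadata scan, then selective loading); the globs, slice size, and stand-in tokenizer are illustrative.

import { LazyFileLoader } from './lazy-file-loader.js';

async function loadTopFiles(projectRoot: string) {
  const loader = new LazyFileLoader(projectRoot);
  // Phase 1: metadata only, no contents read
  const metadata = await loader.scanFiles(['ts/**/*.ts', 'package.json']);
  // Phase 2: load a subset, supplying an accurate (here: stand-in) tokenizer
  return loader.loadFiles(
    metadata.slice(0, 10),
    (content) => Math.ceil(content.length / 4),
  );
}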

View File

@@ -1,120 +0,0 @@
import * as plugins from '../plugins.js';
import { IterativeContextBuilder } from './iterative-context-builder.js';
import { ConfigManager } from './config-manager.js';
import type { IIterativeContextResult, TaskType } from './types.js';
/**
* Factory class for creating task-specific context using iterative context building
*/
export class TaskContextFactory {
private projectDir: string;
private configManager: ConfigManager;
private openaiInstance?: plugins.smartai.OpenAiProvider; // optional pre-configured OpenAI provider
/**
* Create a new TaskContextFactory
* @param projectDirArg The project directory
* @param openaiInstance Optional pre-configured OpenAI provider instance
*/
constructor(projectDirArg: string, openaiInstance?: plugins.smartai.OpenAiProvider) {
this.projectDir = projectDirArg;
this.configManager = ConfigManager.getInstance();
this.openaiInstance = openaiInstance;
}
/**
* Initialize the factory
*/
public async initialize(): Promise<void> {
await this.configManager.initialize(this.projectDir);
}
/**
* Create context for README generation
*/
public async createContextForReadme(): Promise<IIterativeContextResult> {
const iterativeBuilder = new IterativeContextBuilder(
this.projectDir,
this.configManager.getIterativeConfig(),
this.openaiInstance
);
await iterativeBuilder.initialize();
return await iterativeBuilder.buildContextIteratively('readme');
}
/**
* Create context for description generation
*/
public async createContextForDescription(): Promise<IIterativeContextResult> {
const iterativeBuilder = new IterativeContextBuilder(
this.projectDir,
this.configManager.getIterativeConfig(),
this.openaiInstance
);
await iterativeBuilder.initialize();
return await iterativeBuilder.buildContextIteratively('description');
}
/**
* Create context for commit message generation
* @param gitDiff Optional git diff to include in the context
*/
public async createContextForCommit(gitDiff?: string): Promise<IIterativeContextResult> {
const iterativeBuilder = new IterativeContextBuilder(
this.projectDir,
this.configManager.getIterativeConfig(),
this.openaiInstance
);
await iterativeBuilder.initialize();
return await iterativeBuilder.buildContextIteratively('commit', gitDiff);
}
/**
* Create context for any task type
* @param taskType The task type to create context for
* @param additionalContent Optional additional content (forwarded as the git diff for commit tasks)
*/
public async createContextForTask(
taskType: TaskType,
additionalContent?: string
): Promise<IIterativeContextResult> {
switch (taskType) {
case 'readme':
return this.createContextForReadme();
case 'description':
return this.createContextForDescription();
case 'commit':
return this.createContextForCommit(additionalContent);
default:
// Default to readme for unknown task types
return this.createContextForReadme();
}
}
/**
* Get token stats for all task types
*/
public async getTokenStats(): Promise<Record<TaskType, {
tokenCount: number;
savings: number;
includedFiles: number;
trimmedFiles: number;
excludedFiles: number;
}>> {
const taskTypes: TaskType[] = ['readme', 'description', 'commit'];
const stats: Record<TaskType, any> = {} as any;
for (const taskType of taskTypes) {
const result = await this.createContextForTask(taskType);
stats[taskType] = {
tokenCount: result.tokenCount,
savings: result.tokenSavings,
includedFiles: result.includedFiles.length,
trimmedFiles: result.trimmedFiles.length,
excludedFiles: result.excludedFiles.length
};
}
return stats;
}
}
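
A minimal sketch of the factory in a commit flow, assuming the diff string has already been reduced by DiffProcessor as noted above; the function name and paths are illustrative.

import { TaskContextFactory } from './task-context-factory.js';

async function contextForCommit(projectDir: string, processedDiff: string) {
  const factory = new TaskContextFactory(projectDir);
  await factory.initialize();
  const result = await factory.createContextForCommit(processedDiff);
  console.log(`commit context: ${result.tokenCount} tokens over ${result.iterationCount} iterations`);
  return result;
}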

View File

@@ -1,324 +0,0 @@
/**
* Context processing mode to control how context is built
*/
export type ContextMode = 'full' | 'trimmed' | 'summarized';
/**
* Configuration for context trimming
*/
export interface ITrimConfig {
/** Whether to remove function implementations */
removeImplementations?: boolean;
/** Whether to preserve interface definitions */
preserveInterfaces?: boolean;
/** Whether to preserve type definitions */
preserveTypeDefs?: boolean;
/** Whether to preserve JSDoc comments */
preserveJSDoc?: boolean;
/** Maximum lines to keep for function bodies (if not removing completely) */
maxFunctionLines?: number;
/** Whether to remove normal comments (non-JSDoc) */
removeComments?: boolean;
/** Whether to remove blank lines */
removeBlankLines?: boolean;
}
/**
* Task types that require different context optimization
*/
export type TaskType = 'readme' | 'commit' | 'description';
/**
* Configuration for different tasks
*/
export interface ITaskConfig {
/** The context mode to use for this task */
mode?: ContextMode;
/** File paths to include for this task */
includePaths?: string[];
/** File paths to exclude for this task */
excludePaths?: string[];
/** For commit tasks, whether to focus on changed files */
focusOnChangedFiles?: boolean;
/** For description tasks, whether to include package info */
includePackageInfo?: boolean;
}
/**
* Complete context configuration
*/
export interface IContextConfig {
/** Maximum tokens to use for context */
maxTokens?: number;
/** Default context mode */
defaultMode?: ContextMode;
/** Task-specific settings */
taskSpecificSettings?: {
[key in TaskType]?: ITaskConfig;
};
/** Trimming configuration */
trimming?: ITrimConfig;
/** Cache configuration */
cache?: ICacheConfig;
/** Analyzer configuration */
analyzer?: IAnalyzerConfig;
/** Prioritization weights */
prioritization?: IPrioritizationWeights;
/** Tier configuration for adaptive trimming */
tiers?: ITierConfig;
/** Iterative context building configuration */
iterative?: IIterativeConfig;
}
/**
* Cache configuration
*/
export interface ICacheConfig {
/** Whether caching is enabled */
enabled?: boolean;
/** Time-to-live in seconds */
ttl?: number;
/** Maximum cache size in MB */
maxSize?: number;
/** Cache directory path */
directory?: string;
}
/**
* Analyzer configuration
* Note: Smart analysis is always enabled; this config only controls advanced options
*/
export interface IAnalyzerConfig {
/** Whether to use AI refinement for selection (advanced, disabled by default) */
useAIRefinement?: boolean;
/** AI model to use for refinement */
aiModel?: string;
}
/**
* Weights for file prioritization
*/
export interface IPrioritizationWeights {
/** Weight for dependency centrality */
dependencyWeight?: number;
/** Weight for task relevance */
relevanceWeight?: number;
/** Weight for token efficiency */
efficiencyWeight?: number;
/** Weight for file recency */
recencyWeight?: number;
}
/**
* Tier configuration for adaptive trimming
*/
export interface ITierConfig {
essential?: ITierSettings;
important?: ITierSettings;
optional?: ITierSettings;
}
/**
* Settings for a single tier
*/
export interface ITierSettings {
/** Minimum score to qualify for this tier */
minScore: number;
/** Trimming level to apply */
trimLevel: 'none' | 'light' | 'aggressive';
}
/**
* Basic file information interface
*/
export interface IFileInfo {
/** The file path */
path: string;
/** The file contents */
contents: string;
/** The file's relative path from the project root */
relativePath: string;
/** The estimated token count of the file */
tokenCount?: number;
/** The file's importance score (higher is more important) */
importanceScore?: number;
}
/**
* Result of context building
*/
export interface IContextResult {
/** The generated context string */
context: string;
/** The total token count of the context */
tokenCount: number;
/** Files included in the context */
includedFiles: IFileInfo[];
/** Files that were trimmed */
trimmedFiles: IFileInfo[];
/** Files that were excluded */
excludedFiles: IFileInfo[];
/** Token savings from trimming */
tokenSavings: number;
}
/**
* File metadata without contents (for lazy loading)
*/
export interface IFileMetadata {
/** The file path */
path: string;
/** The file's relative path from the project root */
relativePath: string;
/** File size in bytes */
size: number;
/** Last modified time (Unix timestamp) */
mtime: number;
/** Estimated token count (without loading full contents) */
estimatedTokens: number;
/** The file's importance score */
importanceScore?: number;
}
/**
* Cache entry for a file
*/
export interface ICacheEntry {
/** File path */
path: string;
/** File contents */
contents: string;
/** Token count */
tokenCount: number;
/** Last modified time when cached */
mtime: number;
/** When this cache entry was created */
cachedAt: number;
}
/**
* Dependency information for a file
*/
export interface IFileDependencies {
/** File path */
path: string;
/** Files this file imports */
imports: string[];
/** Files that import this file */
importedBy: string[];
/** Centrality score (0-1) - how central this file is in the dependency graph */
centrality: number;
}
/**
* Analysis result for a file
*/
export interface IFileAnalysis {
/** File path */
path: string;
/** Task relevance score (0-1) */
relevanceScore: number;
/** Dependency centrality score (0-1) */
centralityScore: number;
/** Token efficiency score (0-1) */
efficiencyScore: number;
/** Recency score (0-1) */
recencyScore: number;
/** Combined importance score (0-1) */
importanceScore: number;
/** Assigned tier */
tier: 'essential' | 'important' | 'optional' | 'excluded';
/** Reason for the score */
reason?: string;
}
/**
* Result of context analysis
*/
export interface IAnalysisResult {
/** Task type being analyzed */
taskType: TaskType;
/** Analyzed files with scores */
files: IFileAnalysis[];
/** Dependency graph */
dependencyGraph: Map<string, IFileDependencies>;
/** Total files analyzed */
totalFiles: number;
/** Analysis duration in ms */
analysisDuration: number;
}
/**
* Configuration for iterative context building
*/
export interface IIterativeConfig {
/** Maximum number of iterations allowed */
maxIterations?: number;
/** Maximum files to request in first iteration */
firstPassFileLimit?: number;
/** Maximum files to request in subsequent iterations */
subsequentPassFileLimit?: number;
/** Temperature for AI decision making (0-1) */
temperature?: number;
/** Model to use for iterative decisions */
model?: string;
}
/**
* AI decision for file selection
*/
export interface IFileSelectionDecision {
/** AI's reasoning for file selection */
reasoning: string;
/** File paths to load */
filesToLoad: string[];
/** Estimated tokens needed */
estimatedTokensNeeded?: number;
}
/**
* AI decision for context sufficiency
*/
export interface IContextSufficiencyDecision {
/** Whether context is sufficient */
sufficient: boolean;
/** AI's reasoning */
reasoning: string;
/** Additional files needed (if not sufficient) */
additionalFilesNeeded?: string[];
}
/**
* State for a single iteration
*/
export interface IIterationState {
/** Iteration number (1-based) */
iteration: number;
/** Files loaded in this iteration */
filesLoaded: IFileInfo[];
/** Tokens used in this iteration */
tokensUsed: number;
/** Total tokens used so far */
totalTokensUsed: number;
/** AI decision made in this iteration */
decision: IFileSelectionDecision | IContextSufficiencyDecision;
/** Duration of this iteration in ms */
duration: number;
}
/**
* Result of iterative context building
*/
export interface IIterativeContextResult extends IContextResult {
/** Number of iterations performed */
iterationCount: number;
/** Details of each iteration */
iterations: IIterationState[];
/** Total API calls made */
apiCallCount: number;
/** Total duration in ms */
totalDuration: number;
}
// Export DiffProcessor types
export type { IDiffFileInfo, IProcessedDiff, IDiffProcessorOptions } from './diff-processor.js';
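
For orientation, a hypothetical IContextConfig literal exercising the interfaces above; every value is an example, not a recommended default.

import type { IContextConfig } from './types.js';

const exampleConfig: IContextConfig = {
  maxTokens: 190000,
  defaultMode: 'trimmed',
  taskSpecificSettings: {
    commit: { mode: 'trimmed', focusOnChangedFiles: true },
  },
  trimming: { removeImplementations: true, maxFunctionLines: 5 },
  cache: { enabled: true, ttl: 3600, maxSize: 50 },
  tiers: {
    essential: { minScore: 0.8, trimLevel: 'none' },
    important: { minScore: 0.5, trimLevel: 'light' },
    optional: { minScore: 0.2, trimLevel: 'aggressive' },
  },
  iterative: { maxIterations: 5, firstPassFileLimit: 10 },
};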

View File

@@ -6,6 +6,7 @@ export { path };
 // pushrocks scope
 import * as npmextra from '@push.rocks/npmextra';
 import * as qenv from '@push.rocks/qenv';
+import * as smartagent from '@push.rocks/smartagent';
 import * as smartai from '@push.rocks/smartai';
 import * as smartcli from '@push.rocks/smartcli';
 import * as smartdelay from '@push.rocks/smartdelay';
@@ -22,6 +23,7 @@ import * as smarttime from '@push.rocks/smarttime';
 export {
   npmextra,
   qenv,
+  smartagent,
   smartai,
   smartcli,
   smartdelay,
@@ -50,6 +52,5 @@ export { tspublish };
 // third party scope
 import * as typedoc from 'typedoc';
-import * as gptTokenizer from 'gpt-tokenizer';
-export { typedoc, gptTokenizer };
+export { typedoc };