Compare commits
55 Commits
| SHA1 |
| --- |
| 4bf7113334 |
| 6bdbeae144 |
| 09c27379cb |
| 2bc6f7ee5e |
| 0ac50d647d |
| 5f9ffc7356 |
| 502b665224 |
| bda0d7ed7e |
| de2a60d12f |
| 5b3a93a43a |
| 6b241f8889 |
| 0a80ac0a8a |
| 6ce442354e |
| 9b38a3c06e |
| 5dead05324 |
| 6916dd9e2a |
| f89888a542 |
| d93b198b09 |
| 9e390d0fdb |
| 8329ee861e |
| b8585a0afb |
| c96f5118cf |
| 17e1a1f1e1 |
| de940dff75 |
| 4fc1e029e4 |
| d0a4151a2b |
| ad5dd4799b |
| 1c49af74ac |
| eda8ce36df |
| e82c510094 |
| 0378308721 |
| 189a32683f |
| f731b9f78d |
| 3701e21284 |
| 490d4996d2 |
| f099a8f1ed |
| a0228a0abc |
| a5257b52e7 |
| a4144fc071 |
| af46b3e81e |
| d50427937c |
| ffde2e0bf1 |
| 82abc06da4 |
| 3a5f2d52e5 |
| f628a71184 |
| d1465fc868 |
| 9e19d320e1 |
| 158d49fa95 |
| 1ce412fd00 |
| 92c382c16e |
| 63d3b7c9bb |
| 2e4c6aa80a |
| 04d505d29e |
| a636556fdb |
| a1558e6306 |
changelog.md (new file, 170 lines)

@@ -0,0 +1,170 @@

# Changelog

## 2025-07-25 - 0.5.5 - feat(documentation)

Comprehensive documentation enhancement and test improvements

- Completely rewrote readme.md with detailed provider comparisons, advanced usage examples, and performance tips
- Added comprehensive examples for all supported providers (OpenAI, Anthropic, Perplexity, Groq, XAI, Ollama, Exo)
- Included detailed sections on chat interactions, streaming, TTS, vision processing, and document analysis
- Added verbose flag to test script for better debugging

## 2025-05-13 - 0.5.4 - fix(provider.openai)

Update dependency versions, clean test imports, and adjust default OpenAI model configurations

- Bump dependency versions in package.json (@git.zone/tsbuild, @push.rocks/tapbundle, openai, etc.)
- Change default chatModel from 'gpt-4o' to 'o4-mini' and visionModel from 'gpt-4o' to 'o4-mini' in provider.openai.ts
- Remove unused 'expectAsync' import from test file

## 2025-04-03 - 0.5.3 - fix(package.json)

Add explicit packageManager field to package.json

- Include the packageManager property to specify the pnpm version and checksum.
- Align package metadata with current standards.

## 2025-04-03 - 0.5.2 - fix(readme)

Remove redundant conclusion section from README to streamline documentation.

- Eliminated the conclusion block describing SmartAi's capabilities and documentation pointers.

## 2025-02-25 - 0.5.1 - fix(OpenAiProvider)

Corrected audio model ID in OpenAiProvider

- Fixed audio model identifier from 'o3-mini' to 'tts-1-hd' in the OpenAiProvider's audio method.
- Addressed minor code formatting issues in test suite for better readability.
- Corrected spelling errors in test documentation and comments.

## 2025-02-25 - 0.5.0 - feat(documentation and configuration)

Enhanced package and README documentation

- Expanded the package description to better reflect the library's capabilities.
- Improved README with detailed usage examples for initialization, chat interactions, streaming chat, audio generation, document analysis, and vision processing.
- Provided error handling strategies and advanced streaming customization examples.

## 2025-02-25 - 0.4.2 - fix(core)

Fix OpenAI chat streaming and PDF document processing logic.

- Updated OpenAI chat streaming to handle the new async iterable format.
- Improved PDF document processing by filtering out empty image buffers.
- Removed unsupported temperature options from OpenAI requests.

## 2025-02-25 - 0.4.1 - fix(provider)

Fix provider modules for consistency

- Updated TypeScript interfaces and options in provider modules for better type safety.
- Modified transform stream handlers in Exo, Groq, and Ollama providers for consistency.
- Added optional model options to OpenAI provider for custom model usage.

## 2025-02-08 - 0.4.0 - feat(core)

Added support for Exo AI provider

- Introduced ExoProvider with chat functionalities.
- Updated SmartAi class to initialize ExoProvider.
- Extended Conversation class to support ExoProvider.

## 2025-02-05 - 0.3.3 - fix(documentation)

Update readme with detailed license and legal information.

- Added explicit section on License and Legal Information in the README.
- Clarified the use of trademarks and company information.

## 2025-02-05 - 0.3.2 - fix(documentation)

Remove redundant badges from readme

- Removed Build Status badge from the readme file.
- Removed License badge from the readme file.

## 2025-02-05 - 0.3.1 - fix(documentation)

Updated README structure and added detailed usage examples

- Introduced a Table of Contents
- Included comprehensive sections for chat, streaming chat, audio generation, document processing, and vision processing
- Added example code and detailed configuration steps for supported AI providers
- Clarified the development setup with instructions for running tests and building the project

## 2025-02-05 - 0.3.0 - feat(integration-xai)

Add support for X.AI provider with chat and document processing capabilities.

- Introduced XAIProvider class for integrating X.AI features.
- Implemented chat streaming and synchronous chat for X.AI.
- Enabled document processing capabilities with PDF conversion in X.AI.

## 2025-02-03 - 0.2.0 - feat(provider.anthropic)

Add support for vision and document processing in Anthropic provider

- Implemented vision tasks for Anthropic provider using the Claude-3-opus-20240229 model.
- Implemented document processing for Anthropic provider, supporting conversion of PDF documents to images and analysis with the Claude-3-opus-20240229 model.
- Updated documentation to reflect the new capabilities of the Anthropic provider.

## 2025-02-03 - 0.1.0 - feat(providers)

Add vision and document processing capabilities to providers

- OpenAI and Ollama providers now support vision tasks using GPT-4 Vision and Llava models respectively.
- Document processing has been implemented for OpenAI and Ollama providers, converting PDFs to images for analysis.
- Introduced abstract methods for vision and document processing in the MultiModalModel class.
- Updated the readme file with examples for vision and document processing.

## 2025-02-03 - 0.0.19 - fix(core)

Enhanced chat streaming and error handling across providers

- Refactored the chatStream method to properly handle input streams and processing in the Perplexity, OpenAI, Ollama, and Anthropic providers.
- Improved error handling and message parsing in chatStream implementations.
- Defined distinct interfaces for chat options, messages, and responses.
- Adjusted the test logic in test/test.ts for the new classification response requirement.

## 2024-09-19 - 0.0.18 - fix(dependencies)

Update dependencies to the latest versions.

- Updated @git.zone/tsbuild from ^2.1.76 to ^2.1.84
- Updated @git.zone/tsrun from ^1.2.46 to ^1.2.49
- Updated @push.rocks/tapbundle from ^5.0.23 to ^5.3.0
- Updated @types/node from ^20.12.12 to ^22.5.5
- Updated @anthropic-ai/sdk from ^0.21.0 to ^0.27.3
- Updated @push.rocks/smartfile from ^11.0.14 to ^11.0.21
- Updated @push.rocks/smartpromise from ^4.0.3 to ^4.0.4
- Updated @push.rocks/webstream from ^1.0.8 to ^1.0.10
- Updated openai from ^4.47.1 to ^4.62.1

## 2024-05-29 - 0.0.17 - Documentation

Updated project description.

- Improved the project description for clarity and detail.

## 2024-05-17 - 0.0.16 to 0.0.15 - Core

Fixes and updates.

- Various core updates and fixes for stability improvements.

## 2024-04-29 - 0.0.14 to 0.0.13 - Core

Fixes and updates.

- Multiple core updates and fixes for enhanced functionality.

## 2024-04-29 - 0.0.12 - Core

Fixes and updates.

- Core update and bug fixes.

## 2024-04-29 - 0.0.11 - Provider

Fix integration for the Anthropic provider.

- Corrected the integration with the Anthropic provider for better compatibility.

## 2024-04-27 - 0.0.10 to 0.0.9 - Core

Fixes and updates.

- Updates and fixes to core components.
- Updated tsconfig for improved TypeScript configuration.

## 2024-04-01 - 0.0.8 to 0.0.7 - Core and npmextra

Core updates and npmextra configuration.

- Core fixes and updates.
- Updates to npmextra.json for githost configuration.

## 2024-03-31 - 0.0.6 to 0.0.2 - Core

Initial core updates and fixes.

- Multiple updates and fixes to core following the initial versions.

This summarizes the relevant updates and changes based on the provided commit messages. The changelog excludes commits that are version tags without meaningful content or repeated entries.
license (new file, 19 lines)

@@ -0,0 +1,19 @@

Copyright (c) 2024 Task Venture Capital GmbH (hello@task.vc)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
npmextra.json

@@ -5,14 +5,41 @@
    "githost": "code.foss.global",
    "gitscope": "push.rocks",
    "gitrepo": "smartai",
-   "description": "a standardaized interface to talk to AI models",
+   "description": "SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.",
    "npmPackagename": "@push.rocks/smartai",
    "license": "MIT",
-   "projectDomain": "push.rocks"
+   "projectDomain": "push.rocks",
+   "keywords": [
+     "AI integration",
+     "TypeScript",
+     "chatbot",
+     "OpenAI",
+     "Anthropic",
+     "multi-model",
+     "audio generation",
+     "text-to-speech",
+     "document processing",
+     "vision processing",
+     "streaming chat",
+     "API",
+     "multiple providers",
+     "AI models",
+     "synchronous chat",
+     "asynchronous chat",
+     "real-time interaction",
+     "content analysis",
+     "image description",
+     "document classification",
+     "AI toolkit",
+     "provider switching"
+   ]
   }
  },
  "npmci": {
    "npmGlobalTools": [],
    "npmAccessLevel": "public"
+ },
+ "tsdoc": {
+   "legal": "\n## License and Legal Information\n\nThis repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository. \n\n**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.\n\n### Trademarks\n\nThis project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH and are not included within the scope of the MIT license granted herein. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines, and any usage must be approved in writing by Task Venture Capital GmbH.\n\n### Company Information\n\nTask Venture Capital GmbH \nRegistered at District court Bremen HRB 35230 HB, Germany\n\nFor any legal inquiries or if you require further information, please contact us via email at hello@task.vc.\n\nBy using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.\n"
  }
 }
package.json (70 lines)

@@ -1,41 +1,46 @@
 {
   "name": "@push.rocks/smartai",
-  "version": "0.0.8",
+  "version": "0.5.5",
   "private": false,
-  "description": "a standardaized interface to talk to AI models",
+  "description": "SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.",
   "main": "dist_ts/index.js",
   "typings": "dist_ts/index.d.ts",
   "type": "module",
   "author": "Task Venture Capital GmbH",
   "license": "MIT",
   "scripts": {
-    "test": "(tstest test/ --web)",
+    "test": "(tstest test/ --web --verbose)",
     "build": "(tsbuild --web --allowimplicitany)",
     "buildDocs": "(tsdoc)"
   },
   "devDependencies": {
-    "@git.zone/tsbuild": "^2.1.25",
-    "@git.zone/tsbundle": "^2.0.5",
-    "@git.zone/tsrun": "^1.2.46",
-    "@git.zone/tstest": "^1.0.44",
-    "@push.rocks/tapbundle": "^5.0.15",
-    "@types/node": "^20.8.7"
+    "@git.zone/tsbuild": "^2.6.4",
+    "@git.zone/tsbundle": "^2.5.1",
+    "@git.zone/tsrun": "^1.3.3",
+    "@git.zone/tstest": "^2.3.2",
+    "@push.rocks/qenv": "^6.1.0",
+    "@push.rocks/tapbundle": "^6.0.3",
+    "@types/node": "^22.15.17"
   },
   "dependencies": {
-    "@push.rocks/qenv": "^6.0.5",
-    "@push.rocks/smartfile": "^11.0.4",
-    "@push.rocks/smartpath": "^5.0.11",
-    "@push.rocks/smartpromise": "^4.0.3",
-    "openai": "^4.31.0"
+    "@anthropic-ai/sdk": "^0.57.0",
+    "@push.rocks/smartarray": "^1.1.0",
+    "@push.rocks/smartfile": "^11.2.5",
+    "@push.rocks/smartpath": "^5.0.18",
+    "@push.rocks/smartpdf": "^3.2.2",
+    "@push.rocks/smartpromise": "^4.2.3",
+    "@push.rocks/smartrequest": "^2.1.0",
+    "@push.rocks/webstream": "^1.0.10",
+    "openai": "^5.10.2"
   },
   "repository": {
     "type": "git",
-    "url": "git+https://code.foss.global/push.rocks/smartai.git"
+    "url": "https://code.foss.global/push.rocks/smartai.git"
   },
   "bugs": {
     "url": "https://code.foss.global/push.rocks/smartai/issues"
   },
-  "homepage": "https://code.foss.global/push.rocks/smartai#readme",
+  "homepage": "https://code.foss.global/push.rocks/smartai",
   "browserslist": [
     "last 1 chrome versions"
   ],

@@ -50,5 +55,36 @@
     "cli.js",
     "npmextra.json",
     "readme.md"
-  ]
+  ],
+  "keywords": [
+    "AI integration",
+    "TypeScript",
+    "chatbot",
+    "OpenAI",
+    "Anthropic",
+    "multi-model",
+    "audio generation",
+    "text-to-speech",
+    "document processing",
+    "vision processing",
+    "streaming chat",
+    "API",
+    "multiple providers",
+    "AI models",
+    "synchronous chat",
+    "asynchronous chat",
+    "real-time interaction",
+    "content analysis",
+    "image description",
+    "document classification",
+    "AI toolkit",
+    "provider switching"
+  ],
+  "pnpm": {
+    "onlyBuiltDependencies": [
+      "esbuild",
+      "puppeteer"
+    ]
+  },
+  "packageManager": "pnpm@10.7.0+sha512.6b865ad4b62a1d9842b61d674a393903b871d9244954f652b8842c2b553c72176b278f64c463e52d40fff8aba385c235c8c9ecf5cc7de4fd78b8bb6d49633ab6"
 }
pnpm-lock.yaml (generated, 12942 lines)

File diff suppressed because it is too large.
qenv.yml (4 lines)

@@ -1,2 +1,4 @@
 required:
   - OPENAI_TOKEN
+  - ANTHROPIC_TOKEN
+  - PERPLEXITY_TOKEN
readme.hints.md (new file, 1 empty line)

@@ -0,0 +1 @@
readme.md (425 lines)

@@ -1,31 +1,408 @@
 # @push.rocks/smartai
-a standardaized interface to talk to AI models
-
-## Availabililty and Links
-* [npmjs.org (npm package)](https://www.npmjs.com/package/@push.rocks/smartai)
-* [gitlab.com (source)](https://code.foss.global/push.rocks/smartai)
-* [github.com (source mirror)](https://github.com/push.rocks/smartai)
-* [docs (typedoc)](https://push.rocks.gitlab.io/smartai/)
-
-## Status for master
-Status Category | Status Badge
--- | --
-GitLab Pipelines | [](https://lossless.cloud)
-GitLab Pipline Test Coverage | [](https://lossless.cloud)
-npm | [](https://lossless.cloud)
-Snyk | [](https://lossless.cloud)
-TypeScript Support | [](https://lossless.cloud)
-node Support | [](https://nodejs.org/dist/latest-v10.x/docs/api/)
-Code Style | [](https://lossless.cloud)
-PackagePhobia (total standalone install weight) | [](https://lossless.cloud)
-PackagePhobia (package size on registry) | [](https://lossless.cloud)
-BundlePhobia (total size when bundled) | [](https://lossless.cloud)
+
+SmartAi is a powerful TypeScript library that provides a unified interface for integrating with multiple AI providers including OpenAI, Anthropic, Perplexity, Ollama, Groq, XAI, and Exo. It offers comprehensive support for chat interactions, streaming conversations, text-to-speech, document analysis, and vision processing.
+
+## Install
+
+To install SmartAi into your project, use pnpm:
+
+```bash
+pnpm install @push.rocks/smartai
+```
 
 ## Usage
-Use TypeScript for best in class intellisense
-For further information read the linked docs at the top of this readme.
-
-## Legal
-> MIT licensed | **©** [Task Venture Capital GmbH](https://task.vc)
-| By using this npm module you agree to our [privacy policy](https://lossless.gmbH/privacy)
+
+SmartAi provides a clean, consistent API across all supported AI providers. This documentation covers all features with practical examples for each provider and capability.
+
+### Initialization
+
+First, initialize SmartAi with the API tokens and configuration for the providers you want to use:
+
+```typescript
+import { SmartAi } from '@push.rocks/smartai';
+
+const smartAi = new SmartAi({
+  // OpenAI - for GPT models, DALL-E, and TTS
+  openaiToken: 'your-openai-api-key',
+
+  // Anthropic - for Claude models
+  anthropicToken: 'your-anthropic-api-key',
+
+  // Perplexity - for research-focused AI
+  perplexityToken: 'your-perplexity-api-key',
+
+  // Groq - for fast inference
+  groqToken: 'your-groq-api-key',
+
+  // XAI - for Grok models
+  xaiToken: 'your-xai-api-key',
+
+  // Ollama - for local models
+  ollama: {
+    baseUrl: 'http://localhost:11434',
+    model: 'llama2', // default model for chat
+    visionModel: 'llava' // default model for vision
+  },
+
+  // Exo - for distributed inference
+  exo: {
+    baseUrl: 'http://localhost:8080/v1',
+    apiKey: 'your-exo-api-key'
+  }
+});
+
+// Start the SmartAi instance
+await smartAi.start();
+```
+
+## Supported Providers
+
+SmartAi supports the following AI providers:
+
+| Provider | Use Case | Key Features |
+|----------|----------|--------------|
+| **OpenAI** | General purpose, GPT models | Chat, streaming, TTS, vision, documents |
+| **Anthropic** | Claude models, safety-focused | Chat, streaming, vision, documents |
+| **Perplexity** | Research and factual queries | Chat, streaming, documents |
+| **Groq** | Fast inference | Chat, streaming |
+| **XAI** | Grok models | Chat, streaming |
+| **Ollama** | Local models | Chat, streaming, vision |
+| **Exo** | Distributed inference | Chat, streaming |
+
+## Core Features
+
+### 1. Chat Interactions
+
+SmartAi provides both synchronous and streaming chat capabilities across all supported providers.
+
+#### Synchronous Chat
+
+Simple request-response interactions with any provider:
+
+```typescript
+// OpenAI Example
+const openAiResponse = await smartAi.openaiProvider.chat({
+  systemMessage: 'You are a helpful assistant.',
+  userMessage: 'What is the capital of France?',
+  messageHistory: []
+});
+console.log(openAiResponse.message); // "The capital of France is Paris."
+
+// Anthropic Example
+const anthropicResponse = await smartAi.anthropicProvider.chat({
+  systemMessage: 'You are a knowledgeable historian.',
+  userMessage: 'Tell me about the French Revolution',
+  messageHistory: []
+});
+console.log(anthropicResponse.message);
+
+// Using message history for context
+const contextualResponse = await smartAi.openaiProvider.chat({
+  systemMessage: 'You are a math tutor.',
+  userMessage: 'What about multiplication?',
+  messageHistory: [
+    { role: 'user', content: 'Can you teach me math?' },
+    { role: 'assistant', content: 'Of course! What would you like to learn?' }
+  ]
+});
+```
+
+#### Streaming Chat
+
+For real-time, token-by-token responses:
+
+```typescript
+// Create a readable stream for input
+const { readable, writable } = new TransformStream();
+const writer = writable.getWriter();
+
+// Send a message
+const encoder = new TextEncoder();
+await writer.write(encoder.encode(JSON.stringify({
+  role: 'user',
+  content: 'Write a haiku about programming'
+})));
+await writer.close();
+
+// Get streaming response
+const responseStream = await smartAi.openaiProvider.chatStream(readable);
+const reader = responseStream.getReader();
+const decoder = new TextDecoder();
+
+// Read the stream
+while (true) {
+  const { done, value } = await reader.read();
+  if (done) break;
+  process.stdout.write(value); // Print each chunk as it arrives
+}
+```
+
+### 2. Text-to-Speech (Audio Generation)
+
+Convert text to natural-sounding speech (currently supported by OpenAI):
+
+```typescript
+import * as fs from 'fs';
+
+// Generate speech from text
+const audioStream = await smartAi.openaiProvider.audio({
+  message: 'Hello world! This is a test of the text-to-speech system.'
+});
+
+// Save to file
+const writeStream = fs.createWriteStream('output.mp3');
+audioStream.pipe(writeStream);
+
+// Or use in your application directly
+audioStream.on('data', (chunk) => {
+  // Process audio chunks
+});
+```
+
+### 3. Vision Processing
+
+Analyze images and get detailed descriptions:
+
+```typescript
+import * as fs from 'fs';
+
+// Read an image file
+const imageBuffer = fs.readFileSync('image.jpg');
+
+// OpenAI Vision
+const openAiVision = await smartAi.openaiProvider.vision({
+  image: imageBuffer,
+  prompt: 'What is in this image? Describe in detail.'
+});
+console.log('OpenAI:', openAiVision);
+
+// Anthropic Vision
+const anthropicVision = await smartAi.anthropicProvider.vision({
+  image: imageBuffer,
+  prompt: 'Analyze this image and identify any text or objects.'
+});
+console.log('Anthropic:', anthropicVision);
+
+// Ollama Vision (using local model)
+const ollamaVision = await smartAi.ollamaProvider.vision({
+  image: imageBuffer,
+  prompt: 'Describe the colors and composition of this image.'
+});
+console.log('Ollama:', ollamaVision);
+```
+
+### 4. Document Analysis
+
+Process and analyze PDF documents with AI:
+
+```typescript
+import * as fs from 'fs';
+
+// Read PDF documents
+const pdfBuffer = fs.readFileSync('document.pdf');
+
+// Analyze with OpenAI
+const openAiAnalysis = await smartAi.openaiProvider.document({
+  systemMessage: 'You are a document analyst. Extract key information.',
+  userMessage: 'Summarize this document and list the main points.',
+  messageHistory: [],
+  pdfDocuments: [pdfBuffer]
+});
+console.log('OpenAI Analysis:', openAiAnalysis.message);
+
+// Analyze with Anthropic
+const anthropicAnalysis = await smartAi.anthropicProvider.document({
+  systemMessage: 'You are a legal expert.',
+  userMessage: 'Identify any legal terms or implications in this document.',
+  messageHistory: [],
+  pdfDocuments: [pdfBuffer]
+});
+console.log('Anthropic Analysis:', anthropicAnalysis.message);
+
+// Process multiple documents
+const doc1 = fs.readFileSync('contract1.pdf');
+const doc2 = fs.readFileSync('contract2.pdf');
+
+const comparison = await smartAi.openaiProvider.document({
+  systemMessage: 'You are a contract analyst.',
+  userMessage: 'Compare these two contracts and highlight the differences.',
+  messageHistory: [],
+  pdfDocuments: [doc1, doc2]
+});
+console.log('Comparison:', comparison.message);
+```
+
+### 5. Conversation Management
+
+Create persistent conversation sessions with any provider:
+
+```typescript
+// Create a conversation with OpenAI
+const conversation = smartAi.createConversation('openai');
+
+// Set the system message
+await conversation.setSystemMessage('You are a helpful coding assistant.');
+
+// Get input and output streams
+const inputWriter = conversation.getInputStreamWriter();
+const outputStream = conversation.getOutputStream();
+
+// Set up output reader
+const reader = outputStream.getReader();
+const decoder = new TextDecoder();
+
+// Send messages
+await inputWriter.write('How do I create a REST API in Node.js?');
+
+// Read responses
+while (true) {
+  const { done, value } = await reader.read();
+  if (done) break;
+  console.log('Assistant:', decoder.decode(value));
+}
+
+// Continue the conversation
+await inputWriter.write('Can you show me an example with Express?');
+
+// Create conversations with different providers
+const anthropicConversation = smartAi.createConversation('anthropic');
+const groqConversation = smartAi.createConversation('groq');
+```
+
+## Advanced Usage
+
+### Error Handling
+
+Always wrap AI operations in try-catch blocks for robust error handling:
+
+```typescript
+try {
+  const response = await smartAi.openaiProvider.chat({
+    systemMessage: 'You are an assistant.',
+    userMessage: 'Hello!',
+    messageHistory: []
+  });
+  console.log(response.message);
+} catch (error) {
+  if (error.code === 'rate_limit_exceeded') {
+    console.error('Rate limit hit, please retry later');
+  } else if (error.code === 'invalid_api_key') {
+    console.error('Invalid API key provided');
+  } else {
+    console.error('Unexpected error:', error.message);
+  }
+}
+```
+
+### Streaming with Custom Processing
+
+Implement custom transformations on streaming responses:
+
+```typescript
+// Create a custom transform stream
+const customTransform = new TransformStream({
+  transform(chunk, controller) {
+    // Example: Add timestamps to each chunk
+    const timestamp = new Date().toISOString();
+    controller.enqueue(`[${timestamp}] ${chunk}`);
+  }
+});
+
+// Apply to streaming chat
+const inputStream = new ReadableStream({
+  start(controller) {
+    controller.enqueue(new TextEncoder().encode(JSON.stringify({
+      role: 'user',
+      content: 'Tell me a story'
+    })));
+    controller.close();
+  }
+});
+
+const responseStream = await smartAi.openaiProvider.chatStream(inputStream);
+const processedStream = responseStream.pipeThrough(customTransform);
+
+// Read processed stream
+const reader = processedStream.getReader();
+while (true) {
+  const { done, value } = await reader.read();
+  if (done) break;
+  console.log(value);
+}
+```
+
+### Provider-Specific Features
+
+Each provider may have unique capabilities. Here's how to leverage them:
+
+```typescript
+// OpenAI - Use specific models
+const gpt4Response = await smartAi.openaiProvider.chat({
+  systemMessage: 'You are a helpful assistant.',
+  userMessage: 'Explain quantum computing',
+  messageHistory: []
+});
+
+// Anthropic - Use Claude's strength in analysis
+const codeReview = await smartAi.anthropicProvider.chat({
+  systemMessage: 'You are a code reviewer.',
+  userMessage: 'Review this code for security issues: ...',
+  messageHistory: []
+});
+
+// Perplexity - Best for research and current events
+const research = await smartAi.perplexityProvider.chat({
+  systemMessage: 'You are a research assistant.',
+  userMessage: 'What are the latest developments in renewable energy?',
+  messageHistory: []
+});
+
+// Groq - Optimized for speed
+const quickResponse = await smartAi.groqProvider.chat({
+  systemMessage: 'You are a quick helper.',
+  userMessage: 'Give me a one-line summary of photosynthesis',
+  messageHistory: []
+});
+```
+
+### Performance Optimization
+
+Tips for optimal performance:
+
+```typescript
+// 1. Reuse providers instead of creating new instances
+const smartAi = new SmartAi({ /* config */ });
+await smartAi.start(); // Initialize once
+
+// 2. Use streaming for long responses
+// Streaming reduces time-to-first-token and memory usage
+
+// 3. Batch operations when possible
+const promises = [
+  smartAi.openaiProvider.chat({ /* ... */ }),
+  smartAi.anthropicProvider.chat({ /* ... */ })
+];
+const results = await Promise.all(promises);
+
+// 4. Clean up resources
+await smartAi.stop(); // When done
+```
+
+## License and Legal Information
+
+This repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository.
+
+**Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.
+
+### Trademarks
+
+This project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH and are not included within the scope of the MIT license granted herein. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines, and any usage must be approved in writing by Task Venture Capital GmbH.
+
+### Company Information
+
+Task Venture Capital GmbH
+Registered at District court Bremen HRB 35230 HB, Germany
+
+For any legal inquiries or if you require further information, please contact us via email at hello@task.vc.
+
+By using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.
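Beyond the per-provider examples in the readme diff above, every provider extends the same `MultiModalModel` base class, which makes runtime provider switching straightforward. A hedged sketch (tokens are placeholders, top-level await is illustrative):

```typescript
import { SmartAi } from '@push.rocks/smartai';
import type { MultiModalModel } from '@push.rocks/smartai';

const smartAi = new SmartAi({
  openaiToken: 'your-openai-api-key',
  anthropicToken: 'your-anthropic-api-key',
});
await smartAi.start();

// Because all providers share the MultiModalModel interface, the same
// chat options work regardless of which provider is picked.
function pickProvider(name: 'openai' | 'anthropic'): MultiModalModel {
  return name === 'openai' ? smartAi.openaiProvider : smartAi.anthropicProvider;
}

const response = await pickProvider('anthropic').chat({
  systemMessage: 'You are a helpful assistant.',
  userMessage: 'Summarize the MIT license in one sentence.',
  messageHistory: [],
});
console.log(response.message);
```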
test/test.ts (104 lines)

@@ -1,8 +1,100 @@
-import { expect, expectAsync, tap } from '@push.rocks/tapbundle';
-import * as smartai from '../ts/index.js'
-
-tap.test('first test', async () => {
-  console.log(smartai)
-})
-
-tap.start()
+import { expect, tap } from '@push.rocks/tapbundle';
+import * as qenv from '@push.rocks/qenv';
+import * as smartrequest from '@push.rocks/smartrequest';
+import * as smartfile from '@push.rocks/smartfile';
+
+const testQenv = new qenv.Qenv('./', './.nogit/');
+
+import * as smartai from '../ts/index.js';
+
+let testSmartai: smartai.SmartAi;
+
+tap.test('should create a smartai instance', async () => {
+  testSmartai = new smartai.SmartAi({
+    openaiToken: await testQenv.getEnvVarOnDemand('OPENAI_TOKEN'),
+  });
+  await testSmartai.start();
+});
+
+tap.test('should create chat response with openai', async () => {
+  const userMessage = 'How are you?';
+  const response = await testSmartai.openaiProvider.chat({
+    systemMessage: 'Hello',
+    userMessage: userMessage,
+    messageHistory: [],
+  });
+  console.log(`userMessage: ${userMessage}`);
+  console.log(response.message);
+});
+
+tap.test('should document a pdf', async () => {
+  const pdfUrl = 'https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf';
+  const pdfResponse = await smartrequest.getBinary(pdfUrl);
+  const result = await testSmartai.openaiProvider.document({
+    systemMessage: 'Classify the document. Only the following answers are allowed: "invoice", "bank account statement", "contract", "other". The answer should only contain the keyword for machine use.',
+    userMessage: "Classify the document.",
+    messageHistory: [],
+    pdfDocuments: [pdfResponse.body],
+  });
+  console.log(result);
+});
+
+tap.test('should recognize companies in a pdf', async () => {
+  const pdfBuffer = await smartfile.fs.toBuffer('./.nogit/demo_without_textlayer.pdf');
+  const result = await testSmartai.openaiProvider.document({
+    systemMessage: `
+      summarize the document.
+
+      answer in JSON format, adhering to the following schema:
+      \`\`\`typescript
+      type TAnswer = {
+        entitySender: {
+          type: 'official state entity' | 'company' | 'person';
+          name: string;
+          address: string;
+          city: string;
+          country: string;
+          EU: boolean; // whether the entity is within EU
+        };
+        entityReceiver: {
+          type: 'official state entity' | 'company' | 'person';
+          name: string;
+          address: string;
+          city: string;
+          country: string;
+          EU: boolean; // whether the entity is within EU
+        };
+        date: string; // the date of the document as YYYY-MM-DD
+        title: string; // a short title, suitable for a filename
+      }
+      \`\`\`
+    `,
+    userMessage: "Classify the document.",
+    messageHistory: [],
+    pdfDocuments: [pdfBuffer],
+  });
+  console.log(result);
+});
+
+tap.test('should create audio response with openai', async () => {
+  // Call the audio method with a sample message.
+  const audioStream = await testSmartai.openaiProvider.audio({
+    message: 'This is a test of audio generation.',
+  });
+  // Read all chunks from the stream.
+  const chunks: Uint8Array[] = [];
+  for await (const chunk of audioStream) {
+    chunks.push(chunk as Uint8Array);
+  }
+  const audioBuffer = Buffer.concat(chunks);
+  await smartfile.fs.toFs(audioBuffer, './.nogit/testoutput.mp3');
+  console.log(`Audio Buffer length: ${audioBuffer.length}`);
+  // Assert that the resulting buffer is not empty.
+  expect(audioBuffer.length).toBeGreaterThan(0);
+});
+
+tap.test('should stop the smartai instance', async () => {
+  await testSmartai.stop();
+});
+
+export default tap.start();
@@ -1,8 +1,8 @@
 /**
- * autocreated commitinfo by @pushrocks/commitinfo
+ * autocreated commitinfo by @push.rocks/commitinfo
 */
 export const commitinfo = {
   name: '@push.rocks/smartai',
-  version: '0.0.8',
+  version: '0.5.4',
-  description: 'a standardaized interface to talk to AI models'
+  description: 'SmartAi is a versatile TypeScript library designed to facilitate integration and interaction with various AI models, offering functionalities for chat, audio generation, document processing, and vision tasks.'
 }
ts/abstract.classes.multimodal.ts

@@ -1,8 +1,86 @@
-export abstract class MultiModal {
-  abstract start(): Promise<void>;
-  abstract stop(): Promise<void>;
-
-  // Defines a streaming interface for chat interactions.
-  // The implementation will vary based on the specific AI model.
-  abstract chatStream(input: ReadableStream<string>): ReadableStream<string>;
+/**
+ * Message format for chat interactions
+ */
+export interface ChatMessage {
+  role: 'assistant' | 'user' | 'system';
+  content: string;
+}
+
+/**
+ * Options for chat interactions
+ */
+export interface ChatOptions {
+  systemMessage: string;
+  userMessage: string;
+  messageHistory: ChatMessage[];
+}
+
+/**
+ * Response format for chat interactions
+ */
+export interface ChatResponse {
+  role: 'assistant';
+  message: string;
+}
+
+/**
+ * Abstract base class for multi-modal AI models.
+ * Provides a common interface for different AI providers (OpenAI, Anthropic, Perplexity, Ollama)
+ */
+export abstract class MultiModalModel {
+  /**
+   * Initializes the model and any necessary resources
+   * Should be called before using any other methods
+   */
+  abstract start(): Promise<void>;
+
+  /**
+   * Cleans up any resources used by the model
+   * Should be called when the model is no longer needed
+   */
+  abstract stop(): Promise<void>;
+
+  /**
+   * Synchronous chat interaction with the model
+   * @param optionsArg Options containing system message, user message, and message history
+   * @returns Promise resolving to the assistant's response
+   */
+  public abstract chat(optionsArg: ChatOptions): Promise<ChatResponse>;
+
+  /**
+   * Streaming interface for chat interactions
+   * Allows for real-time responses from the model
+   * @param input Stream of user messages
+   * @returns Stream of model responses
+   */
+  public abstract chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>>;
+
+  /**
+   * Text-to-speech conversion
+   * @param optionsArg Options containing the message to convert to speech
+   * @returns Promise resolving to a readable stream of audio data
+   * @throws Error if the provider doesn't support audio generation
+   */
+  public abstract audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream>;
+
+  /**
+   * Vision-language processing
+   * @param optionsArg Options containing the image and prompt for analysis
+   * @returns Promise resolving to the model's description or analysis of the image
+   * @throws Error if the provider doesn't support vision tasks
+   */
+  public abstract vision(optionsArg: { image: Buffer; prompt: string }): Promise<string>;
+
+  /**
+   * Document analysis and processing
+   * @param optionsArg Options containing system message, user message, PDF documents, and message history
+   * @returns Promise resolving to the model's analysis of the documents
+   * @throws Error if the provider doesn't support document processing
+   */
+  public abstract document(optionsArg: {
+    systemMessage: string;
+    userMessage: string;
+    pdfDocuments: Uint8Array[];
+    messageHistory: ChatMessage[];
+  }): Promise<{ message: any }>;
 }
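To make the contract above concrete, here is a minimal hypothetical subclass; the echo behavior stands in for real SDK calls, and the unsupported modalities throw, as the doc comments suggest:

```typescript
// Hypothetical minimal provider: shows the surface a concrete subclass of
// MultiModalModel must cover. The echo logic is illustrative only.
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';

export class EchoProvider extends MultiModalModel {
  public async start(): Promise<void> { /* acquire clients, warm caches */ }
  public async stop(): Promise<void> { /* release resources */ }

  public async chat(optionsArg: ChatOptions): Promise<ChatResponse> {
    // A real provider would call its SDK here.
    return { role: 'assistant', message: `echo: ${optionsArg.userMessage}` };
  }

  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    const decoder = new TextDecoder();
    const reader = input.getReader();
    return new ReadableStream<string>({
      async pull(controller) {
        const { done, value } = await reader.read();
        if (done) { controller.close(); return; }
        controller.enqueue(`echo: ${decoder.decode(value)}`);
      },
    });
  }

  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    throw new Error('EchoProvider does not support audio generation');
  }

  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    throw new Error('EchoProvider does not support vision tasks');
  }

  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }> {
    throw new Error('EchoProvider does not support document processing');
  }
}
```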
ts/classes.conversation.ts (new file, 152 lines)

@@ -0,0 +1,152 @@

import type { SmartAi } from "./classes.smartai.js";
import { OpenAiProvider } from "./provider.openai.js";

type TProcessFunction = (input: string) => Promise<string>;

export interface IConversationOptions {
  processFunction: TProcessFunction;
}

/**
 * a conversation
 */
export class Conversation {
  // STATIC
  public static async createWithOpenAi(smartaiRefArg: SmartAi) {
    if (!smartaiRefArg.openaiProvider) {
      throw new Error('OpenAI provider not available');
    }
    const conversation = new Conversation(smartaiRefArg, {
      processFunction: async (input) => {
        return ''; // TODO implement proper streaming
      }
    });
    return conversation;
  }

  public static async createWithAnthropic(smartaiRefArg: SmartAi) {
    if (!smartaiRefArg.anthropicProvider) {
      throw new Error('Anthropic provider not available');
    }
    const conversation = new Conversation(smartaiRefArg, {
      processFunction: async (input) => {
        return ''; // TODO implement proper streaming
      }
    });
    return conversation;
  }

  public static async createWithPerplexity(smartaiRefArg: SmartAi) {
    if (!smartaiRefArg.perplexityProvider) {
      throw new Error('Perplexity provider not available');
    }
    const conversation = new Conversation(smartaiRefArg, {
      processFunction: async (input) => {
        return ''; // TODO implement proper streaming
      }
    });
    return conversation;
  }

  public static async createWithExo(smartaiRefArg: SmartAi) {
    if (!smartaiRefArg.exoProvider) {
      throw new Error('Exo provider not available');
    }
    const conversation = new Conversation(smartaiRefArg, {
      processFunction: async (input) => {
        return ''; // TODO implement proper streaming
      }
    });
    return conversation;
  }

  public static async createWithOllama(smartaiRefArg: SmartAi) {
    if (!smartaiRefArg.ollamaProvider) {
      throw new Error('Ollama provider not available');
    }
    const conversation = new Conversation(smartaiRefArg, {
      processFunction: async (input) => {
        return ''; // TODO implement proper streaming
      }
    });
    return conversation;
  }

  public static async createWithGroq(smartaiRefArg: SmartAi) {
    if (!smartaiRefArg.groqProvider) {
      throw new Error('Groq provider not available');
    }
    const conversation = new Conversation(smartaiRefArg, {
      processFunction: async (input) => {
        return ''; // TODO implement proper streaming
      }
    });
    return conversation;
  }

  public static async createWithXai(smartaiRefArg: SmartAi) {
    if (!smartaiRefArg.xaiProvider) {
      throw new Error('XAI provider not available');
    }
    const conversation = new Conversation(smartaiRefArg, {
      processFunction: async (input) => {
        return ''; // TODO implement proper streaming
      }
    });
    return conversation;
  }

  // INSTANCE
  smartaiRef: SmartAi;
  private systemMessage: string;
  private processFunction: TProcessFunction;
  private inputStreamWriter: WritableStreamDefaultWriter<string> | null = null;
  private outputStreamController: ReadableStreamDefaultController<string> | null = null;

  constructor(smartairefArg: SmartAi, options: IConversationOptions) {
    this.smartaiRef = smartairefArg; // store the back-reference (missing in the original)
    this.processFunction = options.processFunction;
  }

  public async setSystemMessage(systemMessageArg: string) {
    this.systemMessage = systemMessageArg;
  }

  private setupOutputStream(): ReadableStream<string> {
    return new ReadableStream<string>({
      start: (controller) => {
        this.outputStreamController = controller;
      }
    });
  }

  private setupInputStream(): WritableStream<string> {
    const writableStream = new WritableStream<string>({
      write: async (chunk) => {
        const processedData = await this.processFunction(chunk);
        if (this.outputStreamController) {
          this.outputStreamController.enqueue(processedData);
        }
      },
      close: () => {
        this.outputStreamController?.close();
      },
      abort: (err) => {
        console.error('Stream aborted', err);
        this.outputStreamController?.error(err);
      }
    });
    return writableStream;
  }

  public getInputStreamWriter(): WritableStreamDefaultWriter<string> {
    if (!this.inputStreamWriter) {
      const inputStream = this.setupInputStream();
      this.inputStreamWriter = inputStream.getWriter();
    }
    return this.inputStreamWriter;
  }

  public getOutputStream(): ReadableStream<string> {
    return this.setupOutputStream();
  }
}
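The static factories above all leave `processFunction` as a TODO. One plausible wiring, shown as a sketch only (it assumes `smartAiRef` is a started `SmartAi` instance with an OpenAI provider configured), would route each input chunk through the provider's synchronous `chat()`:

```typescript
// Illustrative only — the upstream factories still stub this out.
const conversation = new Conversation(smartAiRef, {
  processFunction: async (input: string) => {
    const response = await smartAiRef.openaiProvider.chat({
      systemMessage: 'You are a helpful assistant.',
      userMessage: input,
      messageHistory: [], // a fuller version would accumulate prior turns here
    });
    return response.message;
  },
});
```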
ts/classes.smartai.ts (new file, 119 lines)

@@ -0,0 +1,119 @@

import { Conversation } from './classes.conversation.js';
import * as plugins from './plugins.js';
import { AnthropicProvider } from './provider.anthropic.js';
import { OllamaProvider } from './provider.ollama.js';
import { OpenAiProvider } from './provider.openai.js';
import { PerplexityProvider } from './provider.perplexity.js';
import { ExoProvider } from './provider.exo.js';
import { GroqProvider } from './provider.groq.js';
import { XAIProvider } from './provider.xai.js';

export interface ISmartAiOptions {
  openaiToken?: string;
  anthropicToken?: string;
  perplexityToken?: string;
  groqToken?: string;
  xaiToken?: string;
  exo?: {
    baseUrl?: string;
    apiKey?: string;
  };
  ollama?: {
    baseUrl?: string;
    model?: string;
    visionModel?: string;
  };
}

export type TProvider = 'openai' | 'anthropic' | 'perplexity' | 'ollama' | 'exo' | 'groq' | 'xai';

export class SmartAi {
  public options: ISmartAiOptions;

  public openaiProvider: OpenAiProvider;
  public anthropicProvider: AnthropicProvider;
  public perplexityProvider: PerplexityProvider;
  public ollamaProvider: OllamaProvider;
  public exoProvider: ExoProvider;
  public groqProvider: GroqProvider;
  public xaiProvider: XAIProvider;

  constructor(optionsArg: ISmartAiOptions) {
    this.options = optionsArg;
  }

  public async start() {
    if (this.options.openaiToken) {
      this.openaiProvider = new OpenAiProvider({
        openaiToken: this.options.openaiToken,
      });
      await this.openaiProvider.start();
    }
    if (this.options.anthropicToken) {
      this.anthropicProvider = new AnthropicProvider({
        anthropicToken: this.options.anthropicToken,
      });
      await this.anthropicProvider.start();
    }
    if (this.options.perplexityToken) {
      this.perplexityProvider = new PerplexityProvider({
        perplexityToken: this.options.perplexityToken,
      });
      await this.perplexityProvider.start();
    }
    if (this.options.groqToken) {
      this.groqProvider = new GroqProvider({
        groqToken: this.options.groqToken,
      });
      await this.groqProvider.start();
    }
    if (this.options.xaiToken) {
      this.xaiProvider = new XAIProvider({
        xaiToken: this.options.xaiToken,
      });
      await this.xaiProvider.start();
    }
    if (this.options.ollama) {
      this.ollamaProvider = new OllamaProvider({
        baseUrl: this.options.ollama.baseUrl,
        model: this.options.ollama.model,
        visionModel: this.options.ollama.visionModel,
      });
      await this.ollamaProvider.start();
    }
    if (this.options.exo) {
      this.exoProvider = new ExoProvider({
        exoBaseUrl: this.options.exo.baseUrl,
        apiKey: this.options.exo.apiKey,
      });
      await this.exoProvider.start();
    }
  }

  public async stop() {}

  /**
   * create a new conversation
   */
  createConversation(provider: TProvider) {
    switch (provider) {
      case 'exo':
        return Conversation.createWithExo(this);
      case 'openai':
        return Conversation.createWithOpenAi(this);
      case 'anthropic':
        return Conversation.createWithAnthropic(this);
      case 'perplexity':
        return Conversation.createWithPerplexity(this);
      case 'ollama':
        return Conversation.createWithOllama(this);
      case 'groq':
        return Conversation.createWithGroq(this);
      case 'xai':
        return Conversation.createWithXai(this);
      default:
        throw new Error('Provider not available');
    }
  }
}
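One detail worth noting: the `Conversation.createWith*` factories are async, so `createConversation` hands back a Promise and callers should await it. A usage sketch, assuming a started instance:

```typescript
// createConversation resolves to a Conversation instance.
const conversation = await smartAi.createConversation('openai');
await conversation.setSystemMessage('You are a helpful coding assistant.');
const inputWriter = conversation.getInputStreamWriter();
await inputWriter.write('How do I create a REST API in Node.js?');
```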
15
ts/classes.tts.ts
Normal file
15
ts/classes.tts.ts
Normal file
@@ -0,0 +1,15 @@
import type { SmartAi } from './classes.smartai.js';
import * as plugins from './plugins.js';

export class TTS {
  public static async createWithOpenAi(smartaiRef: SmartAi): Promise<TTS> {
    return new TTS(smartaiRef);
  }

  // INSTANCE
  smartaiRef: SmartAi;

  constructor(smartairefArg: SmartAi) {
    this.smartaiRef = smartairefArg;
  }
}
@@ -1,3 +1,3 @@
-export * from './smartai.classes.smartai.js';
+export * from './classes.smartai.js';
 export * from './abstract.classes.multimodal.js';
 export * from './provider.openai.js';
0 ts/interfaces.ts Normal file
@@ -7,20 +7,30 @@ export {
 // @push.rocks scope
 import * as qenv from '@push.rocks/qenv';
-import * as smartpath from '@push.rocks/smartpath';
-import * as smartpromise from '@push.rocks/smartpromise';
+import * as smartarray from '@push.rocks/smartarray';
 import * as smartfile from '@push.rocks/smartfile';
+import * as smartpath from '@push.rocks/smartpath';
+import * as smartpdf from '@push.rocks/smartpdf';
+import * as smartpromise from '@push.rocks/smartpromise';
+import * as smartrequest from '@push.rocks/smartrequest';
+import * as webstream from '@push.rocks/webstream';
 
 export {
+  smartarray,
   qenv,
-  smartpath,
-  smartpromise,
   smartfile,
+  smartpath,
+  smartpdf,
+  smartpromise,
+  smartrequest,
+  webstream,
 }
 
 // third party
+import * as anthropic from '@anthropic-ai/sdk';
 import * as openai from 'openai';
 
 export {
+  anthropic,
   openai,
 }
240 ts/provider.anthropic.ts Normal file
@@ -0,0 +1,240 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';
import type { ImageBlockParam, TextBlockParam } from '@anthropic-ai/sdk/resources/messages';

type ContentBlock = ImageBlockParam | TextBlockParam;

export interface IAnthropicProviderOptions {
  anthropicToken: string;
}

export class AnthropicProvider extends MultiModalModel {
  private options: IAnthropicProviderOptions;
  public anthropicApiClient: plugins.anthropic.default;

  constructor(optionsArg: IAnthropicProviderOptions) {
    super();
    this.options = optionsArg; // Ensure the token is stored
  }

  async start() {
    this.anthropicApiClient = new plugins.anthropic.default({
      apiKey: this.options.anthropicToken,
    });
  }

  async stop() {}

  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    // Create a TextDecoder to handle incoming chunks
    const decoder = new TextDecoder();
    let buffer = '';
    let currentMessage: { role: string; content: string; } | null = null;

    // Create a TransformStream to process the input
    // (arrow function keeps `this` bound to the provider instance)
    const transform = new TransformStream<Uint8Array, string>({
      transform: async (chunk, controller) => {
        buffer += decoder.decode(chunk, { stream: true });

        // Try to parse complete JSON messages from the buffer
        while (true) {
          const newlineIndex = buffer.indexOf('\n');
          if (newlineIndex === -1) break;

          const line = buffer.slice(0, newlineIndex);
          buffer = buffer.slice(newlineIndex + 1);

          if (line.trim()) {
            try {
              const message = JSON.parse(line);
              currentMessage = {
                role: message.role || 'user',
                content: message.content || '',
              };
            } catch (e) {
              console.error('Failed to parse message:', e);
            }
          }
        }

        // If we have a complete message, send it to Anthropic
        if (currentMessage) {
          const stream = await this.anthropicApiClient.messages.create({
            model: 'claude-3-opus-20240229',
            messages: [{ role: currentMessage.role, content: currentMessage.content }],
            system: '',
            stream: true,
            max_tokens: 4000,
          });

          // Process each chunk from Anthropic
          for await (const chunk of stream) {
            const content = chunk.delta?.text;
            if (content) {
              controller.enqueue(content);
            }
          }

          currentMessage = null;
        }
      },

      flush(controller) {
        if (buffer) {
          try {
            const message = JSON.parse(buffer);
            controller.enqueue(message.content || '');
          } catch (e) {
            console.error('Failed to parse remaining buffer:', e);
          }
        }
      }
    });

    // Connect the input to our transform stream
    return input.pipeThrough(transform);
  }

  // Implementing the synchronous chat interaction
  public async chat(optionsArg: ChatOptions): Promise<ChatResponse> {
    // Convert message history to Anthropic format
    const messages = optionsArg.messageHistory.map(msg => ({
      role: msg.role === 'assistant' ? 'assistant' as const : 'user' as const,
      content: msg.content
    }));

    const result = await this.anthropicApiClient.messages.create({
      model: 'claude-3-opus-20240229',
      system: optionsArg.systemMessage,
      messages: [
        ...messages,
        { role: 'user' as const, content: optionsArg.userMessage }
      ],
      max_tokens: 4000,
    });

    // Extract text content from the response
    let message = '';
    for (const block of result.content) {
      if ('text' in block) {
        message += block.text;
      }
    }

    return {
      role: 'assistant' as const,
      message,
    };
  }

  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    // Anthropic does not provide an audio API, so this method is not implemented.
    throw new Error('Audio generation is not yet supported by Anthropic.');
  }

  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    const base64Image = optionsArg.image.toString('base64');

    const content: ContentBlock[] = [
      {
        type: 'text',
        text: optionsArg.prompt
      },
      {
        type: 'image',
        source: {
          type: 'base64',
          media_type: 'image/jpeg',
          data: base64Image
        }
      }
    ];

    const result = await this.anthropicApiClient.messages.create({
      model: 'claude-3-opus-20240229',
      messages: [{
        role: 'user',
        content
      }],
      max_tokens: 1024
    });

    // Extract text content from the response
    let message = '';
    for (const block of result.content) {
      if ('text' in block) {
        message += block.text;
      }
    }
    return message;
  }

  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }> {
    // Convert PDF documents to images using SmartPDF
    const smartpdfInstance = new plugins.smartpdf.SmartPdf();
    let documentImageBytesArray: Uint8Array[] = [];

    for (const pdfDocument of optionsArg.pdfDocuments) {
      const documentImageArray = await smartpdfInstance.convertPDFToPngBytes(pdfDocument);
      documentImageBytesArray = documentImageBytesArray.concat(documentImageArray);
    }

    // Convert message history to Anthropic format
    const messages = optionsArg.messageHistory.map(msg => ({
      role: msg.role === 'assistant' ? 'assistant' as const : 'user' as const,
      content: msg.content
    }));

    // Create content array with text and images
    const content: ContentBlock[] = [
      {
        type: 'text',
        text: optionsArg.userMessage
      }
    ];

    // Add each document page as an image
    for (const imageBytes of documentImageBytesArray) {
      content.push({
        type: 'image',
        source: {
          type: 'base64',
          media_type: 'image/jpeg',
          data: Buffer.from(imageBytes).toString('base64')
        }
      });
    }

    const result = await this.anthropicApiClient.messages.create({
      model: 'claude-3-opus-20240229',
      system: optionsArg.systemMessage,
      messages: [
        ...messages,
        { role: 'user', content }
      ],
      max_tokens: 4096
    });

    // Extract text content from the response
    let message = '';
    for (const block of result.content) {
      if ('text' in block) {
        message += block.text;
      }
    }

    return {
      message: {
        role: 'assistant',
        content: message
      }
    };
  }
}
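Note that `vision()` always labels the payload as `media_type: 'image/jpeg'`, so callers should hand it JPEG data. A minimal sketch of the vision path (file path illustrative):

```typescript
import * as fs from 'fs';
import { AnthropicProvider } from './provider.anthropic.js';

const anthropic = new AnthropicProvider({ anthropicToken: process.env.ANTHROPIC_TOKEN! });
await anthropic.start();

// The prompt and the base64-encoded image travel in one user message
const description = await anthropic.vision({
  image: fs.readFileSync('./photo.jpg'),
  prompt: 'What is shown in this image?',
});
console.log(description);
```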
128 ts/provider.exo.ts Normal file
@@ -0,0 +1,128 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';
import type { ChatCompletionMessageParam } from 'openai/resources/chat/completions';

export interface IExoProviderOptions {
  exoBaseUrl?: string;
  apiKey?: string;
}

export class ExoProvider extends MultiModalModel {
  private options: IExoProviderOptions;
  public openAiApiClient: plugins.openai.default;

  constructor(optionsArg: IExoProviderOptions = {}) {
    super();
    this.options = {
      exoBaseUrl: 'http://localhost:8080/v1', // Default Exo API endpoint
      ...optionsArg
    };
  }

  public async start() {
    this.openAiApiClient = new plugins.openai.default({
      apiKey: this.options.apiKey || 'not-needed', // Exo might not require an API key for local deployment
      baseURL: this.options.exoBaseUrl,
    });
  }

  public async stop() {}

  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    // Create a TextDecoder to handle incoming chunks
    const decoder = new TextDecoder();
    let buffer = '';
    let currentMessage: { role: string; content: string; } | null = null;

    // Create a TransformStream to process the input
    const transform = new TransformStream<Uint8Array, string>({
      transform: async (chunk, controller) => {
        buffer += decoder.decode(chunk, { stream: true });

        // Try to parse complete JSON messages from the buffer
        while (true) {
          const newlineIndex = buffer.indexOf('\n');
          if (newlineIndex === -1) break;

          const line = buffer.slice(0, newlineIndex);
          buffer = buffer.slice(newlineIndex + 1);

          if (line.trim()) {
            try {
              const message = JSON.parse(line);
              currentMessage = message;

              // Process the message based on its type
              if (message.type === 'message') {
                const response = await this.chat({
                  systemMessage: '',
                  userMessage: message.content,
                  messageHistory: [{ role: message.role as 'user' | 'assistant' | 'system', content: message.content }]
                });

                controller.enqueue(JSON.stringify(response) + '\n');
              }
            } catch (error) {
              console.error('Error processing message:', error);
            }
          }
        }
      },
      flush(controller) {
        if (buffer) {
          try {
            const message = JSON.parse(buffer);
            currentMessage = message;
          } catch (error) {
            console.error('Error processing remaining buffer:', error);
          }
        }
      }
    });

    return input.pipeThrough(transform);
  }

  public async chat(options: ChatOptions): Promise<ChatResponse> {
    const messages: ChatCompletionMessageParam[] = [
      { role: 'system', content: options.systemMessage },
      ...options.messageHistory,
      { role: 'user', content: options.userMessage }
    ];

    try {
      const response = await this.openAiApiClient.chat.completions.create({
        model: 'local-model', // Exo uses local models
        messages: messages,
        stream: false
      });

      return {
        role: 'assistant',
        message: response.choices[0]?.message?.content || ''
      };
    } catch (error) {
      console.error('Error in chat completion:', error);
      throw error;
    }
  }

  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    throw new Error('Audio generation is not supported by Exo provider');
  }

  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    throw new Error('Vision processing is not supported by Exo provider');
  }

  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }> {
    throw new Error('Document processing is not supported by Exo provider');
  }
}
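Because Exo speaks the OpenAI wire protocol, the provider is just the `openai` client pointed at a local base URL. A minimal chat sketch, assuming an Exo node is listening on the default `http://localhost:8080/v1`:

```typescript
import { ExoProvider } from './provider.exo.js';

const exo = new ExoProvider(); // defaults: http://localhost:8080/v1, apiKey 'not-needed'
await exo.start();

const reply = await exo.chat({
  systemMessage: 'You are a helpful assistant.',
  userMessage: 'Summarize what Exo does in one sentence.',
  messageHistory: [],
});
console.log(reply.message);
```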
192 ts/provider.groq.ts Normal file
@@ -0,0 +1,192 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';

export interface IGroqProviderOptions {
  groqToken: string;
  model?: string;
}

export class GroqProvider extends MultiModalModel {
  private options: IGroqProviderOptions;
  private baseUrl = 'https://api.groq.com/v1';

  constructor(optionsArg: IGroqProviderOptions) {
    super();
    this.options = {
      ...optionsArg,
      model: optionsArg.model || 'llama-3.3-70b-versatile', // Default model
    };
  }

  async start() {}

  async stop() {}

  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    // Create a TextDecoder to handle incoming chunks
    const decoder = new TextDecoder();
    let buffer = '';
    let currentMessage: { role: string; content: string; } | null = null;

    // Create a TransformStream to process the input
    const transform = new TransformStream<Uint8Array, string>({
      transform: async (chunk, controller) => {
        buffer += decoder.decode(chunk, { stream: true });

        // Try to parse complete JSON messages from the buffer
        while (true) {
          const newlineIndex = buffer.indexOf('\n');
          if (newlineIndex === -1) break;

          const line = buffer.slice(0, newlineIndex);
          buffer = buffer.slice(newlineIndex + 1);

          if (line.trim()) {
            try {
              const message = JSON.parse(line);
              currentMessage = {
                role: message.role || 'user',
                content: message.content || '',
              };
            } catch (e) {
              console.error('Failed to parse message:', e);
            }
          }
        }

        // If we have a complete message, send it to Groq
        if (currentMessage) {
          const response = await fetch(`${this.baseUrl}/chat/completions`, {
            method: 'POST',
            headers: {
              'Authorization': `Bearer ${this.options.groqToken}`,
              'Content-Type': 'application/json',
            },
            body: JSON.stringify({
              model: this.options.model,
              messages: [{ role: currentMessage.role, content: currentMessage.content }],
              stream: true,
            }),
          });

          // Process each chunk from Groq
          const reader = response.body?.getReader();
          if (reader) {
            try {
              while (true) {
                const { done, value } = await reader.read();
                if (done) break;

                const chunk = new TextDecoder().decode(value);
                const lines = chunk.split('\n');

                for (const line of lines) {
                  if (line.startsWith('data: ')) {
                    const data = line.slice(6);
                    if (data === '[DONE]') break;

                    try {
                      const parsed = JSON.parse(data);
                      const content = parsed.choices[0]?.delta?.content;
                      if (content) {
                        controller.enqueue(content);
                      }
                    } catch (e) {
                      console.error('Failed to parse SSE data:', e);
                    }
                  }
                }
              }
            } finally {
              reader.releaseLock();
            }
          }

          currentMessage = null;
        }
      },

      flush(controller) {
        if (buffer) {
          try {
            const message = JSON.parse(buffer);
            controller.enqueue(message.content || '');
          } catch (e) {
            console.error('Failed to parse remaining buffer:', e);
          }
        }
      }
    });

    // Connect the input to our transform stream
    return input.pipeThrough(transform);
  }

  // Implementing the synchronous chat interaction
  public async chat(optionsArg: ChatOptions): Promise<ChatResponse> {
    const messages = [
      // System message
      {
        role: 'system',
        content: optionsArg.systemMessage,
      },
      // Message history
      ...optionsArg.messageHistory.map(msg => ({
        role: msg.role,
        content: msg.content,
      })),
      // User message
      {
        role: 'user',
        content: optionsArg.userMessage,
      },
    ];

    const response = await fetch(`${this.baseUrl}/chat/completions`, {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${this.options.groqToken}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: this.options.model,
        messages,
        temperature: 0.7,
        max_completion_tokens: 1024,
        stream: false,
      }),
    });

    if (!response.ok) {
      const error = await response.json();
      throw new Error(`Groq API error: ${error.message || response.statusText}`);
    }

    const result = await response.json();

    return {
      role: 'assistant',
      message: result.choices[0].message.content,
    };
  }

  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    // Groq does not provide an audio API, so this method is not implemented.
    throw new Error('Audio generation is not yet supported by Groq.');
  }

  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    throw new Error('Vision tasks are not yet supported by Groq.');
  }

  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }> {
    throw new Error('Document processing is not yet supported by Groq.');
  }
}
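Unlike the OpenAI-compatible providers, `GroqProvider` calls the REST endpoint directly with `fetch` and fixed sampling parameters (`temperature: 0.7`, `max_completion_tokens: 1024`). A minimal chat sketch:

```typescript
import { GroqProvider } from './provider.groq.js';

const groq = new GroqProvider({ groqToken: process.env.GROQ_TOKEN! });
await groq.start(); // a no-op here, kept for lifecycle symmetry with the other providers

const reply = await groq.chat({
  systemMessage: 'You are a terse assistant.',
  userMessage: 'Name three uses for a message queue.',
  messageHistory: [],
});
console.log(reply.message); // served by llama-3.3-70b-versatile unless `model` was overridden
```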
252 ts/provider.ollama.ts Normal file
@@ -0,0 +1,252 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';

export interface IOllamaProviderOptions {
  baseUrl?: string;
  model?: string;
  visionModel?: string; // Model to use for vision tasks (e.g. 'llava')
}

export class OllamaProvider extends MultiModalModel {
  private options: IOllamaProviderOptions;
  private baseUrl: string;
  private model: string;
  private visionModel: string;

  constructor(optionsArg: IOllamaProviderOptions = {}) {
    super();
    this.options = optionsArg;
    this.baseUrl = optionsArg.baseUrl || 'http://localhost:11434';
    this.model = optionsArg.model || 'llama2';
    this.visionModel = optionsArg.visionModel || 'llava';
  }

  async start() {
    // Verify Ollama is running
    try {
      const response = await fetch(`${this.baseUrl}/api/tags`);
      if (!response.ok) {
        throw new Error('Failed to connect to Ollama server');
      }
    } catch (error) {
      throw new Error(`Failed to connect to Ollama server at ${this.baseUrl}: ${error.message}`);
    }
  }

  async stop() {}

  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    // Create a TextDecoder to handle incoming chunks
    const decoder = new TextDecoder();
    let buffer = '';
    let currentMessage: { role: string; content: string; } | null = null;

    // Create a TransformStream to process the input
    const transform = new TransformStream<Uint8Array, string>({
      transform: async (chunk, controller) => {
        buffer += decoder.decode(chunk, { stream: true });

        // Try to parse complete JSON messages from the buffer
        while (true) {
          const newlineIndex = buffer.indexOf('\n');
          if (newlineIndex === -1) break;

          const line = buffer.slice(0, newlineIndex);
          buffer = buffer.slice(newlineIndex + 1);

          if (line.trim()) {
            try {
              const message = JSON.parse(line);
              currentMessage = {
                role: message.role || 'user',
                content: message.content || '',
              };
            } catch (e) {
              console.error('Failed to parse message:', e);
            }
          }
        }

        // If we have a complete message, send it to Ollama
        if (currentMessage) {
          const response = await fetch(`${this.baseUrl}/api/chat`, {
            method: 'POST',
            headers: {
              'Content-Type': 'application/json',
            },
            body: JSON.stringify({
              model: this.model,
              messages: [{ role: currentMessage.role, content: currentMessage.content }],
              stream: true,
            }),
          });

          // Process each chunk from Ollama
          const reader = response.body?.getReader();
          if (reader) {
            try {
              while (true) {
                const { done, value } = await reader.read();
                if (done) break;

                const chunk = new TextDecoder().decode(value);
                const lines = chunk.split('\n');

                for (const line of lines) {
                  if (line.trim()) {
                    try {
                      const parsed = JSON.parse(line);
                      const content = parsed.message?.content;
                      if (content) {
                        controller.enqueue(content);
                      }
                    } catch (e) {
                      console.error('Failed to parse Ollama response:', e);
                    }
                  }
                }
              }
            } finally {
              reader.releaseLock();
            }
          }

          currentMessage = null;
        }
      },

      flush(controller) {
        if (buffer) {
          try {
            const message = JSON.parse(buffer);
            controller.enqueue(message.content || '');
          } catch (e) {
            console.error('Failed to parse remaining buffer:', e);
          }
        }
      }
    });

    // Connect the input to our transform stream
    return input.pipeThrough(transform);
  }

  // Implementing the synchronous chat interaction
  public async chat(optionsArg: ChatOptions): Promise<ChatResponse> {
    // Format messages for Ollama
    const messages = [
      { role: 'system', content: optionsArg.systemMessage },
      ...optionsArg.messageHistory,
      { role: 'user', content: optionsArg.userMessage }
    ];

    // Make API call to Ollama
    const response = await fetch(`${this.baseUrl}/api/chat`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: this.model,
        messages: messages,
        stream: false
      }),
    });

    if (!response.ok) {
      throw new Error(`Ollama API error: ${response.statusText}`);
    }

    const result = await response.json();

    return {
      role: 'assistant' as const,
      message: result.message.content,
    };
  }

  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    throw new Error('Audio generation is not supported by Ollama.');
  }

  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    const base64Image = optionsArg.image.toString('base64');

    const response = await fetch(`${this.baseUrl}/api/chat`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: this.visionModel,
        messages: [{
          role: 'user',
          content: optionsArg.prompt,
          images: [base64Image]
        }],
        stream: false
      }),
    });

    if (!response.ok) {
      throw new Error(`Ollama API error: ${response.statusText}`);
    }

    const result = await response.json();
    return result.message.content;
  }

  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }> {
    // Convert PDF documents to images using SmartPDF
    const smartpdfInstance = new plugins.smartpdf.SmartPdf();
    let documentImageBytesArray: Uint8Array[] = [];

    for (const pdfDocument of optionsArg.pdfDocuments) {
      const documentImageArray = await smartpdfInstance.convertPDFToPngBytes(pdfDocument);
      documentImageBytesArray = documentImageBytesArray.concat(documentImageArray);
    }

    // Convert images to base64
    const base64Images = documentImageBytesArray.map(bytes => Buffer.from(bytes).toString('base64'));

    // Send request to Ollama with images
    const response = await fetch(`${this.baseUrl}/api/chat`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: this.visionModel,
        messages: [
          { role: 'system', content: optionsArg.systemMessage },
          ...optionsArg.messageHistory,
          {
            role: 'user',
            content: optionsArg.userMessage,
            images: base64Images
          }
        ],
        stream: false
      }),
    });

    if (!response.ok) {
      throw new Error(`Ollama API error: ${response.statusText}`);
    }

    const result = await response.json();
    return {
      message: {
        role: 'assistant',
        content: result.message.content
      }
    };
  }
}
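The vision path above reuses the `/api/chat` endpoint but targets `visionModel` and attaches base64 images via Ollama's `images` field. A minimal sketch, assuming a local daemon with the `llava` model pulled (file path illustrative):

```typescript
import * as fs from 'fs';
import { OllamaProvider } from './provider.ollama.js';

const ollama = new OllamaProvider(); // defaults: http://localhost:11434, llama2, llava
await ollama.start(); // throws early if the daemon is unreachable

const answer = await ollama.vision({
  image: fs.readFileSync('./diagram.png'),
  prompt: 'Describe this diagram.',
});
console.log(answer);
```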
@@ -1,88 +1,232 @@
 import * as plugins from './plugins.js';
 import * as paths from './paths.js';
 
-import { MultiModal } from './abstract.classes.multimodal.js';
+// Custom type definition for chat completion messages
+export type TChatCompletionRequestMessage = {
+  role: "system" | "user" | "assistant";
+  content: string;
+};
 
-export class OpenAiProvider extends MultiModal {
-  private openAiToken: string;
+import { MultiModalModel } from './abstract.classes.multimodal.js';
+
+export interface IOpenaiProviderOptions {
+  openaiToken: string;
+  chatModel?: string;
+  audioModel?: string;
+  visionModel?: string;
+  // Optionally add more model options (e.g., documentModel) if needed.
+}
+
+export class OpenAiProvider extends MultiModalModel {
+  private options: IOpenaiProviderOptions;
   public openAiApiClient: plugins.openai.default;
+  public smartpdfInstance: plugins.smartpdf.SmartPdf;
 
-  constructor(openaiToken: string) {
+  constructor(optionsArg: IOpenaiProviderOptions) {
     super();
-    this.openAiToken = openaiToken; // Ensure the token is stored
+    this.options = optionsArg;
   }
 
-  async start() {
+  public async start() {
     this.openAiApiClient = new plugins.openai.default({
-      apiKey: this.openAiToken,
+      apiKey: this.options.openaiToken,
       dangerouslyAllowBrowser: true,
     });
+    this.smartpdfInstance = new plugins.smartpdf.SmartPdf();
   }
 
-  async stop() {}
+  public async stop() {}
 
-  chatStream(input: ReadableStream<string>): ReadableStream<string> {
+  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
+    // Create a TextDecoder to handle incoming chunks
     const decoder = new TextDecoder();
-    let messageHistory: { role: 'assistant' | 'user'; content: string }[] = [];
+    let buffer = '';
+    let currentMessage: {
+      role: "function" | "user" | "system" | "assistant" | "tool" | "developer";
+      content: string;
+    } | null = null;
 
-    return new ReadableStream({
-      async start(controller) {
-        const reader = input.getReader();
-        try {
-          let done, value;
-          while ((({ done, value } = await reader.read()), !done)) {
-            const userMessage = decoder.decode(value, { stream: true });
-            messageHistory.push({ role: 'user', content: userMessage });
-
-            const aiResponse = await this.chat('', userMessage, messageHistory);
-            messageHistory.push({ role: 'assistant', content: aiResponse.message });
-
-            // Directly enqueue the string response instead of encoding it first
-            controller.enqueue(aiResponse.message);
-          }
-          controller.close();
-        } catch (err) {
-          controller.error(err);
+    // Create a TransformStream to process the input
+    const transform = new TransformStream<Uint8Array, string>({
+      transform: async (chunk, controller) => {
+        buffer += decoder.decode(chunk, { stream: true });
+
+        // Try to parse complete JSON messages from the buffer
+        while (true) {
+          const newlineIndex = buffer.indexOf('\n');
+          if (newlineIndex === -1) break;
+
+          const line = buffer.slice(0, newlineIndex);
+          buffer = buffer.slice(newlineIndex + 1);
+
+          if (line.trim()) {
+            try {
+              const message = JSON.parse(line);
+              currentMessage = {
+                role: (message.role || 'user') as "function" | "user" | "system" | "assistant" | "tool" | "developer",
+                content: message.content || '',
+              };
+            } catch (e) {
+              console.error('Failed to parse message:', e);
+            }
+          }
+        }
+
+        // If we have a complete message, send it to OpenAI
+        if (currentMessage) {
+          const messageToSend = { role: "user" as const, content: currentMessage.content };
+          const chatModel = this.options.chatModel ?? 'o3-mini';
+          const requestParams: any = {
+            model: chatModel,
+            messages: [messageToSend],
+            stream: true,
+          };
+          // Temperature is omitted since the model does not support it.
+          const stream = await this.openAiApiClient.chat.completions.create(requestParams);
+          // Explicitly cast the stream as an async iterable to satisfy TypeScript.
+          const streamAsyncIterable = stream as unknown as AsyncIterableIterator<any>;
+          // Process each chunk from OpenAI
+          for await (const chunk of streamAsyncIterable) {
+            const content = chunk.choices[0]?.delta?.content;
+            if (content) {
+              controller.enqueue(content);
+            }
+          }
+          currentMessage = null;
         }
       },
+
+      flush(controller) {
+        if (buffer) {
+          try {
+            const message = JSON.parse(buffer);
+            controller.enqueue(message.content || '');
+          } catch (e) {
+            console.error('Failed to parse remaining buffer:', e);
+          }
+        }
+      }
     });
+
+    // Connect the input to our transform stream
+    return input.pipeThrough(transform);
   }
 
   // Implementing the synchronous chat interaction
-  public async chat(
-    systemMessage: string,
-    userMessage: string,
-    messageHistory: {
-      role: 'assistant' | 'user';
-      content: string;
-    }[]
-  ) {
-    const result = await this.openAiApiClient.chat.completions.create({
-      model: 'gpt-4-turbo-preview',
+  public async chat(optionsArg: {
+    systemMessage: string;
+    userMessage: string;
+    messageHistory: {
+      role: 'assistant' | 'user';
+      content: string;
+    }[];
+  }) {
+    const chatModel = this.options.chatModel ?? 'o3-mini';
+    const requestParams: any = {
+      model: chatModel,
       messages: [
-        { role: 'system', content: systemMessage },
-        ...messageHistory,
-        { role: 'user', content: userMessage },
+        { role: 'system', content: optionsArg.systemMessage },
+        ...optionsArg.messageHistory,
+        { role: 'user', content: optionsArg.userMessage },
       ],
-    });
+    };
+    // Temperature parameter removed to avoid unsupported error.
+    const result = await this.openAiApiClient.chat.completions.create(requestParams);
+    return {
+      role: result.choices[0].message.role as 'assistant',
+      message: result.choices[0].message.content,
+    };
+  }
+
+  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
+    const done = plugins.smartpromise.defer<NodeJS.ReadableStream>();
+    const result = await this.openAiApiClient.audio.speech.create({
+      model: this.options.audioModel ?? 'tts-1-hd',
+      input: optionsArg.message,
+      voice: 'nova',
+      response_format: 'mp3',
+      speed: 1,
+    });
+    const stream = result.body;
+    done.resolve(stream);
+    return done.promise;
+  }
+
+  public async document(optionsArg: {
+    systemMessage: string;
+    userMessage: string;
+    pdfDocuments: Uint8Array[];
+    messageHistory: {
+      role: 'assistant' | 'user';
+      content: any;
+    }[];
+  }) {
+    let pdfDocumentImageBytesArray: Uint8Array[] = [];
+
+    // Convert each PDF into one or more image byte arrays.
+    const smartpdfInstance = new plugins.smartpdf.SmartPdf();
+    await smartpdfInstance.start();
+    for (const pdfDocument of optionsArg.pdfDocuments) {
+      const documentImageArray = await smartpdfInstance.convertPDFToPngBytes(pdfDocument);
+      pdfDocumentImageBytesArray = pdfDocumentImageBytesArray.concat(documentImageArray);
+    }
+    await smartpdfInstance.stop();
+
+    console.log(`image smartfile array`);
+    console.log(pdfDocumentImageBytesArray.map((smartfile) => smartfile.length));
+
+    // Filter out any empty buffers to avoid sending invalid image URLs.
+    const validImageBytesArray = pdfDocumentImageBytesArray.filter(imageBytes => imageBytes && imageBytes.length > 0);
+    const imageAttachments = validImageBytesArray.map(imageBytes => ({
+      type: 'image_url',
+      image_url: {
+        url: 'data:image/png;base64,' + Buffer.from(imageBytes).toString('base64'),
+      },
+    }));
+
+    const chatModel = this.options.chatModel ?? 'o4-mini';
+    const requestParams: any = {
+      model: chatModel,
+      messages: [
+        { role: 'system', content: optionsArg.systemMessage },
+        ...optionsArg.messageHistory,
+        {
+          role: 'user',
+          content: [
+            { type: 'text', text: optionsArg.userMessage },
+            ...imageAttachments,
+          ],
+        },
+      ],
+    };
+    // Temperature parameter removed.
+    const result = await this.openAiApiClient.chat.completions.create(requestParams);
     return {
       message: result.choices[0].message,
     };
   }
 
-  public async audio(messageArg: string) {
-    const done = plugins.smartpromise.defer();
-    const result = await this.openAiApiClient.audio.speech.create({
-      model: 'tts-1-hd',
-      input: messageArg,
-      voice: 'nova',
-      response_format: 'mp3',
-      speed: 1,
-    });
-    const stream = result.body.pipe(plugins.smartfile.fsStream.createWriteStream(plugins.path.join(paths.nogitDir, 'output.mp3')));
-    stream.on('finish', () => {
-      done.resolve();
-    });
-    return done.promise;
-  }
+  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
+    const visionModel = this.options.visionModel ?? 'o4-mini';
+    const requestParams: any = {
+      model: visionModel,
+      messages: [
+        {
+          role: 'user',
+          content: [
+            { type: 'text', text: optionsArg.prompt },
+            {
+              type: 'image_url',
+              image_url: {
+                url: `data:image/jpeg;base64,${optionsArg.image.toString('base64')}`
+              }
+            }
+          ]
+        }
+      ],
+      max_tokens: 300
+    };
+    const result = await this.openAiApiClient.chat.completions.create(requestParams);
+    return result.choices[0].message.content || '';
+  }
 }
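A notable behavioral change in this diff: `audio()` no longer writes `output.mp3` into the nogit directory itself but resolves with the raw response body, leaving persistence to the caller. A minimal sketch:

```typescript
import * as fs from 'fs';
import { OpenAiProvider } from './provider.openai.js';

const openai = new OpenAiProvider({ openaiToken: process.env.OPENAI_TOKEN! });
await openai.start();

// Resolves to a NodeJS.ReadableStream of mp3 bytes (model tts-1-hd, voice 'nova')
const speech = await openai.audio({ message: 'Hello from SmartAi!' });
speech.pipe(fs.createWriteStream('./hello.mp3'));
```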
171 ts/provider.perplexity.ts Normal file
@@ -0,0 +1,171 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';

export interface IPerplexityProviderOptions {
  perplexityToken: string;
}

export class PerplexityProvider extends MultiModalModel {
  private options: IPerplexityProviderOptions;

  constructor(optionsArg: IPerplexityProviderOptions) {
    super();
    this.options = optionsArg;
  }

  async start() {
    // Initialize any necessary clients or resources
  }

  async stop() {}

  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    // Create a TextDecoder to handle incoming chunks
    const decoder = new TextDecoder();
    let buffer = '';
    let currentMessage: { role: string; content: string; } | null = null;

    // Create a TransformStream to process the input
    // (arrow function keeps `this` bound to the provider instance)
    const transform = new TransformStream<Uint8Array, string>({
      transform: async (chunk, controller) => {
        buffer += decoder.decode(chunk, { stream: true });

        // Try to parse complete JSON messages from the buffer
        while (true) {
          const newlineIndex = buffer.indexOf('\n');
          if (newlineIndex === -1) break;

          const line = buffer.slice(0, newlineIndex);
          buffer = buffer.slice(newlineIndex + 1);

          if (line.trim()) {
            try {
              const message = JSON.parse(line);
              currentMessage = {
                role: message.role || 'user',
                content: message.content || '',
              };
            } catch (e) {
              console.error('Failed to parse message:', e);
            }
          }
        }

        // If we have a complete message, send it to Perplexity
        if (currentMessage) {
          const response = await fetch('https://api.perplexity.ai/chat/completions', {
            method: 'POST',
            headers: {
              'Authorization': `Bearer ${this.options.perplexityToken}`,
              'Content-Type': 'application/json',
            },
            body: JSON.stringify({
              model: 'mixtral-8x7b-instruct',
              messages: [{ role: currentMessage.role, content: currentMessage.content }],
              stream: true,
            }),
          });

          // Process each chunk from Perplexity
          const reader = response.body?.getReader();
          if (reader) {
            try {
              while (true) {
                const { done, value } = await reader.read();
                if (done) break;

                const chunk = new TextDecoder().decode(value);
                const lines = chunk.split('\n');

                for (const line of lines) {
                  if (line.startsWith('data: ')) {
                    const data = line.slice(6);
                    if (data === '[DONE]') break;

                    try {
                      const parsed = JSON.parse(data);
                      const content = parsed.choices[0]?.delta?.content;
                      if (content) {
                        controller.enqueue(content);
                      }
                    } catch (e) {
                      console.error('Failed to parse SSE data:', e);
                    }
                  }
                }
              }
            } finally {
              reader.releaseLock();
            }
          }

          currentMessage = null;
        }
      },

      flush(controller) {
        if (buffer) {
          try {
            const message = JSON.parse(buffer);
            controller.enqueue(message.content || '');
          } catch (e) {
            console.error('Failed to parse remaining buffer:', e);
          }
        }
      }
    });

    // Connect the input to our transform stream
    return input.pipeThrough(transform);
  }

  // Implementing the synchronous chat interaction
  public async chat(optionsArg: ChatOptions): Promise<ChatResponse> {
    // Make API call to Perplexity
    const response = await fetch('https://api.perplexity.ai/chat/completions', {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${this.options.perplexityToken}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: 'mixtral-8x7b-instruct', // Using Mixtral model
        messages: [
          { role: 'system', content: optionsArg.systemMessage },
          ...optionsArg.messageHistory,
          { role: 'user', content: optionsArg.userMessage }
        ],
      }),
    });

    if (!response.ok) {
      throw new Error(`Perplexity API error: ${response.statusText}`);
    }

    const result = await response.json();

    return {
      role: 'assistant' as const,
      message: result.choices[0].message.content,
    };
  }

  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    throw new Error('Audio generation is not supported by Perplexity.');
  }

  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    throw new Error('Vision tasks are not supported by Perplexity.');
  }

  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: ChatMessage[];
  }): Promise<{ message: any }> {
    throw new Error('Document processing is not supported by Perplexity.');
  }
}
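Every `chatStream()` implementation in this changeset shares the same input contract: the incoming `ReadableStream<Uint8Array>` must carry newline-delimited JSON objects with `role` and `content` fields, and the output stream emits plain text deltas. A minimal sketch of driving that contract through the Perplexity provider (the cast on the output stream is only needed where the TypeScript lib does not yet type web streams as async iterables):

```typescript
import { PerplexityProvider } from './provider.perplexity.js';

const perplexity = new PerplexityProvider({ perplexityToken: process.env.PERPLEXITY_TOKEN! });
await perplexity.start();

// The transform only fires once a complete `{...}\n` line has arrived.
const encoder = new TextEncoder();
const input = new ReadableStream<Uint8Array>({
  start(controller) {
    controller.enqueue(encoder.encode(JSON.stringify({ role: 'user', content: 'Hello!' }) + '\n'));
    controller.close();
  },
});

const output = await perplexity.chatStream(input);
for await (const delta of output as unknown as AsyncIterable<string>) {
  process.stdout.write(delta);
}
```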
183 ts/provider.xai.ts Normal file
@@ -0,0 +1,183 @@
import * as plugins from './plugins.js';
import * as paths from './paths.js';
import { MultiModalModel } from './abstract.classes.multimodal.js';
import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';
import type { ChatCompletionMessageParam } from 'openai/resources/chat/completions';

export interface IXAIProviderOptions {
  xaiToken: string;
}

export class XAIProvider extends MultiModalModel {
  private options: IXAIProviderOptions;
  public openAiApiClient: plugins.openai.default;
  public smartpdfInstance: plugins.smartpdf.SmartPdf;

  constructor(optionsArg: IXAIProviderOptions) {
    super();
    this.options = optionsArg;
  }

  public async start() {
    this.openAiApiClient = new plugins.openai.default({
      apiKey: this.options.xaiToken,
      baseURL: 'https://api.x.ai/v1',
    });
    this.smartpdfInstance = new plugins.smartpdf.SmartPdf();
  }

  public async stop() {}

  public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
    // Create a TextDecoder to handle incoming chunks
    const decoder = new TextDecoder();
    let buffer = '';
    let currentMessage: { role: string; content: string; } | null = null;

    // Create a TransformStream to process the input
    // (arrow function keeps `this` bound to the provider instance)
    const transform = new TransformStream<Uint8Array, string>({
      transform: async (chunk, controller) => {
        buffer += decoder.decode(chunk, { stream: true });

        // Try to parse complete JSON messages from the buffer
        while (true) {
          const newlineIndex = buffer.indexOf('\n');
          if (newlineIndex === -1) break;

          const line = buffer.slice(0, newlineIndex);
          buffer = buffer.slice(newlineIndex + 1);

          if (line.trim()) {
            try {
              const message = JSON.parse(line);
              currentMessage = {
                role: message.role || 'user',
                content: message.content || '',
              };
            } catch (e) {
              console.error('Failed to parse message:', e);
            }
          }
        }

        // If we have a complete message, send it to X.AI
        if (currentMessage) {
          const stream = await this.openAiApiClient.chat.completions.create({
            model: 'grok-2-latest',
            messages: [{ role: currentMessage.role, content: currentMessage.content }],
            stream: true,
          });

          // Process each chunk from X.AI
          for await (const chunk of stream) {
            const content = chunk.choices[0]?.delta?.content;
            if (content) {
              controller.enqueue(content);
            }
          }

          currentMessage = null;
        }
      },

      flush(controller) {
        if (buffer) {
          try {
            const message = JSON.parse(buffer);
            controller.enqueue(message.content || '');
          } catch (e) {
            console.error('Failed to parse remaining buffer:', e);
          }
        }
      }
    });

    // Connect the input to our transform stream
    return input.pipeThrough(transform);
  }

  public async chat(optionsArg: {
    systemMessage: string;
    userMessage: string;
    messageHistory: { role: string; content: string; }[];
  }): Promise<{ role: 'assistant'; message: string; }> {
    // Prepare messages array with system message, history, and user message
    const messages: ChatCompletionMessageParam[] = [
      { role: 'system', content: optionsArg.systemMessage },
      ...optionsArg.messageHistory.map(msg => ({
        role: msg.role as 'system' | 'user' | 'assistant',
        content: msg.content
      })),
      { role: 'user', content: optionsArg.userMessage }
    ];

    // Call X.AI's chat completion API
    const completion = await this.openAiApiClient.chat.completions.create({
      model: 'grok-2-latest',
      messages: messages,
      stream: false,
    });

    // Return the assistant's response
    return {
      role: 'assistant',
      message: completion.choices[0]?.message?.content || ''
    };
  }

  public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
    throw new Error('Audio generation is not supported by X.AI');
  }

  public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
    throw new Error('Vision tasks are not supported by X.AI');
  }

  public async document(optionsArg: {
    systemMessage: string;
    userMessage: string;
    pdfDocuments: Uint8Array[];
    messageHistory: { role: string; content: string; }[];
  }): Promise<{ message: any }> {
    // First convert PDF documents to images
    let pdfDocumentImageBytesArray: Uint8Array[] = [];

    for (const pdfDocument of optionsArg.pdfDocuments) {
      const documentImageArray = await this.smartpdfInstance.convertPDFToPngBytes(pdfDocument);
      pdfDocumentImageBytesArray = pdfDocumentImageBytesArray.concat(documentImageArray);
    }

    // Convert images to base64 for inclusion in the message
    const imageBase64Array = pdfDocumentImageBytesArray.map(bytes =>
      Buffer.from(bytes).toString('base64')
    );

    // Combine document images into the user message
    const enhancedUserMessage = `
${optionsArg.userMessage}

Document contents (as images):
${imageBase64Array.map((img, i) => `Image ${i + 1}: <image data>`).join('\n')}
`;

    // Use chat completion to analyze the documents
    const messages: ChatCompletionMessageParam[] = [
      { role: 'system', content: optionsArg.systemMessage },
      ...optionsArg.messageHistory.map(msg => ({
        role: msg.role as 'system' | 'user' | 'assistant',
        content: msg.content
      })),
      { role: 'user', content: enhancedUserMessage }
    ];

    const completion = await this.openAiApiClient.chat.completions.create({
      model: 'grok-2-latest',
      messages: messages,
      stream: false,
    });

    return {
      message: completion.choices[0]?.message?.content || ''
    };
  }
}
@@ -1,53 +0,0 @@
type TProcessFunction = (input: string) => Promise<string>;

export interface ISmartAiOptions {
  processFunction: TProcessFunction;
}

export class SmartAi {
  private processFunction: TProcessFunction;
  private inputStreamWriter: WritableStreamDefaultWriter<string> | null = null;
  private outputStreamController: ReadableStreamDefaultController<string> | null = null;

  constructor(options: ISmartAiOptions) {
    this.processFunction = options.processFunction;
  }

  private setupOutputStream(): ReadableStream<string> {
    return new ReadableStream<string>({
      start: (controller) => {
        this.outputStreamController = controller;
      }
    });
  }

  private setupInputStream(): WritableStream<string> {
    return new WritableStream<string>({
      write: async (chunk) => {
        const processedData = await this.processFunction(chunk);
        if (this.outputStreamController) {
          this.outputStreamController.enqueue(processedData);
        }
      },
      close: () => {
        this.outputStreamController?.close();
      },
      abort: (err) => {
        console.error('Stream aborted', err);
        this.outputStreamController?.error(err);
      }
    });
  }

  public getInputStreamWriter(): WritableStreamDefaultWriter<string> {
    if (!this.inputStreamWriter) {
      const inputStream = this.setupInputStream();
      this.inputStreamWriter = inputStream.getWriter();
    }
    return this.inputStreamWriter;
  }

  public getOutputStream(): ReadableStream<string> {
    return this.setupOutputStream();
  }
}